2 changes: 2 additions & 0 deletions .gitignore
@@ -170,3 +170,5 @@ grafana/runtime-data/*

prometheus/data/*
!prometheus/data/.gitkeep

*.swp
2 changes: 1 addition & 1 deletion docker/api.Dockerfile
@@ -14,7 +14,7 @@ COPY --from=sev /app/sev/libsevguest.h /app/nilai-api/src/nilai_api/sev/libsevgu
WORKDIR /app/nilai-api/

RUN apt-get update && \
-    apt-get install build-essential curl -y && \
+    apt-get install build-essential curl git -y && \
    apt-get clean && \
    apt-get autoremove && \
    rm -rf /var/lib/apt/lists/* && \
58 changes: 58 additions & 0 deletions docker/compose/docker-compose.llama-1b-gpu.yml
@@ -0,0 +1,58 @@
services:
  llama_32_tool_gpu:
    build:
      context: .
      dockerfile: docker/vllm.Dockerfile
    deploy:
      resources:
        reservations:
          devices:
            - capabilities:
                - gpu
              driver: nvidia
    ipc: host
    depends_on:
      etcd:
        condition: service_healthy
      watt_tool_gpu:
        condition: service_healthy
    command:
      - --model
      - meta-llama/Llama-3.2-1B-Instruct
      - --max-model-len
      - "10000"
      - --device
      - cuda
      - --gpu-memory-utilization
      - "0.45"
      - --enable-auto-tool-choice
      - --tool-call-parser
      - llama3_json
      - --chat-template
      - /tmp/tool_chat_template.jinja
    env_file:
      - .env
    environment:
      SVC_HOST: "llama_32_tool_gpu"
      SVC_PORT: "8000"
      ETCD_HOST: "etcd"
      ETCD_PORT: "2379"
      TOOL_SUPPORT: true
      MODEL_ROLE: "reasoning"
    networks:
      - backend_net
    volumes:
      - type: volume
        source: hugging_face_models
        target: /root/.cache/huggingface
        volume: {}
      - type: bind
        source: $PWD/docker/compose/tool_chat_template_llama3.2_json.jinja
        target: /tmp/tool_chat_template.jinja
        bind:
          create_host_path: true

volumes:
  hugging_face_models:

networks:
  backend_net:
54 changes: 54 additions & 0 deletions docker/compose/docker-compose.npw.deepseek14.yml
Member:
Does this differ significantly in behaviour from the original DeepSeek 14B docker compose that we had before? If the behaviour is better (which I assume, given your improvements), would it make sense to merge both into a single DeepSeek 14B file? That would avoid having to maintain multiple source files going forward.
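One possible shape for such a merge, sketched with a Compose extension field and a YAML merge key — the x-vllm-common name and the exact split of shared vs. per-variant fields are illustrative, not part of this PR:

x-vllm-common: &vllm-common        # hypothetical shared block, not in this PR
  build:
    context: .
    dockerfile: docker/vllm.Dockerfile
  ipc: host
  env_file:
    - .env
  networks:
    - backend_net

services:
  deepseek_14b_gpu:
    <<: *vllm-common               # inherit the shared settings
    command:                       # only the per-variant flags live here
      - --model
      - deepseek-ai/DeepSeek-R1-Distill-Qwen-14B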

Author:
I committed the new one so that we can discuss the differences and decide which points are worth integrating.

  1. I noticed that starting two models simultaneously on the H100 often failed, so I added the depends_on on the worker model (see the sketch at the end of this comment). I propose we always have a worker model available, but it doesn't need to live on the same server...
  2. I didn't include tensor-parallel-size; it's left at its default value.
  3. I added the "reasoning" MODEL_ROLE flag.

wdyt?
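For reference, a trimmed sketch of the startup-ordering pattern from points 1 and 3, as it appears in the compose files below (abridged to the relevant keys):

services:
  deepseek_14b_gpu:                 # reasoning model
    depends_on:
      watt_tool_gpu:
        condition: service_healthy  # start only once the worker is healthy
    environment:
      MODEL_ROLE: "reasoning"

  watt_tool_gpu:                    # worker model
    environment:
      MODEL_ROLE: "worker"
    healthcheck:                    # gates the reasoning model's startup
      test: ["CMD", "curl", "-f", "http://localhost:8000/health"]
      interval: 30s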

Member (@jcabrero, Mar 11, 2025):
I don't really know. I have a discussion coming up with ecosystem and product to decide which models we include by default. I also wonder whether the worker model preserves privacy, and whether that's something we are willing to live with.

@@ -0,0 +1,54 @@
services:
  deepseek_14b_gpu:
    build:
      context: .
      dockerfile: docker/vllm.Dockerfile
    deploy:
      resources:
        reservations:
          devices:
            - capabilities:
                - gpu
              driver: nvidia
    ipc: host
    depends_on:
      etcd:
        condition: service_healthy
      watt_tool_gpu:
        condition: service_healthy
    command:
      - --model
      - deepseek-ai/DeepSeek-R1-Distill-Qwen-14B
      - --max-model-len
      - "10000"
      - --device
      - cuda
      - --gpu-memory-utilization
      - "0.45"
    env_file:
      - .env
    environment:
      SVC_HOST: "deepseek_14b_gpu"
      SVC_PORT: "8000"
      ETCD_HOST: "etcd"
      ETCD_PORT: "2379"
      TOOL_SUPPORT: true
      MODEL_ROLE: "reasoning"
    networks:
      - backend_net
    volumes:
      - type: volume
        source: hugging_face_models
        target: /root/.cache/huggingface
        volume: {}
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8000/health"]
      interval: 30s
      retries: 3
      start_period: 60s
      timeout: 10s

volumes:
  hugging_face_models:

networks:
  backend_net:
62 changes: 62 additions & 0 deletions docker/compose/docker-compose.watt-8b-gpu.yml
@@ -0,0 +1,62 @@
services:
  watt_tool_gpu:
    build:
      context: .
      dockerfile: docker/vllm.Dockerfile
    deploy:
      resources:
        reservations:
          devices:
            - capabilities:
                - gpu
              driver: nvidia
    ipc: host
    depends_on:
      etcd:
        condition: service_healthy
    command:
      - --model
      - watt-ai/watt-tool-8B
      - --max-model-len
      - "10000"
      - --device
      - cuda
      - --gpu-memory-utilization
      - "0.45"
      - --enable-auto-tool-choice
      - --tool-call-parser
      - llama3_json
      - --chat-template
      - /tmp/tool_chat_template.jinja
    env_file:
      - .env
    environment:
      SVC_HOST: "watt_tool_gpu"
      SVC_PORT: "8000"
      ETCD_HOST: "etcd"
      ETCD_PORT: "2379"
      TOOL_SUPPORT: true
      MODEL_ROLE: "worker"
    networks:
      - backend_net
    volumes:
      - type: volume
        source: hugging_face_models
        target: /root/.cache/huggingface
        volume: {}
      - type: bind
        source: $PWD/docker/compose/tool_chat_template_llama3.1_json.jinja
        target: /tmp/tool_chat_template.jinja
        bind:
          create_host_path: true
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8000/health"]
      interval: 30s
      retries: 3
      start_period: 60s
      timeout: 10s

volumes:
  hugging_face_models:

networks:
  backend_net:
120 changes: 120 additions & 0 deletions docker/compose/tool_chat_template_llama3.1_json.jinja
@@ -0,0 +1,120 @@
{{- bos_token }}
{%- if custom_tools is defined %}
    {%- set tools = custom_tools %}
{%- endif %}
{%- if not tools_in_user_message is defined %}
    {#- Llama 3.1 doesn't pass all tests if the tools are in the system prompt #}
    {%- set tools_in_user_message = true %}
{%- endif %}
{%- if not date_string is defined %}
    {%- if strftime_now is defined %}
        {%- set date_string = strftime_now("%d %b %Y") %}
    {%- else %}
        {%- set date_string = "26 Jul 2024" %}
    {%- endif %}
{%- endif %}
{%- if not tools is defined %}
    {%- set tools = none %}
{%- endif %}

{#- This block extracts the system message, so we can slot it into the right place. #}
{%- if messages[0]['role'] == 'system' %}
    {%- if messages[0]['content'] is string %}
        {%- set system_message = messages[0]['content']|trim %}
    {%- else %}
        {%- set system_message = messages[0]['content'][0]['text']|trim %}
    {%- endif %}
    {%- set messages = messages[1:] %}
{%- else %}
    {%- if tools is not none %}
        {%- set system_message = "You are a helpful assistant with tool calling capabilities. Only reply with a tool call if the function exists in the library provided by the user. If it doesn't exist, just reply directly in natural language. When you receive a tool call response, use the output to format an answer to the original user question." %}
    {%- else %}
        {%- set system_message = "" %}
    {%- endif %}
{%- endif %}

{#- System message #}
{{- "<|start_header_id|>system<|end_header_id|>\n\n" }}
{%- if tools is not none %}
    {{- "Environment: ipython\n" }}
{%- endif %}
{{- "Cutting Knowledge Date: December 2023\n" }}
{{- "Today Date: " + date_string + "\n\n" }}
{%- if tools is not none and not tools_in_user_message %}
    {{- "You have access to the following functions. To call a function, please respond with JSON for a function call. " }}
    {{- 'Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}. ' }}
    {{- "Do not use variables.\n\n" }}
    {%- for t in tools %}
        {{- t | tojson(indent=4) }}
        {{- "\n\n" }}
    {%- endfor %}
{%- endif %}
{{- system_message }}
{{- "<|eot_id|>" }}

{#- Custom tools are passed in a user message with some extra guidance #}
{%- if tools_in_user_message and not tools is none %}
    {#- Extract the first user message so we can plug it in here #}
    {%- if messages | length != 0 %}
        {%- if messages[0]['content'] is string %}
            {%- set first_user_message = messages[0]['content']|trim %}
        {%- else %}
            {%- set first_user_message = messages[0]['content'] | selectattr('type', 'equalto', 'text') | map(attribute='text') | map('trim') | join('\n') %}
        {%- endif %}
        {%- set messages = messages[1:] %}
    {%- else %}
        {{- raise_exception("Cannot put tools in the first user message when there's no first user message!") }}
    {%- endif %}
    {{- '<|start_header_id|>user<|end_header_id|>\n\n' -}}
    {{- "Given the following functions, please respond with a JSON for a function call " }}
    {{- "with its proper arguments that best answers the given prompt.\n\n" }}
    {{- 'Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}. ' }}
    {{- "Do not use variables.\n\n" }}
    {%- for t in tools %}
        {{- t | tojson(indent=4) }}
        {{- "\n\n" }}
    {%- endfor %}
    {{- first_user_message + "<|eot_id|>"}}
{%- endif %}

{%- for message in messages %}
    {%- if not (message.role == 'ipython' or message.role == 'tool' or 'tool_calls' in message) %}
        {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n' }}
        {%- if message['content'] is string %}
            {{- message['content'] | trim}}
        {%- else %}
            {%- for content in message['content'] %}
                {%- if content['type'] == 'text' %}
                    {{- content['text'] | trim }}
                {%- endif %}
            {%- endfor %}
        {%- endif %}
        {{- '<|eot_id|>' }}
    {%- elif 'tool_calls' in message %}
        {%- if not message.tool_calls|length == 1 %}
            {{- raise_exception("This model only supports single tool-calls at once!") }}
        {%- endif %}
        {%- set tool_call = message.tool_calls[0].function %}
        {{- '<|start_header_id|>assistant<|end_header_id|>\n\n' -}}
        {{- '{"name": "' + tool_call.name + '", ' }}
        {{- '"parameters": ' }}
        {{- tool_call.arguments | tojson }}
        {{- "}" }}
        {{- "<|eot_id|>" }}
    {%- elif message.role == "tool" or message.role == "ipython" %}
        {{- "<|start_header_id|>ipython<|end_header_id|>\n\n" }}
        {%- if message.content is string %}
            {{- { "output": message.content } | tojson }}
        {%- else %}
            {%- for content in message['content'] %}
                {%- if content['type'] == 'text' %}
                    {{- { "output": content['text'] } | tojson }}
                {%- endif %}
            {%- endfor %}
        {%- endif %}
        {{- "<|eot_id|>" }}
    {%- endif %}
{%- endfor %}
{%- if add_generation_prompt %}
    {{- '<|start_header_id|>assistant<|end_header_id|>\n\n' }}
{%- endif %}