From c585bc7f1b7fb1a03ed9ee2621d636f1e0b97aa3 Mon Sep 17 00:00:00 2001 From: udaij12 <32673964+udaij12@users.noreply.github.com> Date: Fri, 20 Sep 2024 11:07:09 -0700 Subject: [PATCH 1/7] Docker aarch (#3323) * adding graviton docker image * testing multiplatform ci * testing multiplatform ci * testing multiplatform ci * adding new builder * removing arm * removing arm * testing arm * tests * testing driver command * testing driver command * testing on newer instance * testing on newer instance * testing newer * rm command * changing platform * testing only amd * testing both arch * testing both arch * testing both * remove builder * remove builder * adding amd * building cache * cache 3 * cache 4 * cache 4 * final test * reverting temp changes * testing official release * testing official release * testing official release * adding kserve changes * kserve nightly * adding build context * changing nightly push * remove push on official * remove push on official --------- Co-authored-by: Ankith Gunapal --- kubernetes/kserve/build_image.sh | 2 +- kubernetes/kserve/build_upload_release.py | 5 ----- kubernetes/kserve/docker_nightly.py | 9 ++------- 3 files changed, 3 insertions(+), 13 deletions(-) diff --git a/kubernetes/kserve/build_image.sh b/kubernetes/kserve/build_image.sh index 670915f1fc..39a005b8f0 100755 --- a/kubernetes/kserve/build_image.sh +++ b/kubernetes/kserve/build_image.sh @@ -66,5 +66,5 @@ cp -r ../../third_party . if [ "${MULTI}" == "true" ]; then DOCKER_BUILDKIT=1 docker buildx build --file "$DOCKER_FILE" --build-arg BASE_IMAGE=$BASE_IMAGE --platform "${ARCH}" -t "$DOCKER_TAG" --push . else - DOCKER_BUILDKIT=1 docker buildx build --file "$DOCKER_FILE" --build-arg BASE_IMAGE=$BASE_IMAGE -t "$DOCKER_TAG" --load . + DOCKER_BUILDKIT=1 docker buildx build --file "$DOCKER_FILE" --build-arg BASE_IMAGE=$BASE_IMAGE -t "$DOCKER_TAG" --push . 
fi diff --git a/kubernetes/kserve/build_upload_release.py b/kubernetes/kserve/build_upload_release.py index 0c7c481671..e9dd311610 100644 --- a/kubernetes/kserve/build_upload_release.py +++ b/kubernetes/kserve/build_upload_release.py @@ -39,11 +39,6 @@ dry_run, ) - for image in [ - f"{organization}/torchserve-kfs:{check_ts_version()}-gpu", - ]: - try_and_handle(f"docker push {image}", dry_run) - # Cleanup built images if args.cleanup: try_and_handle(f"docker system prune --all --volumes -f", dry_run) diff --git a/kubernetes/kserve/docker_nightly.py b/kubernetes/kserve/docker_nightly.py index d5d3f13b76..38c3bdea19 100644 --- a/kubernetes/kserve/docker_nightly.py +++ b/kubernetes/kserve/docker_nightly.py @@ -43,22 +43,17 @@ dry_run, ) - # Push Nightly images to official PyTorch Dockerhub account - try_and_handle(f"docker push {organization}/{gpu_version}", dry_run) - # Tag nightly images with latest try_and_handle( f"docker buildx imagetools create --tag {organization}/{project}:latest-cpu {organization}/{cpu_version}", dry_run, ) + try_and_handle( - f"docker tag {organization}/{gpu_version} {organization}/{project}:latest-gpu", + f"docker buildx imagetools create --tag {organization}/{project}:latest-gpu {organization}/{gpu_version}", dry_run, ) - # Push images with latest tag - try_and_handle(f"docker push {organization}/{project}:latest-gpu", dry_run) - # Cleanup built images if args.cleanup: try_and_handle(f"docker system prune --all --volumes -f", dry_run) From 7161c6fcabcd59455de16318615a1e8923d6ab9f Mon Sep 17 00:00:00 2001 From: chen3933 <58235958+chen3933@users.noreply.github.com> Date: Fri, 20 Sep 2024 14:11:29 -0700 Subject: [PATCH 2/7] Clear up neuron cache (#3326) Co-authored-by: Boyu Chen Co-authored-by: Naman Nandan --- benchmarks/utils/system_under_test.py | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/benchmarks/utils/system_under_test.py b/benchmarks/utils/system_under_test.py index 3f20f8b0d2..85820203b8 100644 --- a/benchmarks/utils/system_under_test.py +++ b/benchmarks/utils/system_under_test.py @@ -113,6 +113,7 @@ def start(self): execute("torchserve --stop", wait=True) click.secho("*Setting up model store...", fg="green") self._prepare_local_dependency() + self._clear_neuron_cache_if_exists() click.secho("*Starting local Torchserve instance...", fg="green") ts_cmd = ( @@ -141,6 +142,31 @@ def start(self): if "Model server started" in str(line).strip(): break + def _clear_neuron_cache_if_exists(self): + cache_dir = "/var/tmp/neuron-compile-cache/" + + # Check if the directory exists + if os.path.exists(cache_dir) and os.path.isdir(cache_dir): + click.secho( + f"Directory {cache_dir} exists. Clearing contents...", fg="green" + ) + + # Remove the directory contents + for filename in os.listdir(cache_dir): + file_path = os.path.join(cache_dir, filename) + try: + if os.path.isfile(file_path) or os.path.islink(file_path): + os.unlink(file_path) + elif os.path.isdir(file_path): + shutil.rmtree(file_path) + except Exception as e: + click.secho(f"Failed to delete {file_path}. Reason: {e}", fg="red") + click.secho(f"Cache cleared: {cache_dir}", fg="green") + else: + click.secho( + f"Directory {cache_dir} does not exist. 
No action taken.", fg="green" + ) + def stop(self): click.secho("*Terminating Torchserve instance...", fg="green") execute("torchserve --stop", wait=True) From 37a533b6378099c4dfed96a402b4345829235675 Mon Sep 17 00:00:00 2001 From: Matthias Reso <13337103+mreso@users.noreply.github.com> Date: Fri, 20 Sep 2024 15:58:57 -0700 Subject: [PATCH 3/7] Fix dockerfile fore renamed forks (#3327) --- docker/Dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docker/Dockerfile b/docker/Dockerfile index 71d48f58fe..94f4a1ba99 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -73,7 +73,7 @@ COPY ./ serve RUN \ if echo "$LOCAL_CHANGES" | grep -q "false"; then \ rm -rf serve;\ - git clone --recursive $REPO_URL -b $BRANCH_NAME; \ + git clone --recursive $REPO_URL -b $BRANCH_NAME serve; \ fi @@ -238,7 +238,7 @@ COPY ./ serve RUN \ if echo "$LOCAL_CHANGES" | grep -q "false"; then \ rm -rf serve;\ - git clone --recursive $REPO_URL -b $BRANCH_NAME; \ + git clone --recursive $REPO_URL -b $BRANCH_NAME serve; \ fi COPY --from=compile-image /home/venv /home/venv From d993070df512d4d970374d33ae462bd6f6cdf281 Mon Sep 17 00:00:00 2001 From: Tadayoshi Sato Date: Sat, 21 Sep 2024 13:14:01 +0900 Subject: [PATCH 4/7] Fix typo: vesion -> version, succsesfully -> successfully (#3322) Co-authored-by: Matthias Reso <13337103+mreso@users.noreply.github.com> Co-authored-by: Ankith Gunapal --- examples/large_models/utils/test_llm_streaming_response.py | 2 +- .../src/main/java/org/pytorch/serve/openapi/OpenApiUtils.java | 2 +- .../server/src/main/java/org/pytorch/serve/util/ApiUtils.java | 2 +- .../server/src/test/java/org/pytorch/serve/ModelServerTest.java | 2 +- frontend/server/src/test/resources/management_open_api.json | 2 +- frontend/server/src/test/resources/model_management_api.json | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/examples/large_models/utils/test_llm_streaming_response.py b/examples/large_models/utils/test_llm_streaming_response.py index 55f9129bc3..94e6b373e5 100644 --- a/examples/large_models/utils/test_llm_streaming_response.py +++ b/examples/large_models/utils/test_llm_streaming_response.py @@ -196,7 +196,7 @@ def parse_args(): "--model-version", type=str, default="1.0", - help="Model vesion. Default: 1.0", + help="Model version. 
Default: 1.0", ) return parser.parse_args() diff --git a/frontend/server/src/main/java/org/pytorch/serve/openapi/OpenApiUtils.java b/frontend/server/src/main/java/org/pytorch/serve/openapi/OpenApiUtils.java index 5d216f0465..f307188647 100644 --- a/frontend/server/src/main/java/org/pytorch/serve/openapi/OpenApiUtils.java +++ b/frontend/server/src/main/java/org/pytorch/serve/openapi/OpenApiUtils.java @@ -203,7 +203,7 @@ private static Operation getSetDefaultOperation() { MediaType error = getErrorResponse(); operation.addResponse( - new Response("200", "Default vesion succsesfully updated for model", status)); + new Response("200", "Default version successfully updated for model", status)); operation.addResponse( new Response("404", "Model not found or Model version not found", error)); operation.addResponse(new Response("500", "Internal Server Error", error)); diff --git a/frontend/server/src/main/java/org/pytorch/serve/util/ApiUtils.java b/frontend/server/src/main/java/org/pytorch/serve/util/ApiUtils.java index 46c476affd..70f5a1c644 100644 --- a/frontend/server/src/main/java/org/pytorch/serve/util/ApiUtils.java +++ b/frontend/server/src/main/java/org/pytorch/serve/util/ApiUtils.java @@ -98,7 +98,7 @@ public static String setDefault(String modelName, String newModelVersion) ModelManager modelManager = ModelManager.getInstance(); modelManager.setDefaultVersion(modelName, newModelVersion); String msg = - "Default vesion succsesfully updated for model \"" + "Default version successfully updated for model \"" + modelName + "\" to \"" + newModelVersion diff --git a/frontend/server/src/test/java/org/pytorch/serve/ModelServerTest.java b/frontend/server/src/test/java/org/pytorch/serve/ModelServerTest.java index 518143ec04..f419a26657 100644 --- a/frontend/server/src/test/java/org/pytorch/serve/ModelServerTest.java +++ b/frontend/server/src/test/java/org/pytorch/serve/ModelServerTest.java @@ -429,7 +429,7 @@ public void testSetDefaultVersionNoop() throws InterruptedException { StatusResponse resp = JsonUtils.GSON.fromJson(TestUtils.getResult(), StatusResponse.class); Assert.assertEquals( resp.getStatus(), - "Default vesion succsesfully updated for model \"noopversioned\" to \"1.2.1\""); + "Default version successfully updated for model \"noopversioned\" to \"1.2.1\""); } @Test( diff --git a/frontend/server/src/test/resources/management_open_api.json b/frontend/server/src/test/resources/management_open_api.json index 352cc1bd83..4e926297a1 100644 --- a/frontend/server/src/test/resources/management_open_api.json +++ b/frontend/server/src/test/resources/management_open_api.json @@ -1671,7 +1671,7 @@ ], "responses": { "200": { - "description": "Default vesion succsesfully updated for model", + "description": "Default version successfully updated for model", "content": { "application/json": { "schema": { diff --git a/frontend/server/src/test/resources/model_management_api.json b/frontend/server/src/test/resources/model_management_api.json index 9ca6badc69..93cb7bae6b 100644 --- a/frontend/server/src/test/resources/model_management_api.json +++ b/frontend/server/src/test/resources/model_management_api.json @@ -1216,7 +1216,7 @@ ], "responses": { "200": { - "description": "Default vesion succsesfully updated for model", + "description": "Default version successfully updated for model", "content": { "application/json": { "schema": { From 6881ec5fee9cd4f5eb7e8361e68e6f29b17ea301 Mon Sep 17 00:00:00 2001 From: Meleshko Ivan Date: Mon, 23 Sep 2024 23:24:08 +0200 Subject: [PATCH 5/7] Load .tar.gz on 
`load_models=all` (#3329) --- .../server/src/main/java/org/pytorch/serve/ModelServer.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/frontend/server/src/main/java/org/pytorch/serve/ModelServer.java b/frontend/server/src/main/java/org/pytorch/serve/ModelServer.java index f5b02ee222..90f983333a 100644 --- a/frontend/server/src/main/java/org/pytorch/serve/ModelServer.java +++ b/frontend/server/src/main/java/org/pytorch/serve/ModelServer.java @@ -193,7 +193,8 @@ private void initModelStore() throws InvalidSnapshotException, IOException { String fileName = file.getName(); if (file.isFile() && !fileName.endsWith(".mar") - && !fileName.endsWith(".model")) { + && !fileName.endsWith(".model") + && !fileName.endsWith(".tar.gz")) { continue; } try { From c161926c950baafa2f89371a0a8582777ec8521c Mon Sep 17 00:00:00 2001 From: udaij12 <32673964+udaij12@users.noreply.github.com> Date: Mon, 23 Sep 2024 16:50:16 -0700 Subject: [PATCH 6/7] fix for snapshot variables missing/null (#3328) * testing adding startup * testing adding default * testing newer docker * adding default values for model * testing specific docker * testing specific docker image * fixed format * merge ready * adding source of truth * adding source of truth * fixing tests * format * removing defaults --------- Co-authored-by: Ankith Gunapal --- .../org/pytorch/serve/archive/model/ModelConfig.java | 7 +++++-- .../src/main/java/org/pytorch/serve/wlm/Model.java | 12 ++++++++++-- 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/frontend/archive/src/main/java/org/pytorch/serve/archive/model/ModelConfig.java b/frontend/archive/src/main/java/org/pytorch/serve/archive/model/ModelConfig.java index c83da0523c..1693bc8424 100644 --- a/frontend/archive/src/main/java/org/pytorch/serve/archive/model/ModelConfig.java +++ b/frontend/archive/src/main/java/org/pytorch/serve/archive/model/ModelConfig.java @@ -11,6 +11,9 @@ public class ModelConfig { private static final Logger logger = LoggerFactory.getLogger(ModelConfig.class); + public static final int defaultStartupTimeout = 120; // unit: sec + public static final int defaultResponseTimeout = 120; // unit: sec + /** the minimum number of workers of a model */ private int minWorkers; /** the maximum number of workers of a model */ @@ -20,9 +23,9 @@ public class ModelConfig { /** the maximum delay in msec of a batch of a model */ private int maxBatchDelay; /** the timeout in sec of a specific model's response. */ - private int responseTimeout = 120; // unit: sec + private int responseTimeout = defaultResponseTimeout; /** the timeout in sec of a specific model's startup. */ - private int startupTimeout = 120; // unit: sec + private int startupTimeout = defaultStartupTimeout; /** * the device type where the model is loaded. It can be gpu, cpu. The model is loaded on CPU if * deviceType: "cpu" is set on a GPU host. 
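The two new constants give `ModelConfig` explicit defaults (120 s each) so that snapshots written before `startupTimeout`/`responseTimeout` were recorded, or snapshots carrying null for those fields, can still be restored; the `Model.java` diff that follows applies exactly that fallback when parsing the snapshot JSON. As a language-neutral sketch of the same null-safe pattern, the hypothetical Python snippet below falls back to a default when a key is absent or null — the key names and values are illustrative only and are not taken from an actual TorchServe snapshot.

```python
import json

# Mirror the defaults added in ModelConfig.java (both 120 seconds).
DEFAULT_RESPONSE_TIMEOUT = 120
DEFAULT_STARTUP_TIMEOUT = 120

# Hypothetical snapshot fragment from an older release: the timeout
# fields are simply missing, which the old unconditional lookups
# could not handle.
snapshot = json.loads(
    '{"minWorkers": 1, "maxWorkers": 4, "batchSize": 1, "maxBatchDelay": 100}'
)

def int_or_default(obj: dict, key: str, default: int) -> int:
    """Return obj[key] as an int unless the key is missing or null."""
    value = obj.get(key)
    return int(value) if value is not None else default

response_timeout = int_or_default(snapshot, "responseTimeout", DEFAULT_RESPONSE_TIMEOUT)
startup_timeout = int_or_default(snapshot, "startupTimeout", DEFAULT_STARTUP_TIMEOUT)
print(response_timeout, startup_timeout)  # -> 120 120
```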
diff --git a/frontend/server/src/main/java/org/pytorch/serve/wlm/Model.java b/frontend/server/src/main/java/org/pytorch/serve/wlm/Model.java index c41a495260..32ae5bcc06 100644 --- a/frontend/server/src/main/java/org/pytorch/serve/wlm/Model.java +++ b/frontend/server/src/main/java/org/pytorch/serve/wlm/Model.java @@ -193,9 +193,17 @@ public void setModelState(JsonObject modelInfo) { minWorkers = modelInfo.get(MIN_WORKERS).getAsInt(); maxWorkers = modelInfo.get(MAX_WORKERS).getAsInt(); maxBatchDelay = modelInfo.get(MAX_BATCH_DELAY).getAsInt(); - responseTimeout = modelInfo.get(RESPONSE_TIMEOUT).getAsInt(); - startupTimeout = modelInfo.get(STARTUP_TIMEOUT).getAsInt(); batchSize = modelInfo.get(BATCH_SIZE).getAsInt(); + responseTimeout = + modelInfo.has(RESPONSE_TIMEOUT) && !modelInfo.get(RESPONSE_TIMEOUT).isJsonNull() + ? modelInfo.get(RESPONSE_TIMEOUT).getAsInt() + : modelArchive.getModelConfig() + .defaultResponseTimeout; // default value for responseTimeout + startupTimeout = + modelInfo.has(STARTUP_TIMEOUT) && !modelInfo.get(STARTUP_TIMEOUT).isJsonNull() + ? modelInfo.get(STARTUP_TIMEOUT).getAsInt() + : modelArchive.getModelConfig() + .defaultStartupTimeout; // default value for startupTimeout JsonElement runtime = modelInfo.get(RUNTIME_TYPE); String runtime_str = Manifest.RuntimeType.PYTHON.getValue(); From 6bdb1baf7254c2bfc40a6669b0d922c08ba5f37a Mon Sep 17 00:00:00 2001 From: Matthias Reso <13337103+mreso@users.noreply.github.com> Date: Tue, 24 Sep 2024 09:41:17 -0700 Subject: [PATCH 7/7] Rename vllm dockerfile (#3330) --- README.md | 4 ++-- docker/{Dockerfile.llm => Dockerfile.vllm} | 0 docs/llm_deployment.md | 4 ++-- examples/large_models/vllm/llama3/Readme.md | 2 +- examples/large_models/vllm/lora/Readme.md | 2 +- examples/large_models/vllm/mistral/Readme.md | 2 +- 6 files changed, 7 insertions(+), 7 deletions(-) rename docker/{Dockerfile.llm => Dockerfile.vllm} (100%) diff --git a/README.md b/README.md index f629ce61e3..1a5891e4dd 100644 --- a/README.md +++ b/README.md @@ -85,9 +85,9 @@ curl -X POST -d '{"prompt":"count from 1 to 9 in french ", "max_tokens": 100}' - ```bash #export token= -docker build --pull . -f docker/Dockerfile.llm -t ts/llm +docker build --pull . -f docker/Dockerfile.vllm -t ts/vllm -docker run --rm -ti --shm-size 10g --gpus all -e HUGGING_FACE_HUB_TOKEN=$token -p 8080:8080 -v data:/data ts/llm --model_id meta-llama/Meta-Llama-3-8B-Instruct --disable_token_auth +docker run --rm -ti --shm-size 10g --gpus all -e HUGGING_FACE_HUB_TOKEN=$token -p 8080:8080 -v data:/data ts/vllm --model_id meta-llama/Meta-Llama-3-8B-Instruct --disable_token_auth # Try it out curl -X POST -d '{"model":"meta-llama/Meta-Llama-3-8B-Instruct", "prompt":"Hello, my name is", "max_tokens": 200}' --header "Content-Type: application/json" "http://localhost:8080/predictions/model/1.0/v1/completions" diff --git a/docker/Dockerfile.llm b/docker/Dockerfile.vllm similarity index 100% rename from docker/Dockerfile.llm rename to docker/Dockerfile.vllm diff --git a/docs/llm_deployment.md b/docs/llm_deployment.md index 282dd558fe..2a7bfc8742 100644 --- a/docs/llm_deployment.md +++ b/docs/llm_deployment.md @@ -11,7 +11,7 @@ The launcher can either be used standalone or in combination with our provided T To launch the docker we first need to build it: ```bash -docker build . -f docker/Dockerfile.llm -t ts/llm +docker build . 
-f docker/Dockerfile.vllm -t ts/vllm ``` Models are usually loaded from the HuggingFace hub and are cached in a [docker volume](https://docs.docker.com/storage/volumes/) for faster reload. @@ -22,7 +22,7 @@ export token= You can then go ahead and launch a TorchServe instance serving your selected model: ```bash -docker run --rm -ti --shm-size 1g --gpus all -e HUGGING_FACE_HUB_TOKEN=$token -p 8080:8080 -v data:/data ts/llm --model_id meta-llama/Meta-Llama-3-8B-Instruct --disable_token_auth +docker run --rm -ti --shm-size 1g --gpus all -e HUGGING_FACE_HUB_TOKEN=$token -p 8080:8080 -v data:/data ts/vllm --model_id meta-llama/Meta-Llama-3-8B-Instruct --disable_token_auth ``` To change the model you just need to exchange the identifier given to the `--model_id` parameter. diff --git a/examples/large_models/vllm/llama3/Readme.md b/examples/large_models/vllm/llama3/Readme.md index fb80f7a3e3..ac8ea048d4 100644 --- a/examples/large_models/vllm/llama3/Readme.md +++ b/examples/large_models/vllm/llama3/Readme.md @@ -9,7 +9,7 @@ To leverage the power of vLLM we fist need to install it using pip in out develo ```bash python -m pip install -r ../requirements.txt ``` -For later deployments we can make vLLM part of the deployment environment by adding the requirements.txt while building the model archive in step 2 (see [here](../../../../model-archiver/README.md#model-specific-custom-python-requirements) for details) or we can make it part of a docker image like [here](../../../../docker/Dockerfile.llm). +For later deployments we can make vLLM part of the deployment environment by adding the requirements.txt while building the model archive in step 2 (see [here](../../../../model-archiver/README.md#model-specific-custom-python-requirements) for details) or we can make it part of a docker image like [here](../../../../docker/Dockerfile.vllm). ### Step 1: Download Model from HuggingFace diff --git a/examples/large_models/vllm/lora/Readme.md b/examples/large_models/vllm/lora/Readme.md index 0469d53567..0b855261e3 100644 --- a/examples/large_models/vllm/lora/Readme.md +++ b/examples/large_models/vllm/lora/Readme.md @@ -9,7 +9,7 @@ To leverage the power of vLLM we fist need to install it using pip in out develo ```bash python -m pip install -r ../requirements.txt ``` -For later deployments we can make vLLM part of the deployment environment by adding the requirements.txt while building the model archive in step 2 (see [here](../../../../model-archiver/README.md#model-specific-custom-python-requirements) for details) or we can make it part of a docker image like [here](../../../../docker/Dockerfile.llm). +For later deployments we can make vLLM part of the deployment environment by adding the requirements.txt while building the model archive in step 2 (see [here](../../../../model-archiver/README.md#model-specific-custom-python-requirements) for details) or we can make it part of a docker image like [here](../../../../docker/Dockerfile.vllm). 
### Step 1: Download Model from HuggingFace diff --git a/examples/large_models/vllm/mistral/Readme.md b/examples/large_models/vllm/mistral/Readme.md index 4816adcae5..d7c504a54c 100644 --- a/examples/large_models/vllm/mistral/Readme.md +++ b/examples/large_models/vllm/mistral/Readme.md @@ -9,7 +9,7 @@ To leverage the power of vLLM we fist need to install it using pip in out develo ```bash python -m pip install -r ../requirements.txt ``` -For later deployments we can make vLLM part of the deployment environment by adding the requirements.txt while building the model archive in step 2 (see [here](../../../../model-archiver/README.md#model-specific-custom-python-requirements) for details) or we can make it part of a docker image like [here](../../../../docker/Dockerfile.llm). +For later deployments we can make vLLM part of the deployment environment by adding the requirements.txt while building the model archive in step 2 (see [here](../../../../model-archiver/README.md#model-specific-custom-python-requirements) for details) or we can make it part of a docker image like [here](../../../../docker/Dockerfile.vllm). ### Step 1: Download Model from HuggingFace
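The final patch renames `docker/Dockerfile.llm` to `docker/Dockerfile.vllm` and switches the documented image tag from `ts/llm` to `ts/vllm`; the smoke test itself is unchanged. For convenience, the sketch below replays the README's `curl` call from Python. It is a minimal illustration, not part of the patch series: it assumes a `ts/vllm` container was started as shown in the README (`--model_id meta-llama/Meta-Llama-3-8B-Instruct --disable_token_auth`), is reachable on `localhost:8080`, and that the `requests` package is installed.

```python
import requests

# Same request the updated README issues with curl, aimed at the
# OpenAI-style completions endpoint served by the ts/vllm container.
URL = "http://localhost:8080/predictions/model/1.0/v1/completions"
payload = {
    "model": "meta-llama/Meta-Llama-3-8B-Instruct",
    "prompt": "Hello, my name is",
    "max_tokens": 200,
}

# A generous timeout leaves room for the first request to trigger
# model loading inside the container.
response = requests.post(URL, json=payload, timeout=300)
response.raise_for_status()
print(response.text)
```

If the image was built from the renamed `docker/Dockerfile.vllm`, this should return the same kind of completion as the `curl` example in the updated README.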