From 5063a99dcfedbb8624250338c868ad5312b60ff0 Mon Sep 17 00:00:00 2001
From: Misha Chornyi <99709299+mc-nv@users.noreply.github.com>
Date: Fri, 8 Nov 2024 14:37:30 -0800
Subject: [PATCH 1/4] Update README and versions for 1.46.0 / 24.11 (#942)
---
Dockerfile | 4 ++--
README.md | 4 ++--
docs/bls_quick_start.md | 4 ++--
docs/config.md | 2 +-
docs/ensemble_quick_start.md | 4 ++--
docs/kubernetes_deploy.md | 2 +-
docs/mm_quick_start.md | 4 ++--
docs/quick_start.md | 4 ++--
helm-chart/values.yaml | 2 +-
model_analyzer/config/input/config_defaults.py | 2 +-
10 files changed, 16 insertions(+), 16 deletions(-)
diff --git a/Dockerfile b/Dockerfile
index 2bda69a8..f1bc9ef1 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -12,8 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-ARG BASE_IMAGE=nvcr.io/nvidia/tritonserver:24.10-py3
-ARG TRITONSDK_BASE_IMAGE=nvcr.io/nvidia/tritonserver:24.10-py3-sdk
+ARG BASE_IMAGE=nvcr.io/nvidia/tritonserver:24.11-py3
+ARG TRITONSDK_BASE_IMAGE=nvcr.io/nvidia/tritonserver:24.11-py3-sdk
ARG MODEL_ANALYZER_VERSION=1.47.0dev
ARG MODEL_ANALYZER_CONTAINER_VERSION=24.12dev
diff --git a/README.md b/README.md
index b5c57636..3a825635 100644
--- a/README.md
+++ b/README.md
@@ -23,8 +23,8 @@ limitations under the License.
> ##### LATEST RELEASE
>
> You are currently on the `main` branch, which tracks under-development progress towards the next release.
-> The latest release of the Triton Model Analyzer is 1.45.0 and is available on branch
-> [r24.10](https://github.com/triton-inference-server/model_analyzer/tree/r24.10).
+> The latest release of the Triton Model Analyzer is 1.46.0 and is available on branch
+> [r24.11](https://github.com/triton-inference-server/model_analyzer/tree/r24.11).
Triton Model Analyzer is a CLI tool that helps you find an optimized configuration, on a given piece of hardware, for single, multiple, ensemble, or BLS models running on a [Triton Inference Server](https://github.com/triton-inference-server/server/). Model Analyzer also generates reports to help you better understand the trade-offs of the different configurations, along with their compute and memory requirements.
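For reviewers of this bump, a minimal profiling run against the updated tooling might look like the sketch below. The repository and output paths are illustrative placeholders, not values from this patch; `add_sub` is the model used by this repo's quick-start docs.
```
# Hypothetical invocation; /path/to/model-repo and /path/to/output are
# placeholders, add_sub is the quick-start example model.
model-analyzer profile \
    --model-repository /path/to/model-repo \
    --profile-models add_sub \
    --triton-launch-mode docker \
    --output-model-repository-path /path/to/output
```
With `--triton-launch-mode docker`, Model Analyzer launches the default `triton_docker_image`, which is exactly the value this patch moves to `24.11-py3`.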
diff --git a/docs/bls_quick_start.md b/docs/bls_quick_start.md
index 71aa9439..39c4c3d5 100644
--- a/docs/bls_quick_start.md
+++ b/docs/bls_quick_start.md
@@ -49,7 +49,7 @@ git pull origin main
**1. Pull the SDK container:**
```
-docker pull nvcr.io/nvidia/tritonserver:24.10-py3-sdk
+docker pull nvcr.io/nvidia/tritonserver:24.11-py3-sdk
```
**2. Run the SDK container:**
@@ -59,7 +59,7 @@ docker run -it --gpus 1 \
--shm-size 2G \
-v /var/run/docker.sock:/var/run/docker.sock \
-v $(pwd)/examples/quick-start:$(pwd)/examples/quick-start \
- --net=host nvcr.io/nvidia/tritonserver:24.10-py3-sdk
+ --net=host nvcr.io/nvidia/tritonserver:24.11-py3-sdk
```
**Important:** The example above uses a single GPU. If you are running on multiple GPUs, you may need to increase the shared memory size accordingly.
diff --git a/docs/config.md b/docs/config.md
index f25fc52f..132ace31 100644
--- a/docs/config.md
+++ b/docs/config.md
@@ -153,7 +153,7 @@ cpu_only_composing_models:
[ reload_model_disable: | default: false]
# Triton Docker image tag used when launching using Docker mode
-[ triton_docker_image: | default: nvcr.io/nvidia/tritonserver:24.10-py3 ]
+[ triton_docker_image: | default: nvcr.io/nvidia/tritonserver:24.11-py3 ]
# Triton Server HTTP endpoint URL used by Model Analyzer client
[ triton_http_endpoint: | default: localhost:8000 ]
diff --git a/docs/ensemble_quick_start.md b/docs/ensemble_quick_start.md
index 68a4cbbc..2086a0b1 100644
--- a/docs/ensemble_quick_start.md
+++ b/docs/ensemble_quick_start.md
@@ -55,7 +55,7 @@ mkdir examples/quick-start/ensemble_add_sub/1
**1. Pull the SDK container:**
```
-docker pull nvcr.io/nvidia/tritonserver:24.10-py3-sdk
+docker pull nvcr.io/nvidia/tritonserver:24.11-py3-sdk
```
**2. Run the SDK container:**
@@ -65,7 +65,7 @@ docker run -it --gpus 1 \
--shm-size 1G \
-v /var/run/docker.sock:/var/run/docker.sock \
-v $(pwd)/examples/quick-start:$(pwd)/examples/quick-start \
- --net=host nvcr.io/nvidia/tritonserver:24.10-py3-sdk
+ --net=host nvcr.io/nvidia/tritonserver:24.11-py3-sdk
```
**Important:** The example above uses a single GPU. If you are running on multiple GPUs, you may need to increase the shared memory size accordingly.
diff --git a/docs/kubernetes_deploy.md b/docs/kubernetes_deploy.md
index d0ab0326..d35a1972 100644
--- a/docs/kubernetes_deploy.md
+++ b/docs/kubernetes_deploy.md
@@ -79,7 +79,7 @@ images:
triton:
image: nvcr.io/nvidia/tritonserver
- tag: 24.10-py3
+ tag: 24.11-py3
```
The Model Analyzer executable uses the config file defined in `helm-chart/templates/config-map.yaml`. This config can be modified to supply arguments to Model Analyzer. Only the content under the `config.yaml` section of the file should be modified.
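As a sketch of how the retagged image reaches a cluster, the chart can be installed with the tag overridden on the command line; the release name `model-analyzer` is an arbitrary choice, not something this patch or chart mandates.
```
# Hypothetical install from the repo root; the release name is arbitrary.
# The --set path mirrors the images.triton.tag key shown above.
helm install model-analyzer ./helm-chart \
    --set images.triton.tag=24.11-py3
```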
diff --git a/docs/mm_quick_start.md b/docs/mm_quick_start.md
index 5da130a5..aeb798d6 100644
--- a/docs/mm_quick_start.md
+++ b/docs/mm_quick_start.md
@@ -49,7 +49,7 @@ git pull origin main
**1. Pull the SDK container:**
```
-docker pull nvcr.io/nvidia/tritonserver:24.10-py3-sdk
+docker pull nvcr.io/nvidia/tritonserver:24.11-py3-sdk
```
**2. Run the SDK container:**
@@ -58,7 +58,7 @@ docker pull nvcr.io/nvidia/tritonserver:24.10-py3-sdk
docker run -it --gpus all \
-v /var/run/docker.sock:/var/run/docker.sock \
-v $(pwd)/examples/quick-start:$(pwd)/examples/quick-start \
- --net=host nvcr.io/nvidia/tritonserver:24.10-py3-sdk
+ --net=host nvcr.io/nvidia/tritonserver:24.11-py3-sdk
```
## `Step 3:` Profile both models concurrently
diff --git a/docs/quick_start.md b/docs/quick_start.md
index 2655754d..8538a1b7 100644
--- a/docs/quick_start.md
+++ b/docs/quick_start.md
@@ -49,7 +49,7 @@ git pull origin main
**1. Pull the SDK container:**
```
-docker pull nvcr.io/nvidia/tritonserver:24.10-py3-sdk
+docker pull nvcr.io/nvidia/tritonserver:24.11-py3-sdk
```
**2. Run the SDK container:**
@@ -58,7 +58,7 @@ docker pull nvcr.io/nvidia/tritonserver:24.10-py3-sdk
docker run -it --gpus all \
-v /var/run/docker.sock:/var/run/docker.sock \
-v $(pwd)/examples/quick-start:$(pwd)/examples/quick-start \
- --net=host nvcr.io/nvidia/tritonserver:24.10-py3-sdk
+ --net=host nvcr.io/nvidia/tritonserver:24.11-py3-sdk
```
## `Step 3:` Profile the `add_sub` model
diff --git a/helm-chart/values.yaml b/helm-chart/values.yaml
index 427fb3e3..caf10481 100644
--- a/helm-chart/values.yaml
+++ b/helm-chart/values.yaml
@@ -41,4 +41,4 @@ images:
triton:
image: nvcr.io/nvidia/tritonserver
- tag: 24.10-py3
+ tag: 24.11-py3
diff --git a/model_analyzer/config/input/config_defaults.py b/model_analyzer/config/input/config_defaults.py
index 6c520ec6..68d31fda 100755
--- a/model_analyzer/config/input/config_defaults.py
+++ b/model_analyzer/config/input/config_defaults.py
@@ -63,7 +63,7 @@
DEFAULT_REQUEST_RATE_SEARCH_ENABLE = False
DEFAULT_CONCURRENCY_SWEEP_DISABLE = False
DEFAULT_TRITON_LAUNCH_MODE = "local"
-DEFAULT_TRITON_DOCKER_IMAGE = "nvcr.io/nvidia/tritonserver:24.10-py3"
+DEFAULT_TRITON_DOCKER_IMAGE = "nvcr.io/nvidia/tritonserver:24.11-py3"
DEFAULT_TRITON_HTTP_ENDPOINT = "localhost:8000"
DEFAULT_TRITON_GRPC_ENDPOINT = "localhost:8001"
DEFAULT_TRITON_METRICS_URL = "http://localhost:8002/metrics"
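`DEFAULT_TRITON_DOCKER_IMAGE` above is only a fallback: users who cannot move to 24.11 yet can keep pinning the image per run. A minimal sketch, assuming a config file named `config.yaml` (the filename and paths are assumptions):
```
# Hypothetical per-run override of the bumped default image.
# config.yaml and /path/to/model-repo are assumed names.
cat > config.yaml <<'EOF'
model_repository: /path/to/model-repo
triton_docker_image: nvcr.io/nvidia/tritonserver:24.10-py3
profile_models:
  - add_sub
EOF
model-analyzer profile -f config.yaml
```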
From 5fa372ca0ed59f112ffc32646af93ee6f011a995 Mon Sep 17 00:00:00 2001
From: Misha Chornyi <99709299+mc-nv@users.noreply.github.com>
Date: Tue, 12 Nov 2024 21:27:00 -0800
Subject: [PATCH 2/4] Update base versions (#944)
---
Dockerfile | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/Dockerfile b/Dockerfile
index f1bc9ef1..dd81ddd9 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -26,7 +26,7 @@ ARG BASE_IMAGE
ARG TRITONSDK_BASE_IMAGE
# DCGM version to install for Model Analyzer
-ENV DCGM_VERSION=3.2.6
+ENV DCGM_VERSION=3.3.6
# Ensure apt-get won't prompt for selecting options
ENV DEBIAN_FRONTEND=noninteractive
@@ -41,7 +41,7 @@ RUN mkdir -p /opt/triton-model-analyzer
RUN [ "$(uname -m)" != "x86_64" ] && arch="sbsa" || arch="x86_64" && \
curl -o /tmp/cuda-keyring.deb \
- https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/$arch/cuda-keyring_1.0-1_all.deb && \
+ https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2404/$arch/cuda-keyring_1.1-1_all.deb && \
apt-get install /tmp/cuda-keyring.deb && rm /tmp/cuda-keyring.deb && \
apt-get update && apt-get install -y --no-install-recommends software-properties-common && \
apt-get install -y datacenter-gpu-manager=1:${DCGM_VERSION};
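The keyring and DCGM step can be exercised outside the image build to confirm the new URL and version resolve; a rough sketch on an Ubuntu 24.04 host, mirroring the Dockerfile's arch selection (the temporary path and the use of sudo are assumptions):
```
# Hypothetical standalone replay of the Dockerfile's install step.
# /tmp/cuda-keyring.deb and sudo are assumptions for a bare host.
arch="x86_64"; [ "$(uname -m)" != "x86_64" ] && arch="sbsa"
curl -o /tmp/cuda-keyring.deb \
    "https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2404/${arch}/cuda-keyring_1.1-1_all.deb"
sudo apt-get install /tmp/cuda-keyring.deb
sudo apt-get update
sudo apt-get install -y datacenter-gpu-manager=1:3.3.6
dcgmi --version   # expect 3.3.6
```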
From 264fcd559983e365aa3af72021022f6457ecceae Mon Sep 17 00:00:00 2001
From: Misha Chornyi <99709299+mc-nv@users.noreply.github.com>
Date: Wed, 13 Nov 2024 07:22:27 -0800
Subject: [PATCH 3/4] Remove pip upgrade (#945)
---
Dockerfile | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/Dockerfile b/Dockerfile
index dd81ddd9..399b285b 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -70,8 +70,7 @@ RUN chmod +x /opt/triton-model-analyzer/nvidia_entrypoint.sh
RUN chmod +x build_wheel.sh && \
./build_wheel.sh perf_analyzer true && \
rm -f perf_analyzer
-RUN python3 -m pip install --upgrade pip && \
- python3 -m pip install nvidia-pyindex && \
+RUN python3 -m pip install nvidia-pyindex && \
python3 -m pip install wheels/triton_model_analyzer-*-manylinux*.whl
# Other pip packages
RUN python3 -m pip install coverage
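With the pip self-upgrade removed, the wheel now installs with whatever pip the base image ships; a quick post-build sanity check might look like this (the distribution name is inferred from the `triton_model_analyzer-*.whl` filename, so treat it as an assumption):
```
# Hypothetical in-container checks after the build.
python3 -m pip --version                   # the base image's pip, un-upgraded
python3 -m pip show triton-model-analyzer  # dist name inferred from the wheel
model-analyzer --help                      # entry point resolves
```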
From e8a5a8f04ae3b46976420346d23ccedee777e174 Mon Sep 17 00:00:00 2001
From: Misha Chornyi <99709299+mc-nv@users.noreply.github.com>
Date: Thu, 21 Nov 2024 12:34:33 -0800
Subject: [PATCH 4/4] Lock grpcio version (#946)
---
requirements.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/requirements.txt b/requirements.txt
index a95c2798..af59c407 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -16,7 +16,7 @@ cryptography>=3.3.2
distro>=1.5.0
docker>=4.3.1
gevent>=22.08.0
-grpcio>=1.41.0
+grpcio>=1.41.0,<1.68
httplib2>=0.19.0
importlib_metadata>=7.1.0
matplotlib>=3.3.4
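The patch does not record why 1.68 is excluded, only that resolution must stay below it; a minimal check that an environment honors the new ceiling:
```
# Hypothetical verification that the pin holds after a fresh install.
python3 -m pip install -r requirements.txt
python3 -c 'import grpc; print(grpc.__version__)'   # expect a version < 1.68
```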