Skip to content

Commit 1582bcc

Browse files
Merge pull request #1439 from roboflow/fix/inference-cli-jetson-5.1.1
Export PYTHONPATH so inference is picked up correctly by inference-cli
Commit 1582bcc (2 parents: fe76cc1 + 2755073)

File tree

5 files changed

+66
-62
lines changed

5 files changed

+66
-62
lines changed

docker/dockerfiles/Dockerfile.onnx.jetson.4.6.1

Lines changed: 29 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -85,34 +85,34 @@ COPY Makefile Makefile
8585
RUN make create_inference_cli_whl PYTHON=python3.9
8686
RUN python3.9 -m pip install dist/inference_cli*.whl
8787

88-
ENV VERSION_CHECK_MODE=continuous
89-
ENV PROJECT=roboflow-platform
90-
ENV ORT_TENSORRT_FP16_ENABLE=1
91-
ENV ORT_TENSORRT_ENGINE_CACHE_ENABLE=1
92-
ENV PROJECT=roboflow-platform
93-
ENV NUM_WORKERS=1
94-
ENV HOST=0.0.0.0
95-
ENV PORT=9001
96-
ENV OPENBLAS_CORETYPE=ARMV8
97-
ENV WORKFLOWS_STEP_EXECUTION_MODE=local
98-
ENV WORKFLOWS_MAX_CONCURRENT_STEPS=2
99-
ENV API_LOGGING_ENABLED=True
100-
ENV RUNS_ON_JETSON=True
101-
ENV ENABLE_PROMETHEUS=True
102-
ENV ENABLE_STREAM_API=True
103-
ENV STREAM_API_PRELOADED_PROCESSES=2
104-
105-
ENV CORE_MODEL_GAZE_ENABLED=False
106-
ENV CORE_MODEL_OWLV2_ENABLED=False
107-
ENV CORE_MODEL_PE_ENABLED=False
108-
ENV CORE_MODEL_SAM_ENABLED=False
109-
ENV CORE_MODEL_SAM2_ENABLED=False
110-
ENV CORE_MODEL_TROCR_ENABLED=False
111-
ENV DEPTH_ESTIMATION_ENABLED=False
112-
ENV FLORENCE2_ENABLED=False
113-
ENV MOONDREAM2_ENABLED=False
114-
ENV PALIGEMMA_ENABLED=False
115-
ENV QWEN_2_5_ENABLED=False
116-
ENV SMOLVLM2_ENABLED=False
88+
ENV VERSION_CHECK_MODE=continuous \
89+
PROJECT=roboflow-platform \
90+
ORT_TENSORRT_FP16_ENABLE=1 \
91+
ORT_TENSORRT_ENGINE_CACHE_ENABLE=1 \
92+
PROJECT=roboflow-platform \
93+
NUM_WORKERS=1 \
94+
HOST=0.0.0.0 \
95+
PORT=9001 \
96+
OPENBLAS_CORETYPE=ARMV8 \
97+
WORKFLOWS_STEP_EXECUTION_MODE=local \
98+
WORKFLOWS_MAX_CONCURRENT_STEPS=2 \
99+
API_LOGGING_ENABLED=True \
100+
RUNS_ON_JETSON=True \
101+
ENABLE_PROMETHEUS=True \
102+
ENABLE_STREAM_API=True \
103+
STREAM_API_PRELOADED_PROCESSES=2 \
104+
CORE_MODEL_GAZE_ENABLED=False \
105+
CORE_MODEL_OWLV2_ENABLED=False \
106+
CORE_MODEL_PE_ENABLED=False \
107+
CORE_MODEL_SAM_ENABLED=False \
108+
CORE_MODEL_SAM2_ENABLED=False \
109+
CORE_MODEL_TROCR_ENABLED=False \
110+
DEPTH_ESTIMATION_ENABLED=False \
111+
FLORENCE2_ENABLED=False \
112+
MOONDREAM2_ENABLED=False \
113+
PALIGEMMA_ENABLED=False \
114+
QWEN_2_5_ENABLED=False \
115+
SMOLVLM2_ENABLED=False \
116+
PYTHONPATH=/app:$PYTHONPATH
117117

118118
ENTRYPOINT uvicorn gpu_http:app --workers $NUM_WORKERS --host $HOST --port $PORT

docker/dockerfiles/Dockerfile.onnx.jetson.5.1.1

Lines changed: 20 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -76,24 +76,25 @@ COPY Makefile Makefile
7676
RUN make create_inference_cli_whl PYTHON=python3.9
7777
RUN python3.9 -m pip install dist/inference_cli*.whl
7878

79-
ENV VERSION_CHECK_MODE=continuous
80-
ENV PROJECT=roboflow-platform
81-
ENV ORT_TENSORRT_FP16_ENABLE=1
82-
ENV ORT_TENSORRT_ENGINE_CACHE_ENABLE=1
83-
ENV CORE_MODEL_SAM_ENABLED=False
84-
ENV PROJECT=roboflow-platform
85-
ENV NUM_WORKERS=1
86-
ENV HOST=0.0.0.0
87-
ENV PORT=9001
88-
ENV OPENBLAS_CORETYPE=ARMV8
89-
ENV LD_PRELOAD=/usr/lib/aarch64-linux-gnu/libgomp.so.1
90-
ENV WORKFLOWS_STEP_EXECUTION_MODE=local
91-
ENV WORKFLOWS_MAX_CONCURRENT_STEPS=2
92-
ENV API_LOGGING_ENABLED=True
93-
ENV CORE_MODEL_TROCR_ENABLED=false
94-
ENV RUNS_ON_JETSON=True
95-
ENV ENABLE_PROMETHEUS=True
96-
ENV ENABLE_STREAM_API=True
97-
ENV STREAM_API_PRELOADED_PROCESSES=2
79+
ENV VERSION_CHECK_MODE=continuous \
80+
PROJECT=roboflow-platform \
81+
ORT_TENSORRT_FP16_ENABLE=1 \
82+
ORT_TENSORRT_ENGINE_CACHE_ENABLE=1 \
83+
CORE_MODEL_SAM_ENABLED=False \
84+
PROJECT=roboflow-platform \
85+
NUM_WORKERS=1 \
86+
HOST=0.0.0.0 \
87+
PORT=9001 \
88+
OPENBLAS_CORETYPE=ARMV8 \
89+
LD_PRELOAD=/usr/lib/aarch64-linux-gnu/libgomp.so.1 \
90+
WORKFLOWS_STEP_EXECUTION_MODE=local \
91+
WORKFLOWS_MAX_CONCURRENT_STEPS=2 \
92+
API_LOGGING_ENABLED=True \
93+
CORE_MODEL_TROCR_ENABLED=false \
94+
RUNS_ON_JETSON=True \
95+
ENABLE_PROMETHEUS=True \
96+
ENABLE_STREAM_API=True \
97+
STREAM_API_PRELOADED_PROCESSES=2 \
98+
PYTHONPATH=/app:$PYTHONPATH
9899

99100
ENTRYPOINT uvicorn gpu_http:app --workers $NUM_WORKERS --host $HOST --port $PORT

docker/dockerfiles/Dockerfile.onnx.jetson.5.1.1.stream_manager

Lines changed: 13 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -65,17 +65,18 @@ COPY Makefile Makefile
6565
RUN make create_inference_cli_whl PYTHON=python3.9
6666
RUN python3.9 -m pip install dist/inference_cli*.whl
6767

68-
ENV ORT_TENSORRT_FP16_ENABLE=1
69-
ENV ORT_TENSORRT_ENGINE_CACHE_ENABLE=1
70-
ENV CORE_MODEL_SAM_ENABLED=False
71-
ENV OPENBLAS_CORETYPE=ARMV8
72-
ENV LD_PRELOAD=/usr/lib/aarch64-linux-gnu/libgomp.so.1:/usr/local/lib/python3.8/dist-packages/torch.libs/libgomp-d22c30c5.so.1.0.0
73-
ENV VERSION_CHECK_MODE=continuous
74-
ENV PROJECT=roboflow-platform
75-
ENV HOST=0.0.0.0
76-
ENV PORT=7070
77-
ENV WORKFLOWS_STEP_EXECUTION_MODE=local
78-
ENV WORKFLOWS_MAX_CONCURRENT_STEPS=1
79-
ENV SUPERVISON_DEPRECATION_WARNING=0
68+
ENV ORT_TENSORRT_FP16_ENABLE=1 \
69+
ORT_TENSORRT_ENGINE_CACHE_ENABLE=1 \
70+
CORE_MODEL_SAM_ENABLED=False \
71+
OPENBLAS_CORETYPE=ARMV8 \
72+
LD_PRELOAD=/usr/lib/aarch64-linux-gnu/libgomp.so.1:/usr/local/lib/python3.8/dist-packages/torch.libs/libgomp-d22c30c5.so.1.0.0 \
73+
VERSION_CHECK_MODE=continuous \
74+
PROJECT=roboflow-platform \
75+
HOST=0.0.0.0 \
76+
PORT=7070 \
77+
WORKFLOWS_STEP_EXECUTION_MODE=local \
78+
WORKFLOWS_MAX_CONCURRENT_STEPS=1 \
79+
SUPERVISON_DEPRECATION_WARNING=0 \
80+
PYTHONPATH=/app:$PYTHONPATH
8081

8182
ENTRYPOINT ["python3.9", "-m", "inference.enterprise.stream_management.manager.app"]

docker/dockerfiles/Dockerfile.onnx.jetson.6.0.0

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -73,7 +73,8 @@ ENV VERSION_CHECK_MODE=continuous \
7373
RUNS_ON_JETSON=True \
7474
ENABLE_PROMETHEUS=True \
7575
ENABLE_STREAM_API=True \
76-
STREAM_API_PRELOADED_PROCESSES=2
76+
STREAM_API_PRELOADED_PROCESSES=2 \
77+
PYTHONPATH=/app:$PYTHONPATH
7778

7879
# Expose the application port
7980
EXPOSE 9001

docker/dockerfiles/Dockerfile.onnx.jetson.6.2.0

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -78,7 +78,8 @@ ENV VERSION_CHECK_MODE=continuous \
7878
RUNS_ON_JETSON=True \
7979
ENABLE_PROMETHEUS=True \
8080
ENABLE_STREAM_API=True \
81-
STREAM_API_PRELOADED_PROCESSES=2
81+
STREAM_API_PRELOADED_PROCESSES=2 \
82+
PYTHONPATH=/app:$PYTHONPATH
8283

8384
# Expose the application port
8485
EXPOSE 9001

0 commit comments

Comments (0)