
Commit eccc3cf: Fix jetson

1 parent: a629ff1

1 file changed (+4, -2 lines)


qa/L0_backend_python/python_test.py (+4, -2)
@@ -38,7 +38,6 @@
 import os
 
 from tritonclient.utils import *
-import tritonclient.utils.cuda_shared_memory as cuda_shared_memory
 import tritonclient.http as httpclient
 
 TEST_JETSON = bool(int(os.environ.get('TEST_JETSON', 0)))
@@ -155,9 +154,11 @@ def test_growth_error(self):
         self._infer_help(model_name, shape, dtype)
 
     # GPU tensors are not supported on jetson
+    # CUDA Shared memory is not supported on jetson
     if not TEST_JETSON:
-        # CUDA Shared memory is not supported on jetson
+
         def test_gpu_tensor_error(self):
+            import tritonclient.utils.cuda_shared_memory as cuda_shared_memory
             model_name = 'identity_bool'
             with httpclient.InferenceServerClient("localhost:8000") as client:
                 input_data = np.array([[True] * 1000], dtype=bool)
@@ -184,6 +185,7 @@ def test_gpu_tensor_error(self):
                 cuda_shared_memory.destroy_shared_memory_region(shm0_handle)
 
         def test_dlpack_tensor_error(self):
+            import tritonclient.utils.cuda_shared_memory as cuda_shared_memory
             model_name = 'dlpack_identity'
             with httpclient.InferenceServerClient("localhost:8000") as client:
                 input_data = np.array([[1] * 1000], dtype=np.float32)
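
For context, the change appears to move the cuda_shared_memory import from module scope into the individual GPU tests, so python_test.py can still be imported on Jetson, where CUDA shared memory is not supported. Below is a minimal sketch of that deferred-import pattern; the class and test names are illustrative rather than taken from the test file, and the shared-memory calls assume a CUDA-capable device.

import os
import unittest

# Same environment flag the test suite uses to detect a Jetson run.
TEST_JETSON = bool(int(os.environ.get('TEST_JETSON', 0)))


class ExampleTest(unittest.TestCase):

    # GPU tensors / CUDA shared memory are not supported on Jetson, so the
    # GPU-only tests are only defined when TEST_JETSON is unset.
    if not TEST_JETSON:

        def test_gpu_only_feature(self):
            # Deferred import: evaluated only when this test actually runs,
            # so merely importing the test module no longer fails on Jetson.
            import tritonclient.utils.cuda_shared_memory as cuda_shared_memory
            shm_handle = cuda_shared_memory.create_shared_memory_region(
                "example_region", byte_size=64, device_id=0)
            cuda_shared_memory.destroy_shared_memory_region(shm_handle)


if __name__ == '__main__':
    unittest.main()

Keeping the if not TEST_JETSON guard still skips the GPU tests entirely on Jetson, while the in-function import keeps the module importable even though tritonclient.utils.cuda_shared_memory presumably cannot be loaded there.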
