1 file changed: +4 −2 lines changed

@@ -38,7 +38,6 @@
 import os
 
 from tritonclient.utils import *
-import tritonclient.utils.cuda_shared_memory as cuda_shared_memory
 import tritonclient.http as httpclient
 
 TEST_JETSON = bool(int(os.environ.get('TEST_JETSON', 0)))
@@ -155,9 +154,11 @@ def test_growth_error(self):
         self._infer_help(model_name, shape, dtype)
 
     # GPU tensors are not supported on jetson
+    # CUDA Shared memory is not supported on jetson
     if not TEST_JETSON:
-        # CUDA Shared memory is not supported on jetson
+
         def test_gpu_tensor_error(self):
+            import tritonclient.utils.cuda_shared_memory as cuda_shared_memory
             model_name = 'identity_bool'
             with httpclient.InferenceServerClient("localhost:8000") as client:
                 input_data = np.array([[True] * 1000], dtype=bool)
@@ -184,6 +185,7 @@ def test_gpu_tensor_error(self):
             cuda_shared_memory.destroy_shared_memory_region(shm0_handle)
 
         def test_dlpack_tensor_error(self):
+            import tritonclient.utils.cuda_shared_memory as cuda_shared_memory
             model_name = 'dlpack_identity'
             with httpclient.InferenceServerClient("localhost:8000") as client:
                 input_data = np.array([[1] * 1000], dtype=np.float32)
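The change moves the `tritonclient.utils.cuda_shared_memory` import from module scope into the bodies of the two tests that use it, so the test module still imports cleanly on Jetson, where CUDA shared memory is not supported. A minimal sketch of the same deferred-import pattern, assuming the standard `unittest` layout and the `create_shared_memory_region`/`destroy_shared_memory_region` helpers visible in the diff (the test name and region parameters here are hypothetical):

```python
import os
import unittest

# Same flag the test suite uses: set TEST_JETSON=1 when running on Jetson.
TEST_JETSON = bool(int(os.environ.get('TEST_JETSON', 0)))


class ExampleTest(unittest.TestCase):

    # GPU tests are only *defined* off Jetson, and the optional module is
    # only *imported* when a test body actually runs, so importing this
    # file never fails on platforms without CUDA shared memory support.
    if not TEST_JETSON:

        def test_cuda_shared_memory_roundtrip(self):
            # Deferred import: resolved only when this test executes.
            import tritonclient.utils.cuda_shared_memory as cuda_shared_memory
            # Hypothetical region name, byte size, and device id.
            handle = cuda_shared_memory.create_shared_memory_region(
                'example_region', 64, 0)
            cuda_shared_memory.destroy_shared_memory_region(handle)


if __name__ == '__main__':
    unittest.main()
```

Defining the tests conditionally (rather than skipping them at runtime) keeps the deferred imports out of every other test in the class.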