
Commit 3c30a14

Updated dlpack_test
1 parent 8f42ffc · commit 3c30a14

File tree

1 file changed: +52 -10 lines


qa/python_models/dlpack_test/model.py (+52 -10)
@@ -1,4 +1,4 @@
-# Copyright 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# Copyright 2021-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions
@@ -56,6 +56,23 @@ def test_pytorch_dlpack(self):
         self.assertTrue(
             pytorch_tensor.type() == pytorch_tensor_dlpack.type())

+        # Now let's check that the upgraded DLPack implementation also
+        # works as expected, i.e. from_dlpack should work with an
+        # external pytorch tensor directly
+
+        pb_tensor_upgraded = pb_utils.Tensor.from_dlpack('test_tensor',
+                                                         pytorch_tensor)
+        self.assertTrue(
+            np.all(pb_tensor_upgraded.as_numpy() == pytorch_tensor.numpy()))
+
+        # Here we check that `pb_tensor`, as a producer, properly
+        # invokes `__dlpack__` and `__dlpack_device__`
+        pytorch_tensor_dlpack = from_dlpack(pb_tensor_upgraded)
+        self.assertTrue(torch.all(pytorch_tensor_dlpack == pytorch_tensor))
+
+        self.assertTrue(
+            pytorch_tensor.type() == pytorch_tensor_dlpack.type())
+
     def test_non_contiguous_error(self):
         pytorch_tensor = torch.rand([20, 30], dtype=torch.float16)

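Note on the hunk above: the new assertions rely on pb_utils.Tensor acting as a DLPack producer, i.e. exposing `__dlpack__` and `__dlpack_device__` so that torch.utils.dlpack.from_dlpack can consume it without an explicit capsule. The standalone sketch below illustrates that protocol with a hypothetical wrapper class; DLPackWrapper is not part of Triton or of this test, it simply delegates to NumPy's own DLPack support (assumes NumPy >= 1.22 and PyTorch >= 1.10).

# Minimal sketch of the DLPack producer protocol that
# from_dlpack(pb_tensor_upgraded) relies on: any object exposing
# __dlpack__ and __dlpack_device__ can be consumed directly.
import numpy as np
import torch
from torch.utils.dlpack import from_dlpack


class DLPackWrapper:  # hypothetical name, for illustration only
    """Wraps a NumPy array and re-exports its DLPack interface."""

    def __init__(self, array: np.ndarray):
        self._array = array

    def __dlpack__(self, stream=None):
        # Delegate capsule creation to NumPy.
        return self._array.__dlpack__()

    def __dlpack_device__(self):
        # Returns a (device_type, device_id) tuple, e.g. (1, 0) for CPU.
        return self._array.__dlpack_device__()


if __name__ == "__main__":
    wrapper = DLPackWrapper(np.arange(10, dtype=np.float32))
    torch_view = from_dlpack(wrapper)  # consumes the wrapper directly
    assert torch.all(torch_view == torch.arange(10, dtype=torch.float32))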
@@ -83,6 +100,8 @@ def test_dlpack_string_tensor(self):

     def test_dlpack_gpu_tensors(self):
         # Test different dtypes
+        # PyTorch does not support DLPack bool type yet:
+        # https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/DLConvertor.cpp
         pytorch_dtypes = [
             torch.float16, torch.float32, torch.float64, torch.int8,
             torch.int16, torch.int32, torch.int64, torch.uint8
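Editorial aside on the bool limitation referenced in this hunk: whether a given dtype survives a DLPack round trip depends on the PyTorch build, so a quick probe such as the hypothetical helper below (not part of the test suite) can be used to check the current behaviour before hard-coding a dtype list.

# Hypothetical probe: report which torch dtypes currently survive a
# to_dlpack/from_dlpack round trip on a given device.
import torch
from torch.utils.dlpack import from_dlpack, to_dlpack


def dlpack_round_trip_ok(dtype, device='cpu'):
    """Return True if a tensor of `dtype` survives the round trip."""
    try:
        src = torch.zeros(4, dtype=dtype, device=device)
        dst = from_dlpack(to_dlpack(src))
        return dst.dtype == src.dtype
    except (RuntimeError, TypeError, BufferError):
        return False


if __name__ == "__main__":
    for dt in (torch.float16, torch.int8, torch.uint8, torch.bool):
        print(dt, dlpack_round_trip_ok(dt))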
@@ -100,27 +119,50 @@ def test_dlpack_gpu_tensors(self):
             # the same
             pytorch_tensor_dlpack = from_dlpack(pb_tensor.to_dlpack())
             self.assertTrue(torch.all(pytorch_tensor_dlpack == pytorch_tensor))
-
-            # DLPack does not properly support bool type:
-            # https://github.com/google/jax/issues/4719
-            if pytorch_dtype != torch.bool:
-                self.assertTrue(
-                    pytorch_tensor.type() == pytorch_tensor_dlpack.type())
-            else:
-                self.assertFalse(
-                    pytorch_tensor.type() == pytorch_tensor_dlpack.type())
+            self.assertTrue(
+                pytorch_tensor.type() == pytorch_tensor_dlpack.type())
+
+            # Now we make sure that the updated DLPack implementation works
+            # with GPU as well
+            pb_tensor = pb_utils.Tensor.from_dlpack('test_tensor',
+                                                    pytorch_tensor)
+            pytorch_tensor_dlpack = from_dlpack(pb_tensor)
+            self.assertTrue(torch.all(pytorch_tensor_dlpack == pytorch_tensor))
+            self.assertTrue(
+                pytorch_tensor.type() == pytorch_tensor_dlpack.type())
+

     def test_dlpack_gpu_numpy(self):
         # DLPack tesnors that are in GPU cannot be converted to NumPy
         pytorch_tensor = torch.rand([100], dtype=torch.float16,
                                     device='cuda') * 100
         pb_tensor = pb_utils.Tensor.from_dlpack('tensor',
                                                 to_dlpack(pytorch_tensor))
+        # Make sure that `__dlpack_device__` works as expected
+        self.assertTrue(pb_tensor.__dlpack_device__() == pytorch_tensor.__dlpack_device__())
+
         with self.assertRaises(Exception) as e:
             pb_tensor.as_numpy()
         self.assertTrue(
             str(e.exception) ==
             'Tensor is stored in GPU and cannot be converted to NumPy.')
+
+    def test_dlpack_cpu_numpy(self):
+        # Check compatibility of PbTensor DLPack implementation
+        # with numpy
+        pytorch_tensor = torch.rand([100], dtype=torch.float16,
+                                    device='cpu') * 100
+        pb_tensor = pb_utils.Tensor.from_dlpack('tensor', pytorch_tensor)
+        numpy_tensor_dlpack = np.from_dlpack(pb_tensor)
+        self.assertTrue(np.all(numpy_tensor_dlpack == pytorch_tensor.numpy()))
+        # Make sure that `__dlpack_device__` works as expected
+        self.assertTrue(pb_tensor.__dlpack_device__() == pytorch_tensor.__dlpack_device__())
+
+    def test_pdtensor_bool_internal_support(self):
+        bool_array = np.asarray([False, True])
+        bool_tensor = pb_utils.Tensor('tensor', bool_array)
+        bool_tensor_dlpack = pb_utils.Tensor.from_dlpack('tensor', bool_tensor)
+        self.assertTrue(np.all(bool_array == bool_tensor_dlpack.as_numpy()))


 class TritonPythonModel:
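For context on the consumer path exercised by the new test_dlpack_cpu_numpy and the `__dlpack_device__` checks: np.from_dlpack accepts any object implementing the DLPack protocol, and `__dlpack_device__` reports a (device_type, device_id) pair. The snippet below is a minimal standalone sketch, assuming NumPy >= 1.22 and a recent PyTorch, with no Triton dependencies; it mirrors the assertions in the test but is not part of it.

# Consumer-side sketch: np.from_dlpack takes a zero-copy view of a CPU
# torch tensor, and __dlpack_device__ reports where the data lives.
import numpy as np
import torch

cpu_tensor = torch.rand([100], dtype=torch.float16, device='cpu') * 100

# Zero-copy NumPy view of the torch tensor via the DLPack protocol.
numpy_view = np.from_dlpack(cpu_tensor)
assert np.all(numpy_view == cpu_tensor.numpy())

# For a CPU tensor this is typically (1, 0), i.e. kDLCPU with device id 0.
print(cpu_tensor.__dlpack_device__())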
