Skip to content

Commit 497d4a2

Browse files
committed
Update the model to use the same add/sub logic
1 parent 594ff70 commit 497d4a2

File tree

4 files changed

+52
-35
lines changed

4 files changed

+52
-35
lines changed

qa/L0_libtorch_instance_group_kind_model/client.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -78,8 +78,8 @@ def test_infer(self):
7878
output0_data = results.as_numpy('OUTPUT__0')
7979
output1_data = results.as_numpy('OUTPUT__1')
8080

81-
expected_output_0 = input0_data + input0_data
82-
expected_output_1 = input1_data + input1_data
81+
expected_output_0 = input0_data + input1_data
82+
expected_output_1 = input0_data - input1_data
8383

8484
self.assertEqual(output0_data.shape, (1, 16))
8585
self.assertEqual(output1_data.shape, (1, 16))

qa/L0_libtorch_instance_group_kind_model/gen_models.py

+25-14
Original file line numberDiff line numberDiff line change
@@ -30,36 +30,47 @@
3030

3131

3232
class SumModule(nn.Module):
33-
3433
def __init__(self):
3534
super(SumModule, self).__init__()
3635

37-
def forward(self, x):
38-
return torch.sum(x, dim=1)
36+
def forward(self, INPUT0, INPUT1):
37+
print('SumModule - INPUT0 device: {}, INPUT1 device: {}\n'.format(
38+
INPUT0.device, INPUT1.device))
39+
return INPUT0 + INPUT1
40+
41+
class DiffModule(nn.Module):
42+
def __init__(self):
43+
super(DiffModule, self).__init__()
44+
45+
def forward(self, INPUT0, INPUT1):
46+
print('DiffModule - INPUT0 device: {}, INPUT1 device: {}\n'.format(
47+
INPUT0.device, INPUT1.device))
48+
return INPUT0 - INPUT1
3949

4050

4151
class TestModel(nn.Module):
4252

43-
def __init__(self, device1, device2):
53+
def __init__(self, device0, device1):
4454
super(TestModel, self).__init__()
55+
self.device0 = device0
4556
self.device1 = device1
46-
self.device2 = device2
47-
self.layers1 = SumModule().to(self.device1)
48-
self.layers2 = SumModule().to(self.device2)
57+
58+
self.layers1 = SumModule().to(self.device0)
59+
self.layers2 = DiffModule().to(self.device1)
4960

5061
def forward(self, INPUT0, INPUT1):
51-
INPUT0 = INPUT0.to(self.device1)
52-
INPUT1 = INPUT1.to(self.device2)
53-
print('INPUT0 device: {}, INPUT1 device: {}\n'.format(
54-
INPUT0.device, INPUT1.device))
62+
INPUT0_0 = INPUT0.to(self.device0)
63+
INPUT0_1 = INPUT0.to(self.device1)
64+
INPUT1_0 = INPUT1.to(self.device0)
65+
INPUT1_1 = INPUT1.to(self.device1)
5566

56-
op0 = self.layers1(torch.stack([INPUT0, INPUT0], dim=1))
57-
op1 = self.layers2(torch.stack([INPUT1, INPUT1], dim=1))
67+
op0 = self.layers1(INPUT0_0, INPUT1_0)
68+
op1 = self.layers2(INPUT0_1, INPUT1_1)
5869
return op0, op1
5970

6071

6172
devices = [("cuda:2", "cuda:0"), ("cpu", "cuda:3")]
62-
model_names = ["libtorch_multi_gpu", "libtorch_multi_devices"]
73+
model_names = ["libtorch_multi_gpu", "libtorch_multi_device"]
6374

6475
for device_pair, model_name in zip(devices, model_names):
6576
model = TestModel(device_pair[0], device_pair[1])

qa/L0_libtorch_instance_group_kind_model/models/libtorch_multi_devices/config.pbtxt renamed to qa/L0_libtorch_instance_group_kind_model/models/libtorch_multi_device/config.pbtxt

+1-1
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@
2424
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
2525
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2626

27-
name: "libtorch_multi_devices"
27+
name: "libtorch_multi_device"
2828
platform: "pytorch_libtorch"
2929
max_batch_size: 8
3030

qa/L0_libtorch_instance_group_kind_model/test.sh

+24-18
Original file line numberDiff line numberDiff line change
@@ -57,11 +57,11 @@ RET=0
5757

5858
rm -f *.log *.txt
5959

60-
mkdir -p models/libtorch_multi_devices/1
60+
mkdir -p models/libtorch_multi_device/1
6161
mkdir -p models/libtorch_multi_gpu/1
62-
cp models/libtorch_multi_devices/config.pbtxt models/libtorch_multi_gpu/.
62+
cp models/libtorch_multi_device/config.pbtxt models/libtorch_multi_gpu/.
6363
(cd models/libtorch_multi_gpu && \
64-
sed -i "s/name: \"libtorch_multi_devices\"/name: \"libtorch_multi_gpu\"/" config.pbtxt)
64+
sed -i "s/name: \"libtorch_multi_device\"/name: \"libtorch_multi_gpu\"/" config.pbtxt)
6565

6666
# Generate the models which are partitioned across multiple devices
6767
set +e
@@ -82,8 +82,7 @@ fi
8282

8383
set +e
8484

85-
MESSAGE="INPUT0 device: cpu, INPUT1 device: cuda:3"
86-
export MODEL_NAME='libtorch_multi_devices'
85+
export MODEL_NAME='libtorch_multi_device'
8786
python3 $CLIENT_PY >> $CLIENT_LOG 2>&1
8887
if [ $? -ne 0 ]; then
8988
echo -e "\n***\n*** Model $MODEL_NAME FAILED. \n***"
@@ -98,14 +97,17 @@ else
9897
fi
9998
fi
10099

101-
if grep "$MESSAGE" $SERVER_LOG; then
102-
echo -e "Found \"$MESSAGE\"" >> $CLIENT_LOG
103-
else
104-
echo -e "Not found \"$MESSAGE\"" >> $CLIENT_LOG
105-
RET=1
106-
fi
100+
MESSAGES=("SumModule - INPUT0 device: cpu, INPUT1 device: cpu"
101+
"DiffModule - INPUT0 device: cuda:3, INPUT1 device: cuda:3")
102+
for MESSAGE in "${MESSAGES[@]}"; do
103+
if grep -q "$MESSAGE" "$SERVER_LOG"; then
104+
echo -e "Found \"$MESSAGE\"" >> "$CLIENT_LOG"
105+
else
106+
echo -e "Not found \"$MESSAGE\"" >> "$CLIENT_LOG"
107+
RET=1
108+
fi
109+
done
107110

108-
MESSAGE="INPUT0 device: cuda:2, INPUT1 device: cuda:0"
109111
export MODEL_NAME='libtorch_multi_gpu'
110112
python3 $CLIENT_PY >> $CLIENT_LOG 2>&1
111113
if [ $? -ne 0 ]; then
@@ -121,12 +123,16 @@ else
121123
fi
122124
fi
123125

124-
if grep "$MESSAGE" $SERVER_LOG; then
125-
echo -e "Found \"$MESSAGE\"" >> $CLIENT_LOG
126-
else
127-
echo -e "Not found \"$MESSAGE\"" >> $CLIENT_LOG
128-
RET=1
129-
fi
126+
MESSAGES=("SumModule - INPUT0 device: cuda:2, INPUT1 device: cuda:2"
127+
"DiffModule - INPUT0 device: cuda:0, INPUT1 device: cuda:0")
128+
for MESSAGE in "${MESSAGES[@]}"; do
129+
if grep -q "$MESSAGE" "$SERVER_LOG"; then
130+
echo -e "Found \"$MESSAGE\"" >> "$CLIENT_LOG"
131+
else
132+
echo -e "Not found \"$MESSAGE\"" >> "$CLIENT_LOG"
133+
RET=1
134+
fi
135+
done
130136

131137
set -e
132138

0 commit comments

Comments (0)