
[NNPA] Update configurations for test cases of backend test for dynamic dimensions #2831


Merged (10 commits) on Jun 12, 2024
@@ -103,7 +103,8 @@ bool checkLegalityPoolOpsCommon(POOLOP op, Value Y) {
// Check "MaxPool2D/AvgPool2D Parameter Restrictions". These restrictions are
// described in "zDNN API Reference". Input tensor N(batchNum) and C(Channel)
// dimensions must always match the output tensor's respective dimensions.
if (shapeInput[0] != shapeOutput[0] || shapeInput[1] != shapeOutput[1])
if ((inputType.hasStaticShape() && outputType.hasStaticShape()) &&
(shapeInput[0] != shapeOutput[0] || shapeInput[1] != shapeOutput[1]))
Collaborator:
Can we use DimAnalysis to check dynamic dims here?

Collaborator (Author):
Done. Thanks!

return false;

// Check if kernelShape is literal. Only static value is supported.
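
The DimAnalysis change agreed on above ("Done. Thanks!") replaces the purely static guard with a query to onnx-mlir's dimension analysis. A minimal sketch of that idea, assuming a sameDim(value1, dim1, value2, dim2) style query that reports whether two dimensions are provably equal; the names below are illustrative, not the exact code that was merged:

  // Sketch only: `dimAnalysis` is assumed to be a DimAnalysis-style object
  // whose sameDim(a, i, b, j) returns true when dim i of `a` and dim j of `b`
  // are known to be equal, even if one or both are dynamic at compile time.
  Value X = op->getOperand(0); // pooling input, N x C x H x W
  // N (batch) and C (channel) must match between input X and output Y.
  if (!dimAnalysis->sameDim(X, 0, Y, 0) || !dimAnalysis->sameDim(X, 1, Y, 1))
    return false;
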
60 changes: 30 additions & 30 deletions test/accelerators/NNPA/backend/CMakeLists.txt
@@ -118,32 +118,32 @@ set(NNPA_TEST_LIST
# ==LIM== - `auto_pad` must be `NOTSET`, `VALID`, and `SAME_UPPER`. If `NOTSET` is used, `pads` must be set so that the padding valid type or same upper.<br>- `ceil_mode` must be default value(0) <br>- Input and output tensors must be 4D tensors (N x C x H x W).<br>- `kernel_shape` must be static.<br>- `count_include_pad` must be default value(0).<br>- `ceil_mode` must be default value(0).
# test_averagepool_1d_default_cpu
# test_averagepool_2d_ceil_cpu
test_averagepool_2d_default_cpu,zdnn_avgpool2d,NO_DYNAMIC_SHAPE_TEST
test_averagepool_2d_default_cpu,zdnn_avgpool2d
# test_averagepool_2d_pads_count_include_pad_cpu
# test_averagepool_2d_pads_cpu
# test_averagepool_2d_precomputed_pads_count_include_pad_cpu
test_averagepool_2d_precomputed_pads_cpu,zdnn_avgpool2d,NO_DYNAMIC_SHAPE_TEST
test_averagepool_2d_precomputed_same_upper_cpu,zdnn_avgpool2d,NO_DYNAMIC_SHAPE_TEST
test_averagepool_2d_precomputed_strides_cpu,zdnn_avgpool2d,NO_DYNAMIC_SHAPE_TEST
test_averagepool_2d_precomputed_pads_cpu,zdnn_avgpool2d,NO_DYNAMIC_SHAPE_TEST # Padding type is not `SAME_PADDING` or `VALID_PADDING`
test_averagepool_2d_precomputed_same_upper_cpu,zdnn_avgpool2d
test_averagepool_2d_precomputed_strides_cpu,zdnn_avgpool2d
# test_averagepool_2d_same_lower_cpu
test_averagepool_2d_same_upper_cpu,zdnn_avgpool2d,NO_DYNAMIC_SHAPE_TEST
test_averagepool_2d_strides_cpu,zdnn_avgpool2d,NO_DYNAMIC_SHAPE_TEST
test_averagepool_2d_same_upper_cpu,zdnn_avgpool2d
test_averagepool_2d_strides_cpu,zdnn_avgpool2d
# test_averagepool_3d_default_cpu

# ==OP== BatchNormalization
# ==MIN== 6
# ==LIM== Input and output tensor must be 4D(N x C x H x W).
test_batchnorm_epsilon_cpu,zdnn_mul_ext,NO_DYNAMIC_SHAPE_TEST
test_batchnorm_example_cpu,zdnn_mul_ext,NO_DYNAMIC_SHAPE_TEST
test_batchnorm_epsilon_cpu,zdnn_mul_ext,"0:0=a,1=b,2=c,3=d|1:0=b|2:0=b|3:0=b|4:0=b"
test_batchnorm_example_cpu,zdnn_mul_ext,"0:0=a,1=b,2=c,3=d|1:0=b|2:0=b|3:0=b|4:0=b"

# ==OP== Conv
# ==MIN== 1
# ==LIM== - `auto_pad` must be `NOTSET`, `VALID`, and `SAME_UPPER`. If `NOTSET` is used, `pads` must be set so that the padding valid type or same upper.<br>- Dimension in Height and weight must be static.<br>- `group` must be default value(1).<br>- `dilations` must be default value(1).<br>- Input and output tensors must have 4D (N x C x H x W).<br>- `kernel_shape` must be static.
test_basic_conv_with_padding_cpu,zdnn_conv2d,NO_DYNAMIC_SHAPE_TEST
test_basic_conv_without_padding_cpu,zdnn_conv2d,NO_DYNAMIC_SHAPE_TEST
test_basic_conv_with_padding_cpu,zdnn_conv2d,NO_DYNAMIC_SHAPE_TEST # Height and/or width must be static dimensions.
test_basic_conv_without_padding_cpu,zdnn_conv2d,NO_DYNAMIC_SHAPE_TEST # Height and/or width must be static dimensions.
Collaborator:
Comments here look the same as ==LIM==, are they necessary?

Collaborator (Author):
Removed. Thanks.

# test_conv_with_autopad_same_cpu
test_conv_with_strides_no_padding_cpu,zdnn_conv2d,NO_DYNAMIC_SHAPE_TEST
test_conv_with_strides_padding_cpu,zdnn_conv2d,NO_DYNAMIC_SHAPE_TEST
test_conv_with_strides_no_padding_cpu,zdnn_conv2d,NO_DYNAMIC_SHAPE_TEST # Height and/or width must be static dimensions.
test_conv_with_strides_padding_cpu,zdnn_conv2d,NO_DYNAMIC_SHAPE_TEST # Height and/or width must be static dimensions.
# test_conv_with_strides_and_asymmetric_padding_cpu

# ==OP== ConvTranspose
@@ -152,13 +152,13 @@ set(NNPA_TEST_LIST
# Disable backend tests for ConvTranspose, because they do not work on CPU.
# test_convtranspose_1d_cpu,zdnn_conv1d
# test_convtranspose_3d_cpu
test_convtranspose_autopad_same_cpu,zdnn_conv2d,NO_DYNAMIC_SHAPE_TEST # not work on CPU
test_convtranspose_cpu,zdnn_conv2d,NO_DYNAMIC_SHAPE_TEST # not work on CPU
test_convtranspose_autopad_same_cpu,zdnn_conv2d,NO_DYNAMIC_SHAPE_TEST # Spatial dims must be static even on CPU.
test_convtranspose_cpu,zdnn_conv2d,NO_DYNAMIC_SHAPE_TEST # Spatial dims must be static even on CPU.
# test_convtranspose_dilations_cpu,zdnn_conv2d
test_convtranspose_kernel_shape_cpu,zdnn_conv2d,NO_DYNAMIC_SHAPE_TEST # not work on CPU
test_convtranspose_output_shape_cpu,zdnn_conv2d,NO_DYNAMIC_SHAPE_TEST # not work on CPU
test_convtranspose_pad_cpu,zdnn_conv2d,NO_DYNAMIC_SHAPE_TEST # not work on CPU
test_convtranspose_pads_cpu,zdnn_conv2d,NO_DYNAMIC_SHAPE_TEST # not work on CPU
test_convtranspose_kernel_shape_cpu,zdnn_conv2d,NO_DYNAMIC_SHAPE_TEST # Spatial dims must be static even on CPU.
test_convtranspose_output_shape_cpu,zdnn_conv2d,NO_DYNAMIC_SHAPE_TEST # Spatial dims must be static even on CPU.
test_convtranspose_pad_cpu,zdnn_conv2d,NO_DYNAMIC_SHAPE_TEST # Spatial dims must be static even on CPU.
test_convtranspose_pads_cpu,zdnn_conv2d,NO_DYNAMIC_SHAPE_TEST # Spatial dims must be static even on CPU.

# ==OP== Div
# ==MIN== 6
@@ -191,8 +191,8 @@ set(NNPA_TEST_LIST
# ==OP== GlobalAveragePool
# ==MIN== 1
# ==LIM== - Input shape must be 4D tensor(NCHW).<br>- Dimensions in `H` and `W` must be static.
test_globalaveragepool_cpu,zdnn_meanreduce2d,NO_DYNAMIC_SHAPE_TEST
test_globalaveragepool_precomputed_cpu,zdnn_meanreduce2d,NO_DYNAMIC_SHAPE_TEST
test_globalaveragepool_cpu,zdnn_meanreduce2d,NO_DYNAMIC_SHAPE_TEST # Height and Width dimension must be static dimension.
test_globalaveragepool_precomputed_cpu,zdnn_meanreduce2d,NO_DYNAMIC_SHAPE_TEST # Height and Width dimension must be static dimension.
Collaborator:
Comments here look the same as ==LIM==, are they necessary?

Collaborator (Author):
Removed. Thanks.

# GlobalMaxPool
# test_globalmaxpool_cpu
# test_globalmaxpool_precomputed_cpu
@@ -222,7 +222,7 @@ set(NNPA_TEST_LIST
# test_logsoftmax_axis_0_cpu
# test_logsoftmax_axis_1_cpu
# test_logsoftmax_axis_2_cpu,zdnn_log_ext
test_logsoftmax_example_1_cpu,zdnn_softmax_ext,NO_DYNAMIC_SHAPE_TEST
test_logsoftmax_example_1_cpu,zdnn_softmax_ext
# test_logsoftmax_default_axis_cpu
# test_logsoftmax_negative_axis_cpu,zdnn_log_ext
# test_logsoftmax_large_number_cpu # accuracy error in test_logsoftmax_large_number_cpu
@@ -248,7 +248,7 @@ set(NNPA_TEST_LIST
#test_max_one_input_cpu
test_max_two_inputs_cpu,zdnn_max_ext,"0:0=a|1:0=a"
# test_max_float16_cpu
test_max_float32_cpu,zdnn_max_ext,NO_DYNAMIC_SHAPE_TEST
test_max_float32_cpu,zdnn_max_ext,"0:0=a|1:0=a"
# test_max_float64_cpu
# test_max_int8_cpu
# test_max_int16_cpu
@@ -264,15 +264,15 @@ set(NNPA_TEST_LIST
# ==LIM== - `auto_pad` must be `NOTSET`, `VALID`, and `SAME_UPPER`. If `NOTSET` is used, `pads` must be set so that the padding valid type or same upper.<br>- `ceil_mode` must be default value(0) <br>- Input and output tensors must be 4D tensors(N x C x H x W).<br>- `kernel_shape` must be static.<br>- `ceil_mode` must be default value(0).<br>- `dilations` must be default value(1).
# test_maxpool_1d_default_cpu
# test_maxpool_2d_ceil_cpu
test_maxpool_2d_default_cpu,zdnn_avgpool2d,NO_DYNAMIC_SHAPE_TEST
test_maxpool_2d_default_cpu,zdnn_maxpool2d
# test_maxpool_2d_dilations_cpu
# test_maxpool_2d_pads_cpu
# test_maxpool_2d_precomputed_pads_cpu
test_maxpool_2d_precomputed_same_upper_cpu,zdnn_avgpool2d,NO_DYNAMIC_SHAPE_TEST
test_maxpool_2d_precomputed_strides_cpu,zdnn_avgpool2d,NO_DYNAMIC_SHAPE_TEST
test_maxpool_2d_precomputed_pads_cpu,zdnn_maxpool2d,NO_DYNAMIC_SHAPE_TEST
test_maxpool_2d_precomputed_same_upper_cpu,zdnn_maxpool2d
test_maxpool_2d_precomputed_strides_cpu,zdnn_maxpool2d
# test_maxpool_2d_same_lower_cpu
test_maxpool_2d_same_upper_cpu,zdnn_avgpool2d,NO_DYNAMIC_SHAPE_TEST
test_maxpool_2d_strides_cpu,zdnn_avgpool2d,NO_DYNAMIC_SHAPE_TEST
test_maxpool_2d_same_upper_cpu,zdnn_maxpool2d
test_maxpool_2d_strides_cpu,zdnn_maxpool2d
# test_maxpool_3d_default_cpu

# ==OP== Min
@@ -282,7 +282,7 @@ set(NNPA_TEST_LIST
# test_min_one_input_cpu
test_min_two_inputs_cpu,zdnn_min_ext,"0:0=a|1:0=a"
# test_min_float16_cpu
test_min_float32_cpu,zdnn_min_ext,NO_DYNAMIC_SHAPE_TEST
test_min_float32_cpu,zdnn_min_ext,"0:0=a|1:0=a"
# test_min_float64_cpu
# test_min_int8_cpu
# test_min_int16_cpu
@@ -329,7 +329,7 @@ set(NNPA_TEST_LIST
# test_softmax_axis_1_cpu
# test_softmax_axis_2_cpu
# test_softmax_default_axis_cpu
test_softmax_example_cpu,zdnn_softmax_ext,NO_DYNAMIC_SHAPE_TEST
test_softmax_example_cpu,zdnn_softmax_ext
# test_softmax_large_number_cpu # accuracy error

# ==OP== Sub
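
A note on the third field in the entries above: the PR replaces many NO_DYNAMIC_SHAPE_TEST markers with strings such as "0:0=a,1=b,2=c,3=d|1:0=b|2:0=b|3:0=b|4:0=b". These appear to select which dimensions are made dynamic in the dynamic-shape run: groups separated by '|' start with an input index, each <dim>=<letter> entry marks that dimension of that input as dynamic, and entries sharing a letter are expected to remain equal at runtime (for example, "0:0=a|1:0=a" for the two-input Max and Min tests keeps dim 0 of both inputs the same, which the DimAnalysis-based legality checks rely on). The following standalone C++ helper is purely illustrative and not part of the repository; it only shows how such a spec string decomposes into (input, dim, symbol) triples:

  // Hypothetical helper, not onnx-mlir code: parses a dynamic-dimension spec
  // such as "0:0=a,1=b,2=c,3=d|1:0=b" into (input, dim, symbol) triples.
  #include <iostream>
  #include <sstream>
  #include <string>
  #include <vector>

  struct DynDim { int input; int dim; char symbol; };

  std::vector<DynDim> parseSpec(const std::string &spec) {
    std::vector<DynDim> result;
    std::stringstream perInput(spec);
    std::string group;
    while (std::getline(perInput, group, '|')) {       // e.g. "0:0=a,1=b,2=c,3=d"
      auto colon = group.find(':');
      int input = std::stoi(group.substr(0, colon));
      std::stringstream dims(group.substr(colon + 1)); // e.g. "0=a,1=b,2=c,3=d"
      std::string entry;
      while (std::getline(dims, entry, ',')) {         // e.g. "0=a"
        auto eq = entry.find('=');
        result.push_back({input, std::stoi(entry.substr(0, eq)), entry[eq + 1]});
      }
    }
    return result;
  }

  int main() {
    for (const DynDim &d : parseSpec("0:0=a,1=b,2=c,3=d|1:0=b"))
      std::cout << "input " << d.input << " dim " << d.dim
                << " -> symbol " << d.symbol << "\n";
    return 0;
  }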