From d10f85b5131c9e3eb75277cdbb95b90fcc20dec4 Mon Sep 17 00:00:00 2001 From: Iman Tabrizian Date: Wed, 20 Sep 2023 17:21:31 -0400 Subject: [PATCH 1/7] Refactor model generation scripts --- qa/common/gen_common.py | 144 ++++++++++++++++++ qa/common/gen_ensemble_model_utils.py | 28 +--- .../gen_qa_dyna_sequence_implicit_models.py | 67 +------- qa/common/gen_qa_dyna_sequence_models.py | 125 +-------------- qa/common/gen_qa_identity_models.py | 124 +-------------- qa/common/gen_qa_implicit_models.py | 98 +----------- qa/common/gen_qa_models.py | 127 +-------------- qa/common/gen_qa_noshape_models.py | 54 +------ qa/common/gen_qa_ragged_models.py | 99 +----------- qa/common/gen_qa_reshape_models.py | 126 +-------------- qa/common/gen_qa_sequence_models.py | 125 +-------------- qa/common/gen_qa_trt_data_dependent_shape.py | 40 +---- qa/common/gen_qa_trt_format_models.py | 42 +---- qa/common/gen_qa_trt_plugin_models.py | 42 +---- 14 files changed, 208 insertions(+), 1033 deletions(-) create mode 100644 qa/common/gen_common.py diff --git a/qa/common/gen_common.py b/qa/common/gen_common.py new file mode 100644 index 0000000000..9ef41a3a19 --- /dev/null +++ b/qa/common/gen_common.py @@ -0,0 +1,144 @@ +# Copyright 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of NVIDIA CORPORATION nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR +# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY +# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +# Common utilities for model generation scripts + + +def np_to_onnx_dtype(np_dtype): + if np_dtype == bool: + return onnx.TensorProto.BOOL + elif np_dtype == np.int8: + return onnx.TensorProto.INT8 + elif np_dtype == np.int16: + return onnx.TensorProto.INT16 + elif np_dtype == np.int32: + return onnx.TensorProto.INT32 + elif np_dtype == np.int64: + return onnx.TensorProto.INT64 + elif np_dtype == np.uint8: + return onnx.TensorProto.UINT8 + elif np_dtype == np.uint16: + return onnx.TensorProto.UINT16 + elif np_dtype == np.float16: + return onnx.TensorProto.FLOAT16 + elif np_dtype == np.float32: + return onnx.TensorProto.FLOAT + elif np_dtype == np.float64: + return onnx.TensorProto.DOUBLE + elif np_dtype == np_dtype_string: + return onnx.TensorProto.STRING + + +def np_to_model_dtype(np_dtype): + if np_dtype == bool: + return "TYPE_BOOL" + elif np_dtype == np.int8: + return "TYPE_INT8" + elif np_dtype == np.int16: + return "TYPE_INT16" + elif np_dtype == np.int32: + return "TYPE_INT32" + elif np_dtype == np.int64: + return "TYPE_INT64" + elif np_dtype == np.uint8: + return "TYPE_UINT8" + elif np_dtype == np.uint16: + return "TYPE_UINT16" + elif np_dtype == np.float16: + return "TYPE_FP16" + elif np_dtype == np.float32: + return "TYPE_FP32" + elif np_dtype == np.float64: + return "TYPE_FP64" + elif np_dtype == np_dtype_string: + return "TYPE_STRING" + return None + + +def np_to_trt_dtype(np_dtype): + if np_dtype == bool: + return trt.bool + elif np_dtype == np.int8: + return trt.int8 + elif np_dtype == np.int32: + return trt.int32 + elif np_dtype == np.float16: + return trt.float16 + elif np_dtype == np.float32: + return trt.float32 + return None + + +def np_to_tf_dtype(np_dtype): + if np_dtype == bool: + return tf.bool + elif np_dtype == np.int8: + return tf.int8 + elif np_dtype == np.int16: + return tf.int16 + elif np_dtype == np.int32: + return tf.int32 + elif np_dtype == np.int64: + return tf.int64 + elif np_dtype == np.uint8: + return tf.uint8 + elif np_dtype == np.uint16: + return tf.uint16 + elif np_dtype == np.float16: + return tf.float16 + elif np_dtype == np.float32: + return tf.float32 + elif np_dtype == np.float64: + return tf.float64 + elif np_dtype == np_dtype_string: + return tf.string + return None + + +def np_to_torch_dtype(np_dtype): + if np_dtype == bool: + return torch.bool + elif np_dtype == np.int8: + return torch.int8 + elif np_dtype == np.int16: + return torch.int16 + elif np_dtype == np.int32: + return torch.int + elif np_dtype == np.int64: + return torch.long + elif np_dtype == np.uint8: + return torch.uint8 + elif np_dtype == np.uint16: + return None # Not supported in Torch + elif np_dtype == np.float16: + return None + elif np_dtype == np.float32: + return torch.float + elif np_dtype == np.float64: + return torch.double + elif np_dtype == np_dtype_string: + return None # Not supported in Torch + return None diff --git a/qa/common/gen_ensemble_model_utils.py b/qa/common/gen_ensemble_model_utils.py index ceac0340dc..6528b51464 100755 --- a/qa/common/gen_ensemble_model_utils.py +++ b/qa/common/gen_ensemble_model_utils.py @@ -31,37 +31,13 @@ import numpy as np import test_util as tu +from .gen_common import np_to_model_dtype + BASIC_ENSEMBLE_TYPES = ["simple", "sequence", "fan"] np_dtype_string = np.dtype(object) -def np_to_model_dtype(np_dtype): - if np_dtype == bool: - return "TYPE_BOOL" - elif np_dtype == np.int8: - return "TYPE_INT8" - elif np_dtype == np.int16: - return "TYPE_INT16" - elif np_dtype == np.int32: - return "TYPE_INT32" - elif np_dtype == 
np.int64: - return "TYPE_INT64" - elif np_dtype == np.uint8: - return "TYPE_UINT8" - elif np_dtype == np.uint16: - return "TYPE_UINT16" - elif np_dtype == np.float16: - return "TYPE_FP16" - elif np_dtype == np.float32: - return "TYPE_FP32" - elif np_dtype == np.float64: - return "TYPE_FP64" - elif np_dtype == np_dtype_string: - return "TYPE_STRING" - return None - - def fixed_to_variable_size(shape): return [-1] * len(shape) diff --git a/qa/common/gen_qa_dyna_sequence_implicit_models.py b/qa/common/gen_qa_dyna_sequence_implicit_models.py index 1c4815d5dd..b8cb4e59b3 100755 --- a/qa/common/gen_qa_dyna_sequence_implicit_models.py +++ b/qa/common/gen_qa_dyna_sequence_implicit_models.py @@ -31,75 +31,12 @@ import numpy as np +from .gen_common import np_to_model_dtype, np_to_onnx_dtype, np_to_trt_dtype + FLAGS = None np_dtype_string = np.dtype(object) -def np_to_onnx_dtype(np_dtype): - if np_dtype == bool: - return onnx.TensorProto.BOOL - elif np_dtype == np.int8: - return onnx.TensorProto.INT8 - elif np_dtype == np.int16: - return onnx.TensorProto.INT16 - elif np_dtype == np.int32: - return onnx.TensorProto.INT32 - elif np_dtype == np.int64: - return onnx.TensorProto.INT64 - elif np_dtype == np.uint8: - return onnx.TensorProto.UINT8 - elif np_dtype == np.uint16: - return onnx.TensorProto.UINT16 - elif np_dtype == np.float16: - return onnx.TensorProto.FLOAT16 - elif np_dtype == np.float32: - return onnx.TensorProto.FLOAT - elif np_dtype == np.float64: - return onnx.TensorProto.DOUBLE - elif np_dtype == np_dtype_string: - return onnx.TensorProto.STRING - - -def np_to_model_dtype(np_dtype): - if np_dtype == bool: - return "TYPE_BOOL" - elif np_dtype == np.int8: - return "TYPE_INT8" - elif np_dtype == np.int16: - return "TYPE_INT16" - elif np_dtype == np.int32: - return "TYPE_INT32" - elif np_dtype == np.int64: - return "TYPE_INT64" - elif np_dtype == np.uint8: - return "TYPE_UINT8" - elif np_dtype == np.uint16: - return "TYPE_UINT16" - elif np_dtype == np.float16: - return "TYPE_FP16" - elif np_dtype == np.float32: - return "TYPE_FP32" - elif np_dtype == np.float64: - return "TYPE_FP64" - elif np_dtype == np_dtype_string: - return "TYPE_STRING" - return None - - -def np_to_trt_dtype(np_dtype): - if np_dtype == bool: - return trt.bool - elif np_dtype == np.int8: - return trt.int8 - elif np_dtype == np.int32: - return trt.int32 - elif np_dtype == np.float16: - return trt.float16 - elif np_dtype == np.float32: - return trt.float32 - return None - - def create_onnx_modelfile(models_dir, model_version, max_batch, dtype, shape): if not tu.validate_for_onnx_model(dtype, dtype, dtype, shape, shape, shape): return diff --git a/qa/common/gen_qa_dyna_sequence_models.py b/qa/common/gen_qa_dyna_sequence_models.py index 8a02497bfa..27aef7d861 100755 --- a/qa/common/gen_qa_dyna_sequence_models.py +++ b/qa/common/gen_qa_dyna_sequence_models.py @@ -31,127 +31,18 @@ import numpy as np +from .gen_common import ( + np_to_model_dtype, + np_to_onnx_dtype, + np_to_tf_dtype, + np_to_torch_dtype, + np_to_trt_dtype, +) + FLAGS = None np_dtype_string = np.dtype(object) -def np_to_model_dtype(np_dtype): - if np_dtype == bool: - return "TYPE_BOOL" - elif np_dtype == np.int8: - return "TYPE_INT8" - elif np_dtype == np.int16: - return "TYPE_INT16" - elif np_dtype == np.int32: - return "TYPE_INT32" - elif np_dtype == np.int64: - return "TYPE_INT64" - elif np_dtype == np.uint8: - return "TYPE_UINT8" - elif np_dtype == np.uint16: - return "TYPE_UINT16" - elif np_dtype == np.float16: - return "TYPE_FP16" - elif np_dtype == 
np.float32: - return "TYPE_FP32" - elif np_dtype == np.float64: - return "TYPE_FP64" - elif np_dtype == np_dtype_string: - return "TYPE_STRING" - return None - - -def np_to_tf_dtype(np_dtype): - if np_dtype == bool: - return tf.bool - elif np_dtype == np.int8: - return tf.int8 - elif np_dtype == np.int16: - return tf.int16 - elif np_dtype == np.int32: - return tf.int32 - elif np_dtype == np.int64: - return tf.int64 - elif np_dtype == np.uint8: - return tf.uint8 - elif np_dtype == np.uint16: - return tf.uint16 - elif np_dtype == np.float16: - return tf.float16 - elif np_dtype == np.float32: - return tf.float32 - elif np_dtype == np.float64: - return tf.float64 - elif np_dtype == np_dtype_string: - return tf.string - return None - - -def np_to_trt_dtype(np_dtype): - if np_dtype == bool: - return trt.bool - elif np_dtype == np.int8: - return trt.int8 - elif np_dtype == np.int32: - return trt.int32 - elif np_dtype == np.float16: - return trt.float16 - elif np_dtype == np.float32: - return trt.float32 - return None - - -def np_to_onnx_dtype(np_dtype): - if np_dtype == bool: - return onnx.TensorProto.BOOL - elif np_dtype == np.int8: - return onnx.TensorProto.INT8 - elif np_dtype == np.int16: - return onnx.TensorProto.INT16 - elif np_dtype == np.int32: - return onnx.TensorProto.INT32 - elif np_dtype == np.int64: - return onnx.TensorProto.INT64 - elif np_dtype == np.uint8: - return onnx.TensorProto.UINT8 - elif np_dtype == np.uint16: - return onnx.TensorProto.UINT16 - elif np_dtype == np.float16: - return onnx.TensorProto.FLOAT16 - elif np_dtype == np.float32: - return onnx.TensorProto.FLOAT - elif np_dtype == np.float64: - return onnx.TensorProto.DOUBLE - elif np_dtype == np_dtype_string: - return onnx.TensorProto.STRING - - -def np_to_torch_dtype(np_dtype): - if np_dtype == bool: - return torch.bool - elif np_dtype == np.int8: - return torch.int8 - elif np_dtype == np.int16: - return torch.int16 - elif np_dtype == np.int32: - return torch.int - elif np_dtype == np.int64: - return torch.long - elif np_dtype == np.uint8: - return torch.uint8 - elif np_dtype == np.uint16: - return None # Not supported in Torch - elif np_dtype == np.float16: - return None - elif np_dtype == np.float32: - return torch.float - elif np_dtype == np.float64: - return torch.double - elif np_dtype == np_dtype_string: - return None # Not supported in Torch - return None - - def create_tf_modelfile( create_savedmodel, models_dir, model_version, max_batch, dtype, shape ): diff --git a/qa/common/gen_qa_identity_models.py b/qa/common/gen_qa_identity_models.py index 0aaac8d9e4..f864ab8b10 100755 --- a/qa/common/gen_qa_identity_models.py +++ b/qa/common/gen_qa_identity_models.py @@ -33,128 +33,18 @@ import gen_ensemble_model_utils as emu import numpy as np +from .gen_common import ( + np_to_model_dtype, + np_to_onnx_dtype, + np_to_tf_dtype, + np_to_trt_dtype, +) + FLAGS = None np_dtype_string = np.dtype(object) from typing import List, Tuple -def np_to_model_dtype(np_dtype): - if np_dtype == bool: - return "TYPE_BOOL" - elif np_dtype == np.int8: - return "TYPE_INT8" - elif np_dtype == np.int16: - return "TYPE_INT16" - elif np_dtype == np.int32: - return "TYPE_INT32" - elif np_dtype == np.int64: - return "TYPE_INT64" - elif np_dtype == np.uint8: - return "TYPE_UINT8" - elif np_dtype == np.uint16: - return "TYPE_UINT16" - elif np_dtype == np.float16: - return "TYPE_FP16" - elif np_dtype == np.float32: - return "TYPE_FP32" - elif np_dtype == np.float64: - return "TYPE_FP64" - elif np_dtype == np_dtype_string: - return "TYPE_STRING" 
- return None - - -def np_to_tf_dtype(np_dtype): - if np_dtype == bool: - return tf.bool - elif np_dtype == np.int8: - return tf.int8 - elif np_dtype == np.int16: - return tf.int16 - elif np_dtype == np.int32: - return tf.int32 - elif np_dtype == np.int64: - return tf.int64 - elif np_dtype == np.uint8: - return tf.uint8 - elif np_dtype == np.uint16: - return tf.uint16 - elif np_dtype == np.float16: - return tf.float16 - elif np_dtype == np.float32: - return tf.float32 - elif np_dtype == np.float64: - return tf.float64 - elif np_dtype == np_dtype_string: - return tf.string - return None - - -def np_to_trt_dtype(np_dtype): - if np_dtype == bool: - return trt.bool - elif np_dtype == np.int8: - return trt.int8 - elif np_dtype == np.int32: - return trt.int32 - elif np_dtype == np.float16: - return trt.float16 - elif np_dtype == np.float32: - return trt.float32 - return None - - -def np_to_onnx_dtype(np_dtype): - if np_dtype == bool: - return onnx.TensorProto.BOOL - elif np_dtype == np.int8: - return onnx.TensorProto.INT8 - elif np_dtype == np.int16: - return onnx.TensorProto.INT16 - elif np_dtype == np.int32: - return onnx.TensorProto.INT32 - elif np_dtype == np.int64: - return onnx.TensorProto.INT64 - elif np_dtype == np.uint8: - return onnx.TensorProto.UINT8 - elif np_dtype == np.uint16: - return onnx.TensorProto.UINT16 - elif np_dtype == np.float16: - return onnx.TensorProto.FLOAT16 - elif np_dtype == np.float32: - return onnx.TensorProto.FLOAT - elif np_dtype == np.float64: - return onnx.TensorProto.DOUBLE - elif np_dtype == np_dtype_string: - return onnx.TensorProto.STRING - return None - - -def np_to_torch_dtype(np_dtype): - if np_dtype == bool: - return torch.bool - elif np_dtype == np.int8: - return torch.int8 - elif np_dtype == np.int16: - return torch.int16 - elif np_dtype == np.int32: - return torch.int - elif np_dtype == np.int64: - return torch.long - elif np_dtype == np.uint8: - return torch.uint8 - elif np_dtype == np.uint16: - return None # Not supported in Torch - elif np_dtype == np.float16: - return None - elif np_dtype == np.float32: - return torch.float - elif np_dtype == np.float64: - return torch.double - elif np_dtype == np_dtype_string: - return List[str] - - def create_tf_modelfile( create_savedmodel, models_dir, model_version, io_cnt, max_batch, dtype, shape ): diff --git a/qa/common/gen_qa_implicit_models.py b/qa/common/gen_qa_implicit_models.py index 84ca98c47c..3e17df23c8 100755 --- a/qa/common/gen_qa_implicit_models.py +++ b/qa/common/gen_qa_implicit_models.py @@ -33,101 +33,17 @@ import gen_ensemble_model_utils as emu import numpy as np +from .gen_common import ( + np_to_model_dtype, + np_to_onnx_dtype, + np_to_torch_dtype, + np_to_trt_dtype, +) + FLAGS = None np_dtype_string = np.dtype(object) -def np_to_model_dtype(np_dtype): - if np_dtype == bool: - return "TYPE_BOOL" - elif np_dtype == np.int8: - return "TYPE_INT8" - elif np_dtype == np.int16: - return "TYPE_INT16" - elif np_dtype == np.int32: - return "TYPE_INT32" - elif np_dtype == np.int64: - return "TYPE_INT64" - elif np_dtype == np.uint8: - return "TYPE_UINT8" - elif np_dtype == np.uint16: - return "TYPE_UINT16" - elif np_dtype == np.float16: - return "TYPE_FP16" - elif np_dtype == np.float32: - return "TYPE_FP32" - elif np_dtype == np.float64: - return "TYPE_FP64" - elif np_dtype == np_dtype_string: - return "TYPE_STRING" - return None - - -def np_to_onnx_dtype(np_dtype): - if np_dtype == bool: - return onnx.TensorProto.BOOL - elif np_dtype == np.int8: - return onnx.TensorProto.INT8 - elif np_dtype == 
np.int16: - return onnx.TensorProto.INT16 - elif np_dtype == np.int32: - return onnx.TensorProto.INT32 - elif np_dtype == np.int64: - return onnx.TensorProto.INT64 - elif np_dtype == np.uint8: - return onnx.TensorProto.UINT8 - elif np_dtype == np.uint16: - return onnx.TensorProto.UINT16 - elif np_dtype == np.float16: - return onnx.TensorProto.FLOAT16 - elif np_dtype == np.float32: - return onnx.TensorProto.FLOAT - elif np_dtype == np.float64: - return onnx.TensorProto.DOUBLE - elif np_dtype == np_dtype_string: - return onnx.TensorProto.STRING - - -def np_to_trt_dtype(np_dtype): - if np_dtype == bool: - return trt.bool - elif np_dtype == np.int8: - return trt.int8 - elif np_dtype == np.int32: - return trt.int32 - elif np_dtype == np.float16: - return trt.float16 - elif np_dtype == np.float32: - return trt.float32 - return None - - -def np_to_torch_dtype(np_dtype): - if np_dtype == bool: - return torch.bool - elif np_dtype == np.int8: - return torch.int8 - elif np_dtype == np.int16: - return torch.int16 - elif np_dtype == np.int32: - return torch.int - elif np_dtype == np.int64: - return torch.long - elif np_dtype == np.uint8: - return torch.uint8 - elif np_dtype == np.uint16: - return None # Not supported in Torch - elif np_dtype == np.float16: - return None - elif np_dtype == np.float32: - return torch.float - elif np_dtype == np.float64: - return torch.double - elif np_dtype == np_dtype_string: - return List[str] - return None - - def create_onnx_modelfile_wo_initial_state( models_dir, model_version, max_batch, dtype, shape ): diff --git a/qa/common/gen_qa_models.py b/qa/common/gen_qa_models.py index f050861a62..961185fa37 100755 --- a/qa/common/gen_qa_models.py +++ b/qa/common/gen_qa_models.py @@ -33,130 +33,19 @@ import gen_ensemble_model_utils as emu import numpy as np +from .gen_common import ( + np_to_model_dtype, + np_to_onnx_dtype, + np_to_tf_dtype, + np_to_torch_dtype, + np_to_trt_dtype, +) + FLAGS = None np_dtype_string = np.dtype(object) from typing import List, Tuple -def np_to_model_dtype(np_dtype): - if np_dtype == bool: - return "TYPE_BOOL" - elif np_dtype == np.int8: - return "TYPE_INT8" - elif np_dtype == np.int16: - return "TYPE_INT16" - elif np_dtype == np.int32: - return "TYPE_INT32" - elif np_dtype == np.int64: - return "TYPE_INT64" - elif np_dtype == np.uint8: - return "TYPE_UINT8" - elif np_dtype == np.uint16: - return "TYPE_UINT16" - elif np_dtype == np.float16: - return "TYPE_FP16" - elif np_dtype == np.float32: - return "TYPE_FP32" - elif np_dtype == np.float64: - return "TYPE_FP64" - elif np_dtype == np_dtype_string: - return "TYPE_STRING" - return None - - -def np_to_tf_dtype(np_dtype): - if np_dtype == bool: - return tf.bool - elif np_dtype == np.int8: - return tf.int8 - elif np_dtype == np.int16: - return tf.int16 - elif np_dtype == np.int32: - return tf.int32 - elif np_dtype == np.int64: - return tf.int64 - elif np_dtype == np.uint8: - return tf.uint8 - elif np_dtype == np.uint16: - return tf.uint16 - elif np_dtype == np.float16: - return tf.float16 - elif np_dtype == np.float32: - return tf.float32 - elif np_dtype == np.float64: - return tf.float64 - elif np_dtype == np_dtype_string: - return tf.string - return None - - -def np_to_trt_dtype(np_dtype): - if np_dtype == bool: - return trt.bool - elif np_dtype == np.int8: - return trt.int8 - elif np_dtype == np.int32: - return trt.int32 - elif np_dtype == np.uint8: - return trt.uint8 - elif np_dtype == np.float16: - return trt.float16 - elif np_dtype == np.float32: - return trt.float32 - return None - - -def 
np_to_onnx_dtype(np_dtype): - if np_dtype == bool: - return onnx.TensorProto.BOOL - elif np_dtype == np.int8: - return onnx.TensorProto.INT8 - elif np_dtype == np.int16: - return onnx.TensorProto.INT16 - elif np_dtype == np.int32: - return onnx.TensorProto.INT32 - elif np_dtype == np.int64: - return onnx.TensorProto.INT64 - elif np_dtype == np.uint8: - return onnx.TensorProto.UINT8 - elif np_dtype == np.uint16: - return onnx.TensorProto.UINT16 - elif np_dtype == np.float16: - return onnx.TensorProto.FLOAT16 - elif np_dtype == np.float32: - return onnx.TensorProto.FLOAT - elif np_dtype == np.float64: - return onnx.TensorProto.DOUBLE - elif np_dtype == np_dtype_string: - return onnx.TensorProto.STRING - return None - - -def np_to_torch_dtype(np_dtype): - if np_dtype == bool: - return torch.bool - elif np_dtype == np.int8: - return torch.int8 - elif np_dtype == np.int16: - return torch.int16 - elif np_dtype == np.int32: - return torch.int - elif np_dtype == np.int64: - return torch.long - elif np_dtype == np.uint8: - return torch.uint8 - elif np_dtype == np.uint16: - return None # Not supported in Torch - elif np_dtype == np.float16: - return None - elif np_dtype == np.float32: - return torch.float - elif np_dtype == np.float64: - return torch.double - elif np_dtype == np_dtype_string: - return List[str] - - def create_graphdef_modelfile( models_dir, max_batch, diff --git a/qa/common/gen_qa_noshape_models.py b/qa/common/gen_qa_noshape_models.py index b37f1c1bbb..75e6396504 100755 --- a/qa/common/gen_qa_noshape_models.py +++ b/qa/common/gen_qa_noshape_models.py @@ -32,62 +32,12 @@ import numpy as np +from .gen_common import np_to_model_dtype, np_to_tf_dtype + FLAGS = None np_dtype_string = np.dtype(object) -def np_to_model_dtype(np_dtype): - if np_dtype == bool: - return "TYPE_BOOL" - elif np_dtype == np.int8: - return "TYPE_INT8" - elif np_dtype == np.int16: - return "TYPE_INT16" - elif np_dtype == np.int32: - return "TYPE_INT32" - elif np_dtype == np.int64: - return "TYPE_INT64" - elif np_dtype == np.uint8: - return "TYPE_UINT8" - elif np_dtype == np.uint16: - return "TYPE_UINT16" - elif np_dtype == np.float16: - return "TYPE_FP16" - elif np_dtype == np.float32: - return "TYPE_FP32" - elif np_dtype == np.float64: - return "TYPE_FP64" - elif np_dtype == np_dtype_string: - return "TYPE_STRING" - return None - - -def np_to_tf_dtype(np_dtype): - if np_dtype == bool: - return tf.bool - elif np_dtype == np.int8: - return tf.int8 - elif np_dtype == np.int16: - return tf.int16 - elif np_dtype == np.int32: - return tf.int32 - elif np_dtype == np.int64: - return tf.int64 - elif np_dtype == np.uint8: - return tf.uint8 - elif np_dtype == np.uint16: - return tf.uint16 - elif np_dtype == np.float16: - return tf.float16 - elif np_dtype == np.float32: - return tf.float32 - elif np_dtype == np.float64: - return tf.float64 - elif np_dtype == np_dtype_string: - return tf.string - return None - - def create_savedmodel_modelfile( models_dir, max_batch, diff --git a/qa/common/gen_qa_ragged_models.py b/qa/common/gen_qa_ragged_models.py index 0ac70e80f6..c4292f9420 100755 --- a/qa/common/gen_qa_ragged_models.py +++ b/qa/common/gen_qa_ragged_models.py @@ -31,99 +31,14 @@ import numpy as np -np_dtype_string = np.dtype(object) - +from .gen_common import ( + np_to_model_dtype, + np_to_onnx_dtype, + np_to_tf_dtype, + np_to_trt_dtype, +) -def np_to_model_dtype(np_dtype): - if np_dtype == bool: - return "TYPE_BOOL" - elif np_dtype == np.int8: - return "TYPE_INT8" - elif np_dtype == np.int16: - return "TYPE_INT16" - elif 
np_dtype == np.int32: - return "TYPE_INT32" - elif np_dtype == np.int64: - return "TYPE_INT64" - elif np_dtype == np.uint8: - return "TYPE_UINT8" - elif np_dtype == np.uint16: - return "TYPE_UINT16" - elif np_dtype == np.float16: - return "TYPE_FP16" - elif np_dtype == np.float32: - return "TYPE_FP32" - elif np_dtype == np.float64: - return "TYPE_FP64" - elif np_dtype == np_dtype_string: - return "TYPE_STRING" - return None - - -def np_to_trt_dtype(np_dtype): - if np_dtype == bool: - return trt.bool - elif np_dtype == np.int8: - return trt.int8 - elif np_dtype == np.int32: - return trt.int32 - elif np_dtype == np.float16: - return trt.float16 - elif np_dtype == np.float32: - return trt.float32 - return None - - -def np_to_tf_dtype(np_dtype): - if np_dtype == bool: - return tf.bool - elif np_dtype == np.int8: - return tf.int8 - elif np_dtype == np.int16: - return tf.int16 - elif np_dtype == np.int32: - return tf.int32 - elif np_dtype == np.int64: - return tf.int64 - elif np_dtype == np.uint8: - return tf.uint8 - elif np_dtype == np.uint16: - return tf.uint16 - elif np_dtype == np.float16: - return tf.float16 - elif np_dtype == np.float32: - return tf.float32 - elif np_dtype == np.float64: - return tf.float64 - elif np_dtype == np_dtype_string: - return tf.string - return None - - -def np_to_onnx_dtype(np_dtype): - if np_dtype == bool: - return onnx.TensorProto.BOOL - elif np_dtype == np.int8: - return onnx.TensorProto.INT8 - elif np_dtype == np.int16: - return onnx.TensorProto.INT16 - elif np_dtype == np.int32: - return onnx.TensorProto.INT32 - elif np_dtype == np.int64: - return onnx.TensorProto.INT64 - elif np_dtype == np.uint8: - return onnx.TensorProto.UINT8 - elif np_dtype == np.uint16: - return onnx.TensorProto.UINT16 - elif np_dtype == np.float16: - return onnx.TensorProto.FLOAT16 - elif np_dtype == np.float32: - return onnx.TensorProto.FLOAT - elif np_dtype == np.float64: - return onnx.TensorProto.DOUBLE - elif np_dtype == np_dtype_string: - return onnx.TensorProto.STRING - return None +np_dtype_string = np.dtype(object) def create_savedmodel_modelfile(models_dir, model_version, dtype): diff --git a/qa/common/gen_qa_reshape_models.py b/qa/common/gen_qa_reshape_models.py index 983f06c1d0..fe34446786 100755 --- a/qa/common/gen_qa_reshape_models.py +++ b/qa/common/gen_qa_reshape_models.py @@ -33,129 +33,19 @@ import gen_ensemble_model_utils as emu import numpy as np +from .gen_common import ( + np_to_model_dtype, + np_to_onnx_dtype, + np_to_tf_dtype, + np_to_torch_dtype, + np_to_trt_dtype, +) + FLAGS = None np_dtype_string = np.dtype(object) from typing import List -def np_to_model_dtype(np_dtype): - if np_dtype == bool: - return "TYPE_BOOL" - elif np_dtype == np.int8: - return "TYPE_INT8" - elif np_dtype == np.int16: - return "TYPE_INT16" - elif np_dtype == np.int32: - return "TYPE_INT32" - elif np_dtype == np.int64: - return "TYPE_INT64" - elif np_dtype == np.uint8: - return "TYPE_UINT8" - elif np_dtype == np.uint16: - return "TYPE_UINT16" - elif np_dtype == np.float16: - return "TYPE_FP16" - elif np_dtype == np.float32: - return "TYPE_FP32" - elif np_dtype == np.float64: - return "TYPE_FP64" - elif np_dtype == np_dtype_string: - return "TYPE_STRING" - return None - - -def np_to_tf_dtype(np_dtype): - if np_dtype == bool: - return tf.bool - elif np_dtype == np.int8: - return tf.int8 - elif np_dtype == np.int16: - return tf.int16 - elif np_dtype == np.int32: - return tf.int32 - elif np_dtype == np.int64: - return tf.int64 - elif np_dtype == np.uint8: - return tf.uint8 - elif np_dtype 
== np.uint16: - return tf.uint16 - elif np_dtype == np.float16: - return tf.float16 - elif np_dtype == np.float32: - return tf.float32 - elif np_dtype == np.float64: - return tf.float64 - elif np_dtype == np_dtype_string: - return tf.string - return None - - -def np_to_trt_dtype(np_dtype): - if np_dtype == bool: - return trt.bool - elif np_dtype == np.int8: - return trt.int8 - elif np_dtype == np.int32: - return trt.int32 - elif np_dtype == np.float16: - return trt.float16 - elif np_dtype == np.float32: - return trt.float32 - return None - - -def np_to_onnx_dtype(np_dtype): - if np_dtype == bool: - return onnx.TensorProto.BOOL - elif np_dtype == np.int8: - return onnx.TensorProto.INT8 - elif np_dtype == np.int16: - return onnx.TensorProto.INT16 - elif np_dtype == np.int32: - return onnx.TensorProto.INT32 - elif np_dtype == np.int64: - return onnx.TensorProto.INT64 - elif np_dtype == np.uint8: - return onnx.TensorProto.UINT8 - elif np_dtype == np.uint16: - return onnx.TensorProto.UINT16 - elif np_dtype == np.float16: - return onnx.TensorProto.FLOAT16 - elif np_dtype == np.float32: - return onnx.TensorProto.FLOAT - elif np_dtype == np.float64: - return onnx.TensorProto.DOUBLE - elif np_dtype == np_dtype_string: - return onnx.TensorProto.STRING - return None - - -def np_to_torch_dtype(np_dtype): - if np_dtype == bool: - return torch.bool - elif np_dtype == np.int8: - return torch.int8 - elif np_dtype == np.int16: - return torch.int16 - elif np_dtype == np.int32: - return torch.int - elif np_dtype == np.int64: - return torch.long - elif np_dtype == np.uint8: - return torch.uint8 - elif np_dtype == np.uint16: - return None # Not supported in Torch - elif np_dtype == np.float16: - return None - elif np_dtype == np.float32: - return torch.float - elif np_dtype == np.float64: - return torch.double - elif np_dtype == np_dtype_string: - return List[str] - return None - - def create_tf_modelfile( create_savedmodel, models_dir, diff --git a/qa/common/gen_qa_sequence_models.py b/qa/common/gen_qa_sequence_models.py index 01cd515c15..c23c078f9c 100755 --- a/qa/common/gen_qa_sequence_models.py +++ b/qa/common/gen_qa_sequence_models.py @@ -32,127 +32,18 @@ import gen_ensemble_model_utils as emu import numpy as np +from .gen_common import ( + np_to_model_dtype, + np_to_onnx_dtype, + np_to_tf_dtype, + np_to_torch_dtype, + np_to_trt_dtype, +) + FLAGS = None np_dtype_string = np.dtype(object) -def np_to_model_dtype(np_dtype): - if np_dtype == bool: - return "TYPE_BOOL" - elif np_dtype == np.int8: - return "TYPE_INT8" - elif np_dtype == np.int16: - return "TYPE_INT16" - elif np_dtype == np.int32: - return "TYPE_INT32" - elif np_dtype == np.int64: - return "TYPE_INT64" - elif np_dtype == np.uint8: - return "TYPE_UINT8" - elif np_dtype == np.uint16: - return "TYPE_UINT16" - elif np_dtype == np.float16: - return "TYPE_FP16" - elif np_dtype == np.float32: - return "TYPE_FP32" - elif np_dtype == np.float64: - return "TYPE_FP64" - elif np_dtype == np_dtype_string: - return "TYPE_STRING" - return None - - -def np_to_tf_dtype(np_dtype): - if np_dtype == bool: - return tf.bool - elif np_dtype == np.int8: - return tf.int8 - elif np_dtype == np.int16: - return tf.int16 - elif np_dtype == np.int32: - return tf.int32 - elif np_dtype == np.int64: - return tf.int64 - elif np_dtype == np.uint8: - return tf.uint8 - elif np_dtype == np.uint16: - return tf.uint16 - elif np_dtype == np.float16: - return tf.float16 - elif np_dtype == np.float32: - return tf.float32 - elif np_dtype == np.float64: - return tf.float64 - elif np_dtype 
== np_dtype_string: - return tf.string - return None - - -def np_to_trt_dtype(np_dtype): - if np_dtype == bool: - return trt.bool - elif np_dtype == np.int8: - return trt.int8 - elif np_dtype == np.int32: - return trt.int32 - elif np_dtype == np.float16: - return trt.float16 - elif np_dtype == np.float32: - return trt.float32 - return None - - -def np_to_onnx_dtype(np_dtype): - if np_dtype == bool: - return onnx.TensorProto.BOOL - elif np_dtype == np.int8: - return onnx.TensorProto.INT8 - elif np_dtype == np.int16: - return onnx.TensorProto.INT16 - elif np_dtype == np.int32: - return onnx.TensorProto.INT32 - elif np_dtype == np.int64: - return onnx.TensorProto.INT64 - elif np_dtype == np.uint8: - return onnx.TensorProto.UINT8 - elif np_dtype == np.uint16: - return onnx.TensorProto.UINT16 - elif np_dtype == np.float16: - return onnx.TensorProto.FLOAT16 - elif np_dtype == np.float32: - return onnx.TensorProto.FLOAT - elif np_dtype == np.float64: - return onnx.TensorProto.DOUBLE - elif np_dtype == np_dtype_string: - return onnx.TensorProto.STRING - - -def np_to_torch_dtype(np_dtype): - if np_dtype == bool: - return torch.bool - elif np_dtype == np.int8: - return torch.int8 - elif np_dtype == np.int16: - return torch.int16 - elif np_dtype == np.int32: - return torch.int - elif np_dtype == np.int64: - return torch.long - elif np_dtype == np.uint8: - return torch.uint8 - elif np_dtype == np.uint16: - return None # Not supported in Torch - elif np_dtype == np.float16: - return None - elif np_dtype == np.float32: - return torch.float - elif np_dtype == np.float64: - return torch.double - elif np_dtype == np_dtype_string: - return None # Not supported in Torch - return None - - def create_tf_modelfile( create_savedmodel, models_dir, model_version, max_batch, dtype, shape ): diff --git a/qa/common/gen_qa_trt_data_dependent_shape.py b/qa/common/gen_qa_trt_data_dependent_shape.py index 1b40455fd6..a0dc38083a 100755 --- a/qa/common/gen_qa_trt_data_dependent_shape.py +++ b/qa/common/gen_qa_trt_data_dependent_shape.py @@ -33,45 +33,7 @@ import tensorrt as trt import test_util as tu - -def np_to_model_dtype(np_dtype): - if np_dtype == bool: - return "TYPE_BOOL" - elif np_dtype == np.int8: - return "TYPE_INT8" - elif np_dtype == np.int16: - return "TYPE_INT16" - elif np_dtype == np.int32: - return "TYPE_INT32" - elif np_dtype == np.int64: - return "TYPE_INT64" - elif np_dtype == np.uint8: - return "TYPE_UINT8" - elif np_dtype == np.uint16: - return "TYPE_UINT16" - elif np_dtype == np.float16: - return "TYPE_FP16" - elif np_dtype == np.float32: - return "TYPE_FP32" - elif np_dtype == np.float64: - return "TYPE_FP64" - elif np_dtype == np_dtype_string: - return "TYPE_STRING" - return None - - -def np_to_trt_dtype(np_dtype): - if np_dtype == bool: - return trt.bool - elif np_dtype == np.int8: - return trt.int8 - elif np_dtype == np.int32: - return trt.int32 - elif np_dtype == np.float16: - return trt.float16 - elif np_dtype == np.float32: - return trt.float32 - return None +from .gen_common import np_to_model_dtype, np_to_trt_dtype # The 'nonzero' model that we use for data dependent shape is naturally diff --git a/qa/common/gen_qa_trt_format_models.py b/qa/common/gen_qa_trt_format_models.py index 9502cdb972..518f42cc61 100755 --- a/qa/common/gen_qa_trt_format_models.py +++ b/qa/common/gen_qa_trt_format_models.py @@ -33,47 +33,9 @@ import tensorrt as trt import test_util as tu -np_dtype_string = np.dtype(object) - +from .gen_common import np_to_model_dtype, np_to_trt_dtype -def np_to_model_dtype(np_dtype): - 
if np_dtype == bool: - return "TYPE_BOOL" - elif np_dtype == np.int8: - return "TYPE_INT8" - elif np_dtype == np.int16: - return "TYPE_INT16" - elif np_dtype == np.int32: - return "TYPE_INT32" - elif np_dtype == np.int64: - return "TYPE_INT64" - elif np_dtype == np.uint8: - return "TYPE_UINT8" - elif np_dtype == np.uint16: - return "TYPE_UINT16" - elif np_dtype == np.float16: - return "TYPE_FP16" - elif np_dtype == np.float32: - return "TYPE_FP32" - elif np_dtype == np.float64: - return "TYPE_FP64" - elif np_dtype == np_dtype_string: - return "TYPE_STRING" - return None - - -def np_to_trt_dtype(np_dtype): - if np_dtype == bool: - return trt.bool - elif np_dtype == np.int8: - return trt.int8 - elif np_dtype == np.int32: - return trt.int32 - elif np_dtype == np.float16: - return trt.float16 - elif np_dtype == np.float32: - return trt.float32 - return None +np_dtype_string = np.dtype(object) def trt_format_to_string(trt_format): diff --git a/qa/common/gen_qa_trt_plugin_models.py b/qa/common/gen_qa_trt_plugin_models.py index ce6b309f3d..bf4f2288e5 100755 --- a/qa/common/gen_qa_trt_plugin_models.py +++ b/qa/common/gen_qa_trt_plugin_models.py @@ -32,6 +32,8 @@ import numpy as np import tensorrt as trt +from .gen_common import np_to_model_dtype, np_to_trt_dtype + np_dtype_string = np.dtype(object) TRT_LOGGER = trt.Logger() @@ -40,46 +42,6 @@ PLUGIN_CREATORS = trt.get_plugin_registry().plugin_creator_list -def np_to_model_dtype(np_dtype): - if np_dtype == bool: - return "TYPE_BOOL" - elif np_dtype == np.int8: - return "TYPE_INT8" - elif np_dtype == np.int16: - return "TYPE_INT16" - elif np_dtype == np.int32: - return "TYPE_INT32" - elif np_dtype == np.int64: - return "TYPE_INT64" - elif np_dtype == np.uint8: - return "TYPE_UINT8" - elif np_dtype == np.uint16: - return "TYPE_UINT16" - elif np_dtype == np.float16: - return "TYPE_FP16" - elif np_dtype == np.float32: - return "TYPE_FP32" - elif np_dtype == np.float64: - return "TYPE_FP64" - elif np_dtype == np_dtype_string: - return "TYPE_STRING" - return None - - -def np_to_trt_dtype(np_dtype): - if np_dtype == bool: - return trt.bool - elif np_dtype == np.int8: - return trt.int8 - elif np_dtype == np.int32: - return trt.int32 - elif np_dtype == np.float16: - return trt.float16 - elif np_dtype == np.float32: - return trt.float32 - return None - - def get_trt_plugin(plugin_name): plugin = None field_collection = None From c6c8a8be33c0bdd29288afd786e6f70f634a6e57 Mon Sep 17 00:00:00 2001 From: Iman Tabrizian Date: Wed, 20 Sep 2023 17:51:56 -0400 Subject: [PATCH 2/7] Fix codeql --- qa/common/gen_common.py | 1 + 1 file changed, 1 insertion(+) diff --git a/qa/common/gen_common.py b/qa/common/gen_common.py index 9ef41a3a19..8d4ac2cbec 100644 --- a/qa/common/gen_common.py +++ b/qa/common/gen_common.py @@ -50,6 +50,7 @@ def np_to_onnx_dtype(np_dtype): return onnx.TensorProto.DOUBLE elif np_dtype == np_dtype_string: return onnx.TensorProto.STRING + return None def np_to_model_dtype(np_dtype): From 5d44a31c5435b985e6eb78606d01eb13f156bee6 Mon Sep 17 00:00:00 2001 From: Iman Tabrizian Date: Thu, 21 Sep 2023 11:29:53 -0400 Subject: [PATCH 3/7] Fix relative path import --- qa/common/gen_ensemble_model_utils.py | 3 +-- qa/common/gen_qa_dyna_sequence_implicit_models.py | 3 +-- qa/common/gen_qa_dyna_sequence_models.py | 3 +-- qa/common/gen_qa_identity_models.py | 3 +-- qa/common/gen_qa_implicit_models.py | 3 +-- qa/common/gen_qa_models.py | 3 +-- qa/common/gen_qa_noshape_models.py | 3 +-- qa/common/gen_qa_ragged_models.py | 3 +-- qa/common/gen_qa_reshape_models.py | 
3 +-- qa/common/gen_qa_sequence_models.py | 3 +-- qa/common/gen_qa_trt_data_dependent_shape.py | 3 +-- qa/common/gen_qa_trt_format_models.py | 3 +-- qa/common/gen_qa_trt_plugin_models.py | 3 +-- 13 files changed, 13 insertions(+), 26 deletions(-) diff --git a/qa/common/gen_ensemble_model_utils.py b/qa/common/gen_ensemble_model_utils.py index 6528b51464..dd4f6e326a 100755 --- a/qa/common/gen_ensemble_model_utils.py +++ b/qa/common/gen_ensemble_model_utils.py @@ -30,8 +30,7 @@ import numpy as np import test_util as tu - -from .gen_common import np_to_model_dtype +from gen_common import np_to_model_dtype BASIC_ENSEMBLE_TYPES = ["simple", "sequence", "fan"] diff --git a/qa/common/gen_qa_dyna_sequence_implicit_models.py b/qa/common/gen_qa_dyna_sequence_implicit_models.py index b8cb4e59b3..ffa3f48ede 100755 --- a/qa/common/gen_qa_dyna_sequence_implicit_models.py +++ b/qa/common/gen_qa_dyna_sequence_implicit_models.py @@ -30,8 +30,7 @@ import os import numpy as np - -from .gen_common import np_to_model_dtype, np_to_onnx_dtype, np_to_trt_dtype +from gen_common import np_to_model_dtype, np_to_onnx_dtype, np_to_trt_dtype FLAGS = None np_dtype_string = np.dtype(object) diff --git a/qa/common/gen_qa_dyna_sequence_models.py b/qa/common/gen_qa_dyna_sequence_models.py index 27aef7d861..469d524ffb 100755 --- a/qa/common/gen_qa_dyna_sequence_models.py +++ b/qa/common/gen_qa_dyna_sequence_models.py @@ -30,8 +30,7 @@ import os import numpy as np - -from .gen_common import ( +from gen_common import ( np_to_model_dtype, np_to_onnx_dtype, np_to_tf_dtype, diff --git a/qa/common/gen_qa_identity_models.py b/qa/common/gen_qa_identity_models.py index f864ab8b10..60b045a09c 100755 --- a/qa/common/gen_qa_identity_models.py +++ b/qa/common/gen_qa_identity_models.py @@ -32,8 +32,7 @@ import gen_ensemble_model_utils as emu import numpy as np - -from .gen_common import ( +from gen_common import ( np_to_model_dtype, np_to_onnx_dtype, np_to_tf_dtype, diff --git a/qa/common/gen_qa_implicit_models.py b/qa/common/gen_qa_implicit_models.py index 3e17df23c8..89872c3b92 100755 --- a/qa/common/gen_qa_implicit_models.py +++ b/qa/common/gen_qa_implicit_models.py @@ -32,8 +32,7 @@ import gen_ensemble_model_utils as emu import numpy as np - -from .gen_common import ( +from gen_common import ( np_to_model_dtype, np_to_onnx_dtype, np_to_torch_dtype, diff --git a/qa/common/gen_qa_models.py b/qa/common/gen_qa_models.py index 961185fa37..82d241f470 100755 --- a/qa/common/gen_qa_models.py +++ b/qa/common/gen_qa_models.py @@ -32,8 +32,7 @@ import gen_ensemble_model_utils as emu import numpy as np - -from .gen_common import ( +from gen_common import ( np_to_model_dtype, np_to_onnx_dtype, np_to_tf_dtype, diff --git a/qa/common/gen_qa_noshape_models.py b/qa/common/gen_qa_noshape_models.py index 75e6396504..af26017495 100755 --- a/qa/common/gen_qa_noshape_models.py +++ b/qa/common/gen_qa_noshape_models.py @@ -31,8 +31,7 @@ from builtins import range import numpy as np - -from .gen_common import np_to_model_dtype, np_to_tf_dtype +from gen_common import np_to_model_dtype, np_to_tf_dtype FLAGS = None np_dtype_string = np.dtype(object) diff --git a/qa/common/gen_qa_ragged_models.py b/qa/common/gen_qa_ragged_models.py index c4292f9420..18d465dc94 100755 --- a/qa/common/gen_qa_ragged_models.py +++ b/qa/common/gen_qa_ragged_models.py @@ -30,8 +30,7 @@ import os import numpy as np - -from .gen_common import ( +from gen_common import ( np_to_model_dtype, np_to_onnx_dtype, np_to_tf_dtype, diff --git a/qa/common/gen_qa_reshape_models.py 
b/qa/common/gen_qa_reshape_models.py index fe34446786..4ac5347a79 100755 --- a/qa/common/gen_qa_reshape_models.py +++ b/qa/common/gen_qa_reshape_models.py @@ -32,8 +32,7 @@ import gen_ensemble_model_utils as emu import numpy as np - -from .gen_common import ( +from gen_common import ( np_to_model_dtype, np_to_onnx_dtype, np_to_tf_dtype, diff --git a/qa/common/gen_qa_sequence_models.py b/qa/common/gen_qa_sequence_models.py index c23c078f9c..4c9ca9d8e5 100755 --- a/qa/common/gen_qa_sequence_models.py +++ b/qa/common/gen_qa_sequence_models.py @@ -31,8 +31,7 @@ import gen_ensemble_model_utils as emu import numpy as np - -from .gen_common import ( +from gen_common import ( np_to_model_dtype, np_to_onnx_dtype, np_to_tf_dtype, diff --git a/qa/common/gen_qa_trt_data_dependent_shape.py b/qa/common/gen_qa_trt_data_dependent_shape.py index a0dc38083a..c6600ed919 100755 --- a/qa/common/gen_qa_trt_data_dependent_shape.py +++ b/qa/common/gen_qa_trt_data_dependent_shape.py @@ -32,8 +32,7 @@ import numpy as np import tensorrt as trt import test_util as tu - -from .gen_common import np_to_model_dtype, np_to_trt_dtype +from gen_common import np_to_model_dtype, np_to_trt_dtype # The 'nonzero' model that we use for data dependent shape is naturally diff --git a/qa/common/gen_qa_trt_format_models.py b/qa/common/gen_qa_trt_format_models.py index 518f42cc61..e077139aec 100755 --- a/qa/common/gen_qa_trt_format_models.py +++ b/qa/common/gen_qa_trt_format_models.py @@ -32,8 +32,7 @@ import numpy as np import tensorrt as trt import test_util as tu - -from .gen_common import np_to_model_dtype, np_to_trt_dtype +from gen_common import np_to_model_dtype, np_to_trt_dtype np_dtype_string = np.dtype(object) diff --git a/qa/common/gen_qa_trt_plugin_models.py b/qa/common/gen_qa_trt_plugin_models.py index bf4f2288e5..10c9d5284a 100755 --- a/qa/common/gen_qa_trt_plugin_models.py +++ b/qa/common/gen_qa_trt_plugin_models.py @@ -31,8 +31,7 @@ import numpy as np import tensorrt as trt - -from .gen_common import np_to_model_dtype, np_to_trt_dtype +from gen_common import np_to_model_dtype, np_to_trt_dtype np_dtype_string = np.dtype(object) From 5726204bda6edb0646090bad98a20a57d9a74b5c Mon Sep 17 00:00:00 2001 From: Iman Tabrizian Date: Thu, 21 Sep 2023 11:43:37 -0400 Subject: [PATCH 4/7] Fix package structure --- qa/common/__init__.py | 25 +++++++++++++++++++++++++ qa/common/gen_common.py | 13 +++++++++++++ 2 files changed, 38 insertions(+) create mode 100644 qa/common/__init__.py diff --git a/qa/common/__init__.py b/qa/common/__init__.py new file mode 100644 index 0000000000..f3a6dc3f00 --- /dev/null +++ b/qa/common/__init__.py @@ -0,0 +1,25 @@ +# Copyright 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of NVIDIA CORPORATION nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR +# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY +# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/qa/common/gen_common.py b/qa/common/gen_common.py index 8d4ac2cbec..d2968ed178 100644 --- a/qa/common/gen_common.py +++ b/qa/common/gen_common.py @@ -25,9 +25,14 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # Common utilities for model generation scripts +import numpy as np + +np_dtype_string = np.dtype(object) def np_to_onnx_dtype(np_dtype): + import onnx + if np_dtype == bool: return onnx.TensorProto.BOOL elif np_dtype == np.int8: @@ -54,6 +59,8 @@ def np_to_onnx_dtype(np_dtype): def np_to_model_dtype(np_dtype): + import numpy as np + if np_dtype == bool: return "TYPE_BOOL" elif np_dtype == np.int8: @@ -80,6 +87,8 @@ def np_to_model_dtype(np_dtype): def np_to_trt_dtype(np_dtype): + import tensorrt as trt + if np_dtype == bool: return trt.bool elif np_dtype == np.int8: @@ -94,6 +103,8 @@ def np_to_trt_dtype(np_dtype): def np_to_tf_dtype(np_dtype): + import tensorflow as tf + if np_dtype == bool: return tf.bool elif np_dtype == np.int8: @@ -120,6 +131,8 @@ def np_to_tf_dtype(np_dtype): def np_to_torch_dtype(np_dtype): + import torch + if np_dtype == bool: return torch.bool elif np_dtype == np.int8: From 56a22ac1ec6bfb56f29308837b0f03a69f56e613 Mon Sep 17 00:00:00 2001 From: Iman Tabrizian Date: Thu, 21 Sep 2023 11:46:21 -0400 Subject: [PATCH 5/7] Copy the gen_common file --- qa/common/__init__.py | 25 ------------------------- qa/common/gen_qa_model_repository | 1 + 2 files changed, 1 insertion(+), 25 deletions(-) delete mode 100644 qa/common/__init__.py diff --git a/qa/common/__init__.py b/qa/common/__init__.py deleted file mode 100644 index f3a6dc3f00..0000000000 --- a/qa/common/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# * Neither the name of NVIDIA CORPORATION nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY -# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -# PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR -# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/qa/common/gen_qa_model_repository b/qa/common/gen_qa_model_repository index 85db0730b2..55bef11c46 100755 --- a/qa/common/gen_qa_model_repository +++ b/qa/common/gen_qa_model_repository @@ -154,6 +154,7 @@ cp ./gen_qa_ragged_models.py $HOST_SRCDIR/. cp ./test_util.py $HOST_SRCDIR/. cp ./gen_tag_sigdef.py $HOST_SRCDIR/. cp ./gen_qa_tf_parameters.py $HOST_SRCDIR/. +cp ./gen_common.py $HOST_SRCDIR/. ONNXSCRIPT=onnx_gen.cmds OPENVINOSCRIPT=openvino_gen.cmds From 250f4f698c870d6f36be52e5c6e3ad98a1dcbe5c Mon Sep 17 00:00:00 2001 From: Iman Tabrizian Date: Thu, 21 Sep 2023 11:55:15 -0400 Subject: [PATCH 6/7] Add missing uint8 --- qa/common/gen_common.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/qa/common/gen_common.py b/qa/common/gen_common.py index d2968ed178..0ac87d5fd5 100644 --- a/qa/common/gen_common.py +++ b/qa/common/gen_common.py @@ -95,6 +95,8 @@ def np_to_trt_dtype(np_dtype): return trt.int8 elif np_dtype == np.int32: return trt.int32 + elif np_dtype == np.uint8: + return trt.uint8 elif np_dtype == np.float16: return trt.float16 elif np_dtype == np.float32: From 760da92390cf27aacab205b5d17edcb1c2f7e0e4 Mon Sep 17 00:00:00 2001 From: Iman Tabrizian Date: Mon, 25 Sep 2023 10:13:22 -0400 Subject: [PATCH 7/7] Remove duplicate import --- qa/common/gen_common.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/qa/common/gen_common.py b/qa/common/gen_common.py index 0ac87d5fd5..8bd97720b5 100644 --- a/qa/common/gen_common.py +++ b/qa/common/gen_common.py @@ -59,8 +59,6 @@ def np_to_onnx_dtype(np_dtype): def np_to_model_dtype(np_dtype): - import numpy as np - if np_dtype == bool: return "TYPE_BOOL" elif np_dtype == np.int8:
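Taken together, the series leaves every qa/common generation script importing its dtype converters from the shared gen_common module instead of carrying a private copy, with the heavyweight framework imports (onnx, tensorrt, tensorflow, torch) deferred until a converter is actually called. Below is a minimal sketch of the resulting usage, assuming gen_common.py sits on the import path (per PATCH 3/7 the scripts use a plain "from gen_common import ..." rather than a package-relative import) and that TensorRT is installed; the snippet is illustrative only and is not part of the patch series itself.

    # Illustrative usage of the consolidated helpers after this series.
    import numpy as np
    from gen_common import np_to_model_dtype, np_to_trt_dtype

    np_dtype_string = np.dtype(object)  # each script still defines this locally

    # Model-config type strings now come from the single shared table.
    assert np_to_model_dtype(np.float32) == "TYPE_FP32"
    assert np_to_model_dtype(np_dtype_string) == "TYPE_STRING"

    # PATCH 4/7 moved "import tensorrt as trt" inside np_to_trt_dtype, so
    # this call is the first point that actually requires TensorRT; the
    # uint8 mapping exercised here is the one added in PATCH 6/7.
    print(np_to_trt_dtype(np.uint8))  # trt.uint8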