1 parent 8b5e3ba commit 5b8c263
src/instance_state.cc
@@ -778,9 +778,9 @@ ModelInstanceState::Run(
         payload_->responses_,
         TRITONSERVER_ErrorNew(
             TRITONSERVER_ERROR_INVALID_ARG,
-            (std::string("tensor for input '") + name +
-                "' expected byte size is " +
-                std::to_string(total_byte_size) + ", got " +
+            (std::string("input byte size mismatch for input '") + name +
+                "'" + " for model '" + model_state_->Name() +
+                "'. Expected " + std::to_string(total_byte_size) + ", got " +
             std::to_string(req_data_byte_size))
                 .c_str()),
         "failed to run TRT inference");