Commit 4f487a0

Add MLFlow test for already loaded models. Update copyright year (#5808)
1 parent b99ddd2 commit 4f487a0
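
For context, the MLflow Triton plugin (deploy/mlflow-triton-plugin) deploys models to a running Triton server through the standard mlflow deployments CLI, and the test below checks that only models registered this way are reported. A minimal sketch of that flow, assuming the plugin is installed and pointed at a local server; the model name, model URI, and environment variable values are illustrative and not taken from this commit:

    # Point the plugin at the Triton endpoint and model repository
    # (variable names follow the plugin's config module; values are examples).
    export TRITON_URL=localhost:8000
    export TRITON_MODEL_REPO=./models

    # Deploy an MLflow-registered model to Triton using the 'triton' flavor.
    mlflow deployments create -t triton --flavor triton \
        --name onnx_float32_int32_int32 -m models:/onnx_float32_int32_int32/1

    # Only deployments created through MLflow should appear here.
    mlflow deployments list -t triton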

File tree

3 files changed: +12 -4 lines changed

deploy/mlflow-triton-plugin/mlflow_triton/config.py (+1 -1)

@@ -1,4 +1,4 @@
-# Copyright 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# Copyright 2021-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions

deploy/mlflow-triton-plugin/mlflow_triton/deployments.py (+1 -1)

@@ -1,4 +1,4 @@
-# Copyright 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# Copyright 2021-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions

qa/L0_mlflow/test.sh (+10 -2)

@@ -1,5 +1,5 @@
 #!/bin/bash
-# Copyright 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# Copyright 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions
@@ -63,8 +63,12 @@ EOF
 
 rm -rf ./models
 mkdir -p ./models
+# Put some models in model repository to make sure MLFlow plugin would ignore
+# model that is not registered via MLFlow
+cp -r ./mlflow-triton-plugin/examples/onnx_float32_int32_int32 ./models/existing_model
+
 SERVER=/opt/tritonserver/bin/tritonserver
-SERVER_ARGS="--model-repository=./models --strict-model-config=false --model-control-mode=explicit"
+SERVER_ARGS="--model-repository=./models --strict-model-config=false --model-control-mode=explicit --load-model=*"
 SERVER_LOG="./inference_server.log"
 run_server
 if [ "$SERVER_PID" == "0" ]; then
@@ -102,6 +106,10 @@ if [ $CLI_RET -eq 0 ]; then
         echo -e "\n***\n*** Expect deployed 'triton' flavor model to be listed\n***"
         CLI_RET=1
     fi
+    if [ `grep -c "existing_model.*READY" $CLI_LOG` != "0" ]; then
+        echo -e "\n***\n*** Unexpected non-MLflow model listed\n***"
+        CLI_RET=1
+    fi
 fi
 if [ $CLI_RET -eq 0 ]; then
     mlflow deployments get -t triton --name onnx_float32_int32_int32 >>$CLI_LOG 2>&1
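
Taken together, the new lines in test.sh can be reproduced by hand roughly as follows; this is a sketch assuming a Triton container with the plugin's example models available and paths matching the diff above (the cli.log name and the final grep are illustrative simplifications of the test's check):

    # Stage a model directly in the repository without registering it via MLflow.
    rm -rf ./models && mkdir -p ./models
    cp -r ./mlflow-triton-plugin/examples/onnx_float32_int32_int32 ./models/existing_model

    # Start Triton in explicit model-control mode but preload everything in the
    # repository ('--load-model=*' is quoted here so the shell does not glob it).
    /opt/tritonserver/bin/tritonserver \
        --model-repository=./models \
        --strict-model-config=false \
        --model-control-mode=explicit \
        '--load-model=*' &

    # existing_model is live on the server, yet it was never deployed through
    # MLflow, so it must not show up in the plugin's listing.
    mlflow deployments list -t triton | tee cli.log
    if grep -q "existing_model" cli.log; then
        echo "Unexpected non-MLflow model listed"
    fi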
