Skip to content

Commit 09f1952

Browse files
authored
bump version to v1.0.0rc1 (#1595)
1 parent deaefac commit 09f1952

File tree

7 files changed

+63
-63
lines changed

7 files changed

+63
-63
lines changed

CMakeLists.txt

+1-1
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@ endif ()
55
message(STATUS "CMAKE_INSTALL_PREFIX: ${CMAKE_INSTALL_PREFIX}")
66

77
cmake_minimum_required(VERSION 3.14)
8-
project(MMDeploy VERSION 0.10.0)
8+
project(MMDeploy VERSION 0.12.0)
99

1010
set(CMAKE_CXX_STANDARD 17)
1111

docs/en/02-how-to-run/prebuilt_package_windows.md

+19-19
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@
2121

2222
______________________________________________________________________
2323

24-
This tutorial takes `mmdeploy-1.0.0rc0-windows-amd64-onnxruntime1.8.1.zip` and `mmdeploy-1.0.0rc0-windows-amd64-cuda11.1-tensorrt8.2.3.0.zip` as examples to show how to use the prebuilt packages.
24+
This tutorial takes `mmdeploy-1.0.0rc1-windows-amd64-onnxruntime1.8.1.zip` and `mmdeploy-1.0.0rc1-windows-amd64-cuda11.1-tensorrt8.2.3.0.zip` as examples to show how to use the prebuilt packages.
2525

2626
The directory structure of the prebuilt package is as follows, where the `dist` folder is about model converter, and the `sdk` folder is related to model inference.
2727

@@ -80,9 +80,9 @@ In order to use `ONNX Runtime` backend, you should also do the following steps.
8080
5. Install `mmdeploy` (Model Converter) and `mmdeploy_python` (SDK Python API).
8181

8282
```bash
83-
# download mmdeploy-1.0.0rc0-windows-amd64-onnxruntime1.8.1.zip
84-
pip install .\mmdeploy-1.0.0rc0-windows-amd64-onnxruntime1.8.1\dist\mmdeploy-1.0.0rc0-py38-none-win_amd64.whl
85-
pip install .\mmdeploy-1.0.0rc0-windows-amd64-onnxruntime1.8.1\sdk\python\mmdeploy_python-1.0.0rc0-cp38-none-win_amd64.whl
83+
# download mmdeploy-1.0.0rc1-windows-amd64-onnxruntime1.8.1.zip
84+
pip install .\mmdeploy-1.0.0rc1-windows-amd64-onnxruntime1.8.1\dist\mmdeploy-1.0.0rc1-py38-none-win_amd64.whl
85+
pip install .\mmdeploy-1.0.0rc1-windows-amd64-onnxruntime1.8.1\sdk\python\mmdeploy_python-1.0.0rc1-cp38-none-win_amd64.whl
8686
```
8787

8888
:point_right: If you have installed it before, please uninstall it first.
@@ -107,9 +107,9 @@ In order to use `TensorRT` backend, you should also do the following steps.
107107
5. Install `mmdeploy` (Model Converter) and `mmdeploy_python` (SDK Python API).
108108

109109
```bash
110-
# download mmdeploy-1.0.0rc0-windows-amd64-cuda11.1-tensorrt8.2.3.0.zip
111-
pip install .\mmdeploy-1.0.0rc0-windows-amd64-cuda11.1-tensorrt8.2.3.0\dist\mmdeploy-1.0.0rc0-py38-none-win_amd64.whl
112-
pip install .\mmdeploy-1.0.0rc0-windows-amd64-cuda11.1-tensorrt8.2.3.0\sdk\python\mmdeploy_python-1.0.0rc0-cp38-none-win_amd64.whl
110+
# download mmdeploy-1.0.0rc1-windows-amd64-cuda11.1-tensorrt8.2.3.0.zip
111+
pip install .\mmdeploy-1.0.0rc1-windows-amd64-cuda11.1-tensorrt8.2.3.0\dist\mmdeploy-1.0.0rc1-py38-none-win_amd64.whl
112+
pip install .\mmdeploy-1.0.0rc1-windows-amd64-cuda11.1-tensorrt8.2.3.0\sdk\python\mmdeploy_python-1.0.0rc1-cp38-none-win_amd64.whl
113113
```
114114

115115
:point_right: If you have installed it before, please uninstall it first.
@@ -138,7 +138,7 @@ After preparation work, the structure of the current working directory should be
138138

139139
```
140140
..
141-
|-- mmdeploy-1.0.0rc0-windows-amd64-onnxruntime1.8.1
141+
|-- mmdeploy-1.0.0rc1-windows-amd64-onnxruntime1.8.1
142142
|-- mmclassification
143143
|-- mmdeploy
144144
`-- resnet18_8xb32_in1k_20210831-fbbb1da6.pth
@@ -186,7 +186,7 @@ After installation of mmdeploy-tensorrt prebuilt package, the structure of the c
186186

187187
```
188188
..
189-
|-- mmdeploy-1.0.0rc0-windows-amd64-cuda11.1-tensorrt8.2.3.0
189+
|-- mmdeploy-1.0.0rc1-windows-amd64-cuda11.1-tensorrt8.2.3.0
190190
|-- mmclassification
191191
|-- mmdeploy
192192
`-- resnet18_8xb32_in1k_20210831-fbbb1da6.pth
@@ -249,8 +249,8 @@ The structure of current working directory:
249249

250250
```
251251
.
252-
|-- mmdeploy-1.0.0rc0-windows-amd64-cuda11.1-tensorrt8.2.3.0
253-
|-- mmdeploy-1.0.0rc0-windows-amd64-onnxruntime1.8.1
252+
|-- mmdeploy-1.0.0rc1-windows-amd64-cuda11.1-tensorrt8.2.3.0
253+
|-- mmdeploy-1.0.0rc1-windows-amd64-onnxruntime1.8.1
254254
|-- mmclassification
255255
|-- mmdeploy
256256
|-- resnet18_8xb32_in1k_20210831-fbbb1da6.pth
@@ -311,15 +311,15 @@ The following describes how to use the SDK's C API for inference
311311

312312
1. Build examples
313313

314-
Under `mmdeploy-1.0.0rc0-windows-amd64-onnxruntime1.8.1\sdk\example` directory
314+
Under `mmdeploy-1.0.0rc1-windows-amd64-onnxruntime1.8.1\sdk\example` directory
315315

316316
```
317317
// Path should be modified according to the actual location
318318
mkdir build
319319
cd build
320320
cmake ..\cpp -A x64 -T v142 `
321321
-DOpenCV_DIR=C:\Deps\opencv\build\x64\vc15\lib `
322-
-DMMDeploy_DIR=C:\workspace\mmdeploy-1.0.0rc0-windows-amd64-onnxruntime1.8.1\sdk\lib\cmake\MMDeploy `
322+
-DMMDeploy_DIR=C:\workspace\mmdeploy-1.0.0rc1-windows-amd64-onnxruntime1.8.1\sdk\lib\cmake\MMDeploy `
323323
-DONNXRUNTIME_DIR=C:\Deps\onnxruntime\onnxruntime-win-gpu-x64-1.8.1
324324
325325
cmake --build . --config Release
@@ -329,15 +329,15 @@ The following describes how to use the SDK's C API for inference
329329

330330
:point_right: The purpose is to make the exe find the relevant dll
331331

332-
If choose to add environment variables, add the runtime libraries path of `mmdeploy` (`mmdeploy-1.0.0rc0-windows-amd64-onnxruntime1.8.1\sdk\bin`) to the `PATH`.
332+
If choose to add environment variables, add the runtime libraries path of `mmdeploy` (`mmdeploy-1.0.0rc1-windows-amd64-onnxruntime1.8.1\sdk\bin`) to the `PATH`.
333333

334334
If choose to copy the dynamic libraries, copy the dll in the bin directory to the same level directory of the just compiled exe (build/Release).
335335

336336
3. Inference:
337337

338338
It is recommended to use `CMD` here.
339339

340-
Under `mmdeploy-1.0.0rc0-windows-amd64-onnxruntime1.8.1\\sdk\\example\\build\\Release` directory:
340+
Under `mmdeploy-1.0.0rc1-windows-amd64-onnxruntime1.8.1\\sdk\\example\\build\\Release` directory:
341341

342342
```
343343
.\image_classification.exe cpu C:\workspace\work_dir\onnx\resnet\ C:\workspace\mmclassification\demo\demo.JPEG
@@ -347,15 +347,15 @@ The following describes how to use the SDK's C API for inference
347347

348348
1. Build examples
349349

350-
Under `mmdeploy-1.0.0rc0-windows-amd64-cuda11.1-tensorrt8.2.3.0\\sdk\\example` directory
350+
Under `mmdeploy-1.0.0rc1-windows-amd64-cuda11.1-tensorrt8.2.3.0\\sdk\\example` directory
351351

352352
```
353353
// Path should be modified according to the actual location
354354
mkdir build
355355
cd build
356356
cmake ..\cpp -A x64 -T v142 `
357357
-DOpenCV_DIR=C:\Deps\opencv\build\x64\vc15\lib `
358-
-DMMDeploy_DIR=C:\workspace\mmdeploy-1.0.0rc0-windows-amd64-cuda11.1-tensorrt8.2.3.0\sdk\lib\cmake\MMDeploy `
358+
-DMMDeploy_DIR=C:\workspace\mmdeploy-1.0.0rc1-windows-amd64-cuda11.1-tensorrt8.2.3.0\sdk\lib\cmake\MMDeploy `
359359
-DTENSORRT_DIR=C:\Deps\tensorrt\TensorRT-8.2.3.0 `
360360
-DCUDNN_DIR=C:\Deps\cudnn\8.2.1
361361
cmake --build . --config Release
@@ -365,15 +365,15 @@ The following describes how to use the SDK's C API for inference
365365

366366
:point_right: The purpose is to make the exe find the relevant dll
367367

368-
If choose to add environment variables, add the runtime libraries path of `mmdeploy` (`mmdeploy-1.0.0rc0-windows-amd64-cuda11.1-tensorrt8.2.3.0\sdk\bin`) to the `PATH`.
368+
If choose to add environment variables, add the runtime libraries path of `mmdeploy` (`mmdeploy-1.0.0rc1-windows-amd64-cuda11.1-tensorrt8.2.3.0\sdk\bin`) to the `PATH`.
369369

370370
If choose to copy the dynamic libraries, copy the dll in the bin directory to the same level directory of the just compiled exe (build/Release).
371371

372372
3. Inference
373373

374374
It is recommended to use `CMD` here.
375375

376-
Under `mmdeploy-1.0.0rc0-windows-amd64-cuda11.1-tensorrt8.2.3.0\\sdk\\example\\build\\Release` directory
376+
Under `mmdeploy-1.0.0rc1-windows-amd64-cuda11.1-tensorrt8.2.3.0\\sdk\\example\\build\\Release` directory
377377

378378
```
379379
.\image_classification.exe cuda C:\workspace\work_dir\trt\resnet C:\workspace\mmclassification\demo\demo.JPEG

docs/en/get_started.md

+11-11
Original file line numberDiff line numberDiff line change
@@ -118,11 +118,11 @@ Take the latest precompiled package as example, you can install it as follows:
118118

119119
```shell
120120
# install MMDeploy
121-
wget https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0rc0/mmdeploy-1.0.0rc0-linux-x86_64-onnxruntime1.8.1.tar.gz
122-
tar -zxvf mmdeploy-1.0.0rc0-linux-x86_64-onnxruntime1.8.1.tar.gz
123-
cd mmdeploy-1.0.0rc0-linux-x86_64-onnxruntime1.8.1
124-
pip install dist/mmdeploy-1.0.0rc0-py3-none-linux_x86_64.whl
125-
pip install sdk/python/mmdeploy_python-1.0.0rc0-cp38-none-linux_x86_64.whl
121+
wget https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0rc1/mmdeploy-1.0.0rc1-linux-x86_64-onnxruntime1.8.1.tar.gz
122+
tar -zxvf mmdeploy-1.0.0rc1-linux-x86_64-onnxruntime1.8.1.tar.gz
123+
cd mmdeploy-1.0.0rc1-linux-x86_64-onnxruntime1.8.1
124+
pip install dist/mmdeploy-1.0.0rc1-py3-none-linux_x86_64.whl
125+
pip install sdk/python/mmdeploy_python-1.0.0rc1-cp38-none-linux_x86_64.whl
126126
cd ..
127127
# install inference engine: ONNX Runtime
128128
pip install onnxruntime==1.8.1
@@ -139,11 +139,11 @@ export LD_LIBRARY_PATH=$ONNXRUNTIME_DIR/lib:$LD_LIBRARY_PATH
139139

140140
```shell
141141
# install MMDeploy
142-
wget https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0rc0/mmdeploy-1.0.0rc0-linux-x86_64-cuda11.1-tensorrt8.2.3.0.tar.gz
143-
tar -zxvf mmdeploy-1.0.0rc0-linux-x86_64-cuda11.1-tensorrt8.2.3.0.tar.gz
144-
cd mmdeploy-1.0.0rc0-linux-x86_64-cuda11.1-tensorrt8.2.3.0
145-
pip install dist/mmdeploy-1.0.0rc0-py3-none-linux_x86_64.whl
146-
pip install sdk/python/mmdeploy_python-1.0.0rc0-cp38-none-linux_x86_64.whl
142+
wget https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0rc1/mmdeploy-1.0.0rc1-linux-x86_64-cuda11.1-tensorrt8.2.3.0.tar.gz
143+
tar -zxvf mmdeploy-1.0.0rc1-linux-x86_64-cuda11.1-tensorrt8.2.3.0.tar.gz
144+
cd mmdeploy-1.0.0rc1-linux-x86_64-cuda11.1-tensorrt8.2.3.0
145+
pip install dist/mmdeploy-1.0.0rc1-py3-none-linux_x86_64.whl
146+
pip install sdk/python/mmdeploy_python-1.0.0rc1-cp38-none-linux_x86_64.whl
147147
cd ..
148148
# install inference engine: TensorRT
149149
# !!! Download TensorRT-8.2.3.0 CUDA 11.x tar package from NVIDIA, and extract it to the current directory
@@ -232,7 +232,7 @@ result = inference_model(
232232
You can directly run MMDeploy demo programs in the precompiled package to get inference results.
233233

234234
```shell
235-
cd mmdeploy-1.0.0rc0-linux-x86_64-cuda11.1-tensorrt8.2.3.0
235+
cd mmdeploy-1.0.0rc1-linux-x86_64-cuda11.1-tensorrt8.2.3.0
236236
# run python demo
237237
python sdk/example/python/object_detection.py cuda ../mmdeploy_model/faster-rcnn ../mmdetection/demo/demo.jpg
238238
# run C/C++ demo

docs/zh_cn/02-how-to-run/prebuilt_package_windows.md

+19-19
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@ ______________________________________________________________________
2323

2424
目前,`MMDeploy`在`Windows`平台下提供`TensorRT`以及`ONNX Runtime`两种预编译包,可以从[Releases](https://github.com/open-mmlab/mmdeploy/releases)获取。
2525

26-
本篇教程以`mmdeploy-1.0.0rc0-windows-amd64-onnxruntime1.8.1.zip`和`mmdeploy-1.0.0rc0-windows-amd64-cuda11.1-tensorrt8.2.3.0.zip`为例,展示预编译包的使用方法。
26+
本篇教程以`mmdeploy-1.0.0rc1-windows-amd64-onnxruntime1.8.1.zip`和`mmdeploy-1.0.0rc1-windows-amd64-cuda11.1-tensorrt8.2.3.0.zip`为例,展示预编译包的使用方法。
2727

2828
为了方便使用者快速上手,本教程以分类模型(mmclassification)为例,展示两种预编译包的使用方法。
2929

@@ -88,9 +88,9 @@ ______________________________________________________________________
8888
5. 安装`mmdeploy`(模型转换)以及`mmdeploy_python`(模型推理Python API)的预编译包
8989

9090
```bash
91-
# 先下载 mmdeploy-1.0.0rc0-windows-amd64-onnxruntime1.8.1.zip
92-
pip install .\mmdeploy-1.0.0rc0-windows-amd64-onnxruntime1.8.1\dist\mmdeploy-1.0.0rc0-py38-none-win_amd64.whl
93-
pip install .\mmdeploy-1.0.0rc0-windows-amd64-onnxruntime1.8.1\sdk\python\mmdeploy_python-1.0.0rc0-cp38-none-win_amd64.whl
91+
# 先下载 mmdeploy-1.0.0rc1-windows-amd64-onnxruntime1.8.1.zip
92+
pip install .\mmdeploy-1.0.0rc1-windows-amd64-onnxruntime1.8.1\dist\mmdeploy-1.0.0rc1-py38-none-win_amd64.whl
93+
pip install .\mmdeploy-1.0.0rc1-windows-amd64-onnxruntime1.8.1\sdk\python\mmdeploy_python-1.0.0rc1-cp38-none-win_amd64.whl
9494
```
9595

9696
:point_right: 如果之前安装过,需要先卸载后再安装。
@@ -115,9 +115,9 @@ ______________________________________________________________________
115115
5. 安装`mmdeploy`(模型转换)以及`mmdeploy_python`(模型推理Python API)的预编译包
116116

117117
```bash
118-
# 先下载 mmdeploy-1.0.0rc0-windows-amd64-cuda11.1-tensorrt8.2.3.0.zip
119-
pip install .\mmdeploy-1.0.0rc0-windows-amd64-cuda11.1-tensorrt8.2.3.0\dist\mmdeploy-1.0.0rc0-py38-none-win_amd64.whl
120-
pip install .\mmdeploy-1.0.0rc0-windows-amd64-cuda11.1-tensorrt8.2.3.0\sdk\python\mmdeploy_python-1.0.0rc0-cp38-none-win_amd64.whl
118+
# 先下载 mmdeploy-1.0.0rc1-windows-amd64-cuda11.1-tensorrt8.2.3.0.zip
119+
pip install .\mmdeploy-1.0.0rc1-windows-amd64-cuda11.1-tensorrt8.2.3.0\dist\mmdeploy-1.0.0rc1-py38-none-win_amd64.whl
120+
pip install .\mmdeploy-1.0.0rc1-windows-amd64-cuda11.1-tensorrt8.2.3.0\sdk\python\mmdeploy_python-1.0.0rc1-cp38-none-win_amd64.whl
121121
```
122122

123123
:point_right: 如果之前安装过,需要先卸载后再安装
@@ -146,7 +146,7 @@ ______________________________________________________________________
146146

147147
```
148148
..
149-
|-- mmdeploy-1.0.0rc0-windows-amd64-onnxruntime1.8.1
149+
|-- mmdeploy-1.0.0rc1-windows-amd64-onnxruntime1.8.1
150150
|-- mmclassification
151151
|-- mmdeploy
152152
`-- resnet18_8xb32_in1k_20210831-fbbb1da6.pth
@@ -194,7 +194,7 @@ export2SDK(deploy_cfg, model_cfg, work_dir, pth=model_checkpoint, device=device)
194194

195195
```
196196
..
197-
|-- mmdeploy-1.0.0rc0-windows-amd64-cuda11.1-tensorrt8.2.3.0
197+
|-- mmdeploy-1.0.0rc1-windows-amd64-cuda11.1-tensorrt8.2.3.0
198198
|-- mmclassification
199199
|-- mmdeploy
200200
`-- resnet18_8xb32_in1k_20210831-fbbb1da6.pth
@@ -257,8 +257,8 @@ export2SDK(deploy_cfg, model_cfg, work_dir, pth=model_checkpoint, device=device)
257257

258258
```
259259
.
260-
|-- mmdeploy-1.0.0rc0-windows-amd64-cuda11.1-tensorrt8.2.3.0
261-
|-- mmdeploy-1.0.0rc0-windows-amd64-onnxruntime1.8.1
260+
|-- mmdeploy-1.0.0rc1-windows-amd64-cuda11.1-tensorrt8.2.3.0
261+
|-- mmdeploy-1.0.0rc1-windows-amd64-onnxruntime1.8.1
262262
|-- mmclassification
263263
|-- mmdeploy
264264
|-- resnet18_8xb32_in1k_20210831-fbbb1da6.pth
@@ -327,15 +327,15 @@ python .\mmdeploy\demo\python\image_classification.py cpu .\work_dir\onnx\resnet
327327

328328
1. 编译 examples
329329

330-
`mmdeploy-1.0.0rc0-windows-amd64-onnxruntime1.8.1\sdk\example`目录下
330+
`mmdeploy-1.0.0rc1-windows-amd64-onnxruntime1.8.1\sdk\example`目录下
331331

332332
```
333333
// 部分路径根据实际位置进行修改
334334
mkdir build
335335
cd build
336336
cmake ..\cpp -A x64 -T v142 `
337337
-DOpenCV_DIR=C:\Deps\opencv\build\x64\vc15\lib `
338-
-DMMDeploy_DIR=C:\workspace\mmdeploy-1.0.0rc0-windows-amd64-onnxruntime1.8.1\sdk\lib\cmake\MMDeploy `
338+
-DMMDeploy_DIR=C:\workspace\mmdeploy-1.0.0rc1-windows-amd64-onnxruntime1.8.1\sdk\lib\cmake\MMDeploy `
339339
-DONNXRUNTIME_DIR=C:\Deps\onnxruntime\onnxruntime-win-gpu-x64-1.8.1
340340
341341
cmake --build . --config Release
@@ -345,15 +345,15 @@ python .\mmdeploy\demo\python\image_classification.py cpu .\work_dir\onnx\resnet
345345

346346
:point_right: 目的是使exe运行时可以正确找到相关dll
347347

348-
若选择添加环境变量,则将`mmdeploy`的运行时库路径(`mmdeploy-1.0.0rc0-windows-amd64-onnxruntime1.8.1\sdk\bin`)添加到PATH,可参考onnxruntime的添加过程。
348+
若选择添加环境变量,则将`mmdeploy`的运行时库路径(`mmdeploy-1.0.0rc1-windows-amd64-onnxruntime1.8.1\sdk\bin`)添加到PATH,可参考onnxruntime的添加过程。
349349

350350
若选择拷贝动态库,而将bin目录中的dll拷贝到刚才编译出的exe(build/Release)的同级目录下。
351351

352352
3. 推理:
353353

354354
这里建议使用cmd,这样如果exe运行时如果找不到相关的dll的话会有弹窗
355355

356-
在mmdeploy-1.0.0rc0-windows-amd64-onnxruntime1.8.1\\sdk\\example\\build\\Release目录下:
356+
在mmdeploy-1.0.0rc1-windows-amd64-onnxruntime1.8.1\\sdk\\example\\build\\Release目录下:
357357

358358
```
359359
.\image_classification.exe cpu C:\workspace\work_dir\onnx\resnet\ C:\workspace\mmclassification\demo\demo.JPEG
@@ -363,15 +363,15 @@ python .\mmdeploy\demo\python\image_classification.py cpu .\work_dir\onnx\resnet
363363

364364
1. 编译 examples
365365

366-
在mmdeploy-1.0.0rc0-windows-amd64-cuda11.1-tensorrt8.2.3.0\\sdk\\example目录下
366+
在mmdeploy-1.0.0rc1-windows-amd64-cuda11.1-tensorrt8.2.3.0\\sdk\\example目录下
367367

368368
```
369369
// 部分路径根据所在硬盘的位置进行修改
370370
mkdir build
371371
cd build
372372
cmake ..\cpp -A x64 -T v142 `
373373
-DOpenCV_DIR=C:\Deps\opencv\build\x64\vc15\lib `
374-
-DMMDeploy_DIR=C:\workspace\mmdeploy-1.0.0rc0-windows-amd64-cuda11.1-tensorrt8.2.3.0\sdk\lib\cmake\MMDeploy `
374+
-DMMDeploy_DIR=C:\workspace\mmdeploy-1.0.0rc1-windows-amd64-cuda11.1-tensorrt8.2.3.0\sdk\lib\cmake\MMDeploy `
375375
-DTENSORRT_DIR=C:\Deps\tensorrt\TensorRT-8.2.3.0 `
376376
-DCUDNN_DIR=C:\Deps\cudnn\8.2.1
377377
cmake --build . --config Release
@@ -381,15 +381,15 @@ python .\mmdeploy\demo\python\image_classification.py cpu .\work_dir\onnx\resnet
381381

382382
:point_right: 目的是使exe运行时可以正确找到相关dll
383383

384-
若选择添加环境变量,则将`mmdeploy`的运行时库路径(`mmdeploy-1.0.0rc0-windows-amd64-cuda11.1-tensorrt8.2.3.0\sdk\bin`)添加到PATH,可参考onnxruntime的添加过程。
384+
若选择添加环境变量,则将`mmdeploy`的运行时库路径(`mmdeploy-1.0.0rc1-windows-amd64-cuda11.1-tensorrt8.2.3.0\sdk\bin`)添加到PATH,可参考onnxruntime的添加过程。
385385

386386
若选择拷贝动态库,而将bin目录中的dll拷贝到刚才编译出的exe(build/Release)的同级目录下。
387387

388388
3. 推理
389389

390390
这里建议使用cmd,这样如果exe运行时如果找不到相关的dll的话会有弹窗
391391

392-
在mmdeploy-1.0.0rc0-windows-amd64-cuda11.1-tensorrt8.2.3.0\\sdk\\example\\build\\Release目录下:
392+
在mmdeploy-1.0.0rc1-windows-amd64-cuda11.1-tensorrt8.2.3.0\\sdk\\example\\build\\Release目录下:
393393

394394
```
395395
.\image_classification.exe cuda C:\workspace\work_dir\trt\resnet C:\workspace\mmclassification\demo\demo.JPEG

docs/zh_cn/get_started.md

+11-11
Original file line numberDiff line numberDiff line change
@@ -113,11 +113,11 @@ mim install "mmcv>=2.0.0rc2"
113113

114114
```shell
115115
# 安装 MMDeploy ONNX Runtime 自定义算子库和推理 SDK
116-
wget https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0rc0/mmdeploy-1.0.0rc0-linux-x86_64-onnxruntime1.8.1.tar.gz
117-
tar -zxvf mmdeploy-1.0.0rc0-linux-x86_64-onnxruntime1.8.1.tar.gz
118-
cd mmdeploy-1.0.0rc0-linux-x86_64-onnxruntime1.8.1
119-
pip install dist/mmdeploy-1.0.0rc0-py3-none-linux_x86_64.whl
120-
pip install sdk/python/mmdeploy_python-1.0.0rc0-cp38-none-linux_x86_64.whl
116+
wget https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0rc1/mmdeploy-1.0.0rc1-linux-x86_64-onnxruntime1.8.1.tar.gz
117+
tar -zxvf mmdeploy-1.0.0rc1-linux-x86_64-onnxruntime1.8.1.tar.gz
118+
cd mmdeploy-1.0.0rc1-linux-x86_64-onnxruntime1.8.1
119+
pip install dist/mmdeploy-1.0.0rc1-py3-none-linux_x86_64.whl
120+
pip install sdk/python/mmdeploy_python-1.0.0rc1-cp38-none-linux_x86_64.whl
121121
cd ..
122122
# 安装推理引擎 ONNX Runtime
123123
pip install onnxruntime==1.8.1
@@ -134,11 +134,11 @@ export LD_LIBRARY_PATH=$ONNXRUNTIME_DIR/lib:$LD_LIBRARY_PATH
134134

135135
```shell
136136
# 安装 MMDeploy TensorRT 自定义算子库和推理 SDK
137-
wget https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0rc0/mmdeploy-1.0.0rc0-linux-x86_64-cuda11.1-tensorrt8.2.3.0.tar.gz
138-
tar -zxvf mmdeploy-1.0.0rc0-linux-x86_64-cuda11.1-tensorrt8.2.3.0.tar.gz
139-
cd mmdeploy-1.0.0rc0-linux-x86_64-cuda11.1-tensorrt8.2.3.0
140-
pip install dist/mmdeploy-1.0.0rc0-py3-none-linux_x86_64.whl
141-
pip install sdk/python/mmdeploy_python-1.0.0rc0-cp38-none-linux_x86_64.whl
137+
wget https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0rc1/mmdeploy-1.0.0rc1-linux-x86_64-cuda11.1-tensorrt8.2.3.0.tar.gz
138+
tar -zxvf mmdeploy-1.0.0rc1-linux-x86_64-cuda11.1-tensorrt8.2.3.0.tar.gz
139+
cd mmdeploy-1.0.0rc1-linux-x86_64-cuda11.1-tensorrt8.2.3.0
140+
pip install dist/mmdeploy-1.0.0rc1-py3-none-linux_x86_64.whl
141+
pip install sdk/python/mmdeploy_python-1.0.0rc1-cp38-none-linux_x86_64.whl
142142
cd ..
143143
# 安装推理引擎 TensorRT
144144
# !!! 从 NVIDIA 官网下载 TensorRT-8.2.3.0 CUDA 11.x 安装包并解压到当前目录
@@ -226,7 +226,7 @@ result = inference_model(
226226
你可以直接运行预编译包中的 demo 程序,输入 SDK Model 和图像,进行推理,并查看推理结果。
227227

228228
```shell
229-
cd mmdeploy-1.0.0rc0-linux-x86_64-cuda11.1-tensorrt8.2.3.0
229+
cd mmdeploy-1.0.0rc1-linux-x86_64-cuda11.1-tensorrt8.2.3.0
230230
# 运行 python demo
231231
python sdk/example/python/object_detection.py cuda ../mmdeploy_model/faster-rcnn ../mmdetection/demo/demo.jpg
232232
# 运行 C/C++ demo

mmdeploy/version.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
# Copyright (c) OpenMMLab. All rights reserved.
22
from typing import Tuple
33

4-
__version__ = '1.0.0rc0'
4+
__version__ = '1.0.0rc1'
55
short_version = __version__
66

77

Original file line numberDiff line numberDiff line change
@@ -1,2 +1,2 @@
11
# Copyright (c) OpenMMLab. All rights reserved.
2-
__version__ = '1.0.0rc0'
2+
__version__ = '1.0.0rc1'

0 commit comments

Comments
 (0)