
Commit c84c9f6

added support for OpenVINO 2020
1 parent 7050ad4 commit c84c9f6

5 files changed (+42 lines added, -20 lines removed)

CHANGELOG.md

Lines changed: 1 addition & 0 deletions
@@ -27,6 +27,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 - React & Redux & Antd based dashboard
 - Yolov3 interpretation script fix and changes to mapping.json
 - YOLO format support ([#1151](https://github.com/opencv/cvat/pull/1151))
+- Added support for OpenVINO 2020

 ### Deprecated
 -

cvat/apps/auto_annotation/inference_engine.py

Lines changed: 15 additions & 2 deletions
@@ -2,7 +2,7 @@
 #
 # SPDX-License-Identifier: MIT

-from openvino.inference_engine import IENetwork, IEPlugin
+from openvino.inference_engine import IENetwork, IEPlugin, IECore, get_version

 import subprocess
 import os
@@ -19,7 +19,20 @@ def _check_instruction(instruction):
     )


-def make_plugin():
+def make_plugin_or_core():
+    version = get_version()
+    use_core_openvino = False
+    try:
+        major, minor, reference = [int(x) for x in version.split('.')]
+        if major >= 2 and minor >= 1 and reference >= 37988:
+            use_core_openvino = True
+    except Exception:
+        pass
+
+    if use_core_openvino:
+        ie = IECore()
+        return ie
+
     if _IE_PLUGINS_PATH is None:
         raise OSError('Inference engine plugin path env not found in the system.')

cvat/apps/auto_annotation/model_loader.py

Lines changed: 14 additions & 12 deletions
@@ -8,25 +8,22 @@
 import os
 import numpy as np

-from cvat.apps.auto_annotation.inference_engine import make_plugin, make_network
+from cvat.apps.auto_annotation.inference_engine import make_plugin_or_core, make_network

 class ModelLoader():
     def __init__(self, model, weights):
         self._model = model
         self._weights = weights

-        IE_PLUGINS_PATH = os.getenv("IE_PLUGINS_PATH")
-        if not IE_PLUGINS_PATH:
-            raise OSError("Inference engine plugin path env not found in the system.")
-
-        plugin = make_plugin()
+        core_or_plugin = make_plugin_or_core()
         network = make_network(self._model, self._weights)

-        supported_layers = plugin.get_supported_layers(network)
-        not_supported_layers = [l for l in network.layers.keys() if l not in supported_layers]
-        if len(not_supported_layers) != 0:
-            raise Exception("Following layers are not supported by the plugin for specified device {}:\n {}".
-                format(plugin.device, ", ".join(not_supported_layers)))
+        if getattr(core_or_plugin, 'get_supported_layers', False):
+            supported_layers = core_or_plugin.get_supported_layers(network)
+            not_supported_layers = [l for l in network.layers.keys() if l not in supported_layers]
+            if len(not_supported_layers) != 0:
+                raise Exception("Following layers are not supported by the plugin for specified device {}:\n {}".
+                    format(core_or_plugin.device, ", ".join(not_supported_layers)))

         iter_inputs = iter(network.inputs)
         self._input_blob_name = next(iter_inputs)
@@ -45,7 +42,12 @@ def __init__(self, model, weights):
         if self._input_blob_name in info_names:
             self._input_blob_name = next(iter_inputs)

-        self._net = plugin.load(network=network, num_requests=2)
+        if getattr(core_or_plugin, 'load_network', False):
+            self._net = core_or_plugin.load_network(network,
+                                                    "CPU",
+                                                    num_requests=2)
+        else:
+            self._net = core_or_plugin.load(network=network, num_requests=2)
         input_type = network.inputs[self._input_blob_name]
         self._input_layout = input_type if isinstance(input_type, list) else input_type.shape
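
Each call site in this commit repeats the same duck-typed dispatch: if the object returned by make_plugin_or_core() exposes load_network, it is an IECore and the device name is passed explicitly; otherwise it is a legacy IEPlugin that was already bound to a device. A hypothetical helper, not part of the commit, that centralizes that branch might look like this:

# Hypothetical helper, not part of the commit: one place for the
# IECore-vs-IEPlugin branch repeated in model_loader.py, dextr.py and views.py.
def load_executable_network(core_or_plugin, network, device="CPU", num_requests=1):
    if hasattr(core_or_plugin, 'load_network'):
        # OpenVINO 2020 Core API: IECore.load_network(network, device_name, ...)
        return core_or_plugin.load_network(network, device, num_requests=num_requests)
    # Legacy API: IEPlugin.load() uses the device the plugin was created for.
    return core_or_plugin.load(network=network, num_requests=num_requests)

In the commit itself the branch is simply inlined at each call site instead.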

cvat/apps/dextr_segmentation/dextr.py

Lines changed: 6 additions & 3 deletions
@@ -3,7 +3,7 @@
 #
 # SPDX-License-Identifier: MIT

-from cvat.apps.auto_annotation.inference_engine import make_plugin, make_network
+from cvat.apps.auto_annotation.inference_engine import make_plugin_or_core, make_network

 import os
 import cv2
@@ -32,12 +32,15 @@ def __init__(self):
     def handle(self, im_path, points):
         # Lazy initialization
         if not self._plugin:
-            self._plugin = make_plugin()
+            self._plugin = make_plugin_or_core()
             self._network = make_network(os.path.join(_DEXTR_MODEL_DIR, 'dextr.xml'),
                                          os.path.join(_DEXTR_MODEL_DIR, 'dextr.bin'))
             self._input_blob = next(iter(self._network.inputs))
             self._output_blob = next(iter(self._network.outputs))
-            self._exec_network = self._plugin.load(network=self._network)
+            if getattr(self._plugin, 'load_network', False):
+                self._exec_network = self._plugin.load_network(self._network)
+            else:
+                self._exec_network = self._plugin.load(network=self._network)

         image = PIL.Image.open(im_path)
         numpy_image = np.array(image)

cvat/apps/tf_annotation/views.py

Lines changed: 6 additions & 3 deletions
@@ -30,7 +30,7 @@ def load_image_into_numpy(image):


 def run_inference_engine_annotation(image_list, labels_mapping, treshold):
-    from cvat.apps.auto_annotation.inference_engine import make_plugin, make_network
+    from cvat.apps.auto_annotation.inference_engine import make_plugin_or_core, make_network

     def _normalize_box(box, w, h, dw, dh):
         xmin = min(int(box[0] * dw * w), w)
@@ -44,11 +44,14 @@ def _normalize_box(box, w, h, dw, dh):
     if MODEL_PATH is None:
         raise OSError('Model path env not found in the system.')

-    plugin = make_plugin()
+    core_or_plugin = make_plugin_or_core()
     network = make_network('{}.xml'.format(MODEL_PATH), '{}.bin'.format(MODEL_PATH))
     input_blob_name = next(iter(network.inputs))
     output_blob_name = next(iter(network.outputs))
-    executable_network = plugin.load(network=network)
+    if getattr(core_or_plugin, 'load_network', False):
+        executable_network = core_or_plugin.load_network(network)
+    else:
+        executable_network = core_or_plugin.load(network=network)
     job = rq.get_current_job()

     del network
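
Both branches above yield an ExecutableNetwork, which is why the inference code that follows these hunks does not need to change regardless of which API created it. A minimal usage sketch under that assumption; the input array and its shape are placeholders standing in for the values CVAT computes elsewhere in this function:

import numpy as np

def run_single_inference(executable_network, input_blob_name, output_blob_name):
    # Placeholder input; the real code resizes each frame to the network's
    # input layout before inference.
    dummy_frame = np.zeros((1, 3, 600, 600), dtype=np.float32)
    # ExecutableNetwork.infer() is available on the object returned by both
    # IECore.load_network() and IEPlugin.load().
    results = executable_network.infer({input_blob_name: dummy_frame})
    return results[output_blob_name]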
