
Commit 0244625

resolved comments
1 parent ead1c79 commit 0244625

12 files changed: +394 −366 lines

notebooks/000_getting_started/001_getting_started.ipynb

+2-2
@@ -168,9 +168,9 @@
     "from anomalib import TaskType\n",
     "from anomalib.data import MVTec\n",
     "from anomalib.data.utils import read_image\n",
-    "from anomalib.deploy import OpenVINOInferencer\n",
+    "from anomalib.deploy import OpenVINOInferencer, ExportType\n",
     "from anomalib.engine import Engine\n",
-    "from anomalib.models import Padim, ExportType"
+    "from anomalib.models import Padim"
    ]
   },
   {
src/anomalib/deploy/__init__.py

+2-5
@@ -3,10 +3,7 @@
 # Copyright (C) 2022-2024 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 
+from .export import ExportType
 from .inferencers import Inferencer, OpenVINOInferencer, TorchInferencer
 
-__all__ = [
-    "Inferencer",
-    "OpenVINOInferencer",
-    "TorchInferencer",
-]
+__all__ = ["Inferencer", "OpenVINOInferencer", "TorchInferencer", "ExportType"]
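
With the re-export above, the enum and the inferencers that consume its artifacts share one import path. A short sketch of what the package now exposes:

from anomalib.deploy import ExportType, Inferencer, OpenVINOInferencer, TorchInferencer

# ExportType subclasses str, so members compare equal to plain strings,
# which keeps CLI flags and config files simple.
assert ExportType.TORCH == "torch"
assert all(isinstance(member, str) for member in ExportType)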

src/anomalib/deploy/export.py

+373
@@ -0,0 +1,373 @@
+"""Utilities for optimization and OpenVINO conversion."""
+
+# Copyright (C) 2022-2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+
+import json
+import logging
+from collections.abc import Callable
+from enum import Enum
+from pathlib import Path
+from typing import TYPE_CHECKING, Any
+
+import numpy as np
+import torch
+from torch import nn
+from torchvision.transforms.v2 import CenterCrop, Compose, Resize, Transform
+
+from anomalib import TaskType
+from anomalib.data.transforms import ExportableCenterCrop
+from anomalib.utils.exceptions import try_import
+
+if TYPE_CHECKING:
+    from torch.types import Number
+
+logger = logging.getLogger("anomalib")
+
+if try_import("openvino"):
+    from openvino.runtime import serialize
+    from openvino.tools.ovc import convert_model
+else:
+    # Keep the names defined so the availability check in to_openvino sees None
+    # instead of raising a NameError when OpenVINO is not installed.
+    convert_model = None
+    serialize = None
+
+
+class ExportType(str, Enum):
+    """Model export type.
+
+    Examples:
+        >>> from anomalib.deploy import ExportType
+        >>> ExportType.ONNX
+        'onnx'
+        >>> ExportType.OPENVINO
+        'openvino'
+        >>> ExportType.TORCH
+        'torch'
+    """
+
+    ONNX = "onnx"
+    OPENVINO = "openvino"
+    TORCH = "torch"
+
+
+class InferenceModel(nn.Module):
+    """Inference model for export.
+
+    The InferenceModel is used to wrap the model and transform for exporting to torch and ONNX/OpenVINO.
+
+    Args:
+        model (nn.Module): Model to export.
+        transform (Transform): Input transform for the model.
+        disable_antialias (bool, optional): Disable antialiasing in the Resize transforms of the given transform.
+            This is needed for ONNX/OpenVINO export, as antialiasing is not supported in the ONNX opset.
+    """
+
+    def __init__(self, model: nn.Module, transform: Transform, disable_antialias: bool = False) -> None:
+        super().__init__()
+        self.model = model
+        self.transform = transform
+        self.convert_center_crop()
+        if disable_antialias:
+            self.disable_antialias()
+
+    def forward(self, batch: torch.Tensor) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]:
+        """Transform the input batch and pass it through the model."""
+        batch = self.transform(batch)
+        return self.model(batch)
+
+    def disable_antialias(self) -> None:
+        """Disable antialiasing in the Resize transforms of the given transform.
+
+        This is needed for ONNX/OpenVINO export, as antialiasing is not supported in the ONNX opset.
+        """
+        if isinstance(self.transform, Resize):
+            self.transform.antialias = False
+        if isinstance(self.transform, Compose):
+            for transform in self.transform.transforms:
+                if isinstance(transform, Resize):
+                    transform.antialias = False
+
+    def convert_center_crop(self) -> None:
+        """Convert CenterCrop to ExportableCenterCrop for ONNX export.
+
+        The original CenterCrop transform is not supported in ONNX export. This method replaces the CenterCrop
+        with ExportableCenterCrop, which is supported in ONNX export. For more details, see the implementation
+        of ExportableCenterCrop.
+        """
+        if isinstance(self.transform, CenterCrop):
+            self.transform = ExportableCenterCrop(size=self.transform.size)
+        elif isinstance(self.transform, Compose):
+            transforms = self.transform.transforms
+            for index in range(len(transforms)):
+                if isinstance(transforms[index], CenterCrop):
+                    transforms[index] = ExportableCenterCrop(size=transforms[index].size)
+
+
+class ExportMixin:
+    """This mixin allows exporting models to torch and ONNX/OpenVINO."""
+
+    model: nn.Module
+    transform: Transform
+    configure_transforms: Callable
+    device: torch.device
+
+    def to_torch(
+        self,
+        export_root: Path | str,
+        transform: Transform | None = None,
+        task: TaskType | None = None,
+    ) -> Path:
+        """Export AnomalibModel to torch.
+
+        Args:
+            export_root (Path): Path to the output folder.
+            transform (Transform, optional): Input transform used for the model. If not provided, the transform
+                is taken from the model.
+                Defaults to ``None``.
+            task (TaskType | None): Task type.
+                Defaults to ``None``.
+
+        Returns:
+            Path: Path to the exported pytorch model.
+
+        Examples:
+            Assume that we have a model to train and we want to export it to torch format.
+
+            >>> from anomalib.data import Visa
+            >>> from anomalib.models import Patchcore
+            >>> from anomalib.engine import Engine
+            ...
+            >>> datamodule = Visa()
+            >>> model = Patchcore()
+            >>> engine = Engine()
+            ...
+            >>> engine.fit(model, datamodule)
+
+            Now that we have a model trained, we can export it to torch format.
+
+            >>> model.to_torch(
+            ...     export_root="path/to/export",
+            ...     transform=datamodule.test_data.transform,
+            ...     task=datamodule.test_data.task,
+            ... )
+        """
+        transform = transform or self.transform or self.configure_transforms()
+        inference_model = InferenceModel(model=self.model, transform=transform)
+        export_root = _create_export_root(export_root, ExportType.TORCH)
+        metadata = self.get_metadata(task=task)
+        pt_model_path = export_root / "model.pt"
+        torch.save(
+            obj={"model": inference_model, "metadata": metadata},
+            f=pt_model_path,
+        )
+        return pt_model_path
+
+    def to_onnx(
+        self,
+        export_root: Path | str,
+        transform: Transform | None = None,
+        task: TaskType | None = None,
+    ) -> Path:
+        """Export model to onnx.
+
+        Args:
+            export_root (Path): Path to the root folder of the exported model.
+            transform (Transform, optional): Input transform used for the model. If not provided, the transform
+                is taken from the model.
+                Defaults to ``None``.
+            task (TaskType | None): Task type.
+                Defaults to ``None``.
+
+        Returns:
+            Path: Path to the exported onnx model.
+
+        Examples:
+            Export the Lightning Model to ONNX:
+
+            >>> from anomalib.models import Patchcore
+            >>> from anomalib.data import Visa
+            ...
+            >>> datamodule = Visa()
+            >>> model = Patchcore()
+            ...
+            >>> model.to_onnx(
+            ...     export_root="path/to/export",
+            ...     transform=datamodule.test_data.transform,
+            ...     task=datamodule.test_data.task
+            ... )
+
+            Using Custom Transforms:
+            This example shows how to use a custom ``Compose`` object for the ``transform`` argument.
+
+            >>> from torchvision.transforms.v2 import Compose, Resize
+            >>> transform = Compose([Resize((224, 224))])
+            ...
+            >>> model.to_onnx(
+            ...     export_root="path/to/export",
+            ...     transform=transform,
+            ...     task="segmentation",
+            ... )
+        """
+        transform = transform or self.transform or self.configure_transforms()
+        inference_model = InferenceModel(model=self.model, transform=transform, disable_antialias=True)
+        export_root = _create_export_root(export_root, ExportType.ONNX)
+        self._write_metadata_to_json(export_root, task)
+        onnx_path = export_root / "model.onnx"
+        torch.onnx.export(
+            inference_model,
+            torch.zeros((1, 3, 1, 1)).to(self.device),
+            str(onnx_path),
+            opset_version=14,
+            dynamic_axes={"input": {0: "batch_size", 2: "height", 3: "width"}, "output": {0: "batch_size"}},
+            input_names=["input"],
+            output_names=["output"],
+        )
+
+        return onnx_path
+
+    def to_openvino(
+        self,
+        export_root: Path | str,
+        transform: Transform | None = None,
+        ov_args: dict[str, Any] | None = None,
+        task: TaskType | None = None,
+    ) -> Path:
+        """Convert the model to OpenVINO IR.
+
+        Args:
+            export_root (Path): Path to the export folder.
+            transform (Transform, optional): Input transform used for the model. If not provided, the transform
+                is taken from the model.
+                Defaults to ``None``.
+            ov_args: Model optimizer arguments for OpenVINO model conversion.
+                Defaults to ``None``.
+            task (TaskType | None): Task type.
+                Defaults to ``None``.
+
+        Returns:
+            Path: Path to the exported OpenVINO IR.
+
+        Raises:
+            ModuleNotFoundError: If OpenVINO is not installed.
+
+        Examples:
+            Export the Lightning Model to OpenVINO IR:
+            This example demonstrates how to export the Lightning Model to OpenVINO IR.
+
+            >>> from anomalib.models import Patchcore
+            >>> from anomalib.data import Visa
+            ...
+            >>> datamodule = Visa()
+            >>> model = Patchcore()
+            ...
+            >>> model.to_openvino(
+            ...     export_root="path/to/export",
+            ...     transform=datamodule.test_data.transform,
+            ...     task=datamodule.test_data.task
+            ... )
+
+            Using Custom Transforms:
+            This example shows how to use a custom ``Transform`` object for the ``transform`` argument.
+
+            >>> from torchvision.transforms.v2 import Resize
+            >>> transform = Resize((224, 224))
+            ...
+            >>> model.to_openvino(
+            ...     export_root="path/to/export",
+            ...     transform=transform,
+            ...     task="segmentation",
+            ... )
+        """
+        transform = transform or self.transform or self.configure_transforms()
+        export_root = _create_export_root(export_root, ExportType.OPENVINO)
+        inference_model = InferenceModel(model=self.model, transform=transform, disable_antialias=True)
+        self._write_metadata_to_json(export_root, task)
+        ov_model_path = export_root / "model.xml"
+        ov_args = {} if ov_args is None else ov_args
+        ov_args.update({"example_input": torch.zeros((1, 3, 1, 1)).to(self.device)})
+        if convert_model is not None and serialize is not None:
+            model = convert_model(inference_model, **ov_args)
+            serialize(model, ov_model_path)
+        else:
+            logger.exception("Could not find OpenVINO methods. Please check OpenVINO installation.")
+            raise ModuleNotFoundError
+        return ov_model_path
+
+    def get_metadata(
+        self,
+        task: TaskType | None = None,
+    ) -> dict[str, Any]:
+        """Get metadata for the exported model.
+
+        Args:
+            task (TaskType | None): Task type.
+                Defaults to None.
+
+        Returns:
+            dict[str, Any]: Metadata for the exported model.
+        """
+        data_metadata = {"task": task}
+        model_metadata = self._get_model_metadata()
+        metadata = {**data_metadata, **model_metadata}
+
+        # Convert torch tensors to python lists or values for json serialization.
+        for key, value in metadata.items():
+            if isinstance(value, torch.Tensor):
+                metadata[key] = value.numpy().tolist()
+
+        return metadata
+
+    def _get_model_metadata(self) -> dict[str, torch.Tensor]:
+        """Get metadata related to normalization from the model.
+
+        Returns:
+            dict[str, torch.Tensor]: Model metadata
+        """
+        metadata = {}
+        cached_metadata: dict[str, Number | torch.Tensor] = {}
+        for threshold_name in ("image_threshold", "pixel_threshold"):
+            if hasattr(self, threshold_name):
+                cached_metadata[threshold_name] = getattr(self, threshold_name).cpu().value.item()
+        if hasattr(self, "normalization_metrics") and self.normalization_metrics.state_dict() is not None:
+            for key, value in self.normalization_metrics.state_dict().items():
+                cached_metadata[key] = value.cpu()
+        # Remove undefined values by copying in a new dict
+        for key, val in cached_metadata.items():
+            if not np.isinf(val).all():
+                metadata[key] = val
+        del cached_metadata
+        return metadata
+
+    def _write_metadata_to_json(
+        self,
+        export_root: Path,
+        task: TaskType | None = None,
+    ) -> None:
+        """Write metadata to json file.
+
+        Args:
+            export_root (Path): Path to the exported model.
+            task (TaskType | None): Task type.
+                Defaults to None.
+        """
+        metadata = self.get_metadata(task=task)
+        with (export_root / "metadata.json").open("w", encoding="utf-8") as metadata_file:
+            json.dump(metadata, metadata_file, ensure_ascii=False, indent=4)
+
+
+def _create_export_root(export_root: str | Path, export_type: ExportType) -> Path:
+    """Create export directory.
+
+    Args:
+        export_root (str | Path): Path to the root folder of the exported model.
+        export_type (ExportType): Mode to export the model. Torch, ONNX or OpenVINO.
+
+    Returns:
+        Path: Path to the export directory.
+    """
+    export_root = Path(export_root) / "weights" / export_type.value
+    export_root.mkdir(parents=True, exist_ok=True)
+    return export_root
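
Taken together, the mixin gives every model class that inherits it three export entry points. A condensed end-to-end sketch based on the docstring examples above (the dataset and export paths are illustrative):

from anomalib.data import Visa
from anomalib.engine import Engine
from anomalib.models import Patchcore

datamodule = Visa()
model = Patchcore()
engine = Engine()
engine.fit(model, datamodule)

# Each call creates <export_root>/weights/<export_type>/ via _create_export_root;
# to_onnx and to_openvino also write metadata.json next to the model,
# while to_torch bundles the metadata inside model.pt.
torch_path = model.to_torch(export_root="path/to/export", task=datamodule.test_data.task)
onnx_path = model.to_onnx(export_root="path/to/export", task=datamodule.test_data.task)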
