|
10 | 10 |
|
11 | 11 | import torch
|
12 | 12 |
|
13 |
| -from .constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD |
14 | 13 | from .convert import convert_state_dict
|
15 | 14 | from .model import CLIP, CustomTextCLIP, convert_weights_to_lp, convert_to_custom_text_state_dict,\
|
16 | 15 | resize_pos_embed, get_cast_dtype, resize_text_pos_embed, set_model_preprocess_cfg
|
17 | 16 | from .coca_model import CoCa
|
18 | 17 | from .loss import ClipLoss, DistillClipLoss, CoCaLoss, SigLipLoss
|
19 |
| -from .openai import load_openai_model |
20 | 18 | from .pretrained import is_pretrained_cfg, get_pretrained_cfg, download_pretrained,\
|
21 | 19 | list_pretrained_tags_by_model, download_pretrained_from_hf
|
22 | 20 | from .transform import image_transform_v2, AugmentationCfg, PreprocessCfg, merge_preprocess_dict, merge_preprocess_kwargs
|
|
_MODEL_CONFIGS = {}  # dictionary (model_name: config) of model architecture configs
|
28 | 26 |
|
29 | 27 |
|
30 |
| -try: |
31 |
| - import _codecs |
32 |
| - import numpy as np |
33 |
| - # add safe globals that are known to be needed for metaclip weights loading in weights_only=True mode |
34 |
| - torch.serialization.add_safe_globals([ |
35 |
| - _codecs.encode, # this one not needed for PyTorch >= 2.5.0 |
36 |
| - np.core.multiarray.scalar, |
37 |
| - np.dtype, |
38 |
| - np.dtypes.Float64DType, |
39 |
| - ]) |
40 |
| -except Exception: |
41 |
| - pass |
42 |
| - |
43 |
| - |
44 | 28 | def _natural_key(string_):
|
45 | 29 | return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())]
|
46 | 30 |
|
|
0 commit comments