@@ -1445,14 +1445,14 @@ def load(cls,
filename: The path to the file from which to load the trained model.
backend: A string identifying the used backend.
weights_only: Indicates whether unpickler should be restricted to loading only tensors, primitive types,
- dictionaries and any types added via `py:func:torch.serialization.add_safe_globals`.
- See `py:func:torch.load` with ``weights_only=True`` for more details. It it recommended to leave this
+ dictionaries and any types added via :py:func:`torch.serialization.add_safe_globals`.
+ See :py:func:`torch.load` with ``weights_only=True`` for more details. It is recommended to leave this
at the default value of ``None``, which sets the argument to ``False`` for torch<2.6, and ``True`` for
- higher versions of torch. If you experience issues with loading custom models (specified outside
+ higher versions of torch. If you experience issues with loading custom models (specified outside
of the CEBRA package), you can try to set this to ``False`` if you trust the source of the model.
kwargs: Optional keyword arguments passed directly to the loader.
- Return:
+ Returns:
The model to load.
Note:
@@ -1462,7 +1462,6 @@ def load(cls,
For information about the file format please refer to :py:meth:`cebra.CEBRA.save`.
Example:
-
>>> import cebra
>>> import numpy as np
>>> import tempfile
@@ -1476,7 +1475,6 @@ def load(cls,
>>> loaded_model = cebra.CEBRA.load(tmp_file)
>>> embedding = loaded_model.transform(dataset)
>>> tmp_file.unlink()
-
"""
supported_backends = ["auto", "sklearn", "torch"]
if backend not in supported_backends:
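
For context, the ``weights_only`` default described in the updated docstring (``None`` resolves to ``False`` for torch<2.6 and ``True`` for torch>=2.6) can be sketched roughly as follows; the ``_resolve_weights_only`` helper below is a hypothetical illustration of that rule, not CEBRA's actual implementation.

```python
# Minimal sketch of the documented weights_only default. The helper name and
# the exact version check are assumptions for illustration, not CEBRA API.
import torch
from packaging import version


def _resolve_weights_only(weights_only=None):
    """Return the effective flag that would be passed on to ``torch.load``."""
    if weights_only is None:
        # None -> False for torch < 2.6, True for torch >= 2.6, as documented.
        return version.parse(torch.__version__) >= version.parse("2.6")
    return weights_only


# Usage: pass weights_only=False explicitly only if you trust the model file.
print(_resolve_weights_only())       # depends on the installed torch version
print(_resolve_weights_only(False))  # False
```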