Skip to content

Commit ff8fddc

Browse files
authored
Relative -> Absolute imports (#343)
1 parent 7a2afa5 commit ff8fddc

File tree

22 files changed

+88
-83
lines changed

22 files changed

+88
-83
lines changed

finetrainers/data/dataloader.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@
44
import torch.distributed.checkpoint.stateful
55
import torchdata.stateful_dataloader
66

7-
from ..logging import get_logger
7+
from finetrainers.logging import get_logger
88

99

1010
logger = get_logger()

finetrainers/data/dataset.py

Lines changed: 9 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -17,11 +17,12 @@
1717
from huggingface_hub import list_repo_files, repo_exists, snapshot_download
1818
from tqdm.auto import tqdm
1919

20-
from .. import constants
21-
from .. import functional as FF
22-
from ..logging import get_logger
23-
from ..utils.import_utils import is_datasets_version
24-
from . import utils
20+
from finetrainers import constants
21+
from finetrainers import functional as FF
22+
from finetrainers.logging import get_logger
23+
from finetrainers.utils.import_utils import is_datasets_version
24+
25+
from .utils import find_files
2526

2627

2728
import decord # isort:skip
@@ -48,7 +49,7 @@ def __init__(self, root: str, infinite: bool = False) -> None:
4849
self.infinite = infinite
4950

5051
data = []
51-
caption_files = sorted(utils.find_files(self.root.as_posix(), "*.txt", depth=0))
52+
caption_files = sorted(find_files(self.root.as_posix(), "*.txt", depth=0))
5253
for caption_file in caption_files:
5354
data_file = self._find_data_file(caption_file)
5455
if data_file:
@@ -122,7 +123,7 @@ def __init__(self, root: str, infinite: bool = False) -> None:
122123
self.infinite = infinite
123124

124125
data = []
125-
caption_files = sorted(utils.find_files(self.root.as_posix(), "*.txt", depth=0))
126+
caption_files = sorted(find_files(self.root.as_posix(), "*.txt", depth=0))
126127
for caption_file in caption_files:
127128
data_file = self._find_data_file(caption_file)
128129
if data_file:
@@ -926,7 +927,7 @@ def _initialize_webdataset(
926927
def _has_data_caption_file_pairs(root: Union[pathlib.Path, List[str]], remote: bool = False) -> bool:
927928
# TODO(aryan): this logic can be improved
928929
if not remote:
929-
caption_files = utils.find_files(root.as_posix(), "*.txt", depth=0)
930+
caption_files = find_files(root.as_posix(), "*.txt", depth=0)
930931
for caption_file in caption_files:
931932
caption_file = pathlib.Path(caption_file)
932933
for extension in [*constants.SUPPORTED_IMAGE_FILE_EXTENSIONS, *constants.SUPPORTED_VIDEO_FILE_EXTENSIONS]:

finetrainers/data/precomputation.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -4,8 +4,8 @@
44
import torch
55
from tqdm.auto import tqdm
66

7-
from .. import utils
8-
from ..logging import get_logger
7+
from finetrainers.logging import get_logger
8+
from finetrainers.utils import delete_files
99

1010

1111
logger = get_logger()
@@ -147,7 +147,7 @@ def __init__(
147147
self._save_dir.mkdir(parents=True, exist_ok=True)
148148

149149
subdirectories = [f for f in self._save_dir.iterdir() if f.is_dir()]
150-
utils.delete_files(subdirectories)
150+
delete_files(subdirectories)
151151

152152
def consume(
153153
self,

finetrainers/models/cogvideox/base_specification.py

Lines changed: 9 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -13,13 +13,14 @@
1313
from PIL.Image import Image
1414
from transformers import AutoModel, AutoTokenizer, T5EncoderModel, T5Tokenizer
1515

16-
from ... import data
17-
from ...logging import get_logger
18-
from ...processors import ProcessorMixin, T5Processor
19-
from ...typing import ArtifactType, SchedulerType
20-
from ...utils import _enable_vae_memory_optimizations, get_non_null_items
21-
from ..modeling_utils import ModelSpecification
22-
from ..utils import DiagonalGaussianDistribution
16+
from finetrainers.data import VideoArtifact
17+
from finetrainers.logging import get_logger
18+
from finetrainers.models.modeling_utils import ModelSpecification
19+
from finetrainers.models.utils import DiagonalGaussianDistribution
20+
from finetrainers.processors import ProcessorMixin, T5Processor
21+
from finetrainers.typing import ArtifactType, SchedulerType
22+
from finetrainers.utils import _enable_vae_memory_optimizations, get_non_null_items
23+
2324
from .utils import prepare_rotary_positional_embeddings
2425

2526

@@ -359,7 +360,7 @@ def validation(
359360
}
360361
generation_kwargs = get_non_null_items(generation_kwargs)
361362
video = pipeline(**generation_kwargs).frames[0]
362-
return [data.VideoArtifact(value=video)]
363+
return [VideoArtifact(value=video)]
363364

364365
def _save_lora_weights(
365366
self,

finetrainers/models/cogview4/base_specification.py

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -12,13 +12,13 @@
1212
from diffusers.models.autoencoders.vae import DiagonalGaussianDistribution
1313
from transformers import AutoTokenizer, GlmModel
1414

15-
from ... import data
16-
from ... import functional as FF
17-
from ...logging import get_logger
18-
from ...processors import CogView4GLMProcessor, ProcessorMixin
19-
from ...typing import ArtifactType, SchedulerType
20-
from ...utils import _enable_vae_memory_optimizations, get_non_null_items
21-
from ..modeling_utils import ModelSpecification
15+
import finetrainers.functional as FF
16+
from finetrainers.data import ImageArtifact
17+
from finetrainers.logging import get_logger
18+
from finetrainers.models.modeling_utils import ModelSpecification
19+
from finetrainers.processors import CogView4GLMProcessor, ProcessorMixin
20+
from finetrainers.typing import ArtifactType, SchedulerType
21+
from finetrainers.utils import _enable_vae_memory_optimizations, get_non_null_items
2222

2323

2424
logger = get_logger()
@@ -345,7 +345,7 @@ def validation(
345345
}
346346
generation_kwargs = get_non_null_items(generation_kwargs)
347347
image = pipeline(**generation_kwargs).images[0]
348-
return [data.ImageArtifact(value=image)]
348+
return [ImageArtifact(value=image)]
349349

350350
def _save_lora_weights(
351351
self,

finetrainers/models/hunyuan_video/base_specification.py

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -12,13 +12,13 @@
1212
from diffusers.models.autoencoders.vae import DiagonalGaussianDistribution
1313
from transformers import AutoTokenizer, CLIPTextModel, CLIPTokenizer, LlamaModel
1414

15-
from ... import data
16-
from ... import functional as FF
17-
from ...logging import get_logger
18-
from ...processors import CLIPPooledProcessor, LlamaProcessor, ProcessorMixin
19-
from ...typing import ArtifactType, SchedulerType
20-
from ...utils import _enable_vae_memory_optimizations, get_non_null_items
21-
from ..modeling_utils import ModelSpecification
15+
import finetrainers.functional as FF
16+
from finetrainers.data import VideoArtifact
17+
from finetrainers.logging import get_logger
18+
from finetrainers.models.modeling_utils import ModelSpecification
19+
from finetrainers.processors import CLIPPooledProcessor, LlamaProcessor, ProcessorMixin
20+
from finetrainers.typing import ArtifactType, SchedulerType
21+
from finetrainers.utils import _enable_vae_memory_optimizations, get_non_null_items
2222

2323

2424
logger = get_logger()
@@ -347,7 +347,7 @@ def validation(
347347
}
348348
generation_kwargs = get_non_null_items(generation_kwargs)
349349
video = pipeline(**generation_kwargs).frames[0]
350-
return [data.VideoArtifact(value=video)]
350+
return [VideoArtifact(value=video)]
351351

352352
def _save_lora_weights(
353353
self,

finetrainers/models/ltx_video/base_specification.py

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -15,14 +15,14 @@
1515
from PIL.Image import Image
1616
from transformers import AutoModel, AutoTokenizer, T5EncoderModel, T5Tokenizer
1717

18-
from ... import data
19-
from ... import functional as FF
20-
from ...logging import get_logger
21-
from ...parallel import ParallelBackendEnum
22-
from ...processors import ProcessorMixin, T5Processor
23-
from ...typing import ArtifactType, SchedulerType
24-
from ...utils import _enable_vae_memory_optimizations, get_non_null_items
25-
from ..modeling_utils import ModelSpecification
18+
import finetrainers.functional as FF
19+
from finetrainers.data import VideoArtifact
20+
from finetrainers.logging import get_logger
21+
from finetrainers.models.modeling_utils import ModelSpecification
22+
from finetrainers.parallel import ParallelBackendEnum
23+
from finetrainers.processors import ProcessorMixin, T5Processor
24+
from finetrainers.typing import ArtifactType, SchedulerType
25+
from finetrainers.utils import _enable_vae_memory_optimizations, get_non_null_items
2626

2727

2828
logger = get_logger()
@@ -373,7 +373,7 @@ def validation(
373373
}
374374
generation_kwargs = get_non_null_items(generation_kwargs)
375375
video = pipeline(**generation_kwargs).frames[0]
376-
return [data.VideoArtifact(value=video)]
376+
return [VideoArtifact(value=video)]
377377

378378
def _save_lora_weights(
379379
self,

finetrainers/models/modeling_utils.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -5,11 +5,11 @@
55
from diffusers.configuration_utils import FrozenDict
66
from PIL.Image import Image
77

8-
from ..logging import get_logger
9-
from ..parallel import ParallelBackendEnum
10-
from ..processors import ProcessorMixin
11-
from ..typing import ArtifactType, SchedulerType, TokenizerType
12-
from ..utils import resolve_component_cls
8+
from finetrainers.logging import get_logger
9+
from finetrainers.parallel import ParallelBackendEnum
10+
from finetrainers.processors import ProcessorMixin
11+
from finetrainers.typing import ArtifactType, SchedulerType, TokenizerType
12+
from finetrainers.utils import resolve_component_cls
1313

1414

1515
logger = get_logger()

finetrainers/models/wan/base_specification.py

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -14,13 +14,13 @@
1414
from PIL.Image import Image
1515
from transformers import AutoModel, AutoTokenizer, UMT5EncoderModel
1616

17-
from ... import data
18-
from ... import functional as FF
19-
from ...logging import get_logger
20-
from ...processors import ProcessorMixin, T5Processor
21-
from ...typing import ArtifactType, SchedulerType
22-
from ...utils import get_non_null_items
23-
from ..modeling_utils import ModelSpecification
17+
import finetrainers.functional as FF
18+
from finetrainers.data import VideoArtifact
19+
from finetrainers.logging import get_logger
20+
from finetrainers.models.modeling_utils import ModelSpecification
21+
from finetrainers.processors import ProcessorMixin, T5Processor
22+
from finetrainers.typing import ArtifactType, SchedulerType
23+
from finetrainers.utils import get_non_null_items
2424

2525

2626
logger = get_logger()
@@ -335,7 +335,7 @@ def validation(
335335
}
336336
generation_kwargs = get_non_null_items(generation_kwargs)
337337
video = pipeline(**generation_kwargs).frames[0]
338-
return [data.VideoArtifact(value=video)]
338+
return [VideoArtifact(value=video)]
339339

340340
def _save_lora_weights(
341341
self,

finetrainers/parallel/accelerate.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -8,8 +8,9 @@
88
import torch
99
from diffusers.utils import is_accelerate_available
1010

11-
from ..logging import get_logger
12-
from ..utils import get_device_info
11+
from finetrainers.logging import get_logger
12+
from finetrainers.utils import get_device_info
13+
1314
from .base import BaseCheckpointer, BaseParallelBackend
1415

1516

0 commit comments

Comments (0)