Skip to content

Commit 7c6e078

Browse files
authored
Bump ruff version (#367)
* bump ruff version
* make style
1 parent 3fe1db0 commit 7c6e078

File tree

7 files changed

+20
-19
lines changed

7 files changed

+20
-19
lines changed

.github/workflows/pr_tests.yml

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -21,7 +21,7 @@ jobs:
2121
- name: Install dependencies
2222
run: |
2323
python -m pip install --upgrade pip
24-
pip install ruff==0.1.5
24+
pip install ruff==0.9.10
2525
- name: Check quality
2626
run: make quality
2727
- name: Check if failure

finetrainers/args.py

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -842,9 +842,9 @@ def _map_to_args_type(args: Dict[str, Any]) -> BaseArgs:
842842

843843
def _validate_model_args(args: BaseArgs):
844844
if args.training_type == "full-finetune":
845-
assert (
846-
"transformer" not in args.layerwise_upcasting_modules
847-
), "Layerwise upcasting is not supported for full-finetune training"
845+
assert "transformer" not in args.layerwise_upcasting_modules, (
846+
"Layerwise upcasting is not supported for full-finetune training"
847+
)
848848

849849

850850
def _validate_dataset_args(args: BaseArgs):

finetrainers/data/dataset.py

Lines changed: 6 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -436,9 +436,9 @@ def __init__(
436436
) -> None:
437437
super().__init__()
438438

439-
assert weights == -1 or isinstance(
440-
weights, dict
441-
), "`weights` must be a dictionary of probabilities for each caption column"
439+
assert weights == -1 or isinstance(weights, dict), (
440+
"`weights` must be a dictionary of probabilities for each caption column"
441+
)
442442

443443
self.dataset_name = dataset_name
444444
self.infinite = infinite
@@ -530,9 +530,9 @@ def __init__(
530530
) -> None:
531531
super().__init__()
532532

533-
assert weights == -1 or isinstance(
534-
weights, dict
535-
), "`weights` must be a dictionary of probabilities for each caption column"
533+
assert weights == -1 or isinstance(weights, dict), (
534+
"`weights` must be a dictionary of probabilities for each caption column"
535+
)
536536

537537
self.dataset_name = dataset_name
538538
self.infinite = infinite

finetrainers/utils/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -37,7 +37,7 @@ def get_parameter_names(obj: Any, method_name: Optional[str] = None) -> Set[str]
3737

3838

3939
def get_non_null_items(
40-
x: Union[List[Any], Tuple[Any], Dict[str, Any]]
40+
x: Union[List[Any], Tuple[Any], Dict[str, Any]],
4141
) -> Union[List[Any], Tuple[Any], Dict[str, Any]]:
4242
if isinstance(x, dict):
4343
return {k: v for k, v in x.items() if v is not None}

finetrainers/utils/torch.py

Lines changed: 7 additions & 7 deletions
Original file line number | Diff line number | Diff line change
@@ -266,10 +266,10 @@ def _get_total_norm(
266266
if len(tensors) == 0:
267267
return torch.tensor(0.0)
268268
first_device = tensors[0].device
269-
grouped_tensors: dict[
270-
tuple[torch.device, torch.dtype], tuple[list[list[torch.Tensor]], list[int]]
271-
] = _group_tensors_by_device_and_dtype(
272-
[tensors] # type: ignore[list-item]
269+
grouped_tensors: dict[tuple[torch.device, torch.dtype], tuple[list[list[torch.Tensor]], list[int]]] = (
270+
_group_tensors_by_device_and_dtype(
271+
[tensors] # type: ignore[list-item]
272+
)
273273
) # type: ignore[assignment]
274274

275275
norms: List[torch.Tensor] = []
@@ -308,9 +308,9 @@ def _clip_grads_with_norm_(
308308
max_norm = float(max_norm)
309309
if len(grads) == 0:
310310
return
311-
grouped_grads: dict[
312-
Tuple[torch.device, torch.dtype], Tuple[List[List[torch.Tensor]], List[int]]
313-
] = _group_tensors_by_device_and_dtype([grads]) # type: ignore[assignment]
311+
grouped_grads: dict[Tuple[torch.device, torch.dtype], Tuple[List[List[torch.Tensor]], List[int]]] = (
312+
_group_tensors_by_device_and_dtype([grads])
313+
) # type: ignore[assignment]
314314

315315
clip_coef = max_norm / (total_norm + 1e-6)
316316
# Note: multiplying by the clamped coef is redundant when the coef is clamped to 1, but doing so

requirements.txt

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -16,3 +16,4 @@ torchao>=0.7.0
1616
sentencepiece>=0.2.0
1717
imageio-ffmpeg>=0.5.1
1818
numpy>=1.26.4
19+
ruff==0.9.10

tests/models/ltx_video/_test_tp.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -151,7 +151,7 @@ def main(world_size: int, rank: int):
151151
torch.allclose(output, output_tp, atol=1e-5, rtol=1e-5),
152152
(output - output_tp).abs().max(),
153153
)
154-
print(f"Max memory reserved ({rank=}): {torch.cuda.max_memory_reserved(rank) / 1024 ** 3:.2f} GB")
154+
print(f"Max memory reserved ({rank=}): {torch.cuda.max_memory_reserved(rank) / 1024**3:.2f} GB")
155155

156156
if rank == 0:
157157
print()

0 commit comments

Comments (0)