Commit e8963dc

guards for empty self.parallelism_config
1 parent 76a546f commit e8963dc

File tree

1 file changed: +2, -2 lines


src/accelerate/accelerator.py

Lines changed: 2 additions & 2 deletions
```diff
@@ -1675,7 +1675,7 @@ def prepare_model(self, model: torch.nn.Module, device_placement: bool = None, e
         elif device_placement and not self.verify_device_map(model):
             model = model.to(self.device)
         if not evaluation_mode:
-            if self.multi_device and not self.parallelism_config.tp_enabled:
+            if self.multi_device and not (self.parallelism_config and self.parallelism_config.tp_enabled):
                 if model_has_dtensor(model):
                     raise ValueError(
                         "Your model contains `DTensor` parameters, which is incompatible with DDP. Maybe you loaded your model with `device_map='auto'`? Specify `device_map='cuda'` or 'cpu' instead."
@@ -1695,7 +1695,7 @@ def prepare_model(self, model: torch.nn.Module, device_placement: bool = None, e
                     )
                     if self.ddp_handler is not None:
                         self.ddp_handler.register_comm_hook(model)
-            elif self.parallelism_config.tp_enabled:
+            elif self.parallelism_config and self.parallelism_config.tp_enabled:
                 if not hasattr(model, "tp_size"):
                     raise NotImplementedError(
                         "Model should undergo tensor parallel before passing it to accelerate."
```
