Commit 4e50ffd

Merge pull request #247 from jrzaurin/fix/fix-bug-in-felbo-finetunning-routine
fixed indentation error in the felbo finetuning routine
2 parents e26c3fb + a4c35a1
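
What the fix changes, in miniature: the block that trains one last epoch was indented one level too deep, so it sat inside the loop over layer groups and ran once per group instead of once at the end. Below is a minimal sketch of that bug shape; the group names are made up for illustration and are not taken from pytorch_widedeep.

layers = ["wide", "deeptabular", "deeptext"]  # hypothetical layer groups

# Buggy shape: over-indented, so the "last epoch" runs once per group
for layer in layers:
    print(f"freezing {layer}")
    print("Training one last epoch...")  # printed 3 times

# Fixed shape: dedented one level, runs exactly once after the loop
for layer in layers:
    print(f"freezing {layer}")
print("Training one last epoch...")  # printed once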

File tree

1 file changed (+20 −20 lines)

pytorch_widedeep/training/_finetune.py

Lines changed: 20 additions & 20 deletions
@@ -298,31 +298,31 @@ def finetune_felbo(  # noqa: C901
             for p in layer.parameters():
                 p.requires_grad = False
 
-            if self.verbose:
-                print("Training one last epoch...")
+        if self.verbose:
+            print("Training one last epoch...")
 
-            for layer in layers:
-                for p in layer.parameters():
-                    p.requires_grad = True
+        for layer in layers:
+            for p in layer.parameters():
+                p.requires_grad = True
 
-            params_, max_lr_, base_lr_ = [], [], []
-            for lr, layer in zip(layers_max_lr, layers):
-                params_ += [{"params": layer.parameters(), "lr": lr / 10.0}]
-                max_lr_ += [lr]
-                base_lr_ += [lr / 10.0]
+        params_, max_lr_, base_lr_ = [], [], []
+        for lr, layer in zip(layers_max_lr, layers):
+            params_ += [{"params": layer.parameters(), "lr": lr / 10.0}]
+            max_lr_ += [lr]
+            base_lr_ += [lr / 10.0]
 
-            optimizer = torch.optim.AdamW(params_)
+        optimizer = torch.optim.AdamW(params_)
 
-            scheduler = torch.optim.lr_scheduler.CyclicLR(
-                optimizer,
-                base_lr=base_lr_,
-                max_lr=max_lr_,
-                step_size_up=step_size_up,
-                step_size_down=step_size_down,
-                cycle_momentum=False,
-            )
+        scheduler = torch.optim.lr_scheduler.CyclicLR(
+            optimizer,
+            base_lr=base_lr_,
+            max_lr=max_lr_,
+            step_size_up=step_size_up,
+            step_size_down=step_size_down,
+            cycle_momentum=False,
+        )
 
-            self._train(model, model_name, loader, optimizer, scheduler, idx=idx)
+        self._train(model, model_name, loader, optimizer, scheduler, idx=idx)
 
     def finetune_one(
         self,
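
For readers skimming the + side: the pattern being restored is discriminative fine-tuning, with one AdamW parameter group per layer, each cycled between a tenth of its maximum learning rate and that maximum by a CyclicLR schedule. Below is a self-contained sketch of that pattern; the layer shapes, learning rates, and step sizes are placeholder values for illustration, not the library's.

import torch
import torch.nn as nn

# Hypothetical layer groups and their per-group max learning rates
layers = [nn.Linear(16, 16), nn.Linear(16, 8)]
layers_max_lr = [1e-3, 5e-4]

# One optimizer parameter group per layer, started at max_lr / 10,
# mirroring the fixed code path in the diff above
params_, max_lr_, base_lr_ = [], [], []
for lr, layer in zip(layers_max_lr, layers):
    params_ += [{"params": layer.parameters(), "lr": lr / 10.0}]
    max_lr_ += [lr]
    base_lr_ += [lr / 10.0]

optimizer = torch.optim.AdamW(params_)
scheduler = torch.optim.lr_scheduler.CyclicLR(
    optimizer,
    base_lr=base_lr_,      # per-group lower bound
    max_lr=max_lr_,        # per-group upper bound
    step_size_up=10,       # placeholder cycle half-lengths
    step_size_down=10,
    cycle_momentum=False,  # required for AdamW, which has no 'momentum' default
)

# A few dummy training steps to show the per-group learning rates cycling
x, y = torch.randn(4, 16), torch.randn(4, 8)
for _ in range(5):
    out = layers[1](layers[0](x))
    loss = nn.functional.mse_loss(out, y)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    scheduler.step()
    print([g["lr"] for g in optimizer.param_groups])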
