@@ -298,31 +298,31 @@ def finetune_felbo(  # noqa: C901
                 for p in layer.parameters():
                     p.requires_grad = False

-            if self.verbose:
-                print("Training one last epoch...")
+        if self.verbose:
+            print("Training one last epoch...")

-            for layer in layers:
-                for p in layer.parameters():
-                    p.requires_grad = True
+        for layer in layers:
+            for p in layer.parameters():
+                p.requires_grad = True

-            params_, max_lr_, base_lr_ = [], [], []
-            for lr, layer in zip(layers_max_lr, layers):
-                params_ += [{"params": layer.parameters(), "lr": lr / 10.0}]
-                max_lr_ += [lr]
-                base_lr_ += [lr / 10.0]
+        params_, max_lr_, base_lr_ = [], [], []
+        for lr, layer in zip(layers_max_lr, layers):
+            params_ += [{"params": layer.parameters(), "lr": lr / 10.0}]
+            max_lr_ += [lr]
+            base_lr_ += [lr / 10.0]

-            optimizer = torch.optim.AdamW(params_)
+        optimizer = torch.optim.AdamW(params_)

-            scheduler = torch.optim.lr_scheduler.CyclicLR(
-                optimizer,
-                base_lr=base_lr_,
-                max_lr=max_lr_,
-                step_size_up=step_size_up,
-                step_size_down=step_size_down,
-                cycle_momentum=False,
-            )
+        scheduler = torch.optim.lr_scheduler.CyclicLR(
+            optimizer,
+            base_lr=base_lr_,
+            max_lr=max_lr_,
+            step_size_up=step_size_up,
+            step_size_down=step_size_down,
+            cycle_momentum=False,
+        )

-            self._train(model, model_name, loader, optimizer, scheduler, idx=idx)
+        self._train(model, model_name, loader, optimizer, scheduler, idx=idx)

     def finetune_one(
         self,
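For readers less familiar with the pattern these lines set up, the sketch below reproduces it in isolation: one optimizer parameter group per layer, each cycling between `lr / 10` and `lr` under a `CyclicLR` schedule, with `cycle_momentum=False` because AdamW has no momentum term. The `layers`, `layers_max_lr`, step sizes and the random batch are made-up placeholders for illustration, not values from the library.

```python
import torch
import torch.nn as nn

# Hypothetical stand-ins for the `layers` / `layers_max_lr` arguments of
# finetune_felbo; in the library these come from the model being fine-tuned.
layers = [nn.Linear(16, 16), nn.Linear(16, 16), nn.Linear(16, 2)]
layers_max_lr = [0.01, 0.005, 0.0025]
model = nn.Sequential(layers[0], nn.ReLU(), layers[1], nn.ReLU(), layers[2])

# One parameter group per layer; each group cycles between lr / 10 and lr.
params_, max_lr_, base_lr_ = [], [], []
for lr, layer in zip(layers_max_lr, layers):
    params_ += [{"params": layer.parameters(), "lr": lr / 10.0}]
    max_lr_ += [lr]
    base_lr_ += [lr / 10.0]

optimizer = torch.optim.AdamW(params_)
scheduler = torch.optim.lr_scheduler.CyclicLR(
    optimizer,
    base_lr=base_lr_,      # per-group lower bound
    max_lr=max_lr_,        # per-group upper bound
    step_size_up=5,        # illustrative step sizes, not the library's values
    step_size_down=5,
    cycle_momentum=False,  # required: AdamW has no `momentum` hyper-parameter
)

# Minimal "one last epoch" over random data, stepping the scheduler per batch.
x, y = torch.randn(32, 16), torch.randint(0, 2, (32,))
for xb, yb in zip(x.chunk(4), y.chunk(4)):
    optimizer.zero_grad()
    loss = nn.functional.cross_entropy(model(xb), yb)
    loss.backward()
    optimizer.step()
    scheduler.step()
```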