2 files changed: 6 additions (+), 1 deletion (−)
@@ -91,6 +91,7 @@ def test_trainer_can_run(self):

     @pytest.mark.skipif(not torch.cuda.is_available(), reason="No CUDA device registered.")
     def test_trainer_can_run_cuda(self):
+        self.model.cuda()
         trainer = Trainer(self.model, self.optimizer,
                           self.iterator, self.instances, num_epochs=2,
                           cuda_device=0)
@@ -99,7 +100,7 @@ def test_trainer_can_run_cuda(self):
     @pytest.mark.skipif(torch.cuda.device_count() < 2,
                         reason="Need multiple GPUs.")
     def test_trainer_can_run_multiple_gpu(self):
-
+        self.model.cuda()
         class MetaDataCheckWrapper(Model):
             """
             Checks that the metadata field has been correctly split across the batch dimension
@@ -69,6 +69,10 @@ def __init__(self,
             An AllenNLP model to be optimized. Pytorch Modules can also be optimized if
             their ``forward`` method returns a dictionary with a "loss" key, containing a
             scalar tensor representing the loss function to be optimized.
+
+            If you are training your model using GPUs, your model should already be
+            on the correct device. (If you use `Trainer.from_params` this will be
+            handled for you.)
         optimizer : ``torch.nn.Optimizer``, required.
             An instance of a Pytorch Optimizer, instantiated with the parameters of the
             model to be optimized.
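
For context, a minimal sketch (not part of this commit) of what the updated docstring asks of callers: when constructing a Trainer directly, move the model onto the GPU yourself first, mirroring the test changes above. The names model, optimizer, iterator, and instances are assumed to be built elsewhere, and the import path is assumed for the AllenNLP version of this era.

    # Minimal sketch, assuming `model`, `optimizer`, `iterator`, and `instances`
    # have already been constructed elsewhere (e.g. as in the test fixture above).
    import torch
    from allennlp.training.trainer import Trainer  # assumed import path

    cuda_device = 0 if torch.cuda.is_available() else -1
    if cuda_device >= 0:
        # Per the updated docstring: a directly constructed Trainer expects the
        # model to already be on the target device; Trainer.from_params would
        # handle this for you.
        model.cuda(cuda_device)

    trainer = Trainer(model, optimizer,
                      iterator, instances, num_epochs=2,
                      cuda_device=cuda_device)
    trainer.train()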