Skip to content
This repository was archived by the owner on Dec 16, 2022. It is now read-only.

Commit 4fe8fa0

Browse files
authored
move model to cuda in tests, add comment (#2384)
1 parent 8eb2d75 commit 4fe8fa0

File tree

2 files changed, +6 −1 lines changed

allennlp/tests/training/trainer_test.py

+2-1
Original file line numberDiff line numberDiff line change
@@ -91,6 +91,7 @@ def test_trainer_can_run(self):
9191

9292
@pytest.mark.skipif(not torch.cuda.is_available(), reason="No CUDA device registered.")
9393
def test_trainer_can_run_cuda(self):
94+
self.model.cuda()
9495
trainer = Trainer(self.model, self.optimizer,
9596
self.iterator, self.instances, num_epochs=2,
9697
cuda_device=0)
@@ -99,7 +100,7 @@ def test_trainer_can_run_cuda(self):
99100
@pytest.mark.skipif(torch.cuda.device_count() < 2,
100101
reason="Need multiple GPUs.")
101102
def test_trainer_can_run_multiple_gpu(self):
102-
103+
self.model.cuda()
103104
class MetaDataCheckWrapper(Model):
104105
"""
105106
Checks that the metadata field has been correctly split across the batch dimension

allennlp/training/trainer.py

+4
Original file line numberDiff line numberDiff line change
@@ -69,6 +69,10 @@ def __init__(self,
6969
An AllenNLP model to be optimized. Pytorch Modules can also be optimized if
7070
their ``forward`` method returns a dictionary with a "loss" key, containing a
7171
scalar tensor representing the loss function to be optimized.
72+
73+
If you are training your model using GPUs, your model should already be
74+
on the correct device. (If you use `Trainer.from_params` this will be
75+
handled for you.)
7276
optimizer : ``torch.nn.Optimizer``, required.
7377
An instance of a Pytorch Optimizer, instantiated with the parameters of the
7478
model to be optimized.

0 commit comments

Comments (0)