This repository was archived by the owner on Dec 16, 2022. It is now read-only.

Commit 87e3536

Make tests work again (#4865)
* New import paths
* Duplicate entries
* Dataset readers can't be lazy anymore
1 parent d16a5c7 commit 87e3536

12 files changed: +27 -57 lines

allennlp/data/dataset_readers/sharded_dataset_reader.py

-10

@@ -30,23 +30,13 @@ class ShardedDatasetReader(DatasetReader):
 
     Registered as a `DatasetReader` with name "sharded".
 
-    This class accepts all additional parameters of any `DatasetReader` class via `**kwargs`.
-    We give priority to the values set in the constructor for the instance of this class.
-    Optionally, we will automatically inherit attributes from the `base_reader` when required.
-
     # Parameters
 
     base_reader : `DatasetReader`
         Reader with a read method that accepts a single file.
     """
 
     def __init__(self, base_reader: DatasetReader, **kwargs) -> None:
-        # ShardedDatasetReader is a wrapper for the original base_reader so some of the parameters like 'lazy'
-        # can be safely inherited. However, ShardedDatasetReader is a class instance of a DatasetReader as well.
-        # So we give priority to the parameters for the current instance stored in 'kwargs'.
-        # If not present, we check the ones in the base reader
-        kwargs["lazy"] = kwargs.get("lazy", base_reader.lazy)
-
         super().__init__(
             manual_distributed_sharding=True, manual_multi_process_sharding=True, **kwargs
         )
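
With the `lazy` plumbing gone, `ShardedDatasetReader` no longer inherits anything from its `base_reader` at construction time. A minimal usage sketch under that assumption (the choice of base reader and the glob path are illustrative, not part of this commit):

from allennlp.data.dataset_readers import SequenceTaggingDatasetReader, ShardedDatasetReader

# The base reader knows how to read a single shard; the sharded reader fans out over many files.
base_reader = SequenceTaggingDatasetReader()
reader = ShardedDatasetReader(base_reader=base_reader)

# Each shard matched by the glob is handed to base_reader.read(); there is no `lazy` flag anymore.
instances = list(reader.read("data/shards/*.tsv"))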

allennlp/models/vision_text_model.py

+2 -3

@@ -5,6 +5,7 @@
 from overrides import overrides
 import numpy as np
 import torch
+from transformers import AutoModel
 
 from allennlp.data.fields.text_field import TextFieldTensors
 from allennlp.data.vocabulary import Vocabulary
@@ -16,8 +17,6 @@
     TransformerPooler,
 )
 
-from transformers.modeling_auto import AutoModel
-
 logger = logging.getLogger(__name__)
 
 
@@ -107,7 +106,7 @@ def from_huggingface_model_name(
         if hasattr(transformer.config, "embedding_size"):
             config = transformer.config
 
-            from transformers.modeling_albert import AlbertModel
+            from transformers.models.albert.modeling_albert import AlbertModel
 
             if isinstance(transformer, AlbertModel):
                 linear_transform = deepcopy(transformer.encoder.embedding_hidden_mapping_in)
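
These edits track the `transformers` v4 package reorganization: per-model modules now live under `transformers.models.<model_name>`, while classes such as `AutoModel` remain importable from the top-level package. A small sketch of the new pattern (the checkpoint name and version assumption are mine, not taken from this commit):

# Assumes transformers >= 4.0; the old flat modules (e.g. transformers.modeling_albert) no longer exist.
from transformers import AutoModel
from transformers.models.albert.modeling_albert import AlbertModel

# Any ALBERT checkpoint would do here; loading it downloads weights.
transformer = AutoModel.from_pretrained("albert-base-v2")
assert isinstance(transformer, AlbertModel)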

allennlp/modules/elmo.py

+1 -1

@@ -13,7 +13,7 @@
 from allennlp.common.file_utils import cached_path
 from allennlp.common.util import lazy_groups_of
 from allennlp.data.instance import Instance
-from allennlp.data.tokenizers.token import Token
+from allennlp.data.tokenizers.token_class import Token
 from allennlp.data.vocabulary import Vocabulary
 from allennlp.data.batch import Batch
 from allennlp.data.fields import TextField
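
The `Token` class is now imported from `allennlp.data.tokenizers.token_class` rather than the old `token` module; only the import path changes here. A minimal sketch of constructing one directly (the text value is just an example):

from allennlp.data.tokenizers.token_class import Token

# Token is a lightweight dataclass; `text` is the only field most callers need to set.
token = Token(text="hello")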

allennlp/modules/transformer/activation_layer.py

+1 -1

@@ -5,7 +5,7 @@
 
 from allennlp.modules.transformer.transformer_module import TransformerModule
 
-from transformers.modeling_bert import ACT2FN
+from transformers.models.bert.modeling_bert import ACT2FN
 
 
 class ActivationLayer(TransformerModule, FromParams):
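
`ACT2FN` is the name-to-activation mapping shipped with `transformers`; only its import path changes here. A quick sketch of the kind of lookup `ActivationLayer` presumably performs (the input tensor is illustrative):

import torch
from transformers.models.bert.modeling_bert import ACT2FN

# Look up an activation function by name and apply it.
gelu = ACT2FN["gelu"]
out = gelu(torch.randn(2, 6))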

allennlp/nn/activations.py

-1

@@ -99,5 +99,4 @@ def _get_name(self):
     "softsign": (torch.nn.Softsign, None),
     "tanhshrink": (torch.nn.Tanhshrink, None),
     "selu": (torch.nn.SELU, None),
-    "gelu": (torch.nn.GELU, None),
 }
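
Per the commit description, this `"gelu"` entry is one of the "duplicate entries": the name is presumably registered elsewhere in the same mapping, so the second copy is simply dropped. Lookup by name is unchanged; a minimal sketch of the usual registry pattern:

from allennlp.nn import Activation

# Registered activations are looked up by name and then instantiated.
gelu = Activation.by_name("gelu")()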

tests/data/dataset_readers/sharded_dataset_reader_test.py

-17

@@ -73,20 +73,3 @@ def test_sharded_read_glob(self):
 
     def test_sharded_read_archive(self):
         self.read_and_check_instances(str(self.archive_filename))
-
-    def test_attributes_inheritance(self):
-        # current reader has lazy set to true
-        base_reader = SequenceTaggingDatasetReader(lazy=True)
-        reader = ShardedDatasetReader(base_reader=base_reader)
-
-        assert (
-            reader.lazy
-        ), "The ShardedDatasetReader didn't inherit the 'lazy' attribute from base_reader"
-
-    def test_set_attributes_main(self):
-        base_reader = SequenceTaggingDatasetReader(lazy=True)
-        reader = ShardedDatasetReader(base_reader=base_reader, lazy=False)
-
-        assert (
-            not reader.lazy
-        ), "The ShardedDatasetReader inherited the 'lazy' attribute from base_reader. It should be False"

tests/models/vilbert_vqa_test.py

+1 -1

@@ -1,4 +1,4 @@
-from transformers.modeling_auto import AutoModel
+from transformers import AutoModel
 
 from allennlp.common.testing import ModelTestCase
 from allennlp.data import Vocabulary

tests/models/visual_entailment_test.py

+1 -1

@@ -1,4 +1,4 @@
-from transformers.modeling_auto import AutoModel
+from transformers import AutoModel
 
 from allennlp.common.testing import ModelTestCase
 from allennlp.data import Vocabulary

tests/modules/transformer/self_attention_test.py

+8 -8

@@ -9,14 +9,14 @@
 from allennlp.modules.transformer import SelfAttention
 from allennlp.common.testing import AllenNlpTestCase
 
-from transformers.configuration_bert import BertConfig
-from transformers.modeling_bert import BertSelfAttention
-from transformers.configuration_roberta import RobertaConfig
-from transformers.modeling_roberta import RobertaSelfAttention
-from transformers.configuration_electra import ElectraConfig
-from transformers.modeling_electra import ElectraSelfAttention
-from transformers.configuration_distilbert import DistilBertConfig
-from transformers.modeling_distilbert import MultiHeadSelfAttention
+from transformers.models.bert.configuration_bert import BertConfig
+from transformers.models.bert.modeling_bert import BertSelfAttention
+from transformers.models.roberta.configuration_roberta import RobertaConfig
+from transformers.models.roberta.modeling_roberta import RobertaSelfAttention
+from transformers.models.electra.configuration_electra import ElectraConfig
+from transformers.models.electra.modeling_electra import ElectraSelfAttention
+from transformers.models.distilbert.configuration_distilbert import DistilBertConfig
+from transformers.models.distilbert.modeling_distilbert import MultiHeadSelfAttention
 
 PARAMS_DICT = {
     "hidden_size": 6,

tests/modules/transformer/toolkit_test.py

+1 -2

@@ -1,7 +1,6 @@
 import torch
 from overrides import overrides
-
-from transformers.modeling_albert import AlbertEmbeddings
+from transformers.models.albert.modeling_albert import AlbertEmbeddings
 
 from allennlp.common import cached_transformers
 from allennlp.common.testing import assert_equal_parameters

tests/modules/transformer/transformer_block_test.py

+6 -6

@@ -9,12 +9,12 @@
 from allennlp.modules.transformer import TransformerBlock
 from allennlp.common.testing import AllenNlpTestCase
 
-from transformers.configuration_bert import BertConfig
-from transformers.modeling_bert import BertEncoder
-from transformers.configuration_roberta import RobertaConfig
-from transformers.modeling_roberta import RobertaEncoder
-from transformers.configuration_electra import ElectraConfig
-from transformers.modeling_electra import ElectraEncoder
+from transformers.models.bert.configuration_bert import BertConfig
+from transformers.models.bert.modeling_bert import BertEncoder
+from transformers.models.roberta.configuration_roberta import RobertaConfig
+from transformers.models.roberta.modeling_roberta import RobertaEncoder
+from transformers.models.electra.configuration_electra import ElectraConfig
+from transformers.models.electra.modeling_electra import ElectraEncoder
 
 PARAMS_DICT = {
     "num_hidden_layers": 3,

tests/modules/transformer/transformer_layer_test.py

+6 -6

@@ -8,12 +8,12 @@
 from allennlp.modules.transformer import AttentionLayer, TransformerLayer
 from allennlp.common.testing import AllenNlpTestCase
 
-from transformers.configuration_bert import BertConfig
-from transformers.modeling_bert import BertAttention, BertLayer
-from transformers.configuration_roberta import RobertaConfig
-from transformers.modeling_roberta import RobertaAttention, RobertaLayer
-from transformers.configuration_electra import ElectraConfig
-from transformers.modeling_electra import ElectraAttention, ElectraLayer
+from transformers.models.bert.configuration_bert import BertConfig
+from transformers.models.bert.modeling_bert import BertAttention, BertLayer
+from transformers.models.roberta.configuration_roberta import RobertaConfig
+from transformers.models.roberta.modeling_roberta import RobertaAttention, RobertaLayer
+from transformers.models.electra.configuration_electra import ElectraConfig
+from transformers.models.electra.modeling_electra import ElectraAttention, ElectraLayer
 
 ATTENTION_PARAMS_DICT = {
     "hidden_size": 6,
