# pylint: disable=no-self-use,invalid-name,protected-access
import pytest

from allennlp.common import Params
from allennlp.common.util import ensure_list
from allennlp.data.dataset_readers import DropReader
from allennlp.common.testing import AllenNlpTestCase


class TestDropReader:
    @pytest.mark.parametrize("lazy", (True, False))
    def test_read_from_file(self, lazy):
        reader = DropReader(lazy=lazy)
        instances = ensure_list(reader.read(AllenNlpTestCase.FIXTURES_ROOT / 'data' / 'drop.json'))
        assert len(instances) == 19

        instance = instances[0]
        assert set(instance.fields.keys()) == {
            'question',
            'passage',
            'number_indices',
            'numbers_in_passage',
            'answer_as_passage_spans',
            'answer_as_question_spans',
            'answer_as_add_sub_expressions',
            'answer_as_counts',
            'metadata',
        }

        assert [t.text for t in instance["question"][:3]] == ["What", "happened", "second"]
        assert [t.text for t in instance["passage"][:3]] == ["The", "Port", "of"]
        assert [t.text for t in instance["passage"][-3:]] == ["cruise", "ships", "."]

        # Note that the last number in here is added as padding in case we don't find any numbers
        # in a particular passage.
        assert [f.sequence_index for f in instance["number_indices"]] == [
            16, 30, 36, 41, 52, 64, 80, 89, 147, 153, 166, 174, 177, 206, 245, 252, 267, 279,
            283, 288, 296, -1
        ]
        assert [t.text for t in instance["numbers_in_passage"]] == [
            "1", "25", "2014", "5", "2018", "1", "2", "1", "54", "52", "6", "60", "58", "2010",
            "67", "2010", "1996", "3", "1", "6", "1", "0"]
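        # The answer span fields hold inclusive (start, end) token indices into the passage or
        # question.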
        assert len(instance["answer_as_passage_spans"]) == 1
        assert instance["answer_as_passage_spans"][0] == (46, 47)
        assert len(instance["answer_as_question_spans"]) == 1
        assert instance["answer_as_question_spans"][0] == (5, 6)
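        # One plus/minus label per passage number (22 of them, counting the padding number);
        # all zeros means no add/sub combination of the numbers is marked as giving the answer.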
        assert len(instance["answer_as_add_sub_expressions"]) == 1
        assert instance["answer_as_add_sub_expressions"][0].labels == [0] * 22
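        # A count label of -1 appears to be the padding value used when the answer is not a count.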
        assert len(instance["answer_as_counts"]) == 1
        assert instance["answer_as_counts"][0].label == -1
        assert set(instance['metadata'].metadata.keys()) == {
            'answer_annotations',
            'answer_info',
            'answer_texts',
            'number_indices',
            'number_tokens',
            'original_numbers',
            'original_passage',
            'original_question',
            'passage_id',
            'passage_token_offsets',
            'passage_tokens',
            'question_id',
            'question_token_offsets',
            'question_tokens',
        }

    def test_read_in_bert_format(self):
        reader = DropReader(instance_format="bert")
        instances = ensure_list(reader.read(AllenNlpTestCase.FIXTURES_ROOT / 'data' / 'drop.json'))
        assert len(instances) == 19

        print(instances[0])
        instance = instances[0]
        assert set(instance.fields.keys()) == {
            'answer_as_passage_spans',
            'metadata',
            'passage',
            'question',
            'question_and_passage',
        }

        assert [t.text for t in instance["question"][:3]] == ["What", "happened", "second"]
        assert [t.text for t in instance["passage"][:3]] == ["The", "Port", "of"]
        assert [t.text for t in instance["passage"][-3:]] == ["cruise", "ships", "."]
        question_length = len(instance['question'])
        passage_length = len(instance['passage'])
        assert len(instance['question_and_passage']) == question_length + passage_length + 1

        assert len(instance["answer_as_passage_spans"]) == 1
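        # The passage follows the question plus one extra (separator) token in the combined
        # question_and_passage field, so gold passage spans are offset by question_length + 1.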
        assert instance["answer_as_passage_spans"][0] == (question_length + 1 + 46,
                                                          question_length + 1 + 47)
        assert set(instance['metadata'].metadata.keys()) == {
            'answer_annotations',
            'answer_texts',
            'original_passage',
            'original_question',
            'passage_id',
            'passage_token_offsets',
            'passage_tokens',
            'question_id',
            'question_tokens',
        }

    def test_read_in_squad_format(self):
        reader = DropReader(instance_format="squad")
        instances = ensure_list(reader.read(AllenNlpTestCase.FIXTURES_ROOT / 'data' / 'drop.json'))
        assert len(instances) == 19

        print(instances[0])
        instance = instances[0]
        assert set(instance.fields.keys()) == {
            'question',
            'passage',
            'span_start',
            'span_end',
            'metadata',
        }

        assert [t.text for t in instance["question"][:3]] == ["What", "happened", "second"]
        assert [t.text for t in instance["passage"][:3]] == ["The", "Port", "of"]
        assert [t.text for t in instance["passage"][-3:]] == ["cruise", "ships", "."]

        assert instance["span_start"] == 46
        assert instance["span_end"] == 47
        assert set(instance['metadata'].metadata.keys()) == {
            'answer_annotations',
            'answer_texts',
            'original_passage',
            'original_question',
            'passage_id',
            'token_offsets',
            'passage_tokens',
            'question_id',
            'question_tokens',
            'valid_passage_spans',
        }

    def test_can_build_from_params(self):
        reader = DropReader.from_params(Params({}))
        assert reader._tokenizer.__class__.__name__ == 'WordTokenizer'
        assert reader._token_indexers["tokens"].__class__.__name__ == 'SingleIdTokenIndexer'