from typing import Iterable, Optional

from allennlp.data import DatasetReader, Token
from allennlp.data.fields import TextField, LabelField, ListField
from allennlp.data.instance import Instance
from datasets import load_dataset
from datasets.features import (
    ClassLabel,
    Sequence,
    Translation,
    TranslationVariableLanguages,
    Value,
)


class HuggingfaceDatasetSplitReader(DatasetReader):
| 12 | + """ |
| 13 | + This reader implementation wraps the huggingface datasets package |
| 14 | + to utilize it's dataset management functionality and load the information in AllenNLP friendly formats |
| 15 | + Note: Reader works w.r.t to only one split of the dataset, |
| 16 | + i.e. you would need to create separate reader for separate splits |
| 17 | +
|
| 18 | + Following dataset and configurations have been verified and work with this reader |
| 19 | +
|
| 20 | + Dataset Dataset Configuration |
| 21 | + `xnli` `ar` |
| 22 | + `xnli` `en` |
| 23 | + `xnli` `de` |
| 24 | + `xnli` `all_languages` |
| 25 | + `glue` `cola` |
| 26 | + `glue` `mrpc` |
| 27 | + `glue` `sst2` |
| 28 | + `glue` `qqp` |
| 29 | + `glue` `mnli` |
| 30 | + `glue` `mnli_matched` |
| 31 | + `universal_dependencies` `en_lines` |
| 32 | + `universal_dependencies` `ko_kaist` |
| 33 | + `universal_dependencies` `af_afribooms` |
| 34 | + `afrikaans_ner_corpus` `NA` |
| 35 | + `swahili` `NA` |
| 36 | + `conll2003` `NA` |
| 37 | + `dbpedia_14` `NA` |
| 38 | + `trec` `NA` |
| 39 | + `emotion` `NA` |
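
    Example usage (a minimal sketch; the `file_path` passed to `read()` is ignored,
    since the dataset split is loaded in the constructor):

        reader = HuggingfaceDatasetSplitReader(dataset_name="glue", config_name="cola", split="train")
        instances = list(reader.read("dummy_path"))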
| 40 | + """ |

    def __init__(
        self,
        max_instances: Optional[int] = None,
        manual_distributed_sharding: bool = False,
        manual_multiprocess_sharding: bool = False,
        serialization_dir: Optional[str] = None,
        dataset_name: Optional[str] = None,
        split: str = "train",
        config_name: Optional[str] = None,
    ) -> None:
        super().__init__(
            max_instances=max_instances,
            manual_distributed_sharding=manual_distributed_sharding,
            manual_multiprocess_sharding=manual_multiprocess_sharding,
            serialization_dir=serialization_dir,
        )

        # It would be cleaner to create a separate reader object for each dataset
        self.dataset = None
        self.dataset_name = dataset_name
        self.config_name = config_name
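
        # Some datasets (e.g. `glue`) have multiple configurations and require a config name,
        # e.g. load_dataset("glue", "cola", split="train") vs load_dataset("conll2003", split="train")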
        if config_name:
            self.dataset = load_dataset(self.dataset_name, self.config_name, split=split)
        else:
            self.dataset = load_dataset(self.dataset_name, split=split)

    def _read(self, file_path) -> Iterable[Instance]:
        """
        Reads the dataset and converts each entry to an AllenNLP-friendly instance.
        Note: `file_path` is ignored, since the dataset split was already loaded in the constructor.
        """
        for entry in self.dataset:
            yield self.text_to_instance(entry)

    def text_to_instance(self, *inputs) -> Instance:
        """
        Takes care of converting a dataset entry into an AllenNLP-friendly instance.
        Currently this is implemented in an ad-hoc, catch-up manner:
        it only converts the datasets.features that are required for the supported datasets.
        Ideally we would design a mapping that cleanly and deliberately maps each dataset.feature
        to an allennlp.data.field and then performs the conversion;
        doing that would provide the best chance of covering the largest possible set of datasets.

        Currently, datasets.features types are mapped to AllenNLP fields as follows:

        dataset.feature type            allennlp.data.fields
        `ClassLabel`                    `LabelField` in feature name namespace
        `Value.string`                  `TextField` with value as Token
        `Value.*`                       `LabelField` with value being label in feature name namespace
        `Sequence.string`               `ListField` of `TextField` with individual string as token
        `Sequence.ClassLabel`           `ListField` of `LabelField` in feature name namespace
        `Translation`                   `ListField` of 2 ListField (LabelField and TextField)
        `TranslationVariableLanguages`  `ListField` of 2 ListField (LabelField and TextField)
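
        For example, a hypothetical sentiment entry like `{"text": "a great movie", "label": 1}`
        (where `text` is a string `Value` and `label` a `ClassLabel`) would be converted to fields
        `{"text": TextField([Token("a great movie")]),
          "label": LabelField(1, label_namespace="label", skip_indexing=True)}`.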
| 96 | + """ |

        # `features` indicates the different information available in each entry from the dataset;
        # feature types decide what type of information it is.
        # e.g. in a sentiment dataset, an entry could have one feature (of type text/string) holding the text
        # and another (of type int32/ClassLabel) indicating the sentiment.
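        # For `glue/cola`, for instance, self.dataset.features looks roughly like
        # {"sentence": Value(dtype="string"), "label": ClassLabel(names=["unacceptable", "acceptable"]), "idx": Value(dtype="int32")}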
        features = self.dataset.features
        fields = dict()

        # TODO we need to support all the different datasets features described
        # in https://huggingface.co/docs/datasets/features.html
        for feature in features:
            value = features[feature]

            # datasets ClassLabel maps to LabelField
            if isinstance(value, ClassLabel):
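                # e.g. for `glue/cola`, inputs[0][feature] is already an int id such as 1 ("acceptable"),
                # which is why skip_indexing=True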
                field = LabelField(inputs[0][feature], label_namespace=feature, skip_indexing=True)

            # datasets Value can be of different types
            elif isinstance(value, Value):

                # String value maps to TextField
                if value.dtype == "string":
                    # Since a TextField has to be made of Tokens, add the whole text as a single token
                    # TODO Should we use simple heuristics to identify what is a token and what is not?
                    field = TextField([Token(inputs[0][feature])])

                else:
                    field = LabelField(
                        inputs[0][feature], label_namespace=feature, skip_indexing=True
                    )

            elif isinstance(value, Sequence):
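                # e.g. in `conll2003`, "tokens" is a Sequence of strings like ["EU", "rejects", "German", "call"]
                # and "ner_tags" is a Sequence of ClassLabel ids like [3, 0, 7, 0]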
                # datasets Sequence of strings maps to ListField of TextField
                if value.feature.dtype == "string":
                    field_list = list()
                    for item in inputs[0][feature]:
                        item_field = TextField([Token(item)])
                        field_list.append(item_field)
                    if len(field_list) == 0:
                        continue
                    field = ListField(field_list)

                # datasets Sequence of ClassLabel maps to ListField of LabelField
                elif isinstance(value.feature, ClassLabel):
                    field_list = list()
                    for item in inputs[0][feature]:
                        item_field = LabelField(
                            label=item, label_namespace=feature, skip_indexing=True
                        )
                        field_list.append(item_field)
                    if len(field_list) == 0:
                        continue
                    field = ListField(field_list)

                else:
                    raise ValueError(
                        f"Sequence feature type {type(value.feature)} is not supported yet."
                    )

            # datasets.Translation cannot be mapped directly,
            # but its dict structure can be mapped to a ListField of 2 ListField
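            # (an entry is a dict like {"en": "the cat", "de": "die Katze"}; illustrative values)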
            elif isinstance(value, Translation):
                if value.dtype == "dict":
                    input_dict = inputs[0][feature]
                    langs = list(input_dict.keys())
                    field_langs = [LabelField(lang, label_namespace="languages") for lang in langs]
                    langs_field = ListField(field_langs)
                    texts = list()
                    for lang in langs:
                        texts.append(TextField([Token(input_dict[lang])]))
                    field = ListField([langs_field, ListField(texts)])

            # datasets.TranslationVariableLanguages
            # is functionally a pair of lists, and is hence mapped to a ListField of 2 ListField
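            # (an entry is a dict like {"language": ["en", "de"], "translation": ["the cat", "die Katze"]};
            # illustrative values)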
            elif isinstance(value, TranslationVariableLanguages):
                if value.dtype == "dict":
                    input_dict = inputs[0][feature]
                    langs = input_dict["language"]
                    field_langs = [LabelField(lang, label_namespace="languages") for lang in langs]
                    langs_field = ListField(field_langs)
                    # "language" and "translation" are parallel lists, so build the text
                    # fields by position; this also handles a language appearing twice
                    texts = list()
                    for translation_text in input_dict["translation"]:
                        texts.append(TextField([Token(translation_text)]))
                    field = ListField([langs_field, ListField(texts)])

            else:
                raise ValueError(f"Datasets feature type {type(value)} is not supported yet.")

            fields[feature] = field

        return Instance(fields)