holylovenia committed
Commit b475775 · 1 Parent(s): ca65d62

Upload identic.py with huggingface_hub

Files changed (1): identic.py (+395, -0)
identic.py ADDED
@@ -0,0 +1,395 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""\
Data loader implementation for the IDENTICv1.0 dataset.
"""

import csv
from pathlib import Path
from typing import Dict, List, Tuple

import datasets
import pandas as pd

from nusacrowd.utils import schemas
from nusacrowd.utils.common_parser import load_ud_data
from nusacrowd.utils.configs import NusantaraConfig
from nusacrowd.utils.constants import Tasks

_CITATION = """\
@inproceedings{larasati-2012-identic,
    title = "{IDENTIC} Corpus: Morphologically Enriched {I}ndonesian-{E}nglish Parallel Corpus",
    author = "Larasati, Septina Dian",
    booktitle = "Proceedings of the Eighth International Conference on Language Resources and Evaluation ({LREC}'12)",
    month = may,
    year = "2012",
    address = "Istanbul, Turkey",
    publisher = "European Language Resources Association (ELRA)",
    url = "http://www.lrec-conf.org/proceedings/lrec2012/pdf/644_Paper.pdf",
    pages = "902--906",
    abstract = "This paper describes the creation process of an Indonesian-English parallel corpus (IDENTIC).
    The corpus contains 45,000 sentences collected from different sources in different genres.
    Several manual text preprocessing tasks, such as alignment and spelling correction, are applied to the corpus
    to assure its quality. We also apply language specific text processing such as tokenization on both sides and
    clitic normalization on the Indonesian side. The corpus is available in two different formats: 'plain',
    stored in text format and 'morphologically enriched', stored in CoNLL format. Some parts of the corpus are
    publicly available at the IDENTIC homepage.",
}
"""

_DATASETNAME = "identic"

_DESCRIPTION = """\
IDENTIC is an Indonesian-English parallel corpus for research purposes.
The corpus is a bilingual corpus paired with English. The aim of this work is to build and provide
researchers with a proper Indonesian-English textual dataset and also to promote research in this language pair.
The corpus contains texts coming from different sources in different genres.
Additionally, the corpus contains tagged texts that follow the MorphInd tagset (Larasati et al., 2011).
"""

_HOMEPAGE = "https://lindat.mff.cuni.cz/repository/xmlui/handle/11858/00-097C-0000-0005-BF85-F"

_LICENSE = "CC BY-NC-SA 3.0"

_URLS = {
    _DATASETNAME: "https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11858/00-097C-0000-0005-BF85-F/IDENTICv1.0.zip?sequence=1&isAllowed=y",
}

_SUPPORTED_TASKS = [Tasks.MACHINE_TRANSLATION, Tasks.POS_TAGGING]

_SOURCE_VERSION = "1.0.0"

_NUSANTARA_VERSION = "1.0.0"

_LANGUAGES = ["ind", "eng"]

_LOCAL = False

SOURCE_VARIATION = ["raw", "tokenized", "noclitic"]

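# Maps noisy or composite MorphInd analyses found in the corpus onto
# canonical tags from the TAGSETS list defined in IdenticDataset below.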
tagsets_map = {
    # ind
    "07<c>_CO-$": "CO-",
    "176<c>_CO-$": "CO-",
    "F--.^com.<f>_F--$": "X--",
    "F--.^xi<x>_X--$.^b<x>_X--$.^2.<c>_CC-$": "X--",
    "X--.^0.<c>_CC-$": "X--",
    "X--.^a.<x>_X--$": "X--",
    "X--.^b.<x>_X--$": "X--",
    "X--.^c.<x>_X--$": "X--",
    "X--.^com.<f>_F--$": "X--",
    "X--.^gammima<x>_X--$.^ag.<f>_F--$": "X--",
    "X--.^h.<x>_X--$": "X--",
    "X--.^i.<x>_X--$": "X--",
    "X--.^j.<x>_X--$": "X--",
    "X--.^m.<f>_F--$": "X--",
    "X--.^n.<x>_X--$": "X--",
    "X--.^net.<x>_X--$": "X--",
    "X--.^okezone<x>_X--$.^com.<f>_F--$": "X--",
    "X--.^p<x>_X--$.^k.<x>_X--$": "X--",
    "X--.^r.<x>_X--$": "X--",
    "X--.^s.<x>_X--$": "X--",
    "X--.^w.<x>_X--$": "D--",
    "^ke+dua": "D--",
    "^ke+p": "D--",
    "^nya$": "D--",
    "duanya<c>_CO-$": "CO-",
}


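# Builds a NusantaraConfig for either a per-language seq_label subset or a
# per-variation (raw/tokenized/noclitic) source or t2t subset.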
def nusantara_config_constructor(version, variation=None, task="source", lang="id"):
    if variation not in SOURCE_VARIATION:
        raise NotImplementedError("'{var}' is not available".format(var=variation))

    ver = datasets.Version(version)

    if task == "seq_label":
        return NusantaraConfig(
            name="identic_{lang}_nusantara_seq_label".format(lang=lang),
            version=ver,
            description="IDENTIC {lang} nusantara_seq_label schema".format(lang=lang),
            schema="nusantara_seq_label",
            subset_id="identic",
        )
    else:
        return NusantaraConfig(
            name="identic_{var}_{task}".format(var=variation, task=task),
            version=ver,
            description="IDENTIC {var} {task} schema".format(var=variation, task=task),
            schema=task,
            subset_id="identic",
        )


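# Reads a CoNLL file via load_ud_data and reshapes each sentence into a
# seq_label example; Indonesian xpos tags are normalized through tagsets_map.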
def load_ud_data_as_pos_tag(filepath, lang):
    dataset_source = list(load_ud_data(filepath))

    if lang == "id":
        return [{"id": str(i + 1), "tokens": row["form"], "labels": [tagsets_map.get(pos_tag, pos_tag) for pos_tag in row["xpos"]]} for (i, row) in enumerate(dataset_source)]
    else:
        return [{"id": str(i + 1), "tokens": row["form"], "labels": row["xpos"]} for (i, row) in enumerate(dataset_source)]


class IdenticDataset(datasets.GeneratorBasedBuilder):
    """
    IDENTIC is an Indonesian-English parallel corpus for research purposes. This dataset is used for ind -> eng translation and vice versa, as well as for the POS tagging task.
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    NUSANTARA_VERSION = datasets.Version(_NUSANTARA_VERSION)

    # Details of the tagsets in https://septinalarasati.com/morphind/
    TAGSETS = [
        # en
        "#",
        "$",
        "''",
        ",",
        ".",
        ":",
        "CC",
        "CD",
        "DT",
        "EX",
        "FW",
        "IN",
        "JJ",
        "JJR",
        "JJS",
        "LS",
        "MD",
        "NN",
        "NNP",
        "NNS",
        "PDT",
        "POS",
        "PRP",
        "PRP$",
        "RB",
        "RBR",
        "RBS",
        "RP",
        "SYM",
        "TO",
        "UH",
        "VB",
        "VBD",
        "VBG",
        "VBN",
        "VBP",
        "VBZ",
        "WDT",
        "WP",
        "WP$",
        "WRB",
        "``",
        # id
        "APP",
        "ASP",
        "ASS",
        "B--",
        "CC-",
        "CD-",
        "CO-",
        "D--",
        "F--",
        "G--",
        "H--",
        "I--",
        "M--",
        "NPD",
        "NSD",
        "NSF",
        "NSM",
        "O--",
        "PP1",
        "PP3",
        "PS1",
        "PS2",
        "PS3",
        "R--",
        "S--",
        "T--",
        "VPA",
        "VPP",
        "VSA",
        "VSP",
        "W--",
        "X--",
        "Z--",
    ]

    BUILDER_CONFIGS = (
        [
            NusantaraConfig(
                name="identic_source",
                version=SOURCE_VERSION,
                description="identic source schema",
                schema="source",
                subset_id="identic",
            ),
            NusantaraConfig(
                name="identic_id_source",
                version=SOURCE_VERSION,
                description="identic source schema",
                schema="source",
                subset_id="identic",
            ),
            NusantaraConfig(
                name="identic_en_source",
                version=SOURCE_VERSION,
                description="identic source schema",
                schema="source",
                subset_id="identic",
            ),
            NusantaraConfig(
                name="identic_nusantara_t2t",
                version=NUSANTARA_VERSION,
                description="Identic Nusantara schema",
                schema="nusantara_t2t",
                subset_id="identic",
            ),
            NusantaraConfig(
                name="identic_nusantara_seq_label",
                version=NUSANTARA_VERSION,
                description="Identic Nusantara schema",
                schema="nusantara_seq_label",
                subset_id="identic",
            ),
        ]
        + [nusantara_config_constructor(_NUSANTARA_VERSION, var) for var in SOURCE_VARIATION]
        + [nusantara_config_constructor(_NUSANTARA_VERSION, var, "nusantara_t2t") for var in SOURCE_VARIATION]
        + [nusantara_config_constructor(_NUSANTARA_VERSION, "raw", task="seq_label", lang=lang) for lang in ["en", "id"]]
    )

    DEFAULT_CONFIG_NAME = "identic_source"

    def _info(self) -> datasets.DatasetInfo:
        if self.config.schema == "source":
            if self.config.name.endswith("id_source") or self.config.name.endswith("en_source"):
                features = datasets.Features(
                    {
                        "id": [datasets.Value("string")],
                        "form": [datasets.Value("string")],
                        "lemma": [datasets.Value("string")],
                        "upos": [datasets.Value("string")],
                        "xpos": [datasets.Value("string")],
                        "feats": [datasets.Value("string")],
                        "head": [datasets.Value("string")],
                        "deprel": [datasets.Value("string")],
                        "deps": [datasets.Value("string")],
                        "misc": [datasets.Value("string")],
                    }
                )
            else:
                features = datasets.Features(
                    {
                        "id": datasets.Value("string"),
                        "id_sentence": datasets.Value("string"),
                        "en_sentence": datasets.Value("string"),
                    }
                )

        elif self.config.schema == "nusantara_t2t":
            features = schemas.text2text_features

        elif self.config.schema == "nusantara_seq_label":
            features = schemas.seq_label_features(self.TAGSETS)

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

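    # Config names follow "identic[_<variation>|_<lang>]_<schema>": the parsing
    # below derives the parallel text file from the variation and, for the
    # per-language subsets, which CoNLL side to read.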
    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators."""

        urls = _URLS[_DATASETNAME]
        base_dir = dl_manager.download_and_extract(urls)

        name_split = self.config.name.split("_")

        lang = name_split[1] if name_split[1] in ["en", "id"] else None

        if name_split[-1] == "source":
            if len(name_split) == 2:
                data_dir = base_dir + "/IDENTICv1.0/identic.raw.npp.txt"
            else:
                if name_split[1] in ["en", "id"]:
                    data_dir = base_dir + "/IDENTICv1.0/identic.raw.npp.txt"
                else:
                    data_dir = base_dir + "/IDENTICv1.0/identic.{var}.npp.txt".format(var=name_split[1])
        elif name_split[-1] == "t2t":
            if len(name_split) == 3:
                data_dir = base_dir + "/IDENTICv1.0/identic.raw.npp.txt"
            else:
                data_dir = base_dir + "/IDENTICv1.0/identic.{var}.npp.txt".format(var=name_split[1])
        elif name_split[-1] == "label":
            data_dir = base_dir + "/IDENTICv1.0/identic.raw.npp.txt"
        else:
            raise NotImplementedError("The defined task is not implemented")

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": Path(data_dir), "split": datasets.Split.TRAIN, "lang": lang},
            )
        ]

    def _generate_examples(self, filepath: Path, split: str, lang=None) -> Tuple[int, Dict]:
        """Yields examples as (key, example) tuples."""

        df = self._load_df_from_tsv(filepath)

        if self.config.schema == "source":
            if lang is None:
                # T2T source
                for idx, row in df.iterrows():
                    yield idx, {"id": row["id"], "id_sentence": row["id_sentence"], "en_sentence": row["en_sentence"]}
            else:
                # CoNLL source
                path = filepath.parent / "{lang}.npp.conll".format(lang=lang)
                for key, example in enumerate(load_ud_data(path)):
                    yield key, example

        elif self.config.schema == "nusantara_t2t":
            for idx, row in df.iterrows():
                yield idx, {
                    "id": str(idx),
                    "text_1": row["id_sentence"],
                    "text_2": row["en_sentence"],
                    "text_1_name": "ind",
                    "text_2_name": "eng",
                }

        elif self.config.schema == "nusantara_seq_label":
            if lang is None:
                lang = "id"
            path = filepath.parent / "{lang}.npp.conll".format(lang=lang)
            for key, example in enumerate(load_ud_data_as_pos_tag(path, lang=lang)):
                yield key, example

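    # The identic.*.npp.txt files are three-column TSVs (id, Indonesian
    # sentence, English sentence); QUOTE_NONE keeps stray quote characters in
    # the sentences from being interpreted as CSV quoting.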
    @staticmethod
    def _load_df_from_tsv(path):
        return pd.read_csv(
            path,
            sep="\t",
            names=["id", "id_sentence", "en_sentence"],
            quoting=csv.QUOTE_NONE,
        )
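
A minimal usage sketch (an illustration, not part of the uploaded file; it assumes the script is saved locally as identic.py and the nusacrowd package is importable, with config names taken from BUILDER_CONFIGS above):

    import datasets

    # Parallel-text view (default config): rows of id / id_sentence / en_sentence.
    identic = datasets.load_dataset("identic.py", name="identic_source", split="train")

    # POS-tagging view of the Indonesian side; MorphInd tags are normalized via tagsets_map.
    pos = datasets.load_dataset("identic.py", name="identic_id_nusantara_seq_label", split="train")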