maksim-tkachenko committed on
Commit
3aef59a
·
1 Parent(s): c8769aa

add data loader

Browse files
Files changed (1) hide show
  1. JGLUE.py +774 -0
JGLUE.py ADDED
@@ -0,0 +1,774 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # SOURCE FOR THE LOADER: https://huggingface.co/datasets/shunk031/JGLUE
2
+ import json
3
+ import logging
4
+ import random
5
+ import string
6
+ import warnings
7
+ from dataclasses import dataclass
8
+ from typing import Dict, List, Literal, Optional
9
+
10
+ import datasets as ds
11
+ import pandas as pd
12
+
13
+ logger = logging.getLogger(__name__)
14
+
15
# BibTeX entries for the JGLUE benchmark papers (LREC 2022 and the 2022
# Japanese NLP annual meeting paper).
_JGLUE_CITATION = """\
@inproceedings{kurihara-lrec-2022-jglue,
title={JGLUE: Japanese general language understanding evaluation},
author={Kurihara, Kentaro and Kawahara, Daisuke and Shibata, Tomohide},
booktitle={Proceedings of the Thirteenth Language Resources and Evaluation Conference},
pages={2957--2966},
year={2022},
url={https://aclanthology.org/2022.lrec-1.317/}
}
@inproceedings{kurihara-nlp-2022-jglue,
title={JGLUE: 日本語言語理解ベンチマーク},
author={栗原健太郎 and 河原大輔 and 柴田知秀},
booktitle={言語処理学会第28回年次大会},
pages={2023--2028},
year={2022},
url={https://www.anlp.jp/proceedings/annual_meeting/2022/pdf_dir/E8-4.pdf},
note={in Japanese}
}
"""

# BibTeX entries for the JCoLA corpus (arXiv 2023 + 2022 Japanese NLP paper).
_JCOLA_CITATION = """\
@article{someya2023jcola,
title={JCoLA: Japanese Corpus of Linguistic Acceptability},
author={Taiga Someya and Yushi Sugimoto and Yohei Oseki},
year={2023},
eprint={2309.12676},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
@inproceedings{someya-nlp-2022-jcola,
title={日本語版 CoLA の構築},
author={染谷 大河 and 大関 洋平},
booktitle={言語処理学会第28回年次大会},
pages={1872--1877},
year={2022},
url={https://www.anlp.jp/proceedings/annual_meeting/2022/pdf_dir/E7-1.pdf},
note={in Japanese}
}
"""

# BibTeX entry for the Multilingual Amazon Reviews Corpus used by MARC-ja.
_MARC_JA_CITATION = """\
@inproceedings{marc_reviews,
title={The Multilingual Amazon Reviews Corpus},
author={Keung, Phillip and Lu, Yichao and Szarvas, György and Smith, Noah A.},
booktitle={Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing},
pages={4563--4568},
year={2020}
}
"""

# BibTeX entry for the YJ Captions work underlying JSTS/JNLI sentence pairs.
_JSTS_JNLI_CITATION = """\
@inproceedings{miyazaki2016cross,
title={Cross-lingual image caption generation},
author={Miyazaki, Takashi and Shimizu, Nobuyuki},
booktitle={Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
pages={1780--1790},
year={2016}
}
"""

# Benchmark-level description shown for every configuration.
_DESCRIPTION = """\
JGLUE, Japanese General Language Understanding Evaluation, \
is built to measure the general NLU ability in Japanese. JGLUE has been constructed \
from scratch without translation. We hope that JGLUE will facilitate NLU research in Japanese.\
"""

# Homepages for the benchmark and its constituent corpora.
_JGLUE_HOMEPAGE = "https://github.com/yahoojapan/JGLUE"
_JCOLA_HOMEPAGE = "https://github.com/osekilab/JCoLA"
_MARC_JA_HOMEPAGE = "https://registry.opendata.aws/amazon-reviews-ml/"

# License statement applied to the JGLUE tasks (CC BY-SA 4.0).
_JGLUE_LICENSE = """\
This work is licensed under a Creative Commons Attribution-ShareAlike 4.0 International License.\
"""
88
+
89
# One-line description per builder configuration (surfaced in the dataset card).
_DESCRIPTION_CONFIGS = {
    "MARC-ja": "MARC-ja is a dataset of the text classification task. This dataset is based on the Japanese portion of Multilingual Amazon Reviews Corpus (MARC) (Keung+, 2020).",
    # BUG FIX: the JCoLA description contained an extraction artifact
    # ("Linguistic Accept010 ability"); restored to "Linguistic Acceptability".
    "JCoLA": "JCoLA (Japanese Corpus of Linguistic Acceptability) is a novel dataset for targeted syntactic evaluations of language models in Japanese, which consists of 10,020 sentences with acceptability judgments by linguists.",
    "JSTS": "JSTS is a Japanese version of the STS (Semantic Textual Similarity) dataset. STS is a task to estimate the semantic similarity of a sentence pair.",
    "JNLI": "JNLI is a Japanese version of the NLI (Natural Language Inference) dataset. NLI is a task to recognize the inference relation that a premise sentence has to a hypothesis sentence.",
    "JSQuAD": "JSQuAD is a Japanese version of SQuAD (Rajpurkar+, 2016), one of the datasets of reading comprehension.",
    "JCommonsenseQA": "JCommonsenseQA is a Japanese version of CommonsenseQA (Talmor+, 2019), which is a multiple-choice question answering dataset that requires commonsense reasoning ability.",
}
97
+
98
# Download locations, keyed by builder-config name.
# MARC-ja additionally needs the raw Amazon reviews TSV plus review-id lists
# used to filter and relabel the evaluation splits; JCoLA distinguishes
# in-domain / out-of-domain JSON files (the latter with an annotated variant).
_URLS = {
    "MARC-ja": {
        "data": "https://s3.amazonaws.com/amazon-reviews-pds/tsv/amazon_reviews_multilingual_JP_v1_00.tsv.gz",
        "filter_review_id_list": {
            "valid": "https://raw.githubusercontent.com/yahoojapan/JGLUE/main/preprocess/marc-ja/data/filter_review_id_list/valid.txt",
        },
        "label_conv_review_id_list": {
            "valid": "https://raw.githubusercontent.com/yahoojapan/JGLUE/main/preprocess/marc-ja/data/label_conv_review_id_list/valid.txt",
        },
    },
    "JCoLA": {
        "train": {
            "in_domain": {
                "json": "https://raw.githubusercontent.com/osekilab/JCoLA/main/data/jcola-v1.0/in_domain_train-v1.0.json",
            }
        },
        "valid": {
            "in_domain": {
                "json": "https://raw.githubusercontent.com/osekilab/JCoLA/main/data/jcola-v1.0/in_domain_valid-v1.0.json",
            },
            "out_of_domain": {
                "json": "https://raw.githubusercontent.com/osekilab/JCoLA/main/data/jcola-v1.0/out_of_domain_valid-v1.0.json",
                "json_annotated": "https://raw.githubusercontent.com/osekilab/JCoLA/main/data/jcola-v1.0/out_of_domain_valid_annotated-v1.0.json",
            },
        },
    },
    "JSTS": {
        "train": "https://raw.githubusercontent.com/yahoojapan/JGLUE/main/datasets/jsts-v1.1/train-v1.1.json",
        "valid": "https://raw.githubusercontent.com/yahoojapan/JGLUE/main/datasets/jsts-v1.1/valid-v1.1.json",
    },
    "JNLI": {
        "train": "https://raw.githubusercontent.com/yahoojapan/JGLUE/main/datasets/jnli-v1.1/train-v1.1.json",
        "valid": "https://raw.githubusercontent.com/yahoojapan/JGLUE/main/datasets/jnli-v1.1/valid-v1.1.json",
    },
    "JSQuAD": {
        "train": "https://raw.githubusercontent.com/yahoojapan/JGLUE/main/datasets/jsquad-v1.1/train-v1.1.json",
        "valid": "https://raw.githubusercontent.com/yahoojapan/JGLUE/main/datasets/jsquad-v1.1/valid-v1.1.json",
    },
    "JCommonsenseQA": {
        "train": "https://raw.githubusercontent.com/yahoojapan/JGLUE/main/datasets/jcommonsenseqa-v1.1/train-v1.1.json",
        "valid": "https://raw.githubusercontent.com/yahoojapan/JGLUE/main/datasets/jcommonsenseqa-v1.1/valid-v1.1.json",
    },
}
141
+
142
+
143
def dataset_info_jsts() -> ds.DatasetInfo:
    """Return the ``DatasetInfo`` for the JSTS (semantic similarity) config.

    The label is a real-valued similarity score, hence ``float``.
    """
    features = ds.Features(
        {
            "sentence_pair_id": ds.Value("string"),
            "yjcaptions_id": ds.Value("string"),
            "sentence1": ds.Value("string"),
            "sentence2": ds.Value("string"),
            "label": ds.Value("float"),
        }
    )
    return ds.DatasetInfo(
        description=_DESCRIPTION,
        # BUG FIX: the BibTeX text was previously interpolated into `homepage`.
        # Citations belong in `citation`; `homepage` must be the project URL.
        citation=f"{_JSTS_JNLI_CITATION}\n{_JGLUE_CITATION}",
        homepage=_JGLUE_HOMEPAGE,
        license=_JGLUE_LICENSE,
        features=features,
    )
160
+
161
+
162
def dataset_info_jnli() -> ds.DatasetInfo:
    """Return the ``DatasetInfo`` for the JNLI (natural language inference) config.

    The label is a three-way ``ClassLabel``: entailment / contradiction / neutral.
    """
    features = ds.Features(
        {
            "sentence_pair_id": ds.Value("string"),
            "yjcaptions_id": ds.Value("string"),
            "sentence1": ds.Value("string"),
            "sentence2": ds.Value("string"),
            "label": ds.ClassLabel(num_classes=3, names=["entailment", "contradiction", "neutral"]),
        }
    )
    return ds.DatasetInfo(
        description=_DESCRIPTION,
        # BUG FIX: the BibTeX text was previously interpolated into `homepage`.
        # Citations belong in `citation`; `homepage` must be the project URL.
        citation=f"{_JSTS_JNLI_CITATION}\n{_JGLUE_CITATION}",
        homepage=_JGLUE_HOMEPAGE,
        license=_JGLUE_LICENSE,
        features=features,
        supervised_keys=None,
    )
180
+
181
+
182
def dataset_info_jsquad() -> ds.DatasetInfo:
    """Return the ``DatasetInfo`` for the JSQuAD reading-comprehension config."""
    # SQuAD-style schema: four string columns, a sequence of answers, and a
    # boolean marking unanswerable questions.
    schema = {name: ds.Value("string") for name in ("id", "title", "context", "question")}
    schema["answers"] = ds.Sequence({"text": ds.Value("string"), "answer_start": ds.Value("int32")})
    schema["is_impossible"] = ds.Value("bool")
    return ds.DatasetInfo(
        description=_DESCRIPTION,
        citation=_JGLUE_CITATION,
        homepage=_JGLUE_HOMEPAGE,
        license=_JGLUE_LICENSE,
        features=ds.Features(schema),
        supervised_keys=None,
    )
201
+
202
+
203
def dataset_info_jcommonsenseqa() -> ds.DatasetInfo:
    """Return the ``DatasetInfo`` for the JCommonsenseQA multiple-choice config."""
    # Five answer candidates; the label class names mirror the choice columns.
    choice_names = [f"choice{i}" for i in range(5)]
    schema = {"q_id": ds.Value("int64"), "question": ds.Value("string")}
    for choice in choice_names:
        schema[choice] = ds.Value("string")
    schema["label"] = ds.ClassLabel(num_classes=5, names=choice_names)
    return ds.DatasetInfo(
        description=_DESCRIPTION,
        citation=_JGLUE_CITATION,
        homepage=_JGLUE_HOMEPAGE,
        license=_JGLUE_LICENSE,
        features=ds.Features(schema),
    )
226
+
227
+
228
def dataset_info_jcola() -> ds.DatasetInfo:
    """Return the ``DatasetInfo`` for the JCoLA acceptability-judgment config."""
    # Per-sentence boolean flags for the annotated linguistic phenomena
    # (only populated for the out-of-domain annotated file).
    phenomenon_keys = (
        "argument_structure",
        "binding",
        "control_raising",
        "ellipsis",
        "filler_gap",
        "island_effects",
        "morphology",
        "nominal_structure",
        "negative_polarity_concord_items",
        "quantifier",
        "verbal_agreement",
        "simple",
    )
    features = ds.Features(
        {
            "uid": ds.Value("int64"),
            "source": ds.Value("string"),
            "label": ds.ClassLabel(
                num_classes=2,
                names=["unacceptable", "acceptable"],
            ),
            "diacritic": ds.Value("string"),
            "sentence": ds.Value("string"),
            "original": ds.Value("string"),
            "translation": ds.Value("string"),
            "gloss": ds.Value("bool"),
            "linguistic_phenomenon": {key: ds.Value("bool") for key in phenomenon_keys},
        }
    )
    return ds.DatasetInfo(
        description=_DESCRIPTION,
        citation=f"{_JCOLA_CITATION}\n{_JGLUE_CITATION}",
        homepage=_JCOLA_HOMEPAGE,
        features=features,
    )
264
+
265
+
266
def dataset_info_marc_ja() -> ds.DatasetInfo:
    """Return the ``DatasetInfo`` for the MARC-ja sentiment-classification config."""
    schema = {
        "sentence": ds.Value("string"),
        "label": ds.ClassLabel(num_classes=3, names=["positive", "negative", "neutral"]),
        "review_id": ds.Value("string"),
    }
    info_kwargs = dict(
        description=_DESCRIPTION,
        citation=f"{_MARC_JA_CITATION}\n{_JGLUE_CITATION}",
        homepage=_MARC_JA_HOMEPAGE,
        license=_JGLUE_LICENSE,
        features=ds.Features(schema),
    )
    return ds.DatasetInfo(**info_kwargs)
281
+
282
+
283
@dataclass
class JGLUEConfig(ds.BuilderConfig):
    """Base ``BuilderConfig`` shared by all JGLUE task configurations."""
286
+
287
+
288
@dataclass
class MarcJaConfig(JGLUEConfig):
    """Builder configuration for the MARC-ja task.

    Controls the local preprocessing of the raw Amazon reviews dump
    (rating-to-label mapping, length filtering, split ratios, and the
    review-id based filtering/relabeling of the evaluation splits).
    """

    name: str = "MARC-ja"
    # Convert half-width characters to full-width (requires `mojimoji`).
    is_han_to_zen: bool = False
    max_instance_num: Optional[int] = None
    # Reviews longer than this many characters are dropped.
    max_char_length: int = 500
    # When True, 3-star (neutral) reviews are discarded instead of labeled.
    is_pos_neg: bool = True
    train_ratio: float = 0.94
    val_ratio: float = 0.03
    test_ratio: float = 0.03
    output_testset: bool = False
    filter_review_id_list_valid: bool = True
    label_conv_review_id_list_valid: bool = True

    def __post_init__(self) -> None:
        # BUG FIX: the previous exact-equality check (`== 1.0`) rejected valid
        # ratio combinations such as 0.8/0.1/0.1, whose binary floating-point
        # sum differs from 1.0 by ~1e-16. Compare with a tolerance instead.
        total = self.train_ratio + self.val_ratio + self.test_ratio
        assert abs(total - 1.0) < 1e-6, f"split ratios must sum to 1.0, got {total}"
304
+
305
+
306
# The two JCoLA evaluation domains (see _URLS["JCoLA"]; the out-of-domain
# validation set also ships an annotated variant).
JcolaDomain = Literal["in_domain", "out_of_domain"]


@dataclass
class JcolaConfig(JGLUEConfig):
    """Builder configuration for the JCoLA task."""

    name: str = "JCoLA"
    # Which JCoLA domain to load; one of the JcolaDomain literals.
    domain: JcolaDomain = "in_domain"
313
+
314
+
315
def get_label(rating: int, is_pos_neg: bool = False) -> Optional[str]:
    """Map a 1-5 star rating to a sentiment label.

    Ratings of 4-5 map to "positive" and 1-2 to "negative". A 3-star rating
    is "neutral", unless *is_pos_neg* is set, in which case ``None`` is
    returned so the caller can drop the row entirely.
    """
    if rating >= 4:
        return "positive"
    if rating <= 2:
        return "negative"
    # Middle-of-the-road rating: drop (None) in pos/neg mode, else "neutral".
    return None if is_pos_neg else "neutral"
325
+
326
+
327
def is_filtered_by_ascii_rate(text: str, threshold: float = 0.9) -> bool:
    """Return ``True`` when *text* should be filtered out as mostly ASCII.

    Computes the fraction of characters in *text* that are ASCII-printable
    and compares it against *threshold* (default 0.9).

    BUG FIX: an empty string previously raised ``ZeroDivisionError``; it is
    now treated as not filtered (its ASCII rate is undefined, and empty rows
    carry no ASCII content).
    """
    if not text:
        return False
    ascii_letters = set(string.printable)
    rate = sum(c in ascii_letters for c in text) / len(text)
    return rate >= threshold
331
+
332
+
333
def shuffle_dataframe(df: pd.DataFrame) -> pd.DataFrame:
    """Deterministically shuffle the rows of *df* (fixed seed 1).

    Uses a dedicated ``random.Random(1)`` instance instead of reseeding the
    module-global RNG: the original implementation clobbered global random
    state as a side effect of every call. The resulting row order is
    identical, since both seed the same Mersenne Twister generator with 1.
    """
    records = df.to_dict(orient="records")
    random.Random(1).shuffle(records)
    return pd.DataFrame(records)
338
+
339
+
340
def get_filter_review_id_list(
    filter_review_id_list_paths: Dict[str, str],
) -> Dict[str, List[str]]:
    """Load the review-id exclusion lists for the "valid" / "test" splits.

    Each configured path points at a plain-text file with one review id per
    line. Splits without a configured path are omitted from the result.
    """
    loaded: Dict[str, List[str]] = {}
    for split in ("valid", "test"):
        path = filter_review_id_list_paths.get(split)
        if path is None:
            continue
        with open(path, "r", encoding="utf-8") as rf:
            loaded[split] = [line.rstrip() for line in rf]
    return loaded
357
+
358
+
359
def get_label_conv_review_id_list(
    label_conv_review_id_list_paths: Dict[str, str],
) -> Dict[str, Dict[str, str]]:
    """Load the review-id -> corrected-label mappings for "valid" / "test".

    Each configured path points at a two-column CSV (``review_id,label``).
    Splits without a configured path are omitted from the result.
    """
    import csv

    conversions: Dict[str, Dict[str, str]] = {}
    for split in ("valid", "test"):
        path = label_conv_review_id_list_paths.get(split)
        if path is None:
            continue
        with open(path, "r", encoding="utf-8") as rf:
            conversions[split] = {row[0]: row[1] for row in csv.reader(rf)}
    return conversions
378
+
379
+
380
def output_data(
    df: pd.DataFrame,
    train_ratio: float,
    val_ratio: float,
    test_ratio: float,
    output_testset: bool,
    filter_review_id_list_paths: Dict[str, str],
    label_conv_review_id_list_paths: Dict[str, str],
) -> Dict[str, pd.DataFrame]:
    """Split *df* into train/valid/test and apply JGLUE's id-based fixups.

    The evaluation splits are cleaned in two passes: rows whose review id is
    in the exclusion list are dropped, then rows whose review id appears in
    the label-conversion CSV get their label overwritten.

    Returns only the "train" and "valid" frames.
    """
    instance_num = len(df)
    split_dfs: Dict[str, pd.DataFrame] = {}
    length1 = int(instance_num * train_ratio)
    split_dfs["train"] = df.iloc[:length1]

    length2 = int(instance_num * (train_ratio + val_ratio))
    split_dfs["valid"] = df.iloc[length1:length2]
    split_dfs["test"] = df.iloc[length2:]

    filter_review_id_list = get_filter_review_id_list(
        filter_review_id_list_paths=filter_review_id_list_paths,
    )
    label_conv_review_id_list = get_label_conv_review_id_list(
        label_conv_review_id_list_paths=label_conv_review_id_list_paths,
    )

    # Drop reviews that JGLUE explicitly excludes from the evaluation splits.
    for eval_type in ("valid", "test"):
        if filter_review_id_list.get(eval_type):
            df = split_dfs[eval_type]
            df = df[~df["review_id"].isin(filter_review_id_list[eval_type])]
            split_dfs[eval_type] = df

    # Overwrite labels for reviews whose gold label was manually corrected.
    for eval_type in ("valid", "test"):
        if label_conv_review_id_list.get(eval_type):
            df = split_dfs[eval_type]
            # BUG FIX: the conversion table was previously always read from
            # the "valid" entry inside this loop, so the test split would have
            # been relabeled from the wrong mapping; index by the current split.
            df = df.assign(converted_label=df["review_id"].map(label_conv_review_id_list[eval_type]))
            df = df.assign(
                label=df[["label", "converted_label"]].apply(
                    lambda xs: xs["label"] if pd.isnull(xs["converted_label"]) else xs["converted_label"],
                    axis=1,
                )
            )
            df = df.drop(columns=["converted_label"])
            split_dfs[eval_type] = df

    # NOTE(review): `test_ratio` and `output_testset` are accepted but the
    # test split is never returned — presumably only train/valid are consumed
    # downstream; confirm before relying on the test split here.
    return {
        "train": split_dfs["train"],
        "valid": split_dfs["valid"],
    }
428
+
429
+
430
def preprocess_for_marc_ja(
    config: MarcJaConfig,
    data_file_path: str,
    filter_review_id_list_paths: Dict[str, str],
    label_conv_review_id_list_paths: Dict[str, str],
) -> Dict[str, pd.DataFrame]:
    """Build the MARC-ja train/valid splits from the raw Amazon reviews TSV.

    Pipeline: select columns -> map star rating to sentiment label -> strip
    HTML -> drop mostly-ASCII reviews -> length filter -> optional
    half-width-to-full-width conversion -> deterministic shuffle -> split and
    clean via ``output_data``.

    ``mojimoji`` and ``beautifulsoup4`` are optional dependencies; when either
    is missing the corresponding step degrades to a no-op and a warning is
    emitted, so results may differ from the official preprocessing.
    """
    try:
        import mojimoji

        def han_to_zen(text: str) -> str:
            return mojimoji.han_to_zen(text)

    except ImportError:
        warnings.warn(
            "can't import `mojimoji`, failing back to method that do nothing. "
            "We recommend running `pip install mojimoji` to reproduce the original preprocessing.",
            UserWarning,
        )

        def han_to_zen(text: str) -> str:
            return text

    try:
        from bs4 import BeautifulSoup

        def cleanup_text(text: str) -> str:
            return BeautifulSoup(text, "html.parser").get_text()

    except ImportError:
        warnings.warn(
            "can't import `beautifulsoup4`, failing back to method that do nothing."
            "We recommend running `pip install beautifulsoup4` to reproduce the original preprocessing.",
            UserWarning,
        )

        def cleanup_text(text: str) -> str:
            return text

    from tqdm import tqdm

    df = pd.read_csv(data_file_path, delimiter="\t")
    df = df[["review_body", "star_rating", "review_id"]]

    # rename columns
    df = df.rename(columns={"review_body": "text", "star_rating": "rating"})

    # convert the rating to label (None when is_pos_neg drops 3-star reviews)
    tqdm.pandas(dynamic_ncols=True, desc="Convert the rating to the label")
    df = df.assign(label=df["rating"].progress_apply(lambda rating: get_label(rating, config.is_pos_neg)))

    # remove rows where the label is None
    df = df[~df["label"].isnull()]

    # remove html tags from the text
    tqdm.pandas(dynamic_ncols=True, desc="Remove html tags from the text")
    df = df.assign(text=df["text"].progress_apply(cleanup_text))

    # filter by ascii rate (drops reviews that are mostly non-Japanese)
    tqdm.pandas(dynamic_ncols=True, desc="Filter by ascii rate")
    df = df[~df["text"].progress_apply(is_filtered_by_ascii_rate)]

    if config.max_char_length is not None:
        df = df[df["text"].str.len() <= config.max_char_length]

    if config.is_han_to_zen:
        df = df.assign(text=df["text"].apply(han_to_zen))

    df = df[["text", "label", "review_id"]]
    df = df.rename(columns={"text": "sentence"})

    # shuffle dataset (deterministic; see shuffle_dataframe)
    df = shuffle_dataframe(df)

    split_dfs = output_data(
        df=df,
        train_ratio=config.train_ratio,
        val_ratio=config.val_ratio,
        test_ratio=config.test_ratio,
        output_testset=config.output_testset,
        filter_review_id_list_paths=filter_review_id_list_paths,
        label_conv_review_id_list_paths=label_conv_review_id_list_paths,
    )
    return split_dfs
513
+
514
+
515
class JGLUE(ds.GeneratorBasedBuilder):
    """HuggingFace ``datasets`` builder for the JGLUE benchmark.

    One builder config per task: MARC-ja, JCoLA, JSTS, JNLI, JSQuAD and
    JCommonsenseQA. MARC-ja is preprocessed locally from the raw Amazon
    reviews TSV (with a parquet fallback); the other tasks are loaded from
    the official JSON releases.
    """

    JGLUE_VERSION = ds.Version("1.1.0")
    JCOLA_VERSION = ds.Version("1.0.0")

    BUILDER_CONFIG_CLASS = JGLUEConfig
    BUILDER_CONFIGS = [
        MarcJaConfig(
            name="MARC-ja",
            version=JGLUE_VERSION,
            description=_DESCRIPTION_CONFIGS["MARC-ja"],
        ),
        JcolaConfig(
            name="JCoLA",
            version=JCOLA_VERSION,
            description=_DESCRIPTION_CONFIGS["JCoLA"],
        ),
        JGLUEConfig(
            name="JSTS",
            version=JGLUE_VERSION,
            description=_DESCRIPTION_CONFIGS["JSTS"],
        ),
        JGLUEConfig(
            name="JNLI",
            version=JGLUE_VERSION,
            description=_DESCRIPTION_CONFIGS["JNLI"],
        ),
        JGLUEConfig(
            name="JSQuAD",
            version=JGLUE_VERSION,
            description=_DESCRIPTION_CONFIGS["JSQuAD"],
        ),
        JGLUEConfig(
            name="JCommonsenseQA",
            version=JGLUE_VERSION,
            description=_DESCRIPTION_CONFIGS["JCommonsenseQA"],
        ),
    ]

    def _info(self) -> ds.DatasetInfo:
        """Return the ``DatasetInfo`` matching the selected config."""
        if self.config.name == "JSTS":
            return dataset_info_jsts()
        elif self.config.name == "JNLI":
            return dataset_info_jnli()
        elif self.config.name == "JSQuAD":
            return dataset_info_jsquad()
        elif self.config.name == "JCommonsenseQA":
            return dataset_info_jcommonsenseqa()
        elif self.config.name == "JCoLA":
            return dataset_info_jcola()
        elif self.config.name == "MARC-ja":
            return dataset_info_marc_ja()
        else:
            raise ValueError(f"Invalid config name: {self.config.name}")

    def __split_generators_marc_ja(self, dl_manager: ds.DownloadManager):
        """Download and preprocess MARC-ja, producing train/valid splits.

        Tries the full local preprocessing of the raw Amazon reviews TSV
        first; on ``KeyError`` it falls back to pre-built parquet files
        hosted on the Hub (presumably the raw TSV download is no longer
        available and the expected key is missing — TODO confirm).
        """
        file_paths = dl_manager.download_and_extract(_URLS[self.config.name])

        filter_review_id_list = file_paths["filter_review_id_list"]
        label_conv_review_id_list = file_paths["label_conv_review_id_list"]

        try:
            split_dfs = preprocess_for_marc_ja(
                config=self.config,
                data_file_path=file_paths["data"],
                filter_review_id_list_paths=filter_review_id_list,
                label_conv_review_id_list_paths=label_conv_review_id_list,
            )
        except KeyError as err:
            from urllib.parse import urljoin

            logger.warning(err)

            # Fallback: load the preprocessed parquet conversion of MARC-ja.
            base_url = "https://huggingface.co/datasets/shunk031/JGLUE/resolve/refs%2Fconvert%2Fparquet/MARC-ja/"
            marcja_parquet_urls = {
                "train": urljoin(base_url, "jglue-train.parquet"),
                "valid": urljoin(base_url, "jglue-validation.parquet"),
            }
            file_paths = dl_manager.download_and_extract(marcja_parquet_urls)
            split_dfs = {k: pd.read_parquet(v) for k, v in file_paths.items()}

        return [
            ds.SplitGenerator(
                name=ds.Split.TRAIN,
                gen_kwargs={"split_df": split_dfs["train"]},
            ),
            ds.SplitGenerator(
                name=ds.Split.VALIDATION,
                gen_kwargs={"split_df": split_dfs["valid"]},
            ),
        ]

    def __split_generators_jcola(self, dl_manager: ds.DownloadManager):
        """Produce the four JCoLA splits (train, validation, and the two
        out-of-domain validation variants)."""
        file_paths = dl_manager.download_and_extract(_URLS[self.config.name])

        return [
            ds.SplitGenerator(
                name=ds.Split.TRAIN,
                gen_kwargs={"file_path": file_paths["train"]["in_domain"]["json"]},
            ),
            ds.SplitGenerator(
                name=ds.Split.VALIDATION,
                gen_kwargs={"file_path": file_paths["valid"]["in_domain"]["json"]},
            ),
            ds.SplitGenerator(
                name=ds.NamedSplit("validation_out_of_domain"),
                gen_kwargs={"file_path": file_paths["valid"]["out_of_domain"]["json"]},
            ),
            ds.SplitGenerator(
                name=ds.NamedSplit("validation_out_of_domain_annotated"),
                gen_kwargs={"file_path": file_paths["valid"]["out_of_domain"]["json_annotated"]},
            ),
        ]

    def __split_generators(self, dl_manager: ds.DownloadManager):
        """Default train/validation split generation (JSTS, JNLI, JSQuAD,
        JCommonsenseQA)."""
        file_paths = dl_manager.download_and_extract(_URLS[self.config.name])

        return [
            ds.SplitGenerator(
                name=ds.Split.TRAIN,
                gen_kwargs={"file_path": file_paths["train"]},
            ),
            ds.SplitGenerator(
                name=ds.Split.VALIDATION,
                gen_kwargs={"file_path": file_paths["valid"]},
            ),
        ]

    def _split_generators(self, dl_manager: ds.DownloadManager):
        """Dispatch split generation to the task-specific helper."""
        if self.config.name == "MARC-ja":
            return self.__split_generators_marc_ja(dl_manager)
        elif self.config.name == "JCoLA":
            return self.__split_generators_jcola(dl_manager)
        else:
            return self.__split_generators(dl_manager)

    def __generate_examples_marc_ja(self, split_df: Optional[pd.DataFrame] = None):
        """Yield MARC-ja examples directly from the preprocessed DataFrame."""
        if split_df is None:
            raise ValueError(f"Invalid preprocessing for {self.config.name}")

        instances = split_df.to_dict(orient="records")
        for i, data_dict in enumerate(instances):
            yield i, data_dict

    def __generate_examples_jcola(self, file_path: Optional[str] = None):
        """Yield JCoLA examples from a JSON-lines file.

        Each raw record is normalized in three steps: the integer label is
        mapped to its class name, the optional translation/gloss fields are
        filled with ``None`` when absent, and the phenomenon columns (present
        only in the annotated out-of-domain file) are collected under a
        single ``linguistic_phenomenon`` dict.
        """
        if file_path is None:
            raise ValueError(f"Invalid argument for {self.config.name}")

        def convert_label(json_dict):
            # 0 -> "unacceptable", anything else -> "acceptable".
            label_int = json_dict["label"]
            label_str = "unacceptable" if label_int == 0 else "acceptable"
            json_dict["label"] = label_str
            return json_dict

        def convert_addntional_info(json_dict):
            # Ensure the optional keys exist (None when missing).
            json_dict["translation"] = json_dict.get("translation")
            json_dict["gloss"] = json_dict.get("gloss")
            return json_dict

        def convert_phenomenon(json_dict):
            # "Arg. Str." is used as the sentinel for annotated records; when
            # it is absent, every phenomenon field is emitted as None.
            argument_structure = json_dict.get("Arg. Str.")

            def json_pop(key):
                return json_dict.pop(key) if argument_structure is not None else None

            json_dict["linguistic_phenomenon"] = {
                "argument_structure": json_pop("Arg. Str."),
                "binding": json_pop("binding"),
                "control_raising": json_pop("control/raising"),
                "ellipsis": json_pop("ellipsis"),
                "filler_gap": json_pop("filler-gap"),
                "island_effects": json_pop("island effects"),
                "morphology": json_pop("morphology"),
                "nominal_structure": json_pop("nominal structure"),
                "negative_polarity_concord_items": json_pop("NPI/NCI"),
                "quantifier": json_pop("quantifier"),
                "verbal_agreement": json_pop("verbal agr."),
                "simple": json_pop("simple"),
            }
            return json_dict

        with open(file_path, "r", encoding="utf-8") as rf:
            for i, line in enumerate(rf):
                json_dict = json.loads(line)

                example = convert_label(json_dict)
                example = convert_addntional_info(example)
                example = convert_phenomenon(example)

                yield i, example

    def __generate_examples_jsquad(self, file_path: Optional[str] = None):
        """Yield JSQuAD examples by flattening the SQuAD-style nested JSON
        (data -> paragraphs -> qas), keyed by question id."""
        if file_path is None:
            raise ValueError(f"Invalid argument for {self.config.name}")

        with open(file_path, "r", encoding="utf-8") as rf:
            json_data = json.load(rf)

        for json_dict in json_data["data"]:
            title = json_dict["title"]
            paragraphs = json_dict["paragraphs"]

            for paragraph in paragraphs:
                context = paragraph["context"]
                questions = paragraph["qas"]

                for question_dict in questions:
                    q_id = question_dict["id"]
                    question = question_dict["question"]
                    answers = question_dict["answers"]
                    is_impossible = question_dict["is_impossible"]

                    example_dict = {
                        "id": q_id,
                        "title": title,
                        "context": context,
                        "question": question,
                        "answers": answers,
                        "is_impossible": is_impossible,
                    }

                    yield q_id, example_dict

    def __generate_examples_jcommonsenseqa(self, file_path: Optional[str] = None):
        """Yield JCommonsenseQA examples from JSON lines, converting the
        numeric label into its "choice{n}" class name."""
        if file_path is None:
            raise ValueError(f"Invalid argument for {self.config.name}")

        with open(file_path, "r", encoding="utf-8") as rf:
            for i, line in enumerate(rf):
                json_dict = json.loads(line)
                json_dict["label"] = f"choice{json_dict['label']}"
                yield i, json_dict

    def __generate_examples(self, file_path: Optional[str] = None):
        """Yield examples from a plain JSON-lines file (JSTS, JNLI)."""
        if file_path is None:
            raise ValueError(f"Invalid argument for {self.config.name}")

        with open(file_path, "r", encoding="utf-8") as rf:
            for i, line in enumerate(rf):
                json_dict = json.loads(line)
                yield i, json_dict

    def _generate_examples(
        self,
        file_path: Optional[str] = None,
        split_df: Optional[pd.DataFrame] = None,
    ):
        """Dispatch example generation to the task-specific helper."""
        if self.config.name == "MARC-ja":
            yield from self.__generate_examples_marc_ja(split_df)

        elif self.config.name == "JCoLA":
            yield from self.__generate_examples_jcola(file_path)

        elif self.config.name == "JSQuAD":
            yield from self.__generate_examples_jsquad(file_path)

        elif self.config.name == "JCommonsenseQA":
            yield from self.__generate_examples_jcommonsenseqa(file_path)

        else:
            yield from self.__generate_examples(file_path)