Datasets:

Modalities:
Tabular
Text
ArXiv:
Libraries:
Datasets
License:
Jonathan Li committed on
Commit
540732b
·
1 Parent(s): 4cc1fec

Fix dataset script

Browse files
Files changed (1) hide show
  1. echr.py +56 -33
echr.py CHANGED
@@ -1,4 +1,6 @@
1
  import datasets
 
 
2
  from datasets import Value, Sequence
3
 
4
  _CITATION = """\
@@ -78,39 +80,25 @@ class Echr(datasets.GeneratorBasedBuilder):
78
  ]
79
 
80
  def _info(self):
81
- features = {
82
- "itemid": Value(dtype="string", id=None),
83
- "languageisocode": Value(dtype="string", id=None),
84
- "respondent": Value(dtype="string", id=None),
85
- "branch": Value(dtype="string", id=None),
86
- "date": Value(dtype="int64", id=None),
87
- "docname": Value(dtype="string", id=None),
88
- "importance": Value(dtype="int64", id=None),
89
- "conclusion": Value(dtype="string", id=None),
90
- "judges": Value(dtype="string", id=None),
91
- "text": Sequence(
92
- feature=Value(dtype="string", id=None), length=-1, id=None
93
- ),
94
- "violated_articles": Sequence(
95
- feature=Value(dtype="string", id=None), length=-1, id=None
96
- ),
97
- "violated_paragraphs": Sequence(
98
- feature=Value(dtype="string", id=None), length=-1, id=None
99
- ),
100
- "violated_bulletpoints": Sequence(
101
- feature=Value(dtype="string", id=None), length=-1, id=None
102
- ),
103
- "non_violated_articles": Sequence(
104
- feature=Value(dtype="string", id=None), length=-1, id=None
105
- ),
106
- "non_violated_paragraphs": Sequence(
107
- feature=Value(dtype="string", id=None), length=-1, id=None
108
- ),
109
- "non_violated_bulletpoints": Sequence(
110
- feature=Value(dtype="string", id=None), length=-1, id=None
111
- ),
112
- "violated": Value(dtype="bool", id=None),
113
- }
114
 
115
  return datasets.DatasetInfo(
116
  features=features,
@@ -118,3 +106,38 @@ class Echr(datasets.GeneratorBasedBuilder):
118
  description=_DESCRIPTION,
119
  citation=_CITATION,
120
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import datasets
2
+ import json
3
+ import os
4
  from datasets import Value, Sequence
5
 
6
  _CITATION = """\
 
80
  ]
81
 
82
  def _info(self):
83
+ features = datasets.Features({
84
+ "itemid": Value(dtype="string"),
85
+ "languageisocode": Value(dtype="string"),
86
+ "respondent": Value(dtype="string"),
87
+ "branch": Value(dtype="string"),
88
+ "date": Value(dtype="int64"),
89
+ "docname": Value(dtype="string"),
90
+ "importance": Value(dtype="int64"),
91
+ "conclusion": Value(dtype="string"),
92
+ "judges": Value(dtype="string"),
93
+ "text": Sequence(feature=Value(dtype="string")),
94
+ "violated_articles": Sequence(feature=Value(dtype="string")),
95
+ "violated_paragraphs": Sequence(feature=Value(dtype="string")),
96
+ "violated_bulletpoints": Sequence(feature=Value(dtype="string")),
97
+ "non_violated_articles": Sequence(feature=Value(dtype="string")),
98
+ "non_violated_paragraphs": Sequence(feature=Value(dtype="string")),
99
+ "non_violated_bulletpoints": Sequence(feature=Value(dtype="string")),
100
+ "violated": Value(dtype="bool"),
101
+ })
 
 
 
 
 
 
 
 
 
 
 
 
 
 
102
 
103
  return datasets.DatasetInfo(
104
  features=features,
 
106
  description=_DESCRIPTION,
107
  citation=_CITATION,
108
  )
109
+
110
def _split_generators(self, dl_manager):
    """Return one SplitGenerator per split (train / test / validation).

    Each split reads a JSON-lines file resolved relative to the builder's
    configured data directory; the resulting kwargs are passed verbatim to
    ``_generate_examples``.

    NOTE(review): ``dl_manager._base_path`` is a private attribute of
    DownloadManager — confirm a public equivalent (e.g. passing relative
    paths to ``dl_manager.download``) before relying on it across
    `datasets` library versions.
    """
    data_dir = os.path.join(dl_manager._base_path, self.config.data_dir)
    # One (split name, file name, split tag) triple per split keeps the
    # generators in sync and replaces three copy-pasted constructor calls.
    split_files = [
        (datasets.Split.TRAIN, "train.jsonl", "train"),
        (datasets.Split.TEST, "test.jsonl", "test"),
        (datasets.Split.VALIDATION, "dev.jsonl", "dev"),
    ]
    return [
        datasets.SplitGenerator(
            name=split_name,
            # These kwargs will be passed to _generate_examples.
            gen_kwargs={
                "filepath": os.path.join(data_dir, filename),
                "split": split_tag,
            },
        )
        for split_name, filename, split_tag in split_files
    ]
135
+
136
+ def _generate_examples(self, filepath, split):
137
+ with open(filepath, encoding="utf-8") as f:
138
+ for id_, row in enumerate(f):
139
+ data = json.loads(row)
140
+ yield id_, data
141
+
142
+
143
+