OfekGlick committed
Commit 6111533 · 1 parent: 0ff7432

Upload DiscoEval.py

Files changed (1): DiscoEval.py (+22 -5)
DiscoEval.py CHANGED
@@ -18,6 +18,7 @@ import datasets
 import constants
 import pickle
 import logging
+from huggingface_hub import snapshot_download, huggingface_hub
 
 _CITATION = """\
 @InProceedings{mchen-discoeval-19,
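
The hunk above pulls in snapshot_download along with the huggingface_hub module name that the later hf_hub_url calls rely on. For orientation, here is a minimal sketch (not part of the commit) of fetching the whole dataset repo in one call with snapshot_download; the repo id is taken from the hf_hub_url calls further down, and repo_type="dataset" is an assumption, since huggingface_hub resolves model repos by default:

    from huggingface_hub import snapshot_download

    # Download (and cache) the entire dataset repository; returns a local path.
    local_dir = snapshot_download(
        repo_id="OfekGlick/DiscoEval",  # repo id used later in this diff
        repo_type="dataset",            # assumption: the default repo_type is "model"
    )
    # Split files would then be read from local_dir, e.g. under the
    # constants.SP_DATA_DIR / constants.SP_DIRS layout the script defines.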
@@ -38,10 +39,9 @@ _HOMEPAGE = "https://github.com/ZeweiChu/DiscoEval"
 # TODO: Add link to the official dataset URLs here
 # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
-# _URLS = {
-#     "first_domain": "https://huggingface.co/great-new-dataset-first_domain.zip",
-#     "second_domain": "https://huggingface.co/great-new-dataset-second_domain.zip",
-# }
+_URLS = {
+    "DiscoEval": "https://huggingface.co/.zip",
+}
 
 
 # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
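
The template comments in this hunk describe the standard loading-script flow: _URLS maps a config name to a downloadable archive, and dl_manager.download_and_extract turns it into a local path inside _split_generators. A minimal sketch of that flow, assuming the "DiscoEval" key added above and a hypothetical train file name (the archive URL itself is still a placeholder in this commit):

    import os
    import datasets

    class DiscoEvalSentence(datasets.GeneratorBasedBuilder):
        def _split_generators(self, dl_manager):
            # Resolve the archive URL to a local, extracted, cached directory.
            data_dir = dl_manager.download_and_extract(_URLS["DiscoEval"])
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "filepath": os.path.join(data_dir, "train.txt"),  # hypothetical name
                        "split": "train",
                    },
                ),
            ]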
@@ -180,10 +180,24 @@ class DiscoEvalSentence(datasets.GeneratorBasedBuilder):
         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
 
+
         # urls = _URLS[self.config.name]
         # data_dir = dl_manager.download_and_extract(urls)
+
         if self.config.name in [constants.SPARXIV, constants.SPROCSTORY, constants.SPWIKI]:
-            data_dir = os.path.join(constants.SP_DATA_DIR, constants.SP_DIRS[self.config.name])
+            subfolder = os.path.join(constants.SP_DATA_DIR, constants.SP_DIRS[self.config.name])
+            huggingface_hub.hf_hub_url(
+                repo_id="OfekGlick/DiscoEval",
+                filename=constants.SP_TRAIN_NAME,
+                subfolder=subfolder)
+            huggingface_hub.hf_hub_url(
+                repo_id="OfekGlick/DiscoEval",
+                filename=constants.SP_VALID_NAME,
+                subfolder=subfolder)
+            huggingface_hub.hf_hub_url(
+                repo_id="OfekGlick/DiscoEval",
+                filename=constants.SP_TEST_NAME,
+                subfolder=subfolder)
             train_name = constants.SP_TRAIN_NAME
             valid_name = constants.SP_VALID_NAME
             test_name = constants.SP_TEST_NAME
@@ -239,6 +253,9 @@ class DiscoEvalSentence(datasets.GeneratorBasedBuilder):
             ),
         ]
 
+
+
+
     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
     def _generate_examples(self, filepath, split):
         # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
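
In the _split_generators hunk above, huggingface_hub.hf_hub_url is called once per split file. hf_hub_url only constructs a download URL string and performs no network I/O, so the resulting URLs have to be passed onward to be useful. One plausible wiring, sketched with names from the script (constants, self.config, dl_manager) plus two assumptions the diff does not show: a plain `import huggingface_hub` binding and repo_type="dataset" (without it the URL resolves against a model repo):

    import os
    import huggingface_hub

    # Build one resolvable URL per split file; hf_hub_url returns a plain string.
    subfolder = os.path.join(constants.SP_DATA_DIR, constants.SP_DIRS[self.config.name])
    urls = {
        split: huggingface_hub.hf_hub_url(
            repo_id="OfekGlick/DiscoEval",
            filename=filename,
            subfolder=subfolder,
            repo_type="dataset",  # assumption: the default repo_type is "model"
        )
        for split, filename in [
            ("train", constants.SP_TRAIN_NAME),
            ("valid", constants.SP_VALID_NAME),
            ("test", constants.SP_TEST_NAME),
        ]
    }
    # The download manager caches the files and returns local paths in the same dict shape.
    data_dir = dl_manager.download_and_extract(urls)

Alternatively, huggingface_hub.hf_hub_download skips the URL step entirely and returns a local file path directly.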