Upload covost2.py with huggingface_hub
covost2.py  CHANGED  (+18 -18)
@@ -21,9 +21,9 @@ from typing import Dict, List, Tuple
 import datasets
 import pandas as pd
 
-from
-from
-from
+from seacrowd.utils import schemas
+from seacrowd.utils.configs import SEACrowdConfig
+from seacrowd.utils.constants import DEFAULT_SEACROWD_VIEW_NAME, DEFAULT_SOURCE_VIEW_NAME, Tasks
 
 _LANGUAGES = ["ind", "eng"]
 _CITATION = """\
@@ -48,7 +48,7 @@ _CITATION = """\
 
 _DATASETNAME = "covost2"
 _SOURCE_VIEW_NAME = DEFAULT_SOURCE_VIEW_NAME
-_UNIFIED_VIEW_NAME =
+_UNIFIED_VIEW_NAME = DEFAULT_SEACROWD_VIEW_NAME
 
 _DESCRIPTION = """\
 CoVoST2 is a large-scale multilingual speech translation corpus covering translations from 21 languages to English
@@ -68,17 +68,17 @@ _URLS = {_DATASETNAME: {"ind": COMMONVOICE_URL_TEMPLATE.format(lang=LANG_CODE["i
 
 _SUPPORTED_TASKS = [Tasks.SPEECH_TO_TEXT_TRANSLATION, Tasks.MACHINE_TRANSLATION]
 _SOURCE_VERSION = "1.0.0"
-
+_SEACROWD_VERSION = "2024.06.20"
 
 
-def
+def seacrowd_config_constructor(src_lang, tgt_lang, schema, version):
     if src_lang == "" or tgt_lang == "":
         raise ValueError(f"Invalid src_lang {src_lang} or tgt_lang {tgt_lang}")
 
-    if schema not in ["source", "
+    if schema not in ["source", "seacrowd_sptext", "seacrowd_t2t"]:
         raise ValueError(f"Invalid schema: {schema}")
 
-    return
+    return SEACrowdConfig(
         name="covost2_{src}_{tgt}_{schema}".format(src=src_lang, tgt=tgt_lang, schema=schema),
         version=datasets.Version(version),
         description="covost2 source schema for {schema} from {src} to {tgt}".format(schema=schema, src=src_lang, tgt=tgt_lang),
@@ -90,7 +90,7 @@ def nusantara_config_constructor(src_lang, tgt_lang, schema, version):
 class Covost2(datasets.GeneratorBasedBuilder):
     """CoVoST2 dataset is a dataset mainly for speech to text translation task. The data was taken from Mozilla Common
     Voices dataset. In the implementation of the source schema, the audio and transcriptions of the source language,
-    as well as the translated transcriptions are provided. In the implementation of the
+    as well as the translated transcriptions are provided. In the implementation of the seacrowd schema, only the audio of the source language and transcriptions of the
     target language are provided. The source and target languages available are eng->ind and ind -> eng respectively.
     In addition to the speech to text translation, this dataset (text only) can be used as a machine translation for
     eng->ind and ind->eng.
@@ -101,12 +101,12 @@ class Covost2(datasets.GeneratorBasedBuilder):
     COVOST_URL_TEMPLATE = "https://dl.fbaipublicfiles.com/covost/covost_v2.{src_lang}_{tgt_lang}.tsv.tar.gz"
 
     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
-
+    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
 
     BUILDER_CONFIGS = (
-        [
-        + [
-        + [
+        [seacrowd_config_constructor(src, tgt, "source", _SOURCE_VERSION) for (src, tgt) in LANG_COMBINATION_CODE]
+        + [seacrowd_config_constructor(src, tgt, "seacrowd_sptext", _SEACROWD_VERSION) for (src, tgt) in LANG_COMBINATION_CODE]
+        + [seacrowd_config_constructor(src, tgt, "seacrowd_t2t", _SEACROWD_VERSION) for (src, tgt) in LANG_COMBINATION_CODE]
     )
 
     DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_eng_ind_source"
@@ -116,9 +116,9 @@ class Covost2(datasets.GeneratorBasedBuilder):
             features = datasets.Features(
                 {"client_id": datasets.Value("string"), "file": datasets.Value("string"), "audio": datasets.Audio(sampling_rate=16_000), "sentence": datasets.Value("string"), "translation": datasets.Value("string"), "id": datasets.Value("string")}
             )
-        elif self.config.schema == "
+        elif self.config.schema == "seacrowd_sptext":
             features = schemas.speech_text_features
-        elif self.config.schema == "
+        elif self.config.schema == "seacrowd_t2t":
             features = schemas.text2text_features
 
         return datasets.DatasetInfo(
@@ -127,7 +127,7 @@ class Covost2(datasets.GeneratorBasedBuilder):
             homepage=_HOMEPAGE,
             license=_LICENSE,
             citation=_CITATION,
-            task_templates=[datasets.AutomaticSpeechRecognition(audio_column="audio", transcription_column="sentences")] if (self.config.schema == "
+            task_templates=[datasets.AutomaticSpeechRecognition(audio_column="audio", transcription_column="sentences")] if (self.config.schema == "seacrowd_sptext" or self.config.schema == "source") else None,
         )
 
     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
@@ -209,7 +209,7 @@ class Covost2(datasets.GeneratorBasedBuilder):
                     "file": os.path.join(filepath, "clips", row["path"]),
                     "audio": os.path.join(filepath, "clips", row["path"]),
                 }
-            elif self.config.schema == "
+            elif self.config.schema == "seacrowd_sptext":
                 yield id, {
                     "id": row["path"].replace(".mp3", ""),
                     "speaker_id": row["client_id"],
@@ -221,7 +221,7 @@ class Covost2(datasets.GeneratorBasedBuilder):
                         "speaker_gender": None,
                     },
                 }
-            elif self.config.schema == "
+            elif self.config.schema == "seacrowd_t2t":
                 yield id, {"id": row["path"].replace(".mp3", ""), "text_1": row["sentence"], "text_2": row["translation"], "text_1_name": src_lang, "text_2_name": tgt_lang}
             else:
                 raise NotImplementedError(f"Schema '{self.config.schema}' is not defined.")
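For reference, the configs built above follow the covost2_{src}_{tgt}_{schema} naming pattern from seacrowd_config_constructor, with schema being "source", "seacrowd_sptext", or "seacrowd_t2t". Below is a minimal loading sketch, not part of the commit, assuming the loader script is saved locally as covost2.py, that ("eng", "ind") is one of the pairs in LANG_COMBINATION_CODE (defined outside this diff), and that the builder generates a "train" split:

from datasets import load_dataset

# Sketch only: config names are generated as "covost2_{src}_{tgt}_{schema}".
# "covost2_eng_ind_seacrowd_sptext" assumes ("eng", "ind") is in LANG_COMBINATION_CODE.
ds = load_dataset(
    "covost2.py",                            # local path to this loader script
    name="covost2_eng_ind_seacrowd_sptext",  # speech-to-text-translation schema
    trust_remote_code=True,                  # required for script-based loaders in recent datasets releases
)

# The seacrowd_sptext schema yields fields such as "id" and "speaker_id" (see _generate_examples).
print(ds["train"][0]["id"])                  # assumes a "train" split exists

The same call with name="covost2_eng_ind_seacrowd_t2t" would expose the rows as text pairs (text_1/text_2) for the text-only machine translation view.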