jhj0517 committed
Commit 4dd81c9 · unverified · 2 parents: 3b18ac5 3fde2e0

Merge pull request #9 from jhj0517/add-t2t-translation

app.py CHANGED
@@ -1,7 +1,8 @@
 import gradio as gr
 from modules.whisper_Inference import WhisperInference
+from modules.nllb_inference import NLLBInference
 import os
-from ui.htmls import CSS, MARKDOWN
+from ui.htmls import *
 from modules.youtube_manager import get_ytmetas


@@ -21,6 +22,7 @@ def on_change_models(model_size):


 whisper_inf = WhisperInference()
+nllb_inf = NLLBInference()
 block = gr.Blocks(css=CSS).queue(api_open=False)

 with block:
@@ -100,4 +102,29 @@ with block:
             btn_openfolder.click(fn=lambda: open_fodler("outputs"), inputs=None, outputs=None)
             dd_model.change(fn=on_change_models, inputs=[dd_model], outputs=[cb_translate])

+        with gr.TabItem("T2T Translation"):  # tab 4
+            with gr.Row():
+                file_subs = gr.Files(type="file", label="Upload Subtitle Files to translate here",
+                                     file_types=['.vtt', '.srt'])
+
+            with gr.TabItem("NLLB"):  # sub tab 1
+                with gr.Row():
+                    dd_nllb_model = gr.Dropdown(label="Model", value=nllb_inf.default_model_size,
+                                                choices=nllb_inf.available_models)
+                    dd_nllb_sourcelang = gr.Dropdown(label="Source Language", choices=nllb_inf.available_source_langs)
+                    dd_nllb_targetlang = gr.Dropdown(label="Target Language", choices=nllb_inf.available_target_langs)
+                with gr.Row():
+                    btn_run = gr.Button("TRANSLATE SUBTITLE FILE", variant="primary")
+                with gr.Row():
+                    tb_indicator = gr.Textbox(label="Output")
+                    btn_openfolder = gr.Button('📂').style(full_width=False)
+                with gr.Column():
+                    md_vram_table = gr.HTML(NLLB_VRAM_TABLE, elem_id="md_nllb_vram_table")
+
+            btn_run.click(fn=nllb_inf.translate_file,
+                          inputs=[file_subs, dd_nllb_model, dd_nllb_sourcelang, dd_nllb_targetlang],
+                          outputs=[tb_indicator])
+            btn_openfolder.click(fn=lambda: open_fodler("outputs\\translations"), inputs=None, outputs=None)
+
+
 block.launch()
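For reference, the new tab routes the uploaded files straight into `NLLBInference.translate_file`. Below is a minimal sketch of calling that handler outside the UI; the `FakeUpload` stub, the sample filename, and the no-op progress callback are hypothetical stand-ins for what Gradio normally supplies (the handler only reads `.name` and `.orig_name` from each upload object):

```python
from modules.nllb_inference import NLLBInference

# Hypothetical stand-in for the upload objects Gradio passes to the handler.
class FakeUpload:
    def __init__(self, path):
        self.name = path       # temp-file path the handler reads from
        self.orig_name = path  # original filename used for the output name

nllb_inf = NLLBInference()
result = nllb_inf.translate_file(
    [FakeUpload("sample_subtitle.srt")],       # hypothetical input file
    "facebook/nllb-200-distilled-600M",        # smallest supported model
    "English",                                 # keys of NLLB_AVAILABLE_LANGS
    "Korean",
    progress=lambda *args, **kwargs: None,     # no-op outside a Gradio event
)
print(result)  # summary text; translated files land in outputs/translations/
```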
modules/nllb_inference.py ADDED
@@ -0,0 +1,302 @@
+from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
+import gradio as gr
+import torch
+import os
+from datetime import datetime
+
+from modules.subtitle_manager import *
+
+DEFAULT_MODEL_SIZE = "facebook/nllb-200-1.3B"
+NLLB_MODELS = ["facebook/nllb-200-3.3B", "facebook/nllb-200-1.3B", "facebook/nllb-200-distilled-600M"]
+
+
+class NLLBInference:
+    def __init__(self):
+        self.default_model_size = DEFAULT_MODEL_SIZE
+        self.current_model_size = None
+        self.model = None
+        self.tokenizer = None
+        self.available_models = NLLB_MODELS
+        self.available_source_langs = list(NLLB_AVAILABLE_LANGS.keys())
+        self.available_target_langs = list(NLLB_AVAILABLE_LANGS.keys())
+        self.device = 0 if torch.cuda.is_available() else -1
+        self.pipeline = None
+
+    def translate_text(self, text):
+        result = self.pipeline(text)
+        return result[0]['translation_text']
+
+    def translate_file(self, fileobjs, model_size, src_lang, tgt_lang,
+                       progress=gr.Progress()):
+        # Reload the model and tokenizer only if the requested size differs from the cached one.
+        if model_size != self.current_model_size or self.model is None:
+            print("\nInitializing NLLB Model..\n")
+            progress(0, desc="Initializing NLLB Model..")
+            self.current_model_size = model_size
+            self.model = AutoModelForSeq2SeqLM.from_pretrained(pretrained_model_name_or_path=model_size,
+                                                               cache_dir="models/NLLB")
+            self.tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path=model_size,
+                                                           cache_dir="models/NLLB/tokenizers")
+
+        src_lang = NLLB_AVAILABLE_LANGS[src_lang]
+        tgt_lang = NLLB_AVAILABLE_LANGS[tgt_lang]
+
+        self.pipeline = pipeline("translation",
+                                 model=self.model,
+                                 tokenizer=self.tokenizer,
+                                 src_lang=src_lang,
+                                 tgt_lang=tgt_lang,
+                                 device=self.device)
+
+        files_info = {}
+        for fileobj in fileobjs:
+            file_path = fileobj.name
+            file_name, file_ext = os.path.splitext(os.path.basename(fileobj.orig_name))
+            if file_ext == ".srt":
+                parsed_dicts = parse_srt(file_path=file_path)
+                total_progress = len(parsed_dicts)
+                for index, dic in enumerate(parsed_dicts):
+                    progress(index / total_progress, desc="Translating..")
+                    translated_text = self.translate_text(dic["sentence"])
+                    dic["sentence"] = translated_text
+                subtitle = get_serialized_srt(parsed_dicts)
+
+                timestamp = datetime.now().strftime("%m%d%H%M%S")
+                file_name = file_name[:-9]
+                output_path = f"outputs/translations/{file_name}-{timestamp}"
+
+                write_file(subtitle, f"{output_path}.srt")
+
+            elif file_ext == ".vtt":
+                parsed_dicts = parse_vtt(file_path=file_path)
+                total_progress = len(parsed_dicts)
+                for index, dic in enumerate(parsed_dicts):
+                    progress(index / total_progress, desc="Translating..")
+                    translated_text = self.translate_text(dic["sentence"])
+                    dic["sentence"] = translated_text
+                subtitle = get_serialized_vtt(parsed_dicts)
+
+                timestamp = datetime.now().strftime("%m%d%H%M%S")
+                file_name = file_name[:-9]
+                output_path = f"outputs/translations/{file_name}-{timestamp}"
+
+                write_file(subtitle, f"{output_path}.vtt")
+
+            files_info[file_name] = subtitle
+
+        total_result = ''
+        for file_name, subtitle in files_info.items():
+            total_result += '------------------------------------\n'
+            total_result += f'{file_name}\n\n'
+            total_result += f'{subtitle}'
+
+        return f"Done! Subtitle is in the outputs/translations folder.\n\n{total_result}"
+
+
+# Human-readable language names mapped to the FLORES-200 codes NLLB expects.
+NLLB_AVAILABLE_LANGS = {
+    "Acehnese (Arabic script)": "ace_Arab",
+    "Acehnese (Latin script)": "ace_Latn",
+    "Mesopotamian Arabic": "acm_Arab",
+    "Ta’izzi-Adeni Arabic": "acq_Arab",
+    "Tunisian Arabic": "aeb_Arab",
+    "Afrikaans": "afr_Latn",
+    "South Levantine Arabic": "ajp_Arab",
+    "Akan": "aka_Latn",
+    "Amharic": "amh_Ethi",
+    "North Levantine Arabic": "apc_Arab",
+    "Modern Standard Arabic": "arb_Arab",
+    "Modern Standard Arabic (Romanized)": "arb_Latn",
+    "Najdi Arabic": "ars_Arab",
+    "Moroccan Arabic": "ary_Arab",
+    "Egyptian Arabic": "arz_Arab",
+    "Assamese": "asm_Beng",
+    "Asturian": "ast_Latn",
+    "Awadhi": "awa_Deva",
+    "Central Aymara": "ayr_Latn",
+    "South Azerbaijani": "azb_Arab",
+    "North Azerbaijani": "azj_Latn",
+    "Bashkir": "bak_Cyrl",
+    "Bambara": "bam_Latn",
+    "Balinese": "ban_Latn",
+    "Belarusian": "bel_Cyrl",
+    "Bemba": "bem_Latn",
+    "Bengali": "ben_Beng",
+    "Bhojpuri": "bho_Deva",
+    "Banjar (Arabic script)": "bjn_Arab",
+    "Banjar (Latin script)": "bjn_Latn",
+    "Standard Tibetan": "bod_Tibt",
+    "Bosnian": "bos_Latn",
+    "Buginese": "bug_Latn",
+    "Bulgarian": "bul_Cyrl",
+    "Catalan": "cat_Latn",
+    "Cebuano": "ceb_Latn",
+    "Czech": "ces_Latn",
+    "Chokwe": "cjk_Latn",
+    "Central Kurdish": "ckb_Arab",
+    "Crimean Tatar": "crh_Latn",
+    "Welsh": "cym_Latn",
+    "Danish": "dan_Latn",
+    "German": "deu_Latn",
+    "Southwestern Dinka": "dik_Latn",
+    "Dyula": "dyu_Latn",
+    "Dzongkha": "dzo_Tibt",
+    "Greek": "ell_Grek",
+    "English": "eng_Latn",
+    "Esperanto": "epo_Latn",
+    "Estonian": "est_Latn",
+    "Basque": "eus_Latn",
+    "Ewe": "ewe_Latn",
+    "Faroese": "fao_Latn",
+    "Fijian": "fij_Latn",
+    "Finnish": "fin_Latn",
+    "Fon": "fon_Latn",
+    "French": "fra_Latn",
+    "Friulian": "fur_Latn",
+    "Nigerian Fulfulde": "fuv_Latn",
+    "Scottish Gaelic": "gla_Latn",
+    "Irish": "gle_Latn",
+    "Galician": "glg_Latn",
+    "Guarani": "grn_Latn",
+    "Gujarati": "guj_Gujr",
+    "Haitian Creole": "hat_Latn",
+    "Hausa": "hau_Latn",
+    "Hebrew": "heb_Hebr",
+    "Hindi": "hin_Deva",
+    "Chhattisgarhi": "hne_Deva",
+    "Croatian": "hrv_Latn",
+    "Hungarian": "hun_Latn",
+    "Armenian": "hye_Armn",
+    "Igbo": "ibo_Latn",
+    "Ilocano": "ilo_Latn",
+    "Indonesian": "ind_Latn",
+    "Icelandic": "isl_Latn",
+    "Italian": "ita_Latn",
+    "Javanese": "jav_Latn",
+    "Japanese": "jpn_Jpan",
+    "Kabyle": "kab_Latn",
+    "Jingpho": "kac_Latn",
+    "Kamba": "kam_Latn",
+    "Kannada": "kan_Knda",
+    "Kashmiri (Arabic script)": "kas_Arab",
+    "Kashmiri (Devanagari script)": "kas_Deva",
+    "Georgian": "kat_Geor",
+    "Central Kanuri (Arabic script)": "knc_Arab",
+    "Central Kanuri (Latin script)": "knc_Latn",
+    "Kazakh": "kaz_Cyrl",
+    "Kabiyè": "kbp_Latn",
+    "Kabuverdianu": "kea_Latn",
+    "Khmer": "khm_Khmr",
+    "Kikuyu": "kik_Latn",
+    "Kinyarwanda": "kin_Latn",
+    "Kyrgyz": "kir_Cyrl",
+    "Kimbundu": "kmb_Latn",
+    "Northern Kurdish": "kmr_Latn",
+    "Kikongo": "kon_Latn",
+    "Korean": "kor_Hang",
+    "Lao": "lao_Laoo",
+    "Ligurian": "lij_Latn",
+    "Limburgish": "lim_Latn",
+    "Lingala": "lin_Latn",
+    "Lithuanian": "lit_Latn",
+    "Lombard": "lmo_Latn",
+    "Latgalian": "ltg_Latn",
+    "Luxembourgish": "ltz_Latn",
+    "Luba-Kasai": "lua_Latn",
+    "Ganda": "lug_Latn",
+    "Luo": "luo_Latn",
+    "Mizo": "lus_Latn",
+    "Standard Latvian": "lvs_Latn",
+    "Magahi": "mag_Deva",
+    "Maithili": "mai_Deva",
+    "Malayalam": "mal_Mlym",
+    "Marathi": "mar_Deva",
+    "Minangkabau (Arabic script)": "min_Arab",
+    "Minangkabau (Latin script)": "min_Latn",
+    "Macedonian": "mkd_Cyrl",
+    "Plateau Malagasy": "plt_Latn",
+    "Maltese": "mlt_Latn",
+    "Meitei (Bengali script)": "mni_Beng",
+    "Halh Mongolian": "khk_Cyrl",
+    "Mossi": "mos_Latn",
+    "Maori": "mri_Latn",
+    "Burmese": "mya_Mymr",
+    "Dutch": "nld_Latn",
+    "Norwegian Nynorsk": "nno_Latn",
+    "Norwegian Bokmål": "nob_Latn",
+    "Nepali": "npi_Deva",
+    "Northern Sotho": "nso_Latn",
+    "Nuer": "nus_Latn",
+    "Nyanja": "nya_Latn",
+    "Occitan": "oci_Latn",
+    "West Central Oromo": "gaz_Latn",
+    "Odia": "ory_Orya",
+    "Pangasinan": "pag_Latn",
+    "Eastern Panjabi": "pan_Guru",
+    "Papiamento": "pap_Latn",
+    "Western Persian": "pes_Arab",
+    "Polish": "pol_Latn",
+    "Portuguese": "por_Latn",
+    "Dari": "prs_Arab",
+    "Southern Pashto": "pbt_Arab",
+    "Ayacucho Quechua": "quy_Latn",
+    "Romanian": "ron_Latn",
+    "Rundi": "run_Latn",
+    "Russian": "rus_Cyrl",
+    "Sango": "sag_Latn",
+    "Sanskrit": "san_Deva",
+    "Santali": "sat_Olck",
+    "Sicilian": "scn_Latn",
+    "Shan": "shn_Mymr",
+    "Sinhala": "sin_Sinh",
+    "Slovak": "slk_Latn",
+    "Slovenian": "slv_Latn",
+    "Samoan": "smo_Latn",
+    "Shona": "sna_Latn",
+    "Sindhi": "snd_Arab",
+    "Somali": "som_Latn",
+    "Southern Sotho": "sot_Latn",
+    "Spanish": "spa_Latn",
+    "Tosk Albanian": "als_Latn",
+    "Sardinian": "srd_Latn",
+    "Serbian": "srp_Cyrl",
+    "Swati": "ssw_Latn",
+    "Sundanese": "sun_Latn",
+    "Swedish": "swe_Latn",
+    "Swahili": "swh_Latn",
+    "Silesian": "szl_Latn",
+    "Tamil": "tam_Taml",
+    "Tatar": "tat_Cyrl",
+    "Telugu": "tel_Telu",
+    "Tajik": "tgk_Cyrl",
+    "Tagalog": "tgl_Latn",
+    "Thai": "tha_Thai",
+    "Tigrinya": "tir_Ethi",
+    "Tamasheq (Latin script)": "taq_Latn",
+    "Tamasheq (Tifinagh script)": "taq_Tfng",
+    "Tok Pisin": "tpi_Latn",
+    "Tswana": "tsn_Latn",
+    "Tsonga": "tso_Latn",
+    "Turkmen": "tuk_Latn",
+    "Tumbuka": "tum_Latn",
+    "Turkish": "tur_Latn",
+    "Twi": "twi_Latn",
+    "Central Atlas Tamazight": "tzm_Tfng",
+    "Uyghur": "uig_Arab",
+    "Ukrainian": "ukr_Cyrl",
+    "Umbundu": "umb_Latn",
+    "Urdu": "urd_Arab",
+    "Northern Uzbek": "uzn_Latn",
+    "Venetian": "vec_Latn",
+    "Vietnamese": "vie_Latn",
+    "Waray": "war_Latn",
+    "Wolof": "wol_Latn",
+    "Xhosa": "xho_Latn",
+    "Eastern Yiddish": "ydd_Hebr",
+    "Yoruba": "yor_Latn",
+    "Yue Chinese": "yue_Hant",
+    "Chinese (Simplified)": "zho_Hans",
+    "Chinese (Traditional)": "zho_Hant",
+    "Standard Malay": "zsm_Latn",
+    "Zulu": "zul_Latn",
+}
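Under the hood this class is the stock `transformers` translation pipeline driven by NLLB's FLORES-200 language codes (the values of `NLLB_AVAILABLE_LANGS`). A minimal, self-contained sketch of the same call path; the model choice and example sentence are arbitrary:

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline

model_name = "facebook/nllb-200-distilled-600M"  # smallest of the three supported models
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

# src_lang/tgt_lang take FLORES-200 codes, e.g. "eng_Latn" for English.
translator = pipeline("translation",
                      model=model,
                      tokenizer=tokenizer,
                      src_lang="eng_Latn",
                      tgt_lang="kor_Hang",
                      device=-1)  # -1 = CPU, 0 = first CUDA GPU

print(translator("Subtitles are translated one line at a time.")[0]["translation_text"])
```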
outputs/translations/outputs for translation are saved here.txt ADDED
File without changes
ui/htmls.py CHANGED
@@ -39,4 +39,59 @@ CSS = """
39
 
40
  MARKDOWN = """
41
  ### [Whisper Web-UI](https://github.com/jhj0517/Whsiper-WebUI)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
42
  """
 
39
 
40
  MARKDOWN = """
41
  ### [Whisper Web-UI](https://github.com/jhj0517/Whsiper-WebUI)
42
+ """
43
+
44
+
45
+ NLLB_VRAM_TABLE = """
46
+ <!DOCTYPE html>
47
+ <html lang="en">
48
+ <head>
49
+ <meta charset="UTF-8">
50
+ <meta name="viewport" content="width=device-width, initial-scale=1.0">
51
+ <style>
52
+ table {
53
+ border-collapse: collapse;
54
+ width: 100%;
55
+ }
56
+ th, td {
57
+ border: 1px solid #dddddd;
58
+ text-align: left;
59
+ padding: 8px;
60
+ }
61
+ th {
62
+ background-color: #f2f2f2;
63
+ }
64
+ </style>
65
+ </head>
66
+ <body>
67
+
68
+ <details>
69
+ <summary>VRAM usage for each model</summary>
70
+ <table>
71
+ <thead>
72
+ <tr>
73
+ <th>Model name</th>
74
+ <th>Required VRAM</th>
75
+ </tr>
76
+ </thead>
77
+ <tbody>
78
+ <tr>
79
+ <td>nllb-200-3.3B</td>
80
+ <td>~16GB</td>
81
+ </tr>
82
+ <tr>
83
+ <td>nllb-200-1.3B</td>
84
+ <td>~8GB</td>
85
+ </tr>
86
+ <tr>
87
+ <td>nllb-200-distilled-600M</td>
88
+ <td>~4GB</td>
89
+ </tr>
90
+ </tbody>
91
+ </table>
92
+ <p><strong>Note:</strong> Be mindful of your VRAM! The table above provides an approximate VRAM usage for each model.</p>
93
+ </details>
94
+
95
+ </body>
96
+ </html>
97
  """