jhj0517 committed · Commit 633c360 (unverified) · 2 parents: 5f094f9 207096c

Merge pull request #269 from jhj0517/feature/add-bgm-tab

app.py CHANGED
@@ -132,7 +132,7 @@ class App:
             nb_batch_size = gr.Number(label="Batch Size", value=whisper_params["batch_size"], precision=0)

             with gr.Accordion("BGM Separation", open=False):
-                cb_bgm_separation = gr.Checkbox(label="Enable BGM separation", value=uvr_params["is_separate_bgm"],
+                cb_bgm_separation = gr.Checkbox(label="Enable BGM Separation Filter", value=uvr_params["is_separate_bgm"],
                                                 interactive=True)
                 dd_uvr_device = gr.Dropdown(label="Device", value=self.whisper_inf.music_separator.device,
                                             choices=self.whisper_inf.music_separator.available_devices)
@@ -199,6 +199,7 @@ class App:
         translation_params = self.default_params["translation"]
         deepl_params = translation_params["deepl"]
         nllb_params = translation_params["nllb"]
+        uvr_params = self.default_params["bgm_separation"]

         with self.app:
             with gr.Row():
@@ -344,6 +345,39 @@ class App:
                                   inputs=None,
                                   outputs=None)

+            with gr.TabItem("BGM Separation"):
+                files_audio = gr.Files(type="filepath", label="Upload Audio Files to separate background music")
+                dd_uvr_device = gr.Dropdown(label="Device", value=self.whisper_inf.music_separator.device,
+                                            choices=self.whisper_inf.music_separator.available_devices)
+                dd_uvr_model_size = gr.Dropdown(label="Model", value=uvr_params["model_size"],
+                                                choices=self.whisper_inf.music_separator.available_models)
+                nb_uvr_segment_size = gr.Number(label="Segment Size", value=uvr_params["segment_size"], precision=0)
+                cb_uvr_save_file = gr.Checkbox(label="Save separated files to output",
+                                               value=True, visible=False)
+                btn_run = gr.Button("SEPARATE BACKGROUND MUSIC", variant="primary")
+                with gr.Column():
+                    with gr.Row():
+                        ad_instrumental = gr.Audio(label="Instrumental", scale=8)
+                        btn_open_instrumental_folder = gr.Button('📂', scale=1)
+                    with gr.Row():
+                        ad_vocals = gr.Audio(label="Vocals", scale=8)
+                        btn_open_vocals_folder = gr.Button('📂', scale=1)
+
+                btn_run.click(fn=self.whisper_inf.music_separator.separate_files,
+                              inputs=[files_audio, dd_uvr_model_size, dd_uvr_device, nb_uvr_segment_size,
+                                      cb_uvr_save_file],
+                              outputs=[ad_instrumental, ad_vocals])
+                btn_open_instrumental_folder.click(inputs=None,
+                                                   outputs=None,
+                                                   fn=lambda: self.open_folder(os.path.join(
+                                                       self.args.output_dir, "UVR", "instrumental"
+                                                   )))
+                btn_open_vocals_folder.click(inputs=None,
+                                             outputs=None,
+                                             fn=lambda: self.open_folder(os.path.join(
+                                                 self.args.output_dir, "UVR", "vocals"
+                                             )))
+
         # Launch the app with optional gradio settings
         args = self.args

@@ -363,7 +397,8 @@ class App:
         if os.path.exists(folder_path):
             os.system(f"start {folder_path}")
         else:
-            print(f"The folder {folder_path} does not exist.")
+            os.makedirs(folder_path, exist_ok=True)
+            print(f"The directory path {folder_path} has been newly created.")

     @staticmethod
     def on_change_models(model_size: str):
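For reference, the new tab boils down to a single click-handler wiring. Below is a minimal, self-contained sketch of that pattern; fake_separate is a hypothetical stand-in for MusicSeparator.separate_files, and the component names and dropdown choices are illustrative rather than the app's actual values.

import gradio as gr

def fake_separate(files, model_name, device, segment_size, save_file):
    # Stand-in for MusicSeparator.separate_files(): a real implementation
    # would run the UVR model; here we echo the first upload into both
    # outputs so the gr.Audio components have something to display.
    first = files[0] if files else None
    return first, first

with gr.Blocks() as demo:
    files_audio = gr.Files(type="filepath", label="Audio Files")
    dd_model = gr.Dropdown(label="Model", choices=["UVR-MDX-NET-Inst_HQ_4"],
                           value="UVR-MDX-NET-Inst_HQ_4")
    dd_device = gr.Dropdown(label="Device", choices=["cpu", "cuda"], value="cpu")
    nb_segment = gr.Number(label="Segment Size", value=256, precision=0)
    cb_save = gr.Checkbox(value=True, visible=False)  # hidden, always saves
    btn_run = gr.Button("SEPARATE BACKGROUND MUSIC", variant="primary")
    ad_instrumental = gr.Audio(label="Instrumental")
    ad_vocals = gr.Audio(label="Vocals")

    # Same input/output ordering as the click handler added in app.py.
    btn_run.click(fn=fake_separate,
                  inputs=[files_audio, dd_model, dd_device, nb_segment, cb_save],
                  outputs=[ad_instrumental, ad_vocals])

demo.launch()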
configs/default_parameters.yaml CHANGED
@@ -48,7 +48,7 @@ bgm_separation:
   is_separate_bgm: false
   model_size: "UVR-MDX-NET-Inst_HQ_4"
   segment_size: 256
-  save_file: true
+  save_file: false

 translation:
   deepl:
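With this flip, a transcription run no longer writes separated stems to disk by default; the new BGM Separation tab still saves them because its hidden checkbox passes save_file=True explicitly. A small sketch of how these defaults are consumed, reading the config the same way app.py's default_params plausibly does (the loading mechanism is an assumption, not shown in this diff):

import yaml

# Load the same defaults file the app reads at startup.
with open("configs/default_parameters.yaml", "r", encoding="utf-8") as f:
    default_params = yaml.safe_load(f)

uvr_params = default_params["bgm_separation"]
print(uvr_params["save_file"])  # False after this change: the transcription
                                # pipeline does not persist stems unless asked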
modules/uvr/music_separator.py CHANGED
@@ -1,4 +1,4 @@
-from typing import Optional, Union
+from typing import Optional, Union, List, Dict
 import numpy as np
 import torchaudio
 import soundfile as sf
@@ -9,10 +9,10 @@ import gradio as gr
 from datetime import datetime

 from uvr.models import MDX, Demucs, VrNetwork, MDXC
-from modules.utils.files_manager import is_video
+from modules.utils.paths import DEFAULT_PARAMETERS_CONFIG_PATH
+from modules.utils.files_manager import load_yaml, save_yaml, is_video
 from modules.diarize.audio_loader import load_audio

-
 class MusicSeparator:
     def __init__(self,
                  model_dir: Optional[str] = None,
@@ -67,7 +67,7 @@ class MusicSeparator:
                  device: Optional[str] = None,
                  segment_size: int = 256,
                  save_file: bool = False,
-                 progress: gr.Progress = gr.Progress()) -> tuple[np.ndarray, np.ndarray]:
+                 progress: gr.Progress = gr.Progress()) -> tuple[np.ndarray, np.ndarray, List]:
         """
         Separate the background music from the audio.

@@ -80,10 +80,14 @@ class MusicSeparator:
             progress (gr.Progress): Gradio progress indicator.

         Returns:
-            tuple[np.ndarray, np.ndarray]: Instrumental and vocals numpy arrays.
+            A tuple of
+            np.ndarray: Instrumental audio as a numpy array.
+            np.ndarray: Vocals audio as a numpy array.
+            file_paths: List of file paths where the separated audio is saved. Empty when save_file is False.
         """
         if isinstance(audio, str):
             output_filename, ext = os.path.basename(audio), ".wav"
+            output_filename, orig_ext = os.path.splitext(output_filename)

             if is_video(audio):
                 audio = load_audio(audio)
@@ -118,13 +122,37 @@ class MusicSeparator:
         result = self.model(audio)
         instrumental, vocals = result["instrumental"].T, result["vocals"].T

+        file_paths = []
         if save_file:
             instrumental_output_path = os.path.join(self.output_dir, "instrumental", f"{output_filename}-instrumental{ext}")
             vocals_output_path = os.path.join(self.output_dir, "vocals", f"{output_filename}-vocals{ext}")
             sf.write(instrumental_output_path, instrumental, sample_rate, format="WAV")
             sf.write(vocals_output_path, vocals, sample_rate, format="WAV")
-
-        return instrumental, vocals
+            file_paths += [instrumental_output_path, vocals_output_path]
+
+        return instrumental, vocals, file_paths
+
+    def separate_files(self,
+                       files: List,
+                       model_name: str,
+                       device: Optional[str] = None,
+                       segment_size: int = 256,
+                       save_file: bool = True,
+                       progress: gr.Progress = gr.Progress()) -> List[str]:
+        """Separate the background music from multiple audio files. Returns only the last file's
+        instrumental and vocals paths, for display in gr.Audio()."""
+        self.cache_parameters(model_size=model_name, segment_size=segment_size)
+
+        for file_path in files:
+            instrumental, vocals, file_paths = self.separate(
+                audio=file_path,
+                model_name=model_name,
+                device=device,
+                segment_size=segment_size,
+                save_file=save_file,
+                progress=progress
+            )
+        return file_paths

     @staticmethod
     def get_device():
@@ -140,3 +168,16 @@ class MusicSeparator:
         torch.cuda.empty_cache()
         gc.collect()
         self.audio_info = None
+
+    @staticmethod
+    def cache_parameters(model_size: str,
+                         segment_size: int):
+        cached_params = load_yaml(DEFAULT_PARAMETERS_CONFIG_PATH)
+        cached_uvr_params = cached_params["bgm_separation"]
+        uvr_params_to_cache = {
+            "model_size": model_size,
+            "segment_size": segment_size
+        }
+        cached_uvr_params = {**cached_uvr_params, **uvr_params_to_cache}
+        cached_params["bgm_separation"] = cached_uvr_params
+        save_yaml(cached_params, DEFAULT_PARAMETERS_CONFIG_PATH)
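cache_parameters persists the last-used model and segment size back into configs/default_parameters.yaml via load_yaml and save_yaml from modules.utils.files_manager. Those helpers are not part of this diff; the following is only a minimal sketch of the round-trip behavior being assumed, written with PyYAML, and the real implementations may differ.

import yaml

def load_yaml(path: str) -> dict:
    # Parse the YAML config into a plain dict.
    with open(path, "r", encoding="utf-8") as f:
        return yaml.safe_load(f)

def save_yaml(data: dict, path: str) -> None:
    # Write the (possibly updated) dict back out as YAML.
    with open(path, "w", encoding="utf-8") as f:
        yaml.safe_dump(data, f, allow_unicode=True)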
modules/whisper/whisper_base.py CHANGED
@@ -111,7 +111,7 @@ class WhisperBase(ABC):
             params.lang = language_code_dict[params.lang]

         if params.is_bgm_separate:
-            music, audio = self.music_separator.separate(
+            music, audio, _ = self.music_separator.separate(
                 audio=audio,
                 model_name=params.uvr_model_size,
                 device=params.uvr_device,
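Because separate() now returns a third element, every call site must unpack three values. The transcription pipeline discards the saved paths with _, while a caller that wants them keeps the third value, roughly as below; the constructor arguments and file paths here are illustrative assumptions, not values taken from this diff.

from modules.uvr.music_separator import MusicSeparator

# Hypothetical standalone use; model_dir/output_dir are placeholder paths.
separator = MusicSeparator(model_dir="models/UVR", output_dir="outputs/UVR")
instrumental, vocals, file_paths = separator.separate(
    audio="input.wav",  # illustrative input file
    model_name="UVR-MDX-NET-Inst_HQ_4",
    segment_size=256,
    save_file=True,
)
# file_paths now holds the two written stems, e.g.
# ["outputs/UVR/instrumental/input-instrumental.wav",
#  "outputs/UVR/vocals/input-vocals.wav"]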