jhj0517 committed on
Commit
8fd7f62
·
1 Parent(s): b5773e7

Add docstring

Browse files
Files changed (1) hide show
  1. modules/uvr/music_separator.py +16 -1
modules/uvr/music_separator.py CHANGED
@@ -62,8 +62,21 @@ class MusicSeparator:
62
  device: Optional[str] = None,
63
  segment_size: int = 256,
64
  save_file: bool = False,
65
- progress: gr.Progress = gr.Progress()):
 
 
66
 
 
 
 
 
 
 
 
 
 
 
 
67
  if isinstance(audio, str):
68
  self.audio_info = torchaudio.info(audio)
69
  sample_rate = self.audio_info.sample_rate
@@ -106,9 +119,11 @@ class MusicSeparator:
106
 
107
  @staticmethod
108
  def get_device():
 
109
  return "cuda" if torch.cuda.is_available() else "cpu"
110
 
111
  def offload(self):
 
112
  if self.model is not None:
113
  del self.model
114
  self.model = None
 
62
  device: Optional[str] = None,
63
  segment_size: int = 256,
64
  save_file: bool = False,
65
+ progress: gr.Progress = gr.Progress()) -> tuple[np.ndarray, np.ndarray]:
66
+ """
67
+ Separate the background music from the audio.
68
 
69
+ Args:
70
+ audio (Union[str, np.ndarray]): Audio path or numpy array.
71
+ model_name (str): Model name.
72
+ device (str): Device to use for the model.
73
+ segment_size (int): Segment size for the prediction.
74
+ save_file (bool): Whether to save the separated audio to output path or not.
75
+ progress (gr.Progress): Gradio progress indicator.
76
+
77
+ Returns:
78
+ tuple[np.ndarray, np.ndarray]: Instrumental and vocals numpy arrays.
79
+ """
80
  if isinstance(audio, str):
81
  self.audio_info = torchaudio.info(audio)
82
  sample_rate = self.audio_info.sample_rate
 
119
 
120
  @staticmethod
121
  def get_device():
122
+ """Get device for the model"""
123
  return "cuda" if torch.cuda.is_available() else "cpu"
124
 
125
  def offload(self):
126
+ """Offload the model and free up the memory"""
127
  if self.model is not None:
128
  del self.model
129
  self.model = None