DurreSudoku committed on
Commit 6f3415e · verified · 1 Parent(s): b9100cb

changed model path

Files changed (1)
  1. app.py +42 -42
app.py CHANGED
@@ -1,42 +1,42 @@
- import gradio as gr
- import torch
- from functions import *
- from unet import UNet
- from custom_scaler import min_max_scaler
-
- model = UNet()
- model_state_dict = torch.load("huggingface/model.pth", map_location="cpu")
- model.load_state_dict(model_state_dict["model_state_dict"])
-
- scaler = min_max_scaler()
- scaler.fit()
-
- with gr.Blocks() as demo:
-     with gr.Row():
-         gr.Markdown(
- """
- # Speech enhancement demonstration
-
- Hello!
-
- This is a demo for a speech enhancement model trained to reduce background noise to ensure intelligibility of a single speaker.
-
- Feel free to upload your own audio file or try one of our example files to see how it works!
-
- """
-         )
-     with gr.Row():
-         with gr.Column():
-             audio_path = gr.Audio(sources=["upload", "microphone"], type="filepath", label="Upload your song here", format="wav")
-         with gr.Column():
-             enhanced_audio = gr.Audio(sources=None, label="Enhanced audio will be found here", format="wav")
-     with gr.Row():
-         files = gr.FileExplorer(label="Example files", file_count="single", root_dir=r"examples", interactive=True)
-         files.change(fn=return_input, inputs=files, outputs=audio_path)
-         files.change(fn=return_input, inputs=None, outputs=enhanced_audio)
-     with gr.Row():
-         submit_audio = gr.Button(value="Submit audio for enhancement")
-         submit_audio.click(fn=lambda x: predict(x, model, scaler), inputs=audio_path, outputs=enhanced_audio, trigger_mode="once")
-
- demo.launch(share=True)
-
 
+ import gradio as gr
+ import torch
+ from functions import *
+ from unet import UNet
+ from custom_scaler import min_max_scaler
+
+ model = UNet()
+ model_state_dict = torch.load(r"model.pth", map_location="cpu")
+ model.load_state_dict(model_state_dict["model_state_dict"])
+
+ scaler = min_max_scaler()
+ scaler.fit()
+
+ with gr.Blocks() as demo:
+     with gr.Row():
+         gr.Markdown(
+ """
+ # Speech enhancement demonstration
+
+ Hello!
+
+ This is a demo for a speech enhancement model trained to reduce background noise to ensure intelligibility of a single speaker.
+
+ Feel free to upload your own audio file or try one of our example files to see how it works!
+
+ """
+         )
+     with gr.Row():
+         with gr.Column():
+             audio_path = gr.Audio(sources=["upload", "microphone"], type="filepath", label="Upload your song here", format="wav")
+         with gr.Column():
+             enhanced_audio = gr.Audio(sources=None, label="Enhanced audio will be found here", format="wav")
+     with gr.Row():
+         files = gr.FileExplorer(label="Example files", file_count="single", root_dir=r"examples", interactive=True)
+         files.change(fn=return_input, inputs=files, outputs=audio_path)
+         files.change(fn=return_input, inputs=None, outputs=enhanced_audio)
+     with gr.Row():
+         submit_audio = gr.Button(value="Submit audio for enhancement")
+         submit_audio.click(fn=lambda x: predict(x, model, scaler), inputs=audio_path, outputs=enhanced_audio, trigger_mode="once")
+
+ demo.launch(share=True)
+
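
The only functional change in this commit is the checkpoint path passed to torch.load, from "huggingface/model.pth" to "model.pth", i.e. a path relative to the process's current working directory. Below is a minimal sketch of a slightly more robust variant that resolves the checkpoint relative to app.py itself, so loading does not depend on where the app is launched from. The Path(__file__) resolution and the model.eval() call are illustrative additions, not part of the commit; the file name model.pth and the "model_state_dict" checkpoint key are taken from the diff.

# Hedged sketch (not part of the commit): resolve the checkpoint relative to
# app.py instead of the current working directory. "model.pth" and the
# "model_state_dict" key come from the diff; Path(__file__) and model.eval()
# are illustrative additions.
from pathlib import Path

import torch

from unet import UNet  # same model class imported in app.py

CHECKPOINT_PATH = Path(__file__).resolve().parent / "model.pth"

model = UNet()
checkpoint = torch.load(CHECKPOINT_PATH, map_location="cpu")
model.load_state_dict(checkpoint["model_state_dict"])
model.eval()  # demo only runs inference, so disable dropout/batch-norm updates

If the checkpoint were hosted in a separate model repository rather than inside the Space, huggingface_hub.hf_hub_download(repo_id=..., filename="model.pth") would be the usual way to fetch it, but that is beyond what this commit does.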