mrmocciai committed on
Commit 9c2e075 · 1 Parent(s): 514faed

Delete gui_v1.py

Files changed (1)
  1. gui_v1.py +0 -661
gui_v1.py DELETED
@@ -1,661 +0,0 @@
-import os, sys
-
-if sys.platform == "darwin":
-    os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
-
-now_dir = os.getcwd()
-sys.path.append(now_dir)
-import multiprocessing
-
-
-class Harvest(multiprocessing.Process):
-    def __init__(self, inp_q, opt_q):
-        multiprocessing.Process.__init__(self)
-        self.inp_q = inp_q
-        self.opt_q = opt_q
-
-    def run(self):
-        import numpy as np, pyworld
-
-        while 1:
-            idx, x, res_f0, n_cpu, ts = self.inp_q.get()
-            f0, t = pyworld.harvest(
-                x.astype(np.double),
-                fs=16000,
-                f0_ceil=1100,
-                f0_floor=50,
-                frame_period=10,
-            )
-            res_f0[idx] = f0
-            if len(res_f0.keys()) >= n_cpu:
-                self.opt_q.put(ts)
-
-
-if __name__ == "__main__":
-    from multiprocessing import Queue
-    from queue import Empty
-    import numpy as np
-    import multiprocessing
-    import traceback, re
-    import json
-    import PySimpleGUI as sg
-    import sounddevice as sd
-    import noisereduce as nr
-    from multiprocessing import cpu_count
-    import librosa, torch, time, threading
-    import torch.nn.functional as F
-    import torchaudio.transforms as tat
-    from i18n import I18nAuto
-
-    i18n = I18nAuto()
-    device = torch.device(
-        "cuda"
-        if torch.cuda.is_available()
-        else ("mps" if torch.backends.mps.is_available() else "cpu")
-    )
-    current_dir = os.getcwd()
-    inp_q = Queue()
-    opt_q = Queue()
-    n_cpu = min(cpu_count(), 8)
-    for _ in range(n_cpu):
-        Harvest(inp_q, opt_q).start()
-    from rvc_for_realtime import RVC
-
-    class GUIConfig:
-        def __init__(self) -> None:
-            self.pth_path: str = ""
-            self.index_path: str = ""
-            self.pitch: int = 12
-            self.samplerate: int = 40000
-            self.block_time: float = 1.0  # s
-            self.buffer_num: int = 1
-            self.threhold: int = -30
-            self.crossfade_time: float = 0.08
-            self.extra_time: float = 0.04
-            self.I_noise_reduce = False
-            self.O_noise_reduce = False
-            self.index_rate = 0.3
-            self.n_cpu = min(n_cpu, 8)
-            self.f0method = "harvest"
-            self.sg_input_device = ""
-            self.sg_output_device = ""
-
-    class GUI:
-        def __init__(self) -> None:
-            self.config = GUIConfig()
-            self.flag_vc = False
-
-            self.launcher()
-
-        def load(self):
-            input_devices, output_devices, _, _ = self.get_devices()
-            try:
-                with open("values1.json", "r") as j:
-                    data = json.load(j)
-                    data["pm"] = data["f0method"] == "pm"
-                    data["harvest"] = data["f0method"] == "harvest"
-                    data["crepe"] = data["f0method"] == "crepe"
-                    data["rmvpe"] = data["f0method"] == "rmvpe"
-            except:
-                with open("values1.json", "w") as j:
-                    data = {
-                        "pth_path": " ",
-                        "index_path": " ",
-                        "sg_input_device": input_devices[sd.default.device[0]],
-                        "sg_output_device": output_devices[sd.default.device[1]],
-                        "threhold": "-45",
-                        "pitch": "0",
-                        "index_rate": "0",
-                        "block_time": "1",
-                        "crossfade_length": "0.04",
-                        "extra_time": "1",
-                        "f0method": "rmvpe",
-                    }
-            return data
-
-        def launcher(self):
-            data = self.load()
-            sg.theme("LightBlue3")
-            input_devices, output_devices, _, _ = self.get_devices()
-            layout = [
-                [
-                    sg.Frame(
-                        title=i18n("加载模型"),
-                        layout=[
-                            [
-                                sg.Input(
-                                    default_text=data.get("pth_path", ""),
-                                    key="pth_path",
-                                ),
-                                sg.FileBrowse(
-                                    i18n("选择.pth文件"),
-                                    initial_folder=os.path.join(os.getcwd(), "weights"),
-                                    file_types=((". pth"),),
-                                ),
-                            ],
-                            [
-                                sg.Input(
-                                    default_text=data.get("index_path", ""),
-                                    key="index_path",
-                                ),
-                                sg.FileBrowse(
-                                    i18n("选择.index文件"),
-                                    initial_folder=os.path.join(os.getcwd(), "logs"),
-                                    file_types=((". index"),),
-                                ),
-                            ],
-                        ],
-                    )
-                ],
-                [
-                    sg.Frame(
-                        layout=[
-                            [
-                                sg.Text(i18n("输入设备")),
-                                sg.Combo(
-                                    input_devices,
-                                    key="sg_input_device",
-                                    default_value=data.get("sg_input_device", ""),
-                                ),
-                            ],
-                            [
-                                sg.Text(i18n("输出设备")),
-                                sg.Combo(
-                                    output_devices,
-                                    key="sg_output_device",
-                                    default_value=data.get("sg_output_device", ""),
-                                ),
-                            ],
-                            [sg.Button(i18n("重载设备列表"), key="reload_devices")],
-                        ],
-                        title=i18n("音频设备(请使用同种类驱动)"),
-                    )
-                ],
-                [
-                    sg.Frame(
-                        layout=[
-                            [
-                                sg.Text(i18n("响应阈值")),
-                                sg.Slider(
-                                    range=(-60, 0),
-                                    key="threhold",
-                                    resolution=1,
-                                    orientation="h",
-                                    default_value=data.get("threhold", ""),
-                                ),
-                            ],
-                            [
-                                sg.Text(i18n("音调设置")),
-                                sg.Slider(
-                                    range=(-24, 24),
-                                    key="pitch",
-                                    resolution=1,
-                                    orientation="h",
-                                    default_value=data.get("pitch", ""),
-                                ),
-                            ],
-                            [
-                                sg.Text(i18n("Index Rate")),
-                                sg.Slider(
-                                    range=(0.0, 1.0),
-                                    key="index_rate",
-                                    resolution=0.01,
-                                    orientation="h",
-                                    default_value=data.get("index_rate", ""),
-                                ),
-                            ],
-                            [
-                                sg.Text(i18n("音高算法")),
-                                sg.Radio(
-                                    "pm",
-                                    "f0method",
-                                    key="pm",
-                                    default=data.get("pm", "") == True,
-                                ),
-                                sg.Radio(
-                                    "harvest",
-                                    "f0method",
-                                    key="harvest",
-                                    default=data.get("harvest", "") == True,
-                                ),
-                                sg.Radio(
-                                    "crepe",
-                                    "f0method",
-                                    key="crepe",
-                                    default=data.get("crepe", "") == True,
-                                ),
-                                sg.Radio(
-                                    "rmvpe",
-                                    "f0method",
-                                    key="rmvpe",
-                                    default=data.get("rmvpe", "") == True,
-                                ),
-                            ],
-                        ],
-                        title=i18n("常规设置"),
-                    ),
-                    sg.Frame(
-                        layout=[
-                            [
-                                sg.Text(i18n("采样长度")),
-                                sg.Slider(
-                                    range=(0.12, 2.4),
-                                    key="block_time",
-                                    resolution=0.03,
-                                    orientation="h",
-                                    default_value=data.get("block_time", ""),
-                                ),
-                            ],
-                            [
-                                sg.Text(i18n("harvest进程数")),
-                                sg.Slider(
-                                    range=(1, n_cpu),
-                                    key="n_cpu",
-                                    resolution=1,
-                                    orientation="h",
-                                    default_value=data.get(
-                                        "n_cpu", min(self.config.n_cpu, n_cpu)
-                                    ),
-                                ),
-                            ],
-                            [
-                                sg.Text(i18n("淡入淡出长度")),
-                                sg.Slider(
-                                    range=(0.01, 0.15),
-                                    key="crossfade_length",
-                                    resolution=0.01,
-                                    orientation="h",
-                                    default_value=data.get("crossfade_length", ""),
-                                ),
-                            ],
-                            [
-                                sg.Text(i18n("额外推理时长")),
-                                sg.Slider(
-                                    range=(0.05, 3.00),
-                                    key="extra_time",
-                                    resolution=0.01,
-                                    orientation="h",
-                                    default_value=data.get("extra_time", ""),
-                                ),
-                            ],
-                            [
-                                sg.Checkbox(i18n("输入降噪"), key="I_noise_reduce"),
-                                sg.Checkbox(i18n("输出降噪"), key="O_noise_reduce"),
-                            ],
-                        ],
-                        title=i18n("性能设置"),
-                    ),
-                ],
-                [
-                    sg.Button(i18n("开始音频转换"), key="start_vc"),
-                    sg.Button(i18n("停止音频转换"), key="stop_vc"),
-                    sg.Text(i18n("推理时间(ms):")),
-                    sg.Text("0", key="infer_time"),
-                ],
-            ]
-            self.window = sg.Window("RVC - GUI", layout=layout)
-            self.event_handler()
-
-        def event_handler(self):
-            while True:
-                event, values = self.window.read()
-                if event == sg.WINDOW_CLOSED:
-                    self.flag_vc = False
-                    exit()
-                if event == "reload_devices":
-                    prev_input = self.window["sg_input_device"].get()
-                    prev_output = self.window["sg_output_device"].get()
-                    input_devices, output_devices, _, _ = self.get_devices(update=True)
-                    if prev_input not in input_devices:
-                        self.config.sg_input_device = input_devices[0]
-                    else:
-                        self.config.sg_input_device = prev_input
-                    self.window["sg_input_device"].Update(values=input_devices)
-                    self.window["sg_input_device"].Update(
-                        value=self.config.sg_input_device
-                    )
-                    if prev_output not in output_devices:
-                        self.config.sg_output_device = output_devices[0]
-                    else:
-                        self.config.sg_output_device = prev_output
-                    self.window["sg_output_device"].Update(values=output_devices)
-                    self.window["sg_output_device"].Update(
-                        value=self.config.sg_output_device
-                    )
-                if event == "start_vc" and self.flag_vc == False:
-                    if self.set_values(values) == True:
-                        print("using_cuda:" + str(torch.cuda.is_available()))
-                        self.start_vc()
-                        settings = {
-                            "pth_path": values["pth_path"],
-                            "index_path": values["index_path"],
-                            "sg_input_device": values["sg_input_device"],
-                            "sg_output_device": values["sg_output_device"],
-                            "threhold": values["threhold"],
-                            "pitch": values["pitch"],
-                            "index_rate": values["index_rate"],
-                            "block_time": values["block_time"],
-                            "crossfade_length": values["crossfade_length"],
-                            "extra_time": values["extra_time"],
-                            "n_cpu": values["n_cpu"],
-                            "f0method": ["pm", "harvest", "crepe", "rmvpe"][
-                                [
-                                    values["pm"],
-                                    values["harvest"],
-                                    values["crepe"],
-                                    values["rmvpe"],
-                                ].index(True)
-                            ],
-                        }
-                        with open("values1.json", "w") as j:
-                            json.dump(settings, j)
-                if event == "stop_vc" and self.flag_vc == True:
-                    self.flag_vc = False
-
-        def set_values(self, values):
-            if len(values["pth_path"].strip()) == 0:
-                sg.popup(i18n("请选择pth文件"))
-                return False
-            if len(values["index_path"].strip()) == 0:
-                sg.popup(i18n("请选择index文件"))
-                return False
-            pattern = re.compile("[^\x00-\x7F]+")
-            if pattern.findall(values["pth_path"]):
-                sg.popup(i18n("pth文件路径不可包含中文"))
-                return False
-            if pattern.findall(values["index_path"]):
-                sg.popup(i18n("index文件路径不可包含中文"))
-                return False
-            self.set_devices(values["sg_input_device"], values["sg_output_device"])
-            self.config.pth_path = values["pth_path"]
-            self.config.index_path = values["index_path"]
-            self.config.threhold = values["threhold"]
-            self.config.pitch = values["pitch"]
-            self.config.block_time = values["block_time"]
-            self.config.crossfade_time = values["crossfade_length"]
-            self.config.extra_time = values["extra_time"]
-            self.config.I_noise_reduce = values["I_noise_reduce"]
-            self.config.O_noise_reduce = values["O_noise_reduce"]
-            self.config.index_rate = values["index_rate"]
-            self.config.n_cpu = values["n_cpu"]
-            self.config.f0method = ["pm", "harvest", "crepe", "rmvpe"][
-                [
-                    values["pm"],
-                    values["harvest"],
-                    values["crepe"],
-                    values["rmvpe"],
-                ].index(True)
-            ]
-            return True
-
-        def start_vc(self):
-            torch.cuda.empty_cache()
-            self.flag_vc = True
-            self.rvc = RVC(
-                self.config.pitch,
-                self.config.pth_path,
-                self.config.index_path,
-                self.config.index_rate,
-                self.config.n_cpu,
-                inp_q,
-                opt_q,
-                device,
-            )
-            self.config.samplerate = self.rvc.tgt_sr
-            self.config.crossfade_time = min(
-                self.config.crossfade_time, self.config.block_time
-            )
-            self.block_frame = int(self.config.block_time * self.config.samplerate)
-            self.crossfade_frame = int(
-                self.config.crossfade_time * self.config.samplerate
-            )
-            self.sola_search_frame = int(0.01 * self.config.samplerate)
-            self.extra_frame = int(self.config.extra_time * self.config.samplerate)
-            self.zc = self.rvc.tgt_sr // 100
-            self.input_wav: np.ndarray = np.zeros(
-                int(
-                    np.ceil(
-                        (
-                            self.extra_frame
-                            + self.crossfade_frame
-                            + self.sola_search_frame
-                            + self.block_frame
-                        )
-                        / self.zc
-                    )
-                    * self.zc
-                ),
-                dtype="float32",
-            )
-            self.output_wav_cache: torch.Tensor = torch.zeros(
-                int(
-                    np.ceil(
-                        (
-                            self.extra_frame
-                            + self.crossfade_frame
-                            + self.sola_search_frame
-                            + self.block_frame
-                        )
-                        / self.zc
-                    )
-                    * self.zc
-                ),
-                device=device,
-                dtype=torch.float32,
-            )
-            self.pitch: np.ndarray = np.zeros(
-                self.input_wav.shape[0] // self.zc,
-                dtype="int32",
-            )
-            self.pitchf: np.ndarray = np.zeros(
-                self.input_wav.shape[0] // self.zc,
-                dtype="float64",
-            )
-            self.output_wav: torch.Tensor = torch.zeros(
-                self.block_frame, device=device, dtype=torch.float32
-            )
-            self.sola_buffer: torch.Tensor = torch.zeros(
-                self.crossfade_frame, device=device, dtype=torch.float32
-            )
-            self.fade_in_window: torch.Tensor = torch.linspace(
-                0.0, 1.0, steps=self.crossfade_frame, device=device, dtype=torch.float32
-            )
-            self.fade_out_window: torch.Tensor = 1 - self.fade_in_window
-            self.resampler = tat.Resample(
-                orig_freq=self.config.samplerate, new_freq=16000, dtype=torch.float32
-            ).to(device)
-            thread_vc = threading.Thread(target=self.soundinput)
-            thread_vc.start()
-
-        def soundinput(self):
-            """
-            Receive audio input.
-            """
-            channels = 1 if sys.platform == "darwin" else 2
-            with sd.Stream(
-                channels=channels,
-                callback=self.audio_callback,
-                blocksize=self.block_frame,
-                samplerate=self.config.samplerate,
-                dtype="float32",
-            ):
-                while self.flag_vc:
-                    time.sleep(self.config.block_time)
-                    print("Audio block passed.")
-            print("ENDing VC")
-
-        def audio_callback(
-            self, indata: np.ndarray, outdata: np.ndarray, frames, times, status
-        ):
-            """
-            Audio processing.
-            """
-            start_time = time.perf_counter()
-            indata = librosa.to_mono(indata.T)
-            if self.config.I_noise_reduce:
-                indata[:] = nr.reduce_noise(y=indata, sr=self.config.samplerate)
-            """noise gate"""
-            frame_length = 2048
-            hop_length = 1024
-            rms = librosa.feature.rms(
-                y=indata, frame_length=frame_length, hop_length=hop_length
-            )
-            if self.config.threhold > -60:
-                db_threhold = (
-                    librosa.amplitude_to_db(rms, ref=1.0)[0] < self.config.threhold
-                )
-                for i in range(db_threhold.shape[0]):
-                    if db_threhold[i]:
-                        indata[i * hop_length : (i + 1) * hop_length] = 0
-            self.input_wav[:] = np.append(self.input_wav[self.block_frame :], indata)
-            # infer
-            inp = torch.from_numpy(self.input_wav).to(device)
-            ##0
-            res1 = self.resampler(inp)
-            ###55%
-            rate1 = self.block_frame / (
-                self.extra_frame
-                + self.crossfade_frame
-                + self.sola_search_frame
-                + self.block_frame
-            )
-            rate2 = (
-                self.crossfade_frame + self.sola_search_frame + self.block_frame
-            ) / (
-                self.extra_frame
-                + self.crossfade_frame
-                + self.sola_search_frame
-                + self.block_frame
-            )
-            res2 = self.rvc.infer(
-                res1,
-                res1[-self.block_frame :].cpu().numpy(),
-                rate1,
-                rate2,
-                self.pitch,
-                self.pitchf,
-                self.config.f0method,
-            )
-            self.output_wav_cache[-res2.shape[0] :] = res2
-            infer_wav = self.output_wav_cache[
-                -self.crossfade_frame - self.sola_search_frame - self.block_frame :
-            ]
-            # SOLA algorithm from https://github.com/yxlllc/DDSP-SVC
-            cor_nom = F.conv1d(
-                infer_wav[None, None, : self.crossfade_frame + self.sola_search_frame],
-                self.sola_buffer[None, None, :],
-            )
-            cor_den = torch.sqrt(
-                F.conv1d(
-                    infer_wav[
-                        None, None, : self.crossfade_frame + self.sola_search_frame
-                    ]
-                    ** 2,
-                    torch.ones(1, 1, self.crossfade_frame, device=device),
-                )
-                + 1e-8
-            )
-            if sys.platform == "darwin":
-                _, sola_offset = torch.max(cor_nom[0, 0] / cor_den[0, 0])
-                sola_offset = sola_offset.item()
-            else:
-                sola_offset = torch.argmax(cor_nom[0, 0] / cor_den[0, 0])
-            print("sola offset: " + str(int(sola_offset)))
-            self.output_wav[:] = infer_wav[sola_offset : sola_offset + self.block_frame]
-            self.output_wav[: self.crossfade_frame] *= self.fade_in_window
-            self.output_wav[: self.crossfade_frame] += self.sola_buffer[:]
-            # crossfade
-            if sola_offset < self.sola_search_frame:
-                self.sola_buffer[:] = (
-                    infer_wav[
-                        -self.sola_search_frame
-                        - self.crossfade_frame
-                        + sola_offset : -self.sola_search_frame
-                        + sola_offset
-                    ]
-                    * self.fade_out_window
-                )
-            else:
-                self.sola_buffer[:] = (
-                    infer_wav[-self.crossfade_frame :] * self.fade_out_window
-                )
-            if self.config.O_noise_reduce:
-                if sys.platform == "darwin":
-                    noise_reduced_signal = nr.reduce_noise(
-                        y=self.output_wav[:].cpu().numpy(), sr=self.config.samplerate
-                    )
-                    outdata[:] = noise_reduced_signal[:, np.newaxis]
-                else:
-                    outdata[:] = np.tile(
-                        nr.reduce_noise(
-                            y=self.output_wav[:].cpu().numpy(),
-                            sr=self.config.samplerate,
-                        ),
-                        (2, 1),
-                    ).T
-            else:
-                if sys.platform == "darwin":
-                    outdata[:] = self.output_wav[:].cpu().numpy()[:, np.newaxis]
-                else:
-                    outdata[:] = self.output_wav[:].repeat(2, 1).t().cpu().numpy()
-            total_time = time.perf_counter() - start_time
-            self.window["infer_time"].update(int(total_time * 1000))
-            print("infer time:" + str(total_time))
-
-        def get_devices(self, update: bool = True):
-            """Get the device list."""
-            if update:
-                sd._terminate()
-                sd._initialize()
-            devices = sd.query_devices()
-            hostapis = sd.query_hostapis()
-            for hostapi in hostapis:
-                for device_idx in hostapi["devices"]:
-                    devices[device_idx]["hostapi_name"] = hostapi["name"]
-            input_devices = [
-                f"{d['name']} ({d['hostapi_name']})"
-                for d in devices
-                if d["max_input_channels"] > 0
-            ]
-            output_devices = [
-                f"{d['name']} ({d['hostapi_name']})"
-                for d in devices
-                if d["max_output_channels"] > 0
-            ]
-            input_devices_indices = [
-                d["index"] if "index" in d else d["name"]
-                for d in devices
-                if d["max_input_channels"] > 0
-            ]
-            output_devices_indices = [
-                d["index"] if "index" in d else d["name"]
-                for d in devices
-                if d["max_output_channels"] > 0
-            ]
-            return (
-                input_devices,
-                output_devices,
-                input_devices_indices,
-                output_devices_indices,
-            )
-
-        def set_devices(self, input_device, output_device):
-            """Set the output device."""
-            (
-                input_devices,
-                output_devices,
-                input_device_indices,
-                output_device_indices,
-            ) = self.get_devices()
-            sd.default.device[0] = input_device_indices[
-                input_devices.index(input_device)
-            ]
-            sd.default.device[1] = output_device_indices[
-                output_devices.index(output_device)
-            ]
-            print("input device:" + str(sd.default.device[0]) + ":" + str(input_device))
-            print(
-                "output device:" + str(sd.default.device[1]) + ":" + str(output_device)
-            )
-
-    gui = GUI()