import torch import torch.nn.functional as F import torch.nn as nn from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm from utils import init_weights, get_padding import numpy as np from stft import TorchSTFT import torchaudio from nnAudio import features from einops import rearrange from norm2d import NormConv2d from utils import get_padding from munch import Munch from conformer import Conformer LRELU_SLOPE = 0.1 def get_2d_padding(kernel_size, dilation=(1, 1)): return ( ((kernel_size[0] - 1) * dilation[0]) // 2, ((kernel_size[1] - 1) * dilation[1]) // 2, ) class ResBlock1(torch.nn.Module): def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5)): super(ResBlock1, self).__init__() self.h = h self.convs1 = nn.ModuleList([ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], padding=get_padding(kernel_size, dilation[0]))), weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], padding=get_padding(kernel_size, dilation[1]))), weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2], padding=get_padding(kernel_size, dilation[2]))) ]) self.convs1.apply(init_weights) self.convs2 = nn.ModuleList([ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, padding=get_padding(kernel_size, 1))), weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, padding=get_padding(kernel_size, 1))), weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, padding=get_padding(kernel_size, 1))) ]) self.convs2.apply(init_weights) self.alpha1 = nn.ParameterList([nn.Parameter(torch.ones(1, channels, 1)) for i in range(len(self.convs1))]) self.alpha2 = nn.ParameterList([nn.Parameter(torch.ones(1, channels, 1)) for i in range(len(self.convs2))]) def forward(self, x): for c1, c2, a1, a2 in zip(self.convs1, self.convs2, self.alpha1, self.alpha2): xt = x + (1 / a1) * (torch.sin(a1 * x) ** 2) # Snake1D xt = c1(xt) xt = xt + (1 / a2) * (torch.sin(a2 * xt) ** 2) # Snake1D xt = c2(xt) x = xt + x return x def remove_weight_norm(self): for l in self.convs1: remove_weight_norm(l) for l in self.convs2: remove_weight_norm(l) class ResBlock1_old(torch.nn.Module): def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5)): super(ResBlock1, self).__init__() self.h = h self.convs1 = nn.ModuleList([ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], padding=get_padding(kernel_size, dilation[0]))), weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], padding=get_padding(kernel_size, dilation[1]))), weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2], padding=get_padding(kernel_size, dilation[2]))) ]) self.convs1.apply(init_weights) self.convs2 = nn.ModuleList([ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, padding=get_padding(kernel_size, 1))), weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, padding=get_padding(kernel_size, 1))), weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, padding=get_padding(kernel_size, 1))) ]) self.convs2.apply(init_weights) def forward(self, x): for c1, c2 in zip(self.convs1, self.convs2): xt = F.leaky_relu(x, LRELU_SLOPE) xt = c1(xt) xt = F.leaky_relu(xt, LRELU_SLOPE) xt = c2(xt) x = xt + x return x def remove_weight_norm(self): for l in self.convs1: remove_weight_norm(l) for l in self.convs2: remove_weight_norm(l) class ResBlock2(torch.nn.Module): def __init__(self, h, channels, kernel_size=3, dilation=(1, 3)): super(ResBlock2, self).__init__() self.h = h self.convs = nn.ModuleList([ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], padding=get_padding(kernel_size, dilation[0]))), weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], padding=get_padding(kernel_size, dilation[1]))) ]) self.convs.apply(init_weights) def forward(self, x): for c in self.convs: xt = F.leaky_relu(x, LRELU_SLOPE) xt = c(xt) x = xt + x return x def remove_weight_norm(self): for l in self.convs: remove_weight_norm(l) class SineGen(torch.nn.Module): """ Definition of sine generator SineGen(samp_rate, harmonic_num = 0, sine_amp = 0.1, noise_std = 0.003, voiced_threshold = 0, flag_for_pulse=False) samp_rate: sampling rate in Hz harmonic_num: number of harmonic overtones (default 0) sine_amp: amplitude of sine-wavefrom (default 0.1) noise_std: std of Gaussian noise (default 0.003) voiced_thoreshold: F0 threshold for U/V classification (default 0) flag_for_pulse: this SinGen is used inside PulseGen (default False) Note: when flag_for_pulse is True, the first time step of a voiced segment is always sin(np.pi) or cos(0) """ def __init__(self, samp_rate, upsample_scale, harmonic_num=0, sine_amp=0.1, noise_std=0.003, voiced_threshold=0, flag_for_pulse=False): super(SineGen, self).__init__() self.sine_amp = sine_amp self.noise_std = noise_std self.harmonic_num = harmonic_num self.dim = self.harmonic_num + 1 self.sampling_rate = samp_rate self.voiced_threshold = voiced_threshold self.flag_for_pulse = flag_for_pulse self.upsample_scale = upsample_scale def _f02uv(self, f0): # generate uv signal uv = (f0 > self.voiced_threshold).type(torch.float32) return uv def _f02sine(self, f0_values): """ f0_values: (batchsize, length, dim) where dim indicates fundamental tone and overtones """ # convert to F0 in rad. The interger part n can be ignored # because 2 * np.pi * n doesn't affect phase rad_values = (f0_values / self.sampling_rate) % 1 # initial phase noise (no noise for fundamental component) rand_ini = torch.rand(f0_values.shape[0], f0_values.shape[2], \ device=f0_values.device) rand_ini[:, 0] = 0 rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini # instantanouse phase sine[t] = sin(2*pi \sum_i=1 ^{t} rad) if not self.flag_for_pulse: # # for normal case # # To prevent torch.cumsum numerical overflow, # # it is necessary to add -1 whenever \sum_k=1^n rad_value_k > 1. # # Buffer tmp_over_one_idx indicates the time step to add -1. # # This will not change F0 of sine because (x-1) * 2*pi = x * 2*pi # tmp_over_one = torch.cumsum(rad_values, 1) % 1 # tmp_over_one_idx = (padDiff(tmp_over_one)) < 0 # cumsum_shift = torch.zeros_like(rad_values) # cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 # phase = torch.cumsum(rad_values, dim=1) * 2 * np.pi rad_values = torch.nn.functional.interpolate(rad_values.transpose(1, 2), scale_factor=1/self.upsample_scale, mode="linear").transpose(1, 2) # tmp_over_one = torch.cumsum(rad_values, 1) % 1 # tmp_over_one_idx = (padDiff(tmp_over_one)) < 0 # cumsum_shift = torch.zeros_like(rad_values) # cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 phase = torch.cumsum(rad_values, dim=1) * 2 * np.pi phase = torch.nn.functional.interpolate(phase.transpose(1, 2) * self.upsample_scale, scale_factor=self.upsample_scale, mode="linear").transpose(1, 2) sines = torch.sin(phase) else: # If necessary, make sure that the first time step of every # voiced segments is sin(pi) or cos(0) # This is used for pulse-train generation # identify the last time step in unvoiced segments uv = self._f02uv(f0_values) uv_1 = torch.roll(uv, shifts=-1, dims=1) uv_1[:, -1, :] = 1 u_loc = (uv < 1) * (uv_1 > 0) # get the instantanouse phase tmp_cumsum = torch.cumsum(rad_values, dim=1) # different batch needs to be processed differently for idx in range(f0_values.shape[0]): temp_sum = tmp_cumsum[idx, u_loc[idx, :, 0], :] temp_sum[1:, :] = temp_sum[1:, :] - temp_sum[0:-1, :] # stores the accumulation of i.phase within # each voiced segments tmp_cumsum[idx, :, :] = 0 tmp_cumsum[idx, u_loc[idx, :, 0], :] = temp_sum # rad_values - tmp_cumsum: remove the accumulation of i.phase # within the previous voiced segment. i_phase = torch.cumsum(rad_values - tmp_cumsum, dim=1) # get the sines sines = torch.cos(i_phase * 2 * np.pi) return sines def forward(self, f0): """ sine_tensor, uv = forward(f0) input F0: tensor(batchsize=1, length, dim=1) f0 for unvoiced steps should be 0 output sine_tensor: tensor(batchsize=1, length, dim) output uv: tensor(batchsize=1, length, 1) """ f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device) # fundamental component fn = torch.multiply(f0, torch.FloatTensor([[range(1, self.harmonic_num + 2)]]).to(f0.device)) # generate sine waveforms sine_waves = self._f02sine(fn) * self.sine_amp # generate uv signal # uv = torch.ones(f0.shape) # uv = uv * (f0 > self.voiced_threshold) uv = self._f02uv(f0) # noise: for unvoiced should be similar to sine_amp # std = self.sine_amp/3 -> max value ~ self.sine_amp # . for voiced regions is self.noise_std noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 noise = noise_amp * torch.randn_like(sine_waves) # first: set the unvoiced part to 0 by uv # then: additive noise sine_waves = sine_waves * uv + noise return sine_waves, uv, noise class SourceModuleHnNSF(torch.nn.Module): """ SourceModule for hn-nsf SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, add_noise_std=0.003, voiced_threshod=0) sampling_rate: sampling_rate in Hz harmonic_num: number of harmonic above F0 (default: 0) sine_amp: amplitude of sine source signal (default: 0.1) add_noise_std: std of additive Gaussian noise (default: 0.003) note that amplitude of noise in unvoiced is decided by sine_amp voiced_threshold: threhold to set U/V given F0 (default: 0) Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) F0_sampled (batchsize, length, 1) Sine_source (batchsize, length, 1) noise_source (batchsize, length 1) uv (batchsize, length, 1) """ def __init__(self, sampling_rate, upsample_scale, harmonic_num=0, sine_amp=0.1, add_noise_std=0.003, voiced_threshod=0): super(SourceModuleHnNSF, self).__init__() self.sine_amp = sine_amp self.noise_std = add_noise_std # to produce sine waveforms self.l_sin_gen = SineGen(sampling_rate, upsample_scale, harmonic_num, sine_amp, add_noise_std, voiced_threshod) # to merge source harmonics into a single excitation self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) self.l_tanh = torch.nn.Tanh() def forward(self, x): """ Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) F0_sampled (batchsize, length, 1) Sine_source (batchsize, length, 1) noise_source (batchsize, length 1) """ # source for harmonic branch with torch.no_grad(): sine_wavs, uv, _ = self.l_sin_gen(x) sine_merge = self.l_tanh(self.l_linear(sine_wavs)) # source for noise branch, in the same shape as uv noise = torch.randn_like(uv) * self.sine_amp / 3 return sine_merge, noise, uv def padDiff(x): return F.pad(F.pad(x, (0,0,-1,1), 'constant', 0) - x, (0,0,0,-1), 'constant', 0) class Generator(torch.nn.Module): def __init__(self, h, F0_model): super(Generator, self).__init__() self.h = h self.num_kernels = len(h.resblock_kernel_sizes) self.num_upsamples = len(h.upsample_rates) self.conv_pre = weight_norm(Conv1d(128, h.upsample_initial_channel, 7, 1, padding=3)) resblock = ResBlock1 if h.resblock == '1' else ResBlock2 self.m_source = SourceModuleHnNSF( sampling_rate=h.sampling_rate, upsample_scale=np.prod(h.upsample_rates) * h.gen_istft_hop_size, harmonic_num=8, voiced_threshod=10) self.f0_upsamp = torch.nn.Upsample( scale_factor=np.prod(h.upsample_rates) * h.gen_istft_hop_size) self.noise_convs = nn.ModuleList() self.noise_res = nn.ModuleList() self.F0_model = F0_model self.ups = nn.ModuleList() for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)): self.ups.append(weight_norm( ConvTranspose1d(h.upsample_initial_channel//(2**i), h.upsample_initial_channel//(2**(i+1)), k, u, padding=(k-u)//2))) c_cur = h.upsample_initial_channel // (2 ** (i + 1)) if i + 1 < len(h.upsample_rates): # stride_f0 = np.prod(h.upsample_rates[i + 1:]) self.noise_convs.append(Conv1d( h.gen_istft_n_fft + 2, c_cur, kernel_size=stride_f0 * 2, stride=stride_f0, padding=(stride_f0+1) // 2)) self.noise_res.append(resblock(h, c_cur, 7, [1,3,5])) else: self.noise_convs.append(Conv1d(h.gen_istft_n_fft + 2, c_cur, kernel_size=1)) self.noise_res.append(resblock(h, c_cur, 11, [1,3,5])) self.alphas = nn.ParameterList() self.alphas.append(nn.Parameter(torch.ones(1, h.upsample_initial_channel, 1))) self.resblocks = nn.ModuleList() for i in range(len(self.ups)): ch = h.upsample_initial_channel//(2**(i+1)) self.alphas.append(nn.Parameter(torch.ones(1, ch, 1))) for j, (k, d) in enumerate( zip(h.resblock_kernel_sizes, h.resblock_dilation_sizes)): self.resblocks.append(resblock(h, ch, k, d)) self.conformers = nn.ModuleList() self.post_n_fft = h.gen_istft_n_fft self.conv_post = weight_norm(Conv1d(128, self.post_n_fft + 2, 7, 1, padding=3)) for i in range(len(self.ups)): ch = h.upsample_initial_channel // (2**i) self.conformers.append( Conformer( dim=ch, depth=2, dim_head=64, heads=8, ff_mult=4, conv_expansion_factor=2, conv_kernel_size=31, attn_dropout=0.1, ff_dropout=0.1, conv_dropout=0.1, # device=self.device ) ) self.ups.apply(init_weights) self.conv_post.apply(init_weights) self.reflection_pad = torch.nn.ReflectionPad1d((1, 0)) self.stft = TorchSTFT(filter_length=h.gen_istft_n_fft, hop_length=h.gen_istft_hop_size, win_length=h.gen_istft_n_fft) def forward(self, x): f0, _, _ = self.F0_model(x.unsqueeze(1)) if len(f0.shape) == 1: f0 = f0.unsqueeze(0) f0 = self.f0_upsamp(f0[:, None]).transpose(1, 2) # bs,n,t har_source, _, _ = self.m_source(f0) har_source = har_source.transpose(1, 2).squeeze(1) har_spec, har_phase = self.stft.transform(har_source) har = torch.cat([har_spec, har_phase], dim=1) x = self.conv_pre(x) for i in range(self.num_upsamples): x = x + (1 / self.alphas[i]) * (torch.sin(self.alphas[i] * x) ** 2) x = rearrange(x, "b f t -> b t f") x = self.conformers[i](x) x = rearrange(x, "b t f -> b f t") # x = F.leaky_relu(x, LRELU_SLOPE) x_source = self.noise_convs[i](har) x_source = self.noise_res[i](x_source) x = self.ups[i](x) if i == self.num_upsamples - 1: x = self.reflection_pad(x) x = x + x_source xs = None for j in range(self.num_kernels): if xs is None: xs = self.resblocks[i*self.num_kernels+j](x) else: xs += self.resblocks[i*self.num_kernels+j](x) x = xs / self.num_kernels # x = F.leaky_relu(x) x = x + (1 / self.alphas[i + 1]) * (torch.sin(self.alphas[i + 1] * x) ** 2) x = self.conv_post(x) spec = torch.exp(x[:,:self.post_n_fft // 2 + 1, :]).to(x.device) phase = torch.sin(x[:, self.post_n_fft // 2 + 1:, :]).to(x.device) return spec, phase def remove_weight_norm(self): print("Removing weight norm...") for l in self.ups: remove_weight_norm(l) for l in self.resblocks: l.remove_weight_norm() remove_weight_norm(self.conv_pre) remove_weight_norm(self.conv_post) def stft(x, fft_size, hop_size, win_length, window): """Perform STFT and convert to magnitude spectrogram. Args: x (Tensor): Input signal tensor (B, T). fft_size (int): FFT size. hop_size (int): Hop size. win_length (int): Window length. window (str): Window function type. Returns: Tensor: Magnitude spectrogram (B, #frames, fft_size // 2 + 1). """ x_stft = torch.stft(x, fft_size, hop_size, win_length, window, return_complex=True) real = x_stft[..., 0] imag = x_stft[..., 1] # NOTE(kan-bayashi): clamp is needed to avoid nan or inf return torch.abs(x_stft).transpose(2, 1) class SpecDiscriminator(nn.Module): """docstring for Discriminator.""" def __init__(self, fft_size=1024, shift_size=120, win_length=600, window="hann_window", use_spectral_norm=False): super(SpecDiscriminator, self).__init__() norm_f = weight_norm if use_spectral_norm == False else spectral_norm self.fft_size = fft_size self.shift_size = shift_size self.win_length = win_length self.window = getattr(torch, window)(win_length) self.discriminators = nn.ModuleList([ norm_f(nn.Conv2d(1, 32, kernel_size=(3, 9), padding=(1, 4))), norm_f(nn.Conv2d(32, 32, kernel_size=(3, 9), stride=(1,2), padding=(1, 4))), norm_f(nn.Conv2d(32, 32, kernel_size=(3, 9), stride=(1,2), padding=(1, 4))), norm_f(nn.Conv2d(32, 32, kernel_size=(3, 9), stride=(1,2), padding=(1, 4))), norm_f(nn.Conv2d(32, 32, kernel_size=(3, 3), stride=(1,1), padding=(1, 1))), ]) self.out = norm_f(nn.Conv2d(32, 1, 3, 1, 1)) def forward(self, y): fmap = [] y = y.squeeze(1) y = stft(y, self.fft_size, self.shift_size, self.win_length, self.window.to(y.get_device())) y = y.unsqueeze(1) for i, d in enumerate(self.discriminators): y = d(y) y = F.leaky_relu(y, LRELU_SLOPE) fmap.append(y) y = self.out(y) fmap.append(y) return torch.flatten(y, 1, -1), fmap # class MultiResSpecDiscriminator(torch.nn.Module): # def __init__(self, # fft_sizes=[1024, 2048, 512], # hop_sizes=[120, 240, 50], # win_lengths=[600, 1200, 240], # window="hann_window"): # super(MultiResSpecDiscriminator, self).__init__() # self.discriminators = nn.ModuleList([ # SpecDiscriminator(fft_sizes[0], hop_sizes[0], win_lengths[0], window), # SpecDiscriminator(fft_sizes[1], hop_sizes[1], win_lengths[1], window), # SpecDiscriminator(fft_sizes[2], hop_sizes[2], win_lengths[2], window) # ]) # def forward(self, y, y_hat): # y_d_rs = [] # y_d_gs = [] # fmap_rs = [] # fmap_gs = [] # for i, d in enumerate(self.discriminators): # y_d_r, fmap_r = d(y) # y_d_g, fmap_g = d(y_hat) # y_d_rs.append(y_d_r) # fmap_rs.append(fmap_r) # y_d_gs.append(y_d_g) # fmap_gs.append(fmap_g) # return y_d_rs, y_d_gs, fmap_rs, fmap_gs class DiscriminatorP(torch.nn.Module): def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): super(DiscriminatorP, self).__init__() self.period = period norm_f = weight_norm if use_spectral_norm == False else spectral_norm self.convs = nn.ModuleList([ norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(2, 0))), ]) self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) def forward(self, x): fmap = [] # 1d to 2d b, c, t = x.shape if t % self.period != 0: # pad first n_pad = self.period - (t % self.period) x = F.pad(x, (0, n_pad), "reflect") t = t + n_pad x = x.view(b, c, t // self.period, self.period) for l in self.convs: x = l(x) x = F.leaky_relu(x, LRELU_SLOPE) fmap.append(x) x = self.conv_post(x) fmap.append(x) x = torch.flatten(x, 1, -1) return x, fmap class MultiPeriodDiscriminator(torch.nn.Module): def __init__(self): super(MultiPeriodDiscriminator, self).__init__() self.discriminators = nn.ModuleList([ DiscriminatorP(2), DiscriminatorP(3), DiscriminatorP(5), DiscriminatorP(7), DiscriminatorP(11), ]) def forward(self, y, y_hat): y_d_rs = [] y_d_gs = [] fmap_rs = [] fmap_gs = [] for i, d in enumerate(self.discriminators): y_d_r, fmap_r = d(y) y_d_g, fmap_g = d(y_hat) y_d_rs.append(y_d_r) fmap_rs.append(fmap_r) y_d_gs.append(y_d_g) fmap_gs.append(fmap_g) return y_d_rs, y_d_gs, fmap_rs, fmap_gs class DiscriminatorS(torch.nn.Module): def __init__(self, use_spectral_norm=False): super(DiscriminatorS, self).__init__() norm_f = weight_norm if use_spectral_norm == False else spectral_norm self.convs = nn.ModuleList([ norm_f(Conv1d(1, 128, 15, 1, padding=7)), norm_f(Conv1d(128, 128, 41, 2, groups=4, padding=20)), norm_f(Conv1d(128, 256, 41, 2, groups=16, padding=20)), norm_f(Conv1d(256, 512, 41, 4, groups=16, padding=20)), norm_f(Conv1d(512, 1024, 41, 4, groups=16, padding=20)), norm_f(Conv1d(1024, 1024, 41, 1, groups=16, padding=20)), norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), ]) self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) def forward(self, x): fmap = [] for l in self.convs: x = l(x) x = F.leaky_relu(x, LRELU_SLOPE) fmap.append(x) x = self.conv_post(x) fmap.append(x) x = torch.flatten(x, 1, -1) return x, fmap class MultiScaleDiscriminator(torch.nn.Module): def __init__(self): super(MultiScaleDiscriminator, self).__init__() self.discriminators = nn.ModuleList([ DiscriminatorS(use_spectral_norm=True), DiscriminatorS(), DiscriminatorS(), ]) self.meanpools = nn.ModuleList([ AvgPool1d(4, 2, padding=2), AvgPool1d(4, 2, padding=2) ]) def forward(self, y, y_hat): y_d_rs = [] y_d_gs = [] fmap_rs = [] fmap_gs = [] for i, d in enumerate(self.discriminators): if i != 0: y = self.meanpools[i-1](y) y_hat = self.meanpools[i-1](y_hat) y_d_r, fmap_r = d(y) y_d_g, fmap_g = d(y_hat) y_d_rs.append(y_d_r) fmap_rs.append(fmap_r) y_d_gs.append(y_d_g) fmap_gs.append(fmap_g) return y_d_rs, y_d_gs, fmap_rs, fmap_gs ########################### from ringformer multiscale_subband_cfg = { "hop_lengths": [1024, 512, 512], # Doubled to maintain similar time resolution "sampling_rate": 44100, # New sampling rate "filters": 32, # Kept same as it controls initial feature dimension "max_filters": 1024, # Kept same as it's a maximum limit "filters_scale": 1, # Kept same as it's a scaling factor "dilations": [1, 2, 4], # Kept same as they control receptive field growth "in_channels": 1, # Kept same (mono audio) "out_channels": 1, # Kept same (mono audio) "n_octaves": [10, 10, 10], # Increased by 1 to handle higher frequency range "bins_per_octaves": [24, 36, 48], # Kept same as they control frequency resolution } # multiscale_subband_cfg = { # "hop_lengths": [512, 256, 256], # "sampling_rate": 24000, # "filters": 32, # "max_filters": 1024, # "filters_scale": 1, # "dilations": [1, 2, 4], # "in_channels": 1, # "out_channels": 1, # "n_octaves": [9, 9, 9], # "bins_per_octaves": [24, 36, 48], # } class DiscriminatorCQT(nn.Module): def __init__(self, cfg, hop_length, n_octaves, bins_per_octave): super(DiscriminatorCQT, self).__init__() self.cfg = cfg self.filters = cfg.filters self.max_filters = cfg.max_filters self.filters_scale = cfg.filters_scale self.kernel_size = (3, 9) self.dilations = cfg.dilations self.stride = (1, 2) self.in_channels = cfg.in_channels self.out_channels = cfg.out_channels self.fs = cfg.sampling_rate self.hop_length = hop_length self.n_octaves = n_octaves self.bins_per_octave = bins_per_octave self.cqt_transform = features.cqt.CQT2010v2( sr=self.fs * 2, hop_length=self.hop_length, n_bins=self.bins_per_octave * self.n_octaves, bins_per_octave=self.bins_per_octave, output_format="Complex", pad_mode="constant", ) self.conv_pres = nn.ModuleList() for i in range(self.n_octaves): self.conv_pres.append( NormConv2d( self.in_channels * 2, self.in_channels * 2, kernel_size=self.kernel_size, padding=get_2d_padding(self.kernel_size), ) ) self.convs = nn.ModuleList() self.convs.append( NormConv2d( self.in_channels * 2, self.filters, kernel_size=self.kernel_size, padding=get_2d_padding(self.kernel_size), ) ) in_chs = min(self.filters_scale * self.filters, self.max_filters) for i, dilation in enumerate(self.dilations): out_chs = min( (self.filters_scale ** (i + 1)) * self.filters, self.max_filters ) self.convs.append( NormConv2d( in_chs, out_chs, kernel_size=self.kernel_size, stride=self.stride, dilation=(dilation, 1), padding=get_2d_padding(self.kernel_size, (dilation, 1)), norm="weight_norm", ) ) in_chs = out_chs out_chs = min( (self.filters_scale ** (len(self.dilations) + 1)) * self.filters, self.max_filters, ) self.convs.append( NormConv2d( in_chs, out_chs, kernel_size=(self.kernel_size[0], self.kernel_size[0]), padding=get_2d_padding((self.kernel_size[0], self.kernel_size[0])), norm="weight_norm", ) ) self.conv_post = NormConv2d( out_chs, self.out_channels, kernel_size=(self.kernel_size[0], self.kernel_size[0]), padding=get_2d_padding((self.kernel_size[0], self.kernel_size[0])), norm="weight_norm", ) self.activation = torch.nn.LeakyReLU(negative_slope=LRELU_SLOPE) self.resample = torchaudio.transforms.Resample( orig_freq=self.fs, new_freq=self.fs * 2 ) def forward(self, x): fmap = [] x = self.resample(x) z = self.cqt_transform(x) z_amplitude = z[:, :, :, 0].unsqueeze(1) z_phase = z[:, :, :, 1].unsqueeze(1) z = torch.cat([z_amplitude, z_phase], dim=1) z = rearrange(z, "b c w t -> b c t w") latent_z = [] for i in range(self.n_octaves): latent_z.append( self.conv_pres[i]( z[ :, :, :, i * self.bins_per_octave : (i + 1) * self.bins_per_octave, ] ) ) latent_z = torch.cat(latent_z, dim=-1) for i, l in enumerate(self.convs): latent_z = l(latent_z) latent_z = self.activation(latent_z) fmap.append(latent_z) latent_z = self.conv_post(latent_z) return latent_z, fmap class MultiScaleSubbandCQTDiscriminator(nn.Module): # replacing "MultiResSpecDiscriminator" def __init__(self): super(MultiScaleSubbandCQTDiscriminator, self).__init__() cfg = Munch(multiscale_subband_cfg) self.cfg = cfg self.discriminators = nn.ModuleList( [ DiscriminatorCQT( cfg, hop_length=cfg.hop_lengths[i], n_octaves=cfg.n_octaves[i], bins_per_octave=cfg.bins_per_octaves[i], ) for i in range(len(cfg.hop_lengths)) ] ) def forward(self, y, y_hat): y_d_rs = [] y_d_gs = [] fmap_rs = [] fmap_gs = [] for disc in self.discriminators: y_d_r, fmap_r = disc(y) y_d_g, fmap_g = disc(y_hat) y_d_rs.append(y_d_r) fmap_rs.append(fmap_r) y_d_gs.append(y_d_g) fmap_gs.append(fmap_g) return y_d_rs, y_d_gs, fmap_rs, fmap_gs ############################# def feature_loss(fmap_r, fmap_g): loss = 0 for dr, dg in zip(fmap_r, fmap_g): for rl, gl in zip(dr, dg): loss += torch.mean(torch.abs(rl - gl)) return loss*2 def discriminator_loss(disc_real_outputs, disc_generated_outputs): loss = 0 r_losses = [] g_losses = [] for dr, dg in zip(disc_real_outputs, disc_generated_outputs): r_loss = torch.mean((1-dr)**2) g_loss = torch.mean(dg**2) loss += (r_loss + g_loss) r_losses.append(r_loss.item()) g_losses.append(g_loss.item()) return loss, r_losses, g_losses def generator_loss(disc_outputs): loss = 0 gen_losses = [] for dg in disc_outputs: l = torch.mean((1-dg)**2) gen_losses.append(l) loss += l return loss, gen_losses def discriminator_TPRLS_loss(disc_real_outputs, disc_generated_outputs): loss = 0 for dr, dg in zip(disc_real_outputs, disc_generated_outputs): tau = 0.04 m_DG = torch.median((dr-dg)) L_rel = torch.mean((((dr - dg) - m_DG)**2)[dr < dg + m_DG]) loss += tau - F.relu(tau - L_rel) return loss def generator_TPRLS_loss(disc_real_outputs, disc_generated_outputs): loss = 0 for dg, dr in zip(disc_real_outputs, disc_generated_outputs): tau = 0.04 m_DG = torch.median((dr-dg)) L_rel = torch.mean((((dr - dg) - m_DG)**2)[dr < dg + m_DG]) loss += tau - F.relu(tau - L_rel) return loss