Upload WavToSpectro.py
WavToSpectro.py  ADDED  (+119 -0)
@@ -0,0 +1,119 @@
import matplotlib.pyplot as plt
from scipy import signal
from scipy.io import wavfile
import os
import wavio
import numpy as np
import wave
from scipy.io.wavfile import read as read_wav
import pylab
from numpy.lib import stride_tricks

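# Note: wavio, wave, pylab, and read_wav are imported but never referenced below;
# they appear to be leftovers from earlier experiments.
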
""" NOTE: While a lot of this was self-authored (lines 60-89), the spectrogram images I was producing were just not the correct colors. I couldn't find a way to make the
contrast between the noise caught by the microphone and the background more visible. The code between lines 16-30, 32-57, and 91-112 was written following this Stack Overflow
post: https://stackoverflow.com/questions/44787437/how-to-convert-a-wav-file-to-a-spectrogram-in-python3. It is really just the template for the graph and the correct coloring.
The actual accessing of the files, processing of the wav data, and saving of the images was all pretty simple itself."""

# short-time Fourier transform of audio signal
def stft(sig, frameSize, overlapFac=0.5, window=np.hamming, hopFactor=1):
    win = window(frameSize) + 1e-10  # tiny offset keeps the window strictly positive
    hopSize = int(frameSize - np.floor(overlapFac * frameSize)) * hopFactor

    # zeros at beginning (thus center of 1st window should be for sample nr. 0)
    samples = np.append(np.zeros(int(np.floor(frameSize/2.0))), sig)
    # cols for windowing
    cols = np.ceil((len(samples) - frameSize) / float(hopSize)) + 1
    # zeros at end (thus samples can be fully covered by frames)
    samples = np.append(samples, np.zeros(frameSize))

    frames = stride_tricks.as_strided(samples, shape=(int(cols), frameSize), strides=(samples.strides[0]*hopSize, samples.strides[0])).copy()
    frames *= win

    return np.fft.rfft(frames)


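# Example of the expected shapes (illustrative values, not from the project data):
# for a 1-second mono signal at 44.1 kHz with frameSize=1024, overlapFac=0.5 and
# hopFactor=1, hopSize is 512, so stft() yields 87 frames of 513 rFFT bins each:
#
#   spec = stft(np.random.randn(44100), 1024)
#   spec.shape   # -> (87, 513)
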
def logscale_spec(spec, sr=44100, factor=20.):
    timebins, freqbins = np.shape(spec)

    scale = np.linspace(0, 1, freqbins) ** factor
    scale *= (freqbins-1)/max(scale)
    scale = np.unique(np.round(scale))

    # create spectrogram with new freq bins
    newspec = np.complex128(np.zeros([timebins, len(scale)]))
    for i in range(0, len(scale)):
        if i == len(scale)-1:
            newspec[:,i] = np.sum(spec[:,int(scale[i]):], axis=1)
        else:
            newspec[:,i] = np.sum(spec[:,int(scale[i]):int(scale[i+1])], axis=1)

    # list center freq of bins
    allfreqs = np.abs(np.fft.fftfreq(freqbins*2, 1./sr)[:freqbins+1])
    freqs = []
    for i in range(0, len(scale)):
        if i == len(scale)-1:
            freqs += [np.mean(allfreqs[int(scale[i]):])]
        else:
            freqs += [np.mean(allfreqs[int(scale[i]):int(scale[i+1])])]

    return newspec, freqs




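# How the rescaling behaves: `scale` maps each new bin to a range of original
# rFFT bins. With factor=1 (as used below) the mapping is linear and essentially
# a no-op, i.e. newspec[:, i] == spec[:, i]; with larger factors more of the new
# bins are devoted to the low end of the spectrum, giving a log-like frequency axis.
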
folders = ["Pipistrellus pygmaus with social sound", "Noctula nyctalus with noise", "Pipistrellus pygmaus wo social sound", "Noctula nyctalus with out social sound and noise"]
# folders1 is assigned twice and never used below; kept as a leftover of earlier runs
folders1 = ["test"]
folders1 = ["Noctula nyctalus with out social sound and noise"]
def wavToSpectro(folders):
    for folder in folders:
        for fN in os.listdir(f"/Users/elijahmendoza/OCS_Materials/Neural_Networks/NeuralNetworksProject/{folder}/to crop"):
            #print(fN)
            fileName = fN[:-4]
            if ".wav" in fN:
                fileToImport = f"/Users/elijahmendoza/OCS_Materials/Neural_Networks/NeuralNetworksProject/{folder}/to crop/{fileName}.wav"
                pngName = f"/Users/elijahmendoza/OCS_Materials/Neural_Networks/NeuralNetworksProject/{folder}/Bar Spectrograms/{fileName}"


                samp_rate, samp = wavfile.read(fileToImport)

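                # scipy.io.wavfile.read returns (sample_rate_in_Hz, data); for the
                # common 16-bit PCM encoding `samp` is an int16 NumPy array (one
                # column per channel if the recording is stereo)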
                # our samp has 5_000_000 samples (for a given clip)
                # our samp_rate is 500_000 (for a given clip)
                # if we divide samp/samp_rate then we get the length of our clip (in this case 10 s)
                # adjust sample rate

                # note: this scipy spectrogram is computed but never used; the image
                # below is built from the stft() output instead
                frequencies, times, spectrogram = signal.spectrogram(samp, samp_rate)
                binsize = 2**10
                colormap = "jet"

                #hopfactor Max: 15
                #hopfactor min: ?

                s = stft(samp, binsize, hopFactor=2)
                sshow, freq = logscale_spec(s, factor=1, sr=samp_rate)
                ims = 20. * np.log10(np.where(np.abs(sshow) < 1e-10, 1e-10, np.abs(sshow)))  # amplitude to decibel
                timebins, freqbins = np.shape(ims)
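                # 20*log10 converts linear amplitude to decibels, e.g. an amplitude
                # of 1000 maps to 60 dB; flooring |sshow| at 1e-10 caps the darkest
                # cells at -200 dB instead of letting log10(0) produce -inf
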
                plt.figure(figsize=(3.0, 2.0), dpi=100)
                plt.imshow(np.transpose(ims), origin="lower", aspect="auto", cmap=colormap, interpolation="bilinear")
                #plt.colorbar()
                plt.axis('off')   # Turn off axis
                plt.margins(0, 0) # Set margins to zero
                #plt.gca().set_aspect('equal')

                #plt.xlabel("time (s)")
                #plt.ylabel("frequency (hz)")
                plt.xlim([0, timebins-1])
                plt.ylim([3, 250])
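                # the x/y limits are in time-bin and frequency-bin indices, not
                # seconds/Hz; assuming the ~500 kHz sample rate noted above and
                # binsize 1024, bins 3-250 correspond very roughly to 1.5-122 kHz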

                #xlocs = np.float32(np.linspace(0, timebins-1, 5))
                #plt.xticks(xlocs, ["%.02f" % l for l in ((xlocs*len(samp)/timebins)+(0.5*binsize))/samp_rate])
                #ylocs = np.int16(np.round(np.linspace(0, freqbins-1, 10)))
                #plt.yticks(ylocs, ["%.02f" % freq[i] for i in ylocs])

                plt.savefig(pngName, bbox_inches="tight", pad_inches=0.0)
                plt.clf()
                plt.close()


wavToSpectro(folders)
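
# Usage note: the script walks each "<folder>/to crop" directory, converts every
# .wav it finds, and writes the matching PNG into "<folder>/Bar Spectrograms".
# A possible portability tweak (a sketch, not part of the original run) would be
# to hoist the hard-coded home directory into one constant, e.g.:
#
#   BASE = os.path.expanduser("~/OCS_Materials/Neural_Networks/NeuralNetworksProject")
#   src = os.path.join(BASE, folder, "to crop")
#   dst = os.path.join(BASE, folder, "Bar Spectrograms")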