Hi guys,
I'm trying to plot several spectrograms in real time, one per channel, each in its own plot. With a single channel it works great, but after adding the other channels everything is drawn in the same plot, and I'd like each channel's result shown in a separate plot.
I'm getting the data from a thread, and the code I'm using to plot looks like this:
import numpy as np
import pyqtgraph as pg
import pyqtgraph.exporters
import pyaudio
from PyQt4 import QtCore, QtGui
import cv2
FS = 48000      # Hz
CHUNKSZ = 1024  # samples
class MicrophoneRecorder():
    def __init__(self, signal):
        self.signal = signal
        #self.signal2 = signal2
        self.p = pyaudio.PyAudio()  # create the PyAudio instance
        self.stream = self.p.open(format=pyaudio.paInt16,
                                  rate=FS,
                                  channels=8,
                                  input_device_index=4,
                                  input=True,
                                  frames_per_buffer=CHUNKSZ)

    def read(self):
        self.stream.start_stream()
        data = self.stream.read(CHUNKSZ)
        # np.fromstring is deprecated; frombuffer gives the same int16 view
        y = np.frombuffer(data, dtype=np.int16)
        # the 8 channels arrive interleaved, so de-interleave by striding
        yC1 = y[0::8]  # Channel 1
        yC2 = y[1::8]  # Channel 2
        yC3 = y[2::8]  # Channel 3
        yC4 = y[3::8]  # Channel 4
        yC5 = y[4::8]  # Channel 5
        yC6 = y[5::8]  # Channel 6
        yC7 = y[6::8]  # Channel 7
        yC8 = y[7::8]  # Channel 8
        # both channels go out on the same signal, so they end up in the same widget
        self.signal.emit(yC1)
        self.signal.emit(yC2)

    def close(self):
        self.stream.stop_stream()
        self.stream.close()
        self.p.terminate()
class SpectrogramWidget(pg.PlotWidget):
    read_collected = QtCore.pyqtSignal(np.ndarray)
    #read_collected2 = QtCore.pyqtSignal(np.ndarray)

    def __init__(self):
        super(SpectrogramWidget, self).__init__(parent=None, background='default')
        self.img = pg.ImageItem()   # image item for the spectrogram
        self.img2 = pg.ImageItem()  # second image item (intended for channel 2)
        self.addItem(self.img)      # add the image to the view box
        self.addItem(self.img2)
        # rolling buffer: 94 time slices x (CHUNKSZ//2 + 1) frequency bins
        self.img_array = np.zeros((94, CHUNKSZ // 2 + 1))

        # bipolar colormap
        pos = np.array([0., 1., 0.5, 0.25, 0.75])  # positions of the color stops
        color = np.array([[0, 255, 255, 255], [255, 255, 0, 255], [0, 0, 0, 255],
                          [0, 0, 255, 255], [255, 0, 0, 255]], dtype=np.ubyte)
        cmap = pg.ColorMap(pos, color)  # maps scalar values to colors
        lut = cmap.getLookupTable(0.0, 1.0, 256)  # start value, final value, number of points
        self.img.setLookupTable(lut)
        self.img.setLevels([-50, 40])  # black level, white level in dB

        # frequency axis in Hz for the FFT bins
        freq = np.arange((CHUNKSZ // 2) + 1) / (float(CHUNKSZ) / FS)
        yscale = 1.0 / (self.img_array.shape[1] / freq[-1])  # Hz per image row
        self.img.scale((1. / FS) * CHUNKSZ, yscale)  # scale image to time/frequency units

        self.setLabel('left', 'Frequency', units='Hz')  # y-axis label
        self.setLabel('bottom', 'Time', units='sec')    # x-axis label

        self.win = np.hanning(CHUNKSZ)  # Hanning window to reduce spectral leakage
        self.show()
    def update(self, chunk):
        # normalized, windowed FFT of the data chunk (real input)
        spec = np.fft.rfft(chunk * self.win) / CHUNKSZ
        # get magnitude
        psd = abs(spec)
        # convert to dB scale
        psd = 20 * np.log10(psd)
        # roll the buffer down one slice and replace the leading edge with new data
        self.img_array = np.roll(self.img_array, -1, 0)
        self.img_array[-1:] = psd
        self.img.setImage(self.img_array, autoLevels=False)
if __name__ == '__main__':
    app = QtGui.QApplication([])
    w = SpectrogramWidget()
    w.read_collected.connect(w.update)
    #w.read_collected2.connect(w.update)
    mic = MicrophoneRecorder(w.read_collected)

    # number of chunk reads per second
    interval = FS / CHUNKSZ
    t = QtCore.QTimer()
    t.timeout.connect(mic.read)
    t.start(int(100 / interval))  # QTimer takes ms

    app.exec_()
    mic.close()
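In case it helps show what I'm after: since read_collected is a class attribute, I believe each SpectrogramWidget instance gets its own bound copy of the signal, so my guess is that I need one widget per channel, with the recorder emitting each channel on its own widget's signal. This is just an untested sketch of that idea (passing a list of signals to MicrophoneRecorder is my own modification):

# untested sketch: one SpectrogramWidget per channel, each driving its own plot
widgets = []
for ch in range(8):
    w = SpectrogramWidget()
    w.read_collected.connect(w.update)  # each instance has its own bound signal
    widgets.append(w)

# MicrophoneRecorder would take the list of signals instead of a single one:
mic = MicrophoneRecorder([w.read_collected for w in widgets])

# and inside read(), instead of the two emit() calls:
#     for ch, sig in enumerate(self.signals):
#         sig.emit(y[ch::8])

I'm not sure this is the right approach, though.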
I appreciate any suggestions.