Guys please help me!
Dear Guillaume
I have a few related questions. The first one is: how can I feed the algorithm with the frames I am grabbing in the attached OpenCV code? (A small sketch of the conversion I have in mind is below, after the questions.)
My second question: our goal is to estimate the heart rate in real time, because we want to synchronise it with EEG recordings. Is real-time heart-rate estimation possible with this code? (A small logging sketch of what I have in mind for the synchronisation is in the P.S. below.)
My third question concerns the bob face detection. Since I do not have a video database and want to feed the algorithm from a camera, I have not been able to test it. Our experimental conditions are not fixed and participants may move their head during the experiment, so is head movement a problem?
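For the first question, this is roughly the conversion I have in mind before handing a frame to the bob functions (just a sketch; in the attached script I only transpose the array and do not swap BGR to RGB, and I am not sure whether that swap is also needed):

import cv2
import numpy as np

def opencv_to_bob(frame):
    # OpenCV delivers (height, width, 3) uint8 frames in BGR order, while the
    # bob image functions work on channel-first (3, height, width) RGB arrays
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    return np.ascontiguousarray(rgb.transpose((2, 0, 1)))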
Thank you very much
Kikuchi
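P.S. Regarding the second question, what I have in mind for the EEG synchronisation is simply to write every heart-rate estimate together with its timestamp, roughly like this (only a sketch; the file name and the helper are made up by me):

import csv
import time

def log_hr(hr, path='hr_log.csv'):
    # append one (timestamp, heart rate) row per estimate; the EEG recording
    # can then be aligned against the same clock
    with open(path, 'a') as f:
        csv.writer(f).writerow([time.time(), hr])

Would that be an acceptable way to do it?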
Attached: spatial_subspace_rotation.py (OpenCV capture + bob.rppg SSR)
import bob.io.base
import bob.io.image
import bob.ip.facedetect
import bob.ip.draw
import bob.io.video
import bob.ip.color

from bob.rppg.base.utils import crop_face
from ssr_utils import get_skin_pixels
from ssr_utils import get_eigen
from ssr_utils import plot_eigenvectors
from ssr_utils import build_P

import cv2
import numpy as np
import threading
import time
from scipy.signal import welch

import matplotlib
matplotlib.use("tkagg")
import matplotlib.pyplot as plt

# shared ring buffer: the main thread writes camera frames, the worker reads them
buffer = {}
timebuffer = {}
writeindex = 0
buffersize = 1000


def worker():
    global writeindex
    global buffer
    global buffersize
    global timebuffer

    faceDet = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

    # processing parameters
    temporal_stride = 20
    nframeproc = 60        # number of frames processed per batch
    skininit = False
    threshold = 0.5
    plot = True
    verbose = 1
    nfft = 128
    nsegments = 12
    segment_length = (2 * 256) // (nsegments + 1)

    count = 0
    sbuffer = {}
    readindex = writeindex
    XX = np.zeros(nframeproc, dtype='float64')           # frame timestamps
    output_data = np.zeros(nframeproc, dtype='float64')  # pulse signal

    # note: creating the matplotlib figure in a worker thread may not work
    # reliably with every backend
    plt.ion()
    fig = plt.figure()
    ax = fig.add_subplot(111)
    line1, = ax.plot(XX, output_data, 'b-')
    fig.canvas.draw()

    while True:
        # copy frames from the shared buffer until a full batch is collected
        while writeindex != readindex and count < nframeproc:
            sbuffer[count] = buffer[readindex].copy()
            XX[count] = timebuffer[readindex]
            readindex = (readindex + 1) % buffersize
            count += 1

        if count == nframeproc:
            count = 0

            # face detection: try the OpenCV cascade first, fall back to bob
            bounding_box = {}
            for i in range(0, nframeproc):
                gray = cv2.cvtColor(sbuffer[i], cv2.COLOR_BGR2GRAY)
                face = faceDet.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=10,
                                                minSize=(5, 5), flags=cv2.CASCADE_SCALE_IMAGE)
                if len(face) > 0:
                    x, y, w, h = face[0]
                    bounding_box[i] = bob.ip.facedetect.BoundingBox([y, x], [h, w])
                    print(i, "opencv face detector", bounding_box[i])
                else:
                    bounding_box[i], _ = bob.ip.facedetect.detect_single_face(sbuffer[i].transpose((2, 0, 1)))
                    print(i, "bob face detector", bounding_box[i])
            # the result -> the pulse signal
            # store the eigenvalues and the eigenvectors at each frame
            eigenvalues = np.zeros((3, nframeproc), dtype='float64')
            eigenvectors = np.zeros((3, 3, nframeproc), dtype='float64')

            for i in range(0, nframeproc):
                print(i, "th frame processed")
                # bob expects channel-first frames, OpenCV delivers (H, W, 3)
                frame = sbuffer[i].transpose((2, 0, 1))

                # init skin parameters in any case if it's the first frame
                init = True if i == 0 else skininit
                try:
                    skin_pixels = get_skin_pixels(frame, i, init, threshold, bounding_box)
                except NameError:
                    skin_pixels = get_skin_pixels(frame, i, init, threshold)
                # no skin pixels detected, generally due to no face detection:
                # go back in time to find a face, and use this bbox to retrieve
                # skin pixels in the current frame
                if skin_pixels.shape[1] == 0:
                    # logger.warning("No skin pixels detected in frame {0}".format(i))
                    k = 1
                    while skin_pixels.shape[1] <= 0:
                        # sbuffer holds OpenCV-ordered frames, so transpose here as well
                        previous = sbuffer[i - k].transpose((2, 0, 1))
                        try:
                            skin_pixels = get_skin_pixels(previous, (i - k), skininit, threshold,
                                                          bounding_box, skin_frame=frame)
                        except NameError:
                            skin_pixels = get_skin_pixels(previous, (i - k), skininit, threshold,
                                                          skin_frame=frame)
                        k += 1
                    # logger.warning("got skin pixels in frame {0}".format(i - k))
                # build the C matrix and get eigenvectors and eigenvalues
                eigenvalues[:, i], eigenvectors[:, :, i] = get_eigen(skin_pixels)

                # plot the cluster of skin pixels and eigenvectors (see Figure 1)
                if plot and verbose >= 2:
                    plot_eigenvectors(skin_pixels, eigenvectors[:, :, i])

                # build P and add it to the pulse signal
                if i >= temporal_stride:
                    tau = i - temporal_stride
                    p = build_P(i, temporal_stride, eigenvectors, eigenvalues)
                    output_data[tau:i] += (p - np.mean(p))

            # estimate the frame rate from the timestamps, then take the heart rate
            # as the strongest peak of the Welch PSD between 0.7 and 4 Hz
            frate = nframeproc * 1.0 / (XX.max() - XX.min())
            green_f, green_psd = welch(output_data, frate, nperseg=segment_length, nfft=nfft)
            first = np.where(green_f > 0.7)[0]
            last = np.where(green_f < 4)[0]
            first_index = first[0]
            last_index = last[-1]
            range_of_interest = range(first_index, last_index + 1, 1)
            max_idx = np.argmax(green_psd[range_of_interest])
            f_max = green_f[range_of_interest[max_idx]]
            hr = f_max * 60.0
            print("Heart Rate: ", hr)
            # plot the pulse signal
            if plot:
                line1.set_ydata(output_data)
                line1.set_xdata(XX)
                plt.axis([XX.min(), XX.max(), output_data.min(), output_data.max()])
                plt.title("Heart Rate: " + str(hr))
                # redraw so the updated pulse actually shows up
                fig.canvas.draw()
                fig.canvas.flush_events()

            readindex = writeindex
        else:
            time.sleep(0.1)


if __name__ == '__main__':
    cap = cv2.VideoCapture(0)
    ret, frame = cap.read()   # warm up the camera

    t = threading.Thread(target=worker)
    t.daemon = True   # let the process exit once the capture loop stops
    t.start()

    start = time.time()
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        buffer[writeindex] = frame.copy()
        timebuffer[writeindex] = time.time() - start
        writeindex = (writeindex + 1) % buffersize

        cv2.imshow("camera", frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # when everything is done, release the capture
    cap.release()
    cv2.destroyAllWindows()