Hi everyone.
I have a problem with my code.
I've done a color based tracking using OpenCV and a kinect camera (using the libfreenect library).
The color tracking itself works fine, but I don't understand how to get
the real-world distance of the supposed mass centre of the blob I've
detected as the object.
I get the RGB matrix, the 16-bit and 8-bit depth matrices, and
the screen position of the mass centre of the blob. I need to know how
to extract the depth value at the screen position I've found from the
depth matrix and, after that, how to transform the screen X, screen Y and
depth Z into the coordinates of the object relative to the Kinect (I
assume the Kinect is at the coordinates X=0, Y=0, Z=0).
To get the depth value from the depth matrix, I don't know whether I need to use depthZ=depthMat.at<unsigned char>(iMeanx,iMeany);
or frameDepth->imageData[iMeanx,iMeany].
This is my code
#include <opencv/cvaux.h>
#include <opencv/highgui.h>
#include <opencv/cxcore.h>
#include <stdio.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <math.h>
#include <float.h>
#include <limits.h>
#include <time.h>
#include <ctype.h>
#include <cvblobs/BlobResult.h>
#include "libfreenect.hpp"
#include "libfreenect.h"
#include <libfreenect-registration.h>
using namespace cv;
using namespace std;
// Thin wrapper around a POSIX mutex used to guard the RGB/depth frame
// buffers shared between the libfreenect callback thread and main().
// Fixes vs. original: the mutex is now destroyed in the destructor
// (pthread_mutex_destroy was never called), and the class is made
// non-copyable, since copying a pthread_mutex_t is undefined behaviour.
class Mutex {
public:
    Mutex() {
        pthread_mutex_init( &m_mutex, NULL );
    }
    ~Mutex() {
        pthread_mutex_destroy( &m_mutex );
    }
    // Block until the mutex is acquired.
    void lock() {
        pthread_mutex_lock( &m_mutex );
    }
    // Release the mutex; must be called by the thread that locked it.
    void unlock() {
        pthread_mutex_unlock( &m_mutex );
    }
private:
    // Non-copyable (pre-C++11 idiom: declared but never defined).
    Mutex(const Mutex&);
    Mutex& operator=(const Mutex&);
    pthread_mutex_t m_mutex;
};
// Kinect device wrapper: receives RGB and 16-bit depth frames from the
// libfreenect callback thread and hands copies to main() under a mutex.
// Fix vs. original: the callbacks used to re-point rgbMat.data/depthMat.data
// at libfreenect's internal buffer. That leaked the Mats' own allocations and
// left the Mats aliasing a buffer the driver overwrites outside the lock.
// The frames are now memcpy'd into the Mats' own storage while holding the
// mutex, so getVideo()/getDepth() always read a consistent frame.
class MyFreenectDevice : public Freenect::FreenectDevice {
public:
    MyFreenectDevice(freenect_context *_ctx, int _index)
        : Freenect::FreenectDevice(_ctx, _index),
          m_buffer_depth(FREENECT_DEPTH_MM),
          m_buffer_rgb(FREENECT_VIDEO_RGB),
          m_gamma(2048),
          // init order now matches declaration order (silences -Wreorder)
          depthMat(Size(640,480),CV_16UC1),
          rgbMat(Size(640,480),CV_8UC3,Scalar(0)),
          ownMat(Size(640,480),CV_8UC3,Scalar(0)),
          m_new_rgb_frame(false),
          m_new_depth_frame(false)
    {
        // Precompute a gamma lookup table for depth visualisation
        // (kept from the libfreenect sample; not used elsewhere in this file).
        for( unsigned int i = 0 ; i < 2048 ; i++) {
            float v = i/2048.0;
            v = std::pow(v, 3)* 6;
            m_gamma[i] = v*6*256;
        }
    }
    // Called by libfreenect on its own thread for each RGB frame.
    void VideoCallback(void* _rgb, uint32_t timestamp) {
        m_rgb_mutex.lock();
        uint8_t* rgb = static_cast<uint8_t*>(_rgb);
        // Copy into our own buffer instead of aliasing the driver's buffer.
        memcpy(rgbMat.data, rgb, 640 * 480 * 3);
        m_new_rgb_frame = true;
        m_rgb_mutex.unlock();
    }
    // Called by libfreenect on its own thread for each depth frame
    // (FREENECT_DEPTH_MM: one uint16_t per pixel, value in millimetres).
    void DepthCallback(void* _depth, uint32_t timestamp) {
        m_depth_mutex.lock();
        uint16_t* depth = static_cast<uint16_t*>(_depth);
        memcpy(depthMat.data, depth, 640 * 480 * sizeof(uint16_t));
        m_new_depth_frame = true;
        m_depth_mutex.unlock();
    }
    // Copy the latest RGB frame (converted RGB->BGR for OpenCV) into output.
    // Returns false if no new frame arrived since the last call.
    bool getVideo(Mat& output) {
        m_rgb_mutex.lock();
        if(m_new_rgb_frame) {
            cv::cvtColor(rgbMat, output, CV_RGB2BGR);
            m_new_rgb_frame = false;
            m_rgb_mutex.unlock();
            return true;
        } else {
            m_rgb_mutex.unlock();
            return false;
        }
    }
    // Copy the latest raw 16-bit depth frame into output.
    // Returns false if no new frame arrived since the last call.
    bool getDepth(Mat& output) {
        m_depth_mutex.lock();
        if(m_new_depth_frame) {
            depthMat.copyTo(output);
            m_new_depth_frame = false;
            m_depth_mutex.unlock();
            return true;
        } else {
            m_depth_mutex.unlock();
            return false;
        }
    }
private:
    std::vector<uint8_t> m_buffer_depth;
    std::vector<uint8_t> m_buffer_rgb;
    std::vector<uint16_t> m_gamma;
    Mat depthMat;      // latest depth frame, CV_16UC1, millimetres
    Mat rgbMat;        // latest RGB frame, CV_8UC3
    Mat ownMat;        // scratch buffer (unused here, kept for compatibility)
    Mutex m_rgb_mutex;
    Mutex m_depth_mutex;
    bool m_new_rgb_frame;
    bool m_new_depth_frame;
};
// Build a single-channel binary mask of the pixels in imgHSV whose HSV
// values fall inside the yellow range. The caller owns the returned
// image and must cvReleaseImage() it.
IplImage* GetThresholdedImage(IplImage* imgHSV){
    IplImage* mask = cvCreateImage(cvGetSize(imgHSV), IPL_DEPTH_8U, 1);
    //cvInRangeS(imgHSV, cvScalar(170,160,60), cvScalar(180,2556,256), mask); // red
    //cvInRangeS(imgHSV, cvScalar(112, 100, 100), cvScalar(124, 255, 255), mask); // blue
    cvInRangeS(imgHSV, cvScalar(25, 148, 65), cvScalar(35, 256, 190), mask); // yellow
    return mask;
}
int main(int argc, char **argv) {
IplImage* frame = 0;
IplImage* frameDepth = 0;
CBlobResult blobs;
int iMaxx, iMinx, iMaxy, iMiny, iMeanx, iMeany, depthZ;
double * world_x;
double * world_y;
/*IplImage *ResizedImage ;
ResizedImage = cvCreateImage( cvSize(320, 240), 8, 3 );*/
Mat depthMat(Size(640,480),CV_16UC1);//0---2047
Mat depthf(Size(640,480),CV_8UC1);//0---255
Mat rgbMat(Size(640,480),CV_8UC3,Scalar(0));
Mat ownMat(Size(640,480),CV_8UC3,Scalar(0));
Freenect::Freenect freenect;
MyFreenectDevice& device = freenect.createDevice<MyFreenectDevice>(0);
//MyFreenectDevice * device;
//device = &freenect.createDevice<MyFreenectDevice>(0);
namedWindow("rgb",CV_WINDOW_AUTOSIZE);
namedWindow("depth",CV_WINDOW_AUTOSIZE);
device.startVideo();
device.startDepth();
while (1) {
device.getVideo(rgbMat);
device.getDepth(depthMat);
frame = cvCreateImage(cvSize(640,480), 8, 3);
frame->imageData = (char *) rgbMat.data;
frameDepth = cvCreateImage(cvSize(640,480), 8, 1);
depthMat.convertTo(depthf, CV_8UC1, 255.0/2048.0);
frameDepth->imageData = (char *) depthf.data;
if( !frame )
{
fprintf( stderr, "ERROR: frame is null...\n" );
getchar();
break;
}
// cvResize(frame, ResizedImage, CV_INTER_LINEAR);
// ResizedImage = cvCloneImage(frame);
cvSmooth(frame, frame, CV_GAUSSIAN,3,3);
IplImage* imgHSV = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 3);
cvCvtColor(frame, imgHSV, CV_BGR2HSV);
IplImage* imgThresh = GetThresholdedImage(imgHSV);
cvDilate(imgThresh,imgThresh,NULL,2);
cvSmooth( imgThresh, imgThresh, CV_GAUSSIAN, 9, 9 );
CBlob biggestBlob;
blobs = CBlobResult(imgThresh, NULL, 0 );
blobs.GetNthBlob( CBlobGetArea(), 0, biggestBlob );
iMaxx=biggestBlob.MaxX();
iMinx=biggestBlob.MinX();
iMaxy=biggestBlob.MaxY();
iMiny=biggestBlob.MinY();
iMeanx=(iMinx+iMaxx)/2;
iMeany=(iMiny+iMaxy)/2;
//per escludere i blobs minori di parametro 80
//blobs.Filter( blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, 80 );
IplImage* filtered = cvCreateImage( cvGetSize( frame ), IPL_DEPTH_8U, 3 );
cvMerge( imgThresh, imgThresh, imgThresh, NULL, filtered );
cvLine( frame, cvPoint(iMeanx, iMeany), cvPoint(iMeanx, iMeany), CV_RGB(50,50 , 50), 4, 8, 0 );
cvRectangle( frame, cvPoint(iMinx , iMiny ), cvPoint ( iMaxx, iMaxy ), CV_RGB(150, 150, 150), 2, 8, 0);
//std::cout << "\n\ndepth: " << depthf. << std::endl;
depthZ=depthMat.at<unsigned char>(iMeany,iMeanx);
// printf("\n iMeany: %d, iMeanx: %d\n, depthZ: %d\n",iMeany,iMeanx,depthZ );
biggestBlob.FillBlob( filtered, CV_RGB(255,0,0));
cvShowImage( "rgb", frame );
// cvShowImage( "depth", filtered);
// sleep(1);
if( (cvWaitKey(10) & 255) == 27 ) break;
}
// cvReleaseCapture( &capture );
cvDestroyWindow( "mywindow" );
return 0;
}