Hi Guys,
This might be a simple task for those in the know, but I'm not sure how to do it correctly.
While capturing from multiple cameras, I want to perform some extra (intensive) image transformations and processing on all captured images and display the resulting images. Am I correct in saying that, based on the C++ CLEyeStereoVision sample code, each camera that is initialized is handled on a separate thread?
Am I also correct in saying that, for maximum performance, each camera capture should run on its own thread, processing of the images on another thread, and display of the images on yet another?
Image capture would be the priority, since it's important to capture at the selected frame rate, whereas image display is less important and could skip frames.
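I'm assuming the capture thread could be favoured with something like the Win32 SetThreadPriority call (just my guess; _hThread is the capture thread handle created in my StartCapture() below):

// Assumption on my part: raise the capture thread's priority so frame grabbing
// is serviced ahead of the processing/display threads.
SetThreadPriority(_hThread, THREAD_PRIORITY_ABOVE_NORMAL);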
The code I'm using is based on the C++ CLEyeStereoVision example, with all the calibration stuff removed. At the moment, for simplicity, it just captures images from two cameras and displays the raw captured images plus the two images combined in one window.
Can someone please advise me on whether this is the correct approach for performance, and show me how I'd create a new thread that takes the captured images and performs whatever processing I require on them, plus another thread to display the existing (raw or processed) images?
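To make what I'm after a bit more concrete, this is roughly the structure I had in mind: the existing capture thread pushes each captured frame into a small queue, a worker thread pops frames and does the heavy processing, and display just shows whatever the most recent processed frame is. Below is an untested sketch of that idea using plain Win32 primitives and the OpenCV C API; the FrameQueue class, ProcessingThread function and g_running flag are all names I made up, nothing here is from the CL SDK.

// Rough sketch only: a single-slot frame buffer protected by a critical section.
// The capture thread overwrites the slot; the processing thread takes the
// latest frame when one is available (older unprocessed frames get dropped).
class FrameQueue
{
	CRITICAL_SECTION _lock;
	IplImage *_latest;
	bool _hasFrame;
public:
	FrameQueue() : _latest(NULL), _hasFrame(false) { InitializeCriticalSection(&_lock); }
	~FrameQueue() { if(_latest) cvReleaseImage(&_latest); DeleteCriticalSection(&_lock); }
	// Called from the capture thread: store a copy of the new frame,
	// releasing the previous one if it was never consumed.
	void Push(IplImage *frame)
	{
		EnterCriticalSection(&_lock);
		if(_latest) cvReleaseImage(&_latest);
		_latest = cvCloneImage(frame);
		_hasFrame = true;
		LeaveCriticalSection(&_lock);
	}
	// Called from the processing thread: take ownership of the latest frame,
	// or return NULL if nothing new has arrived yet.
	IplImage *Pop()
	{
		IplImage *out = NULL;
		EnterCriticalSection(&_lock);
		if(_hasFrame) { out = _latest; _latest = NULL; _hasFrame = false; }
		LeaveCriticalSection(&_lock);
		return out;
	}
};

static volatile bool g_running = true;

// Sketch of the processing thread I imagine sitting between capture and display;
// it would be started with CreateThread just like the capture thread below.
static DWORD WINAPI ProcessingThread(LPVOID param)
{
	FrameQueue *queue = (FrameQueue *)param;
	cvNamedWindow("Processed", CV_WINDOW_AUTOSIZE);
	while(g_running)
	{
		IplImage *frame = queue->Pop();
		if(!frame) { Sleep(1); continue; }
		// ... the intensive transformations would go here ...
		cvShowImage("Processed", frame);	// or hand off to a separate display thread/queue
		cvReleaseImage(&frame);
		cvWaitKey(1);
	}
	cvDestroyWindow("Processed");
	return 0;
}

The idea would then be that the capture loop in Run() calls something like queue->Push(pCapImage[i]) after each CLEyeCameraGetFrame instead of showing the raw image straight away. Does that look like a sensible split, or is there a better pattern for this? Anyway, here's my current code: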
#include "stdafx.h" // pulls in <windows.h>, CLEyeMulticam.h and the OpenCV headers, as in the original sample
using namespace std;
// Sample camera capture and processing class
class CLEyeStereoCameraCapture
{
CHAR _windowName[256];
CHAR _raw1WindowName[256];
CHAR _raw2WindowName[256];
GUID _cameraGUID[2];
CLEyeCameraInstance _cam[2];
CLEyeCameraColorMode _mode;
CLEyeCameraResolution _resolution;
float _fps;
HANDLE _hThread;
bool _running;
public:
CLEyeStereoCameraCapture(CLEyeCameraResolution resolution, float fps) :
_mode(CLEYE_COLOR_RAW), _resolution(resolution), _fps(fps), _running(false)
{
_cam[0] = _cam[1] = NULL; // so the parameter helpers below can safely test for unopened cameras
strcpy(_windowName, "Capture Window");
strcpy(_raw1WindowName, "Raw Image 1");
strcpy(_raw2WindowName, "Raw Image 2");
for(int i = 0; i < 2; i++)
_cameraGUID[i] = CLEyeGetCameraUUID(i);
}
bool StartCapture()
{
_running = true;
cvNamedWindow(_windowName, CV_WINDOW_AUTOSIZE);
cvNamedWindow(_raw1WindowName, CV_WINDOW_AUTOSIZE);
cvNamedWindow(_raw2WindowName, CV_WINDOW_AUTOSIZE);
// Start CLEye image capture thread
_hThread = CreateThread(NULL, 0, &CLEyeStereoCameraCapture::CaptureThread, this, 0, 0);
if(_hThread == NULL)
{
MessageBox(NULL,"Could not create capture thread","CLEyeMulticamTest", MB_ICONEXCLAMATION);
return false;
}
return true;
}
void StopCapture()
{
if(!_running) return;
_running = false;
WaitForSingleObject(_hThread, 1000);
cvDestroyWindow(_windowName);
cvDestroyWindow(_raw1WindowName);
cvDestroyWindow(_raw2WindowName);
}
void IncrementCameraParameter(int param)
{
for(int i = 0; i < 2; i++)
{
if(!_cam[i]) continue;
CLEyeSetCameraParameter(_cam[i], (CLEyeCameraParameter)param, CLEyeGetCameraParameter(_cam[i], (CLEyeCameraParameter)param)+10);
}
}
void DecrementCameraParameter(int param)
{
for(int i = 0; i < 2; i++)
{
if(!_cam[i]) continue;
CLEyeSetCameraParameter(_cam[i], (CLEyeCameraParameter)param, CLEyeGetCameraParameter(_cam[i], (CLEyeCameraParameter)param)-10);
}
}
void Run()
{
int w, h;
IplImage *pCapImage[2];
IplImage *pDisplayImage;
// Create camera instances
for(int i = 0; i < 2; i++)
{
_cam[i] = CLEyeCreateCamera(_cameraGUID[i], _mode, _resolution, _fps);
if(_cam[i] == NULL) return;
// Get camera frame dimensions
CLEyeCameraGetFrameDimensions(_cam[i], w, h);
// Create the OpenCV images
pCapImage[i] = cvCreateImage(cvSize(w, h), IPL_DEPTH_8U, 4);
// Set some camera parameters
CLEyeSetCameraParameter(_cam[i], CLEYE_AUTO_GAIN, true);
CLEyeSetCameraParameter(_cam[i], CLEYE_AUTO_EXPOSURE, true);
// Start capturing
CLEyeCameraStart(_cam[i]);
}
pDisplayImage = cvCreateImage(cvSize(w*2, h), IPL_DEPTH_8U, 4);
// image capturing loop
while(_running)
{
PBYTE pCapBuffer;
// Capture camera images
for(int i = 0; i < 2; i++)
{
cvGetImageRawData(pCapImage[i], &pCapBuffer);
CLEyeCameraGetFrame(_cam[i], pCapBuffer, (i==0)?2000:0);
}
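// (I'm guessing this is the point where each pCapImage[i] would be handed off
// to a processing thread, e.g. via something like the FrameQueue sketched above,
// rather than being displayed directly below?)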
// Display captured images in one window
for(int i = 0; i < 2; i++)
{
cvSetImageROI(pDisplayImage, cvRect(i*w, 0, w, h));
cvCopy(pCapImage[i], pDisplayImage);
}
cvResetImageROI(pDisplayImage);
cvShowImage(_windowName, pDisplayImage);
cvShowImage(_raw1WindowName, pCapImage[0]);
cvShowImage(_raw2WindowName, pCapImage[1]);
}
for(int i = 0; i < 2; i++)
{
// Stop camera capture
CLEyeCameraStop(_cam[i]);
// Destroy camera object
CLEyeDestroyCamera(_cam[i]);
// Destroy the allocated OpenCV image
cvReleaseImage(&pCapImage[i]);
_cam[i] = NULL;
}
}
static DWORD WINAPI CaptureThread(LPVOID instance)
{
// seed the RNG with current tick count and thread id
srand(GetTickCount() + GetCurrentThreadId());
// forward thread to Capture function
CLEyeStereoCameraCapture *pThis = (CLEyeStereoCameraCapture *)instance;
pThis->Run();
return 0;
}
};
int _tmain(int argc, _TCHAR* argv[])
{
CLEyeStereoCameraCapture *cam = NULL;
// Query for number of connected cameras
int numCams = CLEyeGetCameraCount();
printf("%d cameras found\n",numCams);
if(numCams < 1)
{
printf("No PS3Eye cameras detected\n");
return -1;
}
// Create camera capture object
cam = new CLEyeStereoCameraCapture(CLEYE_QVGA, 30);
printf("Starting capture\n");
cam->StartCapture();
printf("Use the following keys to change camera parameters:\n"
"\t'g' - select gain parameter\n"
"\t'e' - select exposure parameter\n"
"\t'+' - increment selected parameter\n"
"\t'-' - decrement selected parameter\n"
"\t'c' - start/end chessboard camera calibration\n");
// The <ESC> key will exit the program
int param = -1, key;
while((key = cvWaitKey(0)) != 0x1b)
{
switch(key)
{
case 'g': case 'G': printf("Parameter Gain\n"); param = CLEYE_GAIN; break;
case 'e': case 'E': printf("Parameter Exposure\n"); param = CLEYE_EXPOSURE; break;
case '+': if(cam) cam->IncrementCameraParameter(param); break;
case '-': if(cam) cam->DecrementCameraParameter(param); break;
}
}
cam->StopCapture();
delete cam;
return 0;
}
Thanks for your help,
Regards,
Alan