Creating new threads to handle additional image processing & image displaying
Posted: 23 February 2010 02:25 PM
Jr. Member
Total Posts:  40
Joined  2009-12-28

Hi Guys,

This might be a simple task for those in the know grin, but I'm not sure how to do it correctly.

While capturing from multiple cameras, I want to perform some extra (intensive) image transformations and processing on all captured images and display the resulting images. Am I correct in saying that, based on the C++ CLEyeStereoVision sample code, each camera that is initialized is handled on a separate thread?

Am I correct in saying that, for maximum performance, each camera capture should be on its own thread, image processing on a separate thread, and image display on yet another?

Image capture would be the priority, since it is important to capture at the selected frame rate, whereas image display matters less and need not show every frame.

The code sample I'm using is based on the C++ example CLEyeStereoVision, with all the calibration code removed. At the moment, for simplicity, it just captures images from 2 cameras and displays the raw captured images as well as the 2 images combined in one window.

Can someone please advise me on whether this is the correct approach for performance, and show me how I'd create a new thread that takes the captured images and performs whatever processing I require on them, and another thread to display the existing (raw or processed) images?

#include "stdafx.h"
using namespace std;

// Sample camera capture and processing class
class CLEyeStereoCameraCapture
{
    CHAR _windowName[256]
;
    
CHAR _raw1WindowName[256];
    
CHAR _raw2WindowName[256];
    
GUID _cameraGUID[2];
    
CLEyeCameraInstance _cam[2];
    
CLEyeCameraColorMode _mode;
    
CLEyeCameraResolution _resolution;
    
float _fps;
    
HANDLE _hThread;
    
bool _running;
public:
    
CLEyeStereoCameraCapture(CLEyeCameraResolution resolutionfloat fps) :
    
_mode(CLEYE_COLOR_RAW), _resolution(resolution), _fps(fps), _running(false)
    
{
        strcpy
(_windowName"Capture Window");
        
strcpy(_raw1WindowName"Raw Image 1");
        
strcpy(_raw2WindowName"Raw Image 2");
        for(
int i 02i++)
            
_cameraGUID[i] CLEyeGetCameraUUID(i);
    
}
    bool StartCapture
()
    
{
        _running 
true;
        
cvNamedWindow(_windowNameCV_WINDOW_AUTOSIZE);
        
cvNamedWindow(_raw1WindowNameCV_WINDOW_AUTOSIZE);
        
cvNamedWindow(_raw2WindowNameCV_WINDOW_AUTOSIZE);

        
// Start CLEye image capture thread
        
_hThread CreateThread(NULL0, &CLEyeStereoCameraCapture;::CaptureThreadthis00);
        if(
_hThread == NULL)
        
{
            MessageBox
(NULL,"Could not create capture thread","CLEyeMulticamTest"MB_ICONEXCLAMATION);
            return 
false;
        
}
        
return true;
    
}
    void StopCapture
()
    
{
        
if(!_running)    return;
        
_running false;
        
WaitForSingleObject(_hThread1000);
        
cvDestroyWindow(_windowName);
        
cvDestroyWindow(_raw1WindowName);
        
cvDestroyWindow(_raw2WindowName);
    
}
    void IncrementCameraParameter
(int param)
    
{
        
for(int i 02i++)
        
{
            
if(!_cam[i])    continue;
            
CLEyeSetCameraParameter(_cam[i], (CLEyeCameraParameter)paramCLEyeGetCameraParameter(_cam[i], (CLEyeCameraParameter)param)+10);
        
}
    }
    void DecrementCameraParameter
(int param)
    
{
        
for(int i 02i++)
        
{
            
if(!_cam[i])    continue;
            
CLEyeSetCameraParameter(_cam[i], (CLEyeCameraParameter)paramCLEyeGetCameraParameter(_cam[i], (CLEyeCameraParameter)param)-10);
        
}
    }
    void Run
()
    
{
        int w
h;
        
IplImage *pCapImage[2];
        
IplImage *pDisplayImage;

        
// Create camera instances
        
for(int i 02i++)
        
{
            _cam[i] 
CLEyeCreateCamera(_cameraGUID[i]_mode_resolution_fps);
            if(
_cam[i] == NULL)    return;
            
// Get camera frame dimensions
            
CLEyeCameraGetFrameDimensions(_cam[i]wh);
            
// Create the OpenCV images
            
pCapImage[i] cvCreateImage(cvSize(wh), IPL_DEPTH_8U4);

            
// Set some camera parameters
            
CLEyeSetCameraParameter(_cam[i]CLEYE_AUTO_GAINtrue);
            
CLEyeSetCameraParameter(_cam[i]CLEYE_AUTO_EXPOSUREtrue);

            
// Start capturing
            
CLEyeCameraStart(_cam[i]);
        
}
        pDisplayImage 
cvCreateImage(cvSize(w*2h), IPL_DEPTH_8U4);

        
// image capturing loop
        
while(_running)
        
{
            PBYTE pCapBuffer
;
            
// Capture camera images
            
for(int i 02i++)
            
{
                cvGetImageRawData
(pCapImage[i], &pCapBuffer;);
                
CLEyeCameraGetFrame(_cam[i]pCapBuffer, (i==0)?2000:0);
            
}
            
            
// Display captured images in one window
                
for(int i 02i++)
                
{                    
                    cvSetImageROI
(pDisplayImagecvRect(i*w0wh));
                    
cvCopy(pCapImage[i]pDisplayImage);
                
}
                cvResetImageROI
(pDisplayImage);
        
            
cvShowImage(_windowNamepDisplayImage);
            
cvShowImage(_raw1WindowNamepCapImage[0]);
            
cvShowImage(_raw2WindowNamepCapImage[1]);
        
}

        
for(int i 02i++)
        
{
            
// Stop camera capture
            
CLEyeCameraStop(_cam[i]);
            
// Destroy camera object
            
CLEyeDestroyCamera(_cam[i]);
            
// Destroy the allocated OpenCV image
            
cvReleaseImage(&pCapImage;[i]);
            
_cam[i] NULL;
        
}
    }
    
static DWORD WINAPI CaptureThread(LPVOID instance)
    
{
        
// seed the RNG with current tick count and thread id
        
srand(GetTickCount() + GetCurrentThreadId());
        
// forward thread to Capture function
        
CLEyeStereoCameraCapture *pThis = (CLEyeStereoCameraCapture *)instance;
        
pThis->Run();
        return 
0;
    
}
}
;

int _tmain(int argc_TCHARargv[])
{
    CLEyeStereoCameraCapture 
*cam NULL;
    
// Query for number of connected cameras
    
int numCams CLEyeGetCameraCount();
    
printf("%d cameras found\n",numCams);
    if(
numCams 1)
    
{
        printf
("No PS3Eye cameras detected\n");
        return -
1;
    
}
    
// Create camera capture object
    
cam = new CLEyeStereoCameraCapture(CLEYE_QVGA30);
    
printf("Starting capture\n");
    
cam->StartCapture();

    
printf("Use the following keys to change camera parameters:\n"
        "\t'g' - select gain parameter\n"
        "\t'e' - select exposure parameter\n"
        "\t'+' - increment selected parameter\n"
        "\t'-' - decrement selected parameter\n"
        "\t'c' - start/end chessboard camera calibration\n"
);
    
// The <ESC> key will exit the program
    
CLEyeStereoCameraCapture *pCam NULL;
    
int param = -1key;
    while((
key cvWaitKey(0)) != 0x1b)
    
{
        
switch(key)
        
{
            
case 'g':    case 'G':    printf("Parameter Gain\n");        param CLEYE_GAIN;        break;
            case 
'e':    case 'E':    printf("Parameter Exposure\n");    param CLEYE_EXPOSURE;    break;
            case 
'+':    if(cam)        cam->IncrementCameraParameter(param);                    break;
            case 
'-':    if(cam)        cam->DecrementCameraParameter(param);                    break;
        
}
    }
    cam
->StopCapture();
    
delete cam;
    return 
0;

 


Thanks for your help,
Regards,
Alan

 Signature 

“Strive for perfection in everything you do. Take the best that exists and make it better. When it doesn’t exist, design it, build it and Open Source it!” http://www.alangunning.com

Posted: 26 February 2010 11:54 AM   [ # 1 ]
Administrator
Total Posts:  585
Joined  2009-09-17

Alan,

For the multithreaded capture part I would do something like this:

#include "stdafx.h"

using namespace std;

// Sample camera capture and processing class
class CLEyeCameraCapture
{
    CHAR _windowName[256]
;
    
int _idx;
    
GUID _cameraGUID;
    
CLEyeCameraInstance _cam;
    
CLEyeCameraColorMode _mode;
    
CLEyeCameraResolution _resolution;
    
IplImage *_pCapImage;
    
HANDLE _hThread;
    
bool _running;
public:
    
CLEyeCameraCapture(int idxCLEyeCameraResolution resolutionfloat fps) :
    
_idx(idx), _mode(CLEYE_COLOR_RAW), _resolution(resolution), _fps(fps), _running(false)
    
{
        strcpy
(_windowName"Capture Window");
        
_cameraGUID CLEyeGetCameraUUID(_idx);
    
}
    bool StartCapture
()
    
{
        _running 
true;
        
cvNamedWindow(_windowNameCV_WINDOW_AUTOSIZE);

        
// Start CLEye image capture thread
        
_hThread CreateThread(NULL0, &CLEyeCameraCapture;::CaptureThreadthis00);
        if(
_hThread == NULL)
        
{
            MessageBox
(NULL,"Could not create capture thread","CLEyeCameraCapture"MB_ICONEXCLAMATION);
            return 
false;
        
}
        
return true;
    
}
    void StopCapture
()
    
{
        
if(!_running)    return;
        
_running false;
        
WaitForSingleObject(_hThread1000);
        
cvDestroyWindow(_windowName);
    
}
    void Run
()
    
{
        int w
h;
        
// Create camera instance
        
_cam CLEyeCreateCamera(_cameraGUID_mode_resolution_fps);
        if(
_cam == NULL)    return;
        
// Get camera frame dimensions
        
CLEyeCameraGetFrameDimensions(_camwh);
        
// Create the OpenCV image
        
_pCapImage cvCreateImage(cvSize(wh), IPL_DEPTH_8U4);

        
// Set some camera parameters
        
CLEyeSetCameraParameter(_camCLEYE_AUTO_GAINtrue);
        
CLEyeSetCameraParameter(_camCLEYE_AUTO_EXPOSUREtrue);

        
// Start capturing
        
CLEyeCameraStart(_cam);

        
// image capturing loop
        
while(_running)
        
{
            PBYTE pCapBuffer
;
            
// Capture camera image
            
cvGetImageRawData(_pCapImage, &pCapBuffer;);
            
CLEyeCameraGetFrame(_campCapBuffer);
            
            
//
            // Process the image here
            //
            
            // Display captured image in a window
            
cvShowImage(_windowNamepDisplayImage);
        
}

        
// Stop camera capture
        
CLEyeCameraStop(_cam);
        
// Destroy camera object
        
CLEyeDestroyCamera(_cam);
        
// Destroy the allocated OpenCV image
        
cvReleaseImage(&_pCapImage);
        
_cam NULL;
    
}
    
static DWORD WINAPI CaptureThread(LPVOID instance)
    
{
        
// seed the RNG with current tick count and thread id
        
srand(GetTickCount() + GetCurrentThreadId());
        
// forward thread to Capture function
        
CLEyeCameraCapture *pThis = (CLEyeCameraCapture *)instance;
        
pThis->Run();
        return 
0;
    
}
}
;

int _tmain(int argc_TCHARargv[])
{
    CLEyeCameraCapture 
*cam[2]{ NULL };
    
// Query for number of connected cameras
    
int numCams CLEyeGetCameraCount();
    
printf("%d cameras found\n"numCams);
    if(
numCams 2)
    
{
        printf
("ERROR: Need two PS3Eye cameras to run\n");
        return -
1;
    
}
    
// Create camera capture objects
    
cam[0] = new CLEyeStereoCameraCapture(0CLEYE_QVGA30);
    
cam[1] = new CLEyeStereoCameraCapture(1CLEYE_QVGA30);
    
printf("Starting capture\n");
    
cam[0]->StartCapture();
    
cam[1]->StartCapture();

    
// The <ESC> key will exit the program
    
while((key cvWaitKey(0)) != 0x1b);

    
// Stop capture
    
cam[0]->StopCapture();
    
cam[1]->StopCapture();
    
delete cam[0];
    
delete cam[1];
    return 
0;

You would insert your processing code where the “Process the image here” comment is.
Once you have the processed images, there are a few ways of combining them into a larger image:

1. Create a third thread that waits for both images to be ready, retrieves them, and combines them into a larger image.

2. Have each thread get access to the larger image and copy its processed image into it (using ROI). Of course, in this case you would need a critical section to guarantee that only one thread writes data to the larger image at a time.

3. Yet another way is to use memcpy and copy raw image data to the larger image yourself. In this case you would not need a critical section (but the code will be more complicated since you would have to copy and skip data).

Once each thread finishes copying its data, you need some mechanism to signal that the large image is ready. One way of doing this would be to set up a global “copy reference count” and decrement it after each copy. The initial value would be 2. After each thread copies its image, it decrements the ref count. When the ref count hits 0, the last thread signals an event so that whoever is waiting on this image wakes up and processes it further. As soon as this happens, the ref count is set back to 2, and so on. Note that this decrement, set, and check need to be atomic operations. A rough sketch of this mechanism follows below.
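
Roughly, that ref-count approach could look like this (just a sketch to illustrate the idea; g_copyRefCount, g_hCombinedReady, g_largeImageLock and PublishImage are placeholder names, not part of the SDK):

// Shared state (sketch only)
volatile LONG g_copyRefCount = 2;        // how many capture threads still have to copy this frame
HANDLE g_hCombinedReady;                 // auto-reset event, created once with CreateEvent(NULL, FALSE, FALSE, NULL)
CRITICAL_SECTION g_largeImageLock;       // protects the large image while a thread copies into it

// Called by each capture thread (idx = 0 or 1) once its image has been processed
void PublishImage(IplImage *pProcessedImage, IplImage *pLargeImage, int idx, int w, int h)
{
    EnterCriticalSection(&g_largeImageLock);
    cvSetImageROI(pLargeImage, cvRect(idx*w, 0, w, h));
    cvCopy(pProcessedImage, pLargeImage);
    cvResetImageROI(pLargeImage);
    LeaveCriticalSection(&g_largeImageLock);

    // The last thread to finish resets the count and wakes up the consumer
    if(InterlockedDecrement(&g_copyRefCount) == 0)
    {
        InterlockedExchange(&g_copyRefCount, 2);
        SetEvent(g_hCombinedReady);
    }
}

// The thread that consumes the combined image simply waits:
//     WaitForSingleObject(g_hCombinedReady, INFINITE);
//     ... display or further process pLargeImage ...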

Hopefully this was clear and will help you get your project up and running.

AlexP

Posted: 27 February 2010 11:38 AM   [ # 2 ]
Jr. Member
Total Posts:  40
Joined  2009-12-28

Hi Alex,

Thank you so much for your reply. I have taken your sample code and modified it a little to get it working.
So as it stands now, with your help, I am successfully creating four threads which capture frames from four cameras and store each camera's captured frame in _pCapImage[0], _pCapImage[1], _pCapImage[2], _pCapImage[3], which is now hopefully globally accessible.

I now want to access each of these camera images and stitch them together.

1. Create a third thread that waits for both images to be ready, retrieves them, and combines them into a larger image.

2. Have each thread get access to the larger image and copy its processed image into it (using ROI). Of course, in this case you would need a critical section to guarantee that only one thread writes data to the larger image at a time.

I know how to take separate images, process them, and stitch them together, but I am not able to do it when using multiple threads.
Up to now I have been trying approach 1 as you mentioned, but I can't quite figure out how to create a new thread that waits for the 4 capture threads to write new frames, then wakes up and runs a function that stitches them, and then releases so that the 4 capture threads can run again and the process repeats.

With regard to my project, I have already written all of the processing and stitching I need for the 4 cameras' captured images, but it runs too slowly because it is not using multiple threads.

Attached is the basic working multithreaded code so far. Would you please be able to show me how I can create another thread that waits until 4 new camera frames have been captured, then runs a function I can use to access the 4 frames and stitch them together, and once finished lets the capturing threads continue?

I would be grateful for any help any of you here at the forum can give :-)

Regards
Alan

#include "stdafx.h"

using namespace std;


IplImage *_pCapImage[4]



// Sample camera capture and processing class
class CLEyeCameraCapture
{
    CHAR _windowName[256]
;
    
int _idx// Index of GUID to obtain
    
GUID _cameraGUID;
    
CLEyeCameraInstance _cam;
    
CLEyeCameraColorMode _mode;
    
CLEyeCameraResolution _resolution;
    
float _fps
    
int _imageidx// index of _pCapImage
    
HANDLE _hThread;
    
bool _running;
public:
    
CLEyeCameraCapture(LPSTR windowNameint idxCLEyeCameraResolution resolutionfloat fpsint imageidx) :
      
_idx(idx), _mode(CLEYE_COLOR_RAW), _resolution(resolution), _fps(fps), _imageidx(imageidx), _running(false)
      
{
          strcpy
(_windowNamewindowName);
          
_cameraGUID CLEyeGetCameraUUID(_idx); // Get current camera guid - change to obtain it form array!!!
      
}
      bool StartCapture
()
      
{
          _running 
true;
          
cvNamedWindow(_windowNameCV_WINDOW_AUTOSIZE);

          
// Start CLEye image capture thread
          
_hThread CreateThread(NULL0, &CLEyeCameraCapture;::CaptureThreadthis00);
          if(
_hThread == NULL)
          
{
              MessageBox
(NULL,"Could not create capture thread","CLEyeCameraCapture"MB_ICONEXCLAMATION);
              return 
false;
          
}
          printf
("\nThread %d created",_imageidx); //DEBUG ONLY
          
return true;
      
}
      void StopCapture
()
      
{
          
if(!_running)    return;
          
_running false;
          
WaitForSingleObject(_hThread1000);

          
cvDestroyWindow(_windowName);
      
}
      void Run
()
      
{
          printf
("\ninside run %d",_imageidx); //DEBUG ONLY
          
int wh;
          
// Create camera instance
          
_cam CLEyeCreateCamera(_cameraGUID_mode_resolution_fps);
          if(
_cam == NULL)    return;
          
// Get camera frame dimensions
          
CLEyeCameraGetFrameDimensions(_camwh);
          
// Create the OpenCV image
          
_pCapImage[_imageidx] cvCreateImage(cvSize(wh), IPL_DEPTH_8U4);
          
          
printf("\n_pcamImage[%d] created",_imageidx); //DEBUG ONLY
          // Set some camera parameters
          
CLEyeSetCameraParameter(_camCLEYE_AUTO_GAINtrue);
          
CLEyeSetCameraParameter(_camCLEYE_AUTO_EXPOSUREtrue);

          
// Start capturing
          
CLEyeCameraStart(_cam);

          
// image capturing loop
          
while(_running)
          
{
              PBYTE pCapBuffer
;
              
// Capture camera image
              
cvGetImageRawData(_pCapImage[_imageidx], &pCapBuffer;);
              
CLEyeCameraGetFrame(_campCapBuffer);

              
printf("\n_pCapImage[%d] frame captured",_imageidx); //DEBUG ONLY
              //
              // Process the image here
              
cvShowImage(_windowName_pCapImage[_imageidx]);
              
}

          
// Stop camera capture
          
CLEyeCameraStop(_cam);
          
// Destroy camera object
          
CLEyeDestroyCamera(_cam);
          
// Destroy the allocated OpenCV image
          //cvReleaseImage(&_pCapImage);
          
cvReleaseImage(&_pCapImage[_imageidx]);
          
_cam NULL;
      
}
      
static DWORD WINAPI CaptureThread(LPVOID instance)
      
{
          
// seed the RNG with current tick count and thread id
          
srand(GetTickCount() + GetCurrentThreadId());
          
// forward thread to Capture function
          
CLEyeCameraCapture *pThis = (CLEyeCameraCapture *)instance;
          
pThis->Run();
          return 
0;
      
}
}
;

int _tmain(int argc_TCHARargv[])
{
    CLEyeCameraCapture 
*cam[4]{ NULL };
    
// Query for number of connected cameras
    
int numCams CLEyeGetCameraCount();

    
printf("%d cameras found\n"numCams);

    
//    char windowName[64];

    
if(numCams 4)
    
{
        printf
("ERROR: Need four PS3Eye cameras to run\n");
        return -
1;
    
}
    
// Create camera capture objects
    
cam[0] = new CLEyeCameraCapture("Raw Camera 1"0CLEYE_VGA300);
    
cam[1] = new CLEyeCameraCapture("Raw Camera 2"1CLEYE_VGA301);
    
cam[2] = new CLEyeCameraCapture("Raw Camera 3"2CLEYE_VGA302);
    
cam[3] = new CLEyeCameraCapture("Raw Camera 4"3CLEYE_VGA303);
    
printf("Starting capture\n");
    
cam[0]->StartCapture();
    
cam[1]->StartCapture();
    
cam[2]->StartCapture();
    
cam[3]->StartCapture();
    
    
    
int key;
    
// The <ESC> key will exit the program
    
while((key cvWaitKey(0)) != 0x1b);
    

    
// Stop capture
    
cam[0]->StopCapture();
    
cam[1]->StopCapture();
    
cam[2]->StopCapture();
    
cam[3]->StopCapture();
    
delete cam[0];
    
delete cam[1];
    
delete cam[2];
    
delete cam[3];
    return 
0;
 Signature 

“Strive for perfection in everything you do. Take the best that exists and make it better. When it doesn’t exist, design it, build it and Open Source it!” http://www.alangunning.com

Posted: 27 February 2010 03:52 PM   [ # 3 ]
Administrator
Total Posts:  585
Joined  2009-09-17

Alan,

Instead of the global _pCapImage variable, I would prefer to make it a member variable of the thread class (this is a better solution, especially if you want to scale this to any number of cameras). I would also add a function to the CLEyeCameraCapture class to retrieve the processed image.
Something like this:

IplImage *GetImage()
{
    return _pCapImage;
}

Anyway, going with your approach is fine for now.

Now for the synchronization part. Let's look at the event-based solution.
In your CLEyeCameraCapture thread, whenever the image has been captured and processed and is ready for stitching, do this:

// In the class constructor create an event
// This event will be used to signal that our image is ready for stitching
hEvents[_imageidx] = CreateEvent(NULL, FALSE, FALSE, NULL);
// In the processing loop after the image has been fully processed
SetEvent(hEvents[_imageidx]); 

We need a way to access these events, so initially you may go with your approach and define them globally.
Like this:

HANDLE hEvents[4];

Now create a new class that will contain the thread that does the stitching. Inside its loop we will wait for all of the events, and only when we get them all will we continue and stitch the images.

// In the while loop inside the Run() function write the following
// This will wait for all of the events with timeout of 2000ms
if(WaitForMultipleObjects(4, hEvents, TRUE, 2000) == WAIT_TIMEOUT)
{
   
// we timed out, so something went wrong
   // exit the thread here
}
// We got all images. Stitch images _pCapImage[0..3] together here 

That’s all there is to it.
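
If it helps, a skeleton of such a stitching thread could look something like this (a sketch only; the class name CStitcher is a placeholder, and hEvents / _pCapImage are the globals from the snippets above):

// Skeleton of a stitching thread (adapt the names to your own code)
class CStitcher
{
    HANDLE _hThread;
    bool _running;
public:
    CStitcher() : _hThread(NULL), _running(false) {}
    bool Start()
    {
        _running = true;
        _hThread = CreateThread(NULL, 0, &CStitcher::StitchThread, this, 0, 0);
        return _hThread != NULL;
    }
    void Stop()
    {
        _running = false;
        WaitForSingleObject(_hThread, 2000);
    }
    void Run()
    {
        while(_running)
        {
            // Wait until all four capture threads have signaled a fresh frame
            if(WaitForMultipleObjects(4, hEvents, TRUE, 2000) == WAIT_TIMEOUT)
                break;    // we timed out, something went wrong - exit the thread
            // All four _pCapImage[0..3] are fresh here:
            // stitch them (e.g. cvSetImageROI()/cvCopy() into one big image) and display the result
        }
    }
    static DWORD WINAPI StitchThread(LPVOID instance)
    {
        ((CStitcher *)instance)->Run();
        return 0;
    }
};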

AlexP

Posted: 28 February 2010 03:01 PM   [ # 4 ]
Jr. Member
Total Posts:  40
Joined  2009-12-28

Hi Alex,

Thanks to your advice I am now able to:

Capture from 4 cameras on 4 separate threads at 640x480 @ 30 fps, and stitch the frames together using a 5th stitching thread.

Each time a camera grabs a new frame, its thread sets an event.
The stitching thread waits for all 4 events (i.e. for 4 new frames to be ready), then stitches the 4 images together and displays the resulting image on screen.

Thank you so much for your help!! grin

Regards,
Alan

 Signature 

“Strive for perfection in everything you do. Take the best that exists and make it better. When it doesn’t exist, design it, build it and Open Source it!” http://www.alangunning.com

Posted: 20 September 2010 04:45 PM   [ # 5 ]
Member
Total Posts:  76
Joined  2010-08-03

I've been reading through this thread over the past few days and have a few questions about the implementation of parts of the code. I'm looking to do something similar, using 8 cameras.

The cameras themselves will be mounted to cover a 360-degree field of view; however, each camera will have a large overlap with the cameras on either side of it. I have currently been using the code from the Multicam SDK example (with great success), but am now unable to take this further.

My understanding of threaded applications is largely theoretical, so I am wondering whether the Multicam SDK example can be adapted to the same result, or if it is best to start from the beginning.

KShaaban

Posted: 26 September 2010 02:30 AM   [ # 6 ]
Member
Total Posts:  76
Joined  2010-08-03

So I have decided to ditch the multicam code and use the stereo code. I’ve managed to get the code in this thread working on my side. :D

AlexP, looking at the amendments you have suggested, I understand the theory and justification; however, I am unsure as to where they should be written. I am going to hard-code the camera instances based on GUID, as the installation will be fixed.

// In the class constructor create an event
// This event will be used to signal that our image is ready for stitching
hEvents[_imageidx] = CreateEvent(NULL, FALSE, FALSE, NULL);
// In the processing loop after the image has been fully processed
SetEvent(hEvents[_imageidx]);  

Does the above code go inside the while loop, or inside the StartCapture function where the thread is initialised?

// In the while loop inside the Run() function write the following
// This will wait for all of the events with timeout of 2000ms
if(WaitForMultipleObjects(4, hEvents, TRUE, 2000) == WAIT_TIMEOUT)
{
   
// we timed out, so something went wrong
   // exit the thread here
}
// We got all images. Stitch images _pCapImage[0..3] together here 

I understand that I must create a new thread which gets the images and stitches them together.
The code above would wait until the images are ready, and then I would use the new thread to stitch the images together.

Is there any documentation, or are there any links, on threads in C++? I keep finding information on the Boost thread library, and I'm not really sure how to create threads or how the threads operate in this code.
I found this:
http://msdn.microsoft.com/en-us/library/ms684847(v=VS.85).aspx

Sorry for my newbie approach to all of this. :D

Posted: 18 October 2010 04:56 PM   [ # 7 ]
Administrator
Total Posts:  585
Joined  2009-09-17

kshaaban,

Sorry for my convoluted answer. The idea is simple (here it is in plain English):

- Use 4 separate threads, one for each of the 4 cams;
- Each camera thread grabs an image into a buffer and signals via an event that it is done;
- The 5th thread waits for all the camera threads' events to be signaled (done) and then stitches the 4 images together, all while the 4 camera threads continue to capture images (see the sketch below for where each piece goes).
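
To make the placement concrete, here is a rough sketch (not complete code) of where each piece from the earlier snippets goes:

// In the CLEyeCameraCapture constructor: create this camera's "frame ready" event once
//     hEvents[_imageidx] = CreateEvent(NULL, FALSE, FALSE, NULL);
//
// In CLEyeCameraCapture::Run(), inside the while(_running) loop, right after the frame
// has been captured and processed into _pCapImage[_imageidx]:
//     SetEvent(hEvents[_imageidx]);
//
// In the 5th (stitching) thread's own loop:
//     if(WaitForMultipleObjects(4, hEvents, TRUE, 2000) == WAIT_TIMEOUT)
//         break;   // something went wrong
//     // stitch _pCapImage[0..3] into one image and display it, then loop again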

AlexP
