Skip to main content

Posts

Showing posts from December, 2010

Affine vs. Perspective Transformation Using a Camera

I have used OpenCV's AFFINE and PERSPECTIVE transform to WARP the images. this same process can be done using HARRIS and RANSAC #include <opencv2/video/tracking.hpp> #include <opencv2/highgui/highgui.hpp> #include <stdio.h> #include <opencv2/objdetect/objdetect.hpp> #include <opencv2/features2d/features2d.hpp> #include <opencv2/highgui/highgui.hpp> #include <opencv2/calib3d/calib3d.hpp> #include <opencv2/imgproc/imgproc_c.h> #include <opencv2/video/tracking.hpp> #include <iostream> #include <vector>  void PrintMatrix ( CvMat *Matrix , char * name )  {  printf ("%s\n", name );     for ( int i=0;i<Matrix -> rows ;i++)      {          for ( int j =0;j< Matrix -> cols ;j ++)          {             printf (" %.3f\t",cvGet2D ( Matrix ,i,j).val [0]) ;          }      printf ("\n");      }  printf ("\n");  } // define whether to use approximate n

Log-Polar Transform Using a Camera

The logpolar transform function emulates the human “foveal” vision and can be used for fast scale and rotation-invariant template matching , for object tracking and so forth. The function can not operate in-place. "The foveal system of the human eye is the only part of the retina that permits 100% visual acuity. The line-of-sight is a virtual line connecting the fovea with a fixation point in the outside world." Wikipedia; Code: #include <opencv2/video/tracking.hpp> #include <opencv2/highgui/highgui.hpp> #include <stdio.h> #include <opencv2/objdetect/objdetect.hpp> #include <opencv2/features2d/features2d.hpp> #include <opencv2/highgui/highgui.hpp> #include <opencv2/calib3d/calib3d.hpp> #include <opencv2/imgproc/imgproc_c.h> #include <opencv2/video/tracking.hpp> #include <iostream> #include <vector> int main(int argc, char** argv) { CvCapture *capture = 0; IplImage *src = 0; /* initialize camera

Camera Affine Transformation

An affine transform allows the user to warp, stretch, rotate and resize an image or a footage from a camera. Essentially the image is multiplied by 2x3 matrix to perform the transformation. An affine transform produces parallelograms (which includes standard rectangles). #include <opencv2/video/tracking.hpp> #include <opencv2/highgui/highgui.hpp> #include <stdio.h> #include <opencv2/objdetect/objdetect.hpp> #include <opencv2/features2d/features2d.hpp> #include <opencv2/highgui/highgui.hpp> #include <opencv2/calib3d/calib3d.hpp> #include <opencv2/imgproc/imgproc_c.h> #include <opencv2/video/tracking.hpp> #include <iostream> #include <vector> int angle_switch_value = 0; int angleInt = 0; int scale_switch_value = 0; int scaleInt = 0; void switch_callback_a( int position ){ angleInt = position; } void switch_callback_s( int position ){ scaleInt = position; } int main(int argc, char** argv) { // Set up variab

SURF, Live Camera Point Correspondence

#include <opencv2/video/tracking.hpp> #include <opencv2/highgui/highgui.hpp> #include <stdio.h> #include <opencv2/objdetect/objdetect.hpp> #include <opencv2/features2d/features2d.hpp> #include <opencv2/highgui/highgui.hpp> #include <opencv2/calib3d/calib3d.hpp> #include <opencv2/imgproc/imgproc_c.h> #include <opencv2/video/tracking.hpp> #include <iostream> #include <vector> // define whether to use approximate nearest-neighbor search #define USE_FLANN IplImage *image = 0; double compareSURFDescriptors( const float* d1, const float* d2, double best, int length ) { double total_cost = 0; assert( length % 4 == 0 ); for( int i = 0; i < length; i += 4 ) { double t0 = d1[i] - d2[i]; double t1 = d1[i+1] - d2[i+1]; double t2 = d1[i+2] - d2[i+2]; double t3 = d1[i+3] - d2[i+3]; total_cost += t0*t0 + t1*t1 + t2*t2 + t3*t3; if( total_cost > best ) break; } return total_cost; } int naiveNearestNe

SURF keypoints using a camera

#include <stdlib.h> #include <stdio.h> #include <math.h> #include <string.h> #include <opencv2/objdetect/objdetect.hpp> #include <opencv2/features2d/features2d.hpp> #include <opencv2/highgui/highgui.hpp> #include <opencv2/calib3d/calib3d.hpp> #include <opencv2/imgproc/imgproc_c.h> #include <opencv2/video/tracking.hpp> #include <iostream> #include <vector> using namespace std; int main(int argc, char** argv) { CvMemStorage* storage = cvCreateMemStorage(0); cvNamedWindow("Object", 1); int key = 0; static CvScalar colors[] = { {{0,0,255}}, {{0,128,255}}, {{0,255,255}}, {{0,255,0}}, {{255,128,0}}, {{255,255,0}}, {{255,0,0}}, {{255,0,255}}, {{255,255,255}} }; CvCapture* capture = cvCreateCameraCapture(0); CvMat* prevgray = 0, *image = 0, *gray =0; while( key != 'q' ) { int firstFrame = gray == 0; IplImage* frame = cvQueryFrame(captur

SURF using Image and Camera

Im reusing the SURF implementation in the samples, what ive added is the camera, so im matching an image.jpg to a frame captured from the camera #include <stdlib.h> #include <stdio.h> #include <math.h> #include <string.h> #include <opencv2/objdetect/objdetect.hpp> #include <opencv2/features2d/features2d.hpp> #include <opencv2/highgui/highgui.hpp> #include <opencv2/calib3d/calib3d.hpp> #include <opencv2/imgproc/imgproc_c.h> #include <opencv2/video/tracking.hpp> #include <iostream> #include <vector> using namespace std; void help() { printf( "This program demonstrated the use of the SURF Detector and Descriptor using\n" "either FLANN (fast approx nearst neighbor classification) or brute force matching\n" "on planar objects.\n" "Call:\n" "./find_obj

Viola and Jones Face identification

Check your Opencv2.2/data/haarcascades folder for a list of XML files you can use to identify human faces, bodies, nose, mouth, eyes and so on... just replace the xml file in the code with your desired data. NOTE: when you run this file in VisualStudio 2010, it will output an error. make sure you run the EXE file from the command prompt and place that XML file in the same directory as the EXE #include <stdlib.h> #include <stdio.h> #include <math.h> #include <string.h> #include<opencv2\opencv.hpp> #include <opencv2\highgui\highgui.hpp> CvHaarClassifierCascade *cascade; CvMemStorage *storage; void detectFaces( IplImage *img ); int main( int argc, char** argv ) { CvCapture *capture; IplImage *frame; int key =0; cascade = ( CvHaarClassifierCascade* )cvLoad("haarcascade_mcs_nose.xml", 0, 0, 0 ); storage = cvCreateMemStorage( 0 ); capture = cvCaptureFromCAM( 0 ); assert( cascade &am

Template Matching using OpenCV internal function

For this example we need to add the following to the linker dependencies: opencv_core220d.lib opencv_highgui220d.lib opencv_imgproc220d.lib Code: #include <stdlib.h> #include <stdio.h> #include <math.h> #include <string.h> #include<opencv2\opencv.hpp> #include <opencv2\highgui\highgui.hpp> int main(int argc, char *argv[]) { IplImage *img; IplImage *tpl; IplImage *res; CvPoint minloc, maxloc; double minval, maxval; int img_width, img_height; int tpl_width, tpl_height; int res_width, res_height; /* check for arguments */ if( argc < 3 ) { printf( "Usage: template_match <reference> <template>\n" ); return 1; } /* load reference image */ img = cvLoadImage( argv[1], CV_LOAD_IMAGE_COLOR ); /* always check */ if( img == 0 ) { printf( "Cannot load file %s!\n", argv[1] ); return 1; } /* load template image */ tpl = cvLoa

Manual ROI selection using mouse

#include <stdlib.h> #include <stdio.h> #include <math.h> #include <string.h> #include<opencv2\opencv.hpp> #include <opencv2\highgui\highgui.hpp> IplImage* frame, * img1; CvPoint point; int drag = 0; CvCapture *capture = 0; int key = 0; void mouseHandler(int event, int x, int y, int flags, void* param) { /* user press left button */ if (event == CV_EVENT_LBUTTONDOWN && !drag) { point = cvPoint(x, y); drag = 1; } /* user drag the mouse */ if (event == CV_EVENT_MOUSEMOVE && drag) { img1 = cvCloneImage(frame); cvRectangle( img1, point, cvPoint(x, y), CV_RGB(255, 0, 0), 1, 8, 0 ); cvCopy(img1,frame, NULL); cvShowImage("result", img1); } /* user release left butt

Region of interest selection ROI

#include <stdlib.h> #include <stdio.h> #include <math.h> #include <string.h> #include<opencv2\opencv.hpp> #include <opencv2\highgui\highgui.hpp> int main(int argc, char *argv[]) { CvCapture *capture = 0; IplImage *frame = 0; int key = 0; /* initialize camera */ capture = cvCaptureFromCAM( 0 ); /* always check */ if ( !capture ) { printf("Cannot open initialize webcam!\n" ); exit(0); } /* create a window for the video */ cvNamedWindow( "result", CV_WINDOW_AUTOSIZE ); while( key != 'q' ) { /* get a frame */ frame = cvQueryFrame( capture ); /* always check */ if( !frame ) break; /* sets the Region of Interest*/ cvSetImageROI(frame, cvRect(150, 50, 150, 250)); /* create destination image */ IplImage *img2 = cvCreateImage(cvGetSize(frame), frame->depth, frame->nChannels); /* * do the main processing with subimage here. * in this example, we simply invert the subimage

Load a camera in OpenCV

#include <stdlib.h> #include <stdio.h> #include <math.h> #include <string.h> #include<opencv2\opencv.hpp> #include <opencv2\highgui\highgui.hpp> int main(int argc, char *argv[]) { CvCapture *capture = 0; IplImage *frame = 0; int key = 0; /* initialize camera */ capture = cvCaptureFromCAM( 0 ); /* always check */ if ( !capture ) { printf("Cannot open initialize webcam!\n" ); exit(0); } /* create a window for the video */ cvNamedWindow( "result", CV_WINDOW_AUTOSIZE ); while( key != 'q' ) { /* get a frame */ frame = cvQueryFrame( capture ); /* always check */ if( !frame ) break; /* display current frame */ cvShowImage( "result", frame ); /* exit if user press 'q' */ key = cvWaitKey( 1 ); } /* free memory */ cvDestroyWindow( "result&q

Load a Video in OpenCV

#include <stdlib.h> #include <stdio.h> #include <math.h> #include <string.h> #include<opencv2\opencv.hpp> #include <opencv2\highgui\highgui.hpp> int main(int argc, char *argv[]) { IplImage *frame; int key = 'a'; /* supply the AVI file to play */ if(argc<2){ printf("Usage: main <video-file-name>.avi\n\7"); exit(0); } /* load the AVI file */ CvCapture *capture = cvCaptureFromAVI( argv[1] ); /* always check */ if( !capture ) return 1; /* get fps, needed to set the delay */ int fps = ( int )cvGetCaptureProperty( capture, CV_CAP_PROP_FPS ); /* display video */ cvNamedWindow( "video", 0 ); while( key != 'q' ) { /* get a frame */ frame = cvQueryFrame( capture ); /* always check */ if( !frame ) break; /* display frame */ cvShowImage( "video", frame ); /* quit if user press 'q' */ cvWaitKey( 1000 / fps ); } /* free memory */ cvReleaseCapture( &capture

Loading an Image in OpenCV

#include <stdlib.h> #include <stdio.h> #include <math.h> #include <string.h> #include<opencv2\opencv.hpp> #include <opencv2\highgui\highgui.hpp> int main(int argc, char *argv[]) { IplImage* img = 0; int height,width,step,channels; uchar *data; int i,j,k; char* change_im(char*); char *im = ""; if(argc<2){ printf("Usage: main <image-file-name>\n\7"); im = "aresh.jpg"; //decalre DEFAULT } else { im = argv[1]; } // load an image img=cvLoadImage(im); if(!img){ printf("Could not load image file: %s\n",im); exit(0); } // get the image data height = img->height; width = img->width; step = img->widthStep; channels = img->nChannels; data = (uchar *)img->imageData; printf("Processing a %dx%d image with %d channels\n",height,width,channels); // create a window cvNamedWindow("mainWin", CV_WINDOW_AUTO

Installing and configuring OpenCV 2.2

Correction to the include library correction to the linking of files CODE USED: #include int main() { IplImage* img = cvLoadImage("C:\\hello.jpg"); cvNamedWindow("myfirstwindow"); cvShowImage("myfirstwindow", img); cvWaitKey(0); cvReleaseImage(&img); return 0; } The important files and includes in previous versions of opencv cv200d. lib cxcore200d. lib highgui200d. lib cvaux200d. lib ml200d. lib