
Nose Tracking using Kalman Filter and Viola and Jones Classifier

#include <cstdio>
#include <vector>
#include <opencv2/opencv.hpp>

using namespace cv;
using namespace std;

class atsKalman
{
public:
    atsKalman()
    {
        //4 state variables (x, y, Vx, Vy) and 2 measured variables (x, y)
        KalmanFilter KF(4, 2, 0);
        Mat_<float> measurement(2,1);
        measurement.setTo(Scalar(0));

        KFs = KF;
        measurements = measurement;
    }
    void setKalman(int x, int y)
    {
        //initial state: position (x, y) with zero velocity
        KFs.statePre.at<float>(0) = x;
        KFs.statePre.at<float>(1) = y;
        KFs.statePre.at<float>(2) = 0;
        KFs.statePre.at<float>(3) = 0;
        //constant-velocity model: x += Vx, y += Vy on every frame
        KFs.transitionMatrix = *(Mat_<float>(4, 4) << 1,0,1,0,   0,1,0,1,  0,0,1,0,  0,0,0,1);

        setIdentity(KFs.measurementMatrix);
        setIdentity(KFs.processNoiseCov, Scalar::all(1e-4));
        setIdentity(KFs.measurementNoiseCov, Scalar::all(1e-1));
        setIdentity(KFs.errorCovPost, Scalar::all(.1));
    }
    Point step1()
    {
        //prediction: where the filter expects the nose to be
        Mat prediction = KFs.predict();
        Point predictPt(prediction.at<float>(0),prediction.at<float>(1));
        return predictPt;
    }
    Point step2()
    {
        //measurement correction: blend the prediction with the detection
        Mat estimated = KFs.correct(measurements);
        Point statePt(estimated.at<float>(0),estimated.at<float>(1));
        return statePt;
    }
    void changeMeasure(int x,int y)
    {
        //store the detected coordinates as the current measurement
        measurements(0) = x;
        measurements(1) = y;
    }
private:
    KalmanFilter KFs;
    Mat_<float> measurements;
};
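
For reference, with a unit time step this class encodes the standard constant-velocity model (my notation; the post itself does not spell it out):

    x_{k+1} = F x_k + w_k, \qquad z_k = H x_k + v_k

    F = \begin{pmatrix} 1 & 0 & 1 & 0 \\ 0 & 1 & 0 & 1 \\ 0 & 0 & 1 & 0 \\ 0 & 0 & 0 & 1 \end{pmatrix}, \qquad
    H = \begin{pmatrix} 1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \end{pmatrix}

Here w_k and v_k are the process and measurement noise, whose covariances are the processNoiseCov and measurementNoiseCov set in setKalman.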

The Viola-Jones classifier is used to detect the nose. The X and Y coordinates of the detected nose are fed to the first step of the Kalman filter, the prediction. The second step, measurement correction, completes the Kalman filter cycle.
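
Per frame, the calls line up like this (condensed from the main function further down):

    Point predictPt = ats.step1();         //1. predict from the motion model
    cv::Point found = haar.Find(img);      //2. detect the nose with Viola-Jones
    ats.changeMeasure(found.x, found.y);   //3. feed the detection as the measurement
    Point statePt = ats.step2();           //4. correct: the smoothed nose position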

class atsViolaJones
{
public:
    atsViolaJones()
    {
        storage = cvCreateMemStorage( 0 );
        cascade = ( CvHaarClassifierCascade* )cvLoad("haarcascade_mcs_nose.xml", 0, 0, 0 );
        if ( !cascade )
            printf("--(!)Error loading cascade\n");
    }
    cv::Point Find(cv::Mat image )
    {
        //wrap the Mat header as an IplImage (no pixel copy)
        IplImage imgs = image;
        IplImage* img = &imgs;

        CvSeq *faces = cvHaarDetectObjects(
        img,
        cascade,
        storage,
        1.1,                  //scale factor
        3,                    //min neighbors
        0,                    //flags
        cvSize( 40, 40 ) );   //min detection size

        cv::Point found(0, 0);
        if( faces && faces->total > 0 )
        {
            //take the first detection and return the center of its rectangle
            CvRect *r = ( CvRect* )cvGetSeqElem( faces, 0 );
            found.x = r->x + r->width/2;
            found.y = r->y + r->height/2;
        }

        return found;
    }
    ~atsViolaJones()
    {
        cvReleaseHaarClassifierCascade( &cascade );
        cvReleaseMemStorage( &storage );
    }
private:
    CvHaarClassifierCascade *cascade;
    CvMemStorage *storage;
};
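
A quick usage sketch of this C-API version (the image path is just an illustration):

    atsViolaJones detector;
    cv::Mat frame = cv::imread("frame.jpg");
    cv::Point nose = detector.Find(frame);
    printf("nose at (%d, %d)\n", nose.x, nose.y);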
The same class updated for OpenCV 2.2:

class atsViolaJones
{
public:
    atsViolaJones(String filename = "")
    {
        //fall back to the nose cascade when no file is given
        if (filename == "")
            filename = "haarcascade_mcs_nose.xml";
        if( !face_cascade.load( filename ) ){ printf("--(!)Error loading cascade\n"); }
    }
    cv::Point Find(cv::Mat frame)
    {
        std::vector<Rect> faces;
        Mat frame_gray;

        cvtColor( frame, frame_gray, CV_BGR2GRAY );
        equalizeHist( frame_gray, frame_gray );

        face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, Size(30, 30) );

        cv::Point found(0, 0);
        if( !faces.empty() )   //faces[0] is the first detection
        {
            found.x = faces[0].x + faces[0].width/2;    //center of the rectangle
            found.y = faces[0].y + faces[0].height/2;
        }

        return found;
    }
private:
    CascadeClassifier face_cascade;

};
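
Usage is the same, except the cascade file is now passed in (or defaulted when the argument is empty):

    atsViolaJones haar("haarcascade_mcs_nose.xml");   //or simply: atsViolaJones haar;
    cv::Point nose = haar.Find(frame);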

This is the main function:

int main (int argc, char * const argv[]) {
    Mat img(500, 500, CV_8UC3);

    //init the stuff
    atsKalman ats;
    atscameraCapture movie;
    atsViolaJones haar;

    //trails of measured and filtered points
    vector<Point> mousev, kalmanv;

    char code = (char)-1;
    for(;;)
    {
        //init kalman
        ats.setKalman(0,0);

        mousev.clear();
        kalmanv.clear();

        for(;;)
        {
            //get camera
            img = movie.ats_getImage();

           
            //Step 1: prediction
            Point predictPt = ats.step1();

            //object X and Y coordinates from the Viola-Jones detector
            cv::Point found;
            found = haar.Find(img);
            int x = found.x;
            int y = found.y;

            //feed the detection into the filter as the measurement
            ats.changeMeasure(x,y);

            Point measPt(x,y);
            mousev.push_back(measPt);

            //Step 2: measurement correction
            Point statePt = ats.step2();
            kalmanv.push_back(statePt);
           
            // plot points
            #define drawCross( center, color, d )                                 \
            line( img, Point( center.x - d, center.y - d ),                \
            Point( center.x + d, center.y + d ), color, 2, CV_AA, 0); \
            line( img, Point( center.x + d, center.y - d ),                \
            Point( center.x - d, center.y + d ), color, 2, CV_AA, 0 )


            drawCross( statePt, Scalar(0,255,255), 5 );
            drawCross( measPt, Scalar(0,0,255), 5 );
           
            for (int i = 0; i < (int)mousev.size() - 1; i++) {
                line(img, mousev[i], mousev[i+1], Scalar(255,255,0), 1);   //measured trail
            }
            for (int i = 0; i < (int)kalmanv.size() - 1; i++) {
                line(img, kalmanv[i], kalmanv[i+1], Scalar(0,255,0), 1);   //filtered trail
            }
           
           
            imshow( "kalman", img );

            code = (char)waitKey(100);
           
            if( code > 0 )
                break;
        }
        if( code == 27 || code == 'q' || code == 'Q' )
            break;
    }
   
    return 0;
}
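
The atscameraCapture helper is not defined in this post; a minimal stand-in built on cv::VideoCapture might look like this (a sketch, not the original class):

    class atscameraCapture
    {
    public:
        atscameraCapture() { capture.open(0); }   //default webcam
        cv::Mat ats_getImage()
        {
            cv::Mat frame;
            capture >> frame;                      //grab the next frame
            return frame;
        }
    private:
        cv::VideoCapture capture;
    };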

Comments

  1. Using the equations of motion (constant acceleration), the state grows to (x, y, Vx, Vy, ax, ay):
    KFs.statePre.at<float>(0) = x;
    KFs.statePre.at<float>(1) = y;
    KFs.statePre.at<float>(2) = 0;
    KFs.statePre.at<float>(3) = 0;
    KFs.statePre.at<float>(4) = 0;
    KFs.statePre.at<float>(5) = 0;
    KFs.transitionMatrix = *(Mat_<float>(6, 6) << 1,0,1,0,0.5,0, 0,1,0,1,0,0.5, 0,0,1,0,1,0, 0,0,0,1,0,1, 0,0,0,0,1,0, 0,0,0,0,0,1);
    KFs.measurementMatrix = *(Mat_<float>(2, 6) << 1,0,1,0,0.5,0, 0,1,0,1,0,0.5);
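
    (The 0.5 entries fall out of the discrete equations of motion with a unit time step,

        x_{k+1} = x_k + v_k + \tfrac{1}{2} a_k, \qquad v_{k+1} = v_k + a_k, \qquad a_{k+1} = a_k,

    which is what the rows of the 6x6 transition matrix above encode.)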

  2. Is it possible to track multiple objects with a Kalman filter? I'd like to track some traffic signs in a video, for example...

  3. A Kalman filter does not work with multiple objects on its own; there are many articles on the net you can use as reference.
    The only way a Kalman filter can be used for multiple-object tracking is if you know exactly what each object is, using meanshift/camshift or anything that can recognize an object at time T and time T+1. Always remember that Kalman works well assuming you know your object; the initial prediction is based on that.

  4. Let me explain it a different way:
    if you look at the code, I take an X and Y coordinate from Viola-Jones before feeding it to Kalman. If I had two noses, I would get two X's and two Y's; how would I know which belongs to which object? Therefore my recognition step has to separate the X and Y for each object before running Kalman.

    Replies
    1. Well, you can extract each object using blob analysis and then apply a Kalman filter to each object's x, y location; see the sketch after this thread.
      Hope that helps.

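    A sketch of the idea in this thread: once each detection has been associated with an object (by recognition, blob analysis, etc.), every object gets its own filter. The map keyed by an object id below is an illustration, not code from the post:

        #include <map>

        std::map<int, atsKalman> trackers;   //one Kalman filter per object id

        cv::Point update(int id, int x, int y)
        {
            if (trackers.find(id) == trackers.end())
                trackers[id].setKalman(x, y);     //first sighting: initialize at the detection
            trackers[id].step1();                 //predict
            trackers[id].changeMeasure(x, y);     //feed the associated measurement
            return trackers[id].step2();          //correct and return the filtered point
        }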
  5. OK, so in my first detection I know where the object (my traffic sign) is. Now I would like to follow it over multiple frames, and maybe other traffic signs appear. What's the best technique?

    Replies
    1. Use blob analysis, and better yet, work with optical flow; see the sketch below.

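    A minimal sketch of following points across frames with pyramidal Lucas-Kanade optical flow, as the reply suggests (prevGray, nextGray and nextFrame are placeholder frames):

        std::vector<cv::Point2f> prevPts, nextPts;
        std::vector<uchar> status;
        std::vector<float> err;

        //pick trackable corners in the previous grayscale frame
        cv::goodFeaturesToTrack(prevGray, prevPts, 100, 0.01, 10);

        //find where those points moved to in the next grayscale frame
        cv::calcOpticalFlowPyrLK(prevGray, nextGray, prevPts, nextPts, status, err);

        for (size_t i = 0; i < nextPts.size(); i++)
            if (status[i])   //nextPts[i] is the tracked location of prevPts[i]
                cv::circle(nextFrame, nextPts[i], 3, cv::Scalar(0, 255, 0));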
  6. Hi, how can I detect a car in a video with OpenCV and Visual Studio?

