Saturday, December 31, 2011

Blob Detection, Connected Component (Pure Opencv)



Connected-component labeling (alternatively connected-component analysis, blob extraction, region labeling, blob discovery, or region extraction) is an algorithmic application of graph theory, where subsets of connected components are uniquely labeled based on a given heuristic. Connected-component labeling is not to be confused with segmentation.


i got the initial code from this URL: http://nghiaho.com/?p=1102
However, the code did not compile with my setup of OpenCV 2.2 — I'm guessing it was written for an older version — so I refactored it and corrected the errors to come up with this class.



///Connected-component ("blob") labelling via repeated cv::floodFill.
///Original code by http://nghiaho.com/?p=1102, refactored for VS2010 / OpenCV 2.2+.
class atsBlobFinder
    {
    public:
        atsBlobFinder()
        {
        }
        ///Finds connected components in a binary image.
        ///@param binary input image whose pixels are exactly 0 (background) or 1 (foreground)
        ///@param blobs  output: one vector of pixel coordinates per detected blob
        ///Original code by http://nghiaho.com/?p=1102
        ///Changed and added comments; removed errors.
        ///Works with VS2010 and OpenCV 2.2+
        void FindBlobs(const cv::Mat &binary, vector < vector<cv::Point>  > &blobs)
        {
            blobs.clear();

            // Label scheme used inside label_image:
            // 0  - background
            // 1  - unlabelled foreground
            // 2+ - labelled foreground

            ///The input is binary, so values are either 0 or 1.
            ///The objective is to find a connected set of 1's and relabel it 2,
            ///then find the next set and relabel it 3, and so on.

            cv::Mat label_image;
            binary.convertTo(label_image, CV_32FC1); // CV_32S is not supported here because floodFill's fill value arrives as a (double) cv::Scalar

            int label_count = 2; // starts at 2 because 0,1 are used already

            //erode to remove noise-------------------------------
            // NOTE(review): anchor Point(0,0) is the kernel's top-left corner,
            // not its centre (the usual default is Point(-1,-1)); this shifts
            // the eroded result -- confirm it is intended.
            Mat element = getStructuringElement( MORPH_RECT,
            Size( 2*3 + 1, 2*3+1 ),
            Point( 0, 0 ) );
            /// Apply the erosion operation
            erode( label_image, label_image, element );
            //---------------------------------------------------

            //just check the Matrix of label_image to make sure we have 0 and 1 only
            //cout << label_image << endl;

            for(int y=0; y < binary.rows; y++) {
                for(int x=0; x < binary.cols; x++) {
                    float checker = label_image.at<float>(y,x); //must read as float: the matrix was converted to CV_32FC1 above
                    cv::Rect rect;
                    //cout << "check:" << checker << endl;
                    if(checker == 1) {
                        //unlabelled foreground pixel: flood-fill its whole region with the current label
                        cv::floodFill(label_image, cv::Point(x,y), cv::Scalar(label_count), &rect, cv::Scalar(0), cv::Scalar(0), 4);
                        label_count++;
                        //cout << label_image << endl <<"by checking: " << label_image.at<float>(y,x) <<endl;
                        //cout << label_image;

                        //a vector of all points in a blob
                        std::vector<cv::Point> blob;

                        //rect is the bounding box reported by floodFill; collect every
                        //pixel inside it that carries the label just assigned (label_count-1)
                        for(int i=rect.y; i < (rect.y+rect.height); i++) {
                            for(int j=rect.x; j < (rect.x+rect.width); j++) {
                                float chk = label_image.at<float>(i,j);
                                //cout << chk << endl;
                                if(chk == label_count-1) {
                                    blob.push_back(cv::Point(j,i));
                                }                       
                            }
                        }
                        //place the points of a single blob in a grouping
                        //a vector of vector points
                        blobs.push_back(blob);
                    }
                }
            }
            cout << label_count <<endl; // debug: labels used (blob count + 2)
            imshow("label image",label_image); // debug: visualize the label matrix
        }
    private:
    }; 


From the code comments, I've answered and tested a few parts which the original author did not discuss.

Running on Visual Studio 2010 and OpenCV 2.2 using the 4 connected neighbors and opencv internal function FloodFill.

1. Get the Binary image. the binary image should have values of 0 and 1 only.
2. go through each pixel and find the value 1, floodfill and replace all 1 with a counter ie. number 2
3. every blob found should now be part of floodfill and all neighbors value changed to an ascending number
4. from each blob, get the x and y coordinate of that blob and save it in a vector. giving you a vector<Point>
5. save each of those Vector<Point> into another Vector which represents all the blobs found
6. Output the value



// Usage example: color every blob found by atsBlobFinder::FindBlobs.
// NOTE(review): this is a fragment -- `img` and `ima` are defined elsewhere;
// presumably `img` is the source image and `ima` its grayscale version.
atsBlobFinder blb;
cv::Mat output = cv::Mat::zeros(img.size(), CV_8UC3); //3 Channels
// NOTE(review): `binary` is never assigned before FindBlobs is called in this
// snippet -- in the full program it must hold the 0/1 input image.
            cv::Mat binary;
            vector <vector<cv::Point>> blobs;

            blb.FindBlobs(binary, blobs);
            for(size_t i=0; i < blobs.size(); i++) {
                // Random color per blob. NOTE(review): r,g,b are computed but
                // the loop below copies the grayscale source instead -- verify.
                unsigned char r = 255 * (rand()/(1.0 + RAND_MAX));
                unsigned char g = 255 * (rand()/(1.0 + RAND_MAX));
                unsigned char b = 255 * (rand()/(1.0 + RAND_MAX));

                for(size_t j=0; j < blobs[i].size(); j++) {
                    int x = blobs[i][j].x;
                    int y = blobs[i][j].y;

                    output.at<Vec3b>(y,x)[0] = ima.at<uchar>(y,x); //channel 1
                    output.at<Vec3b>(y,x)[1] = ima.at<uchar>(y,x); //channel 2
                    output.at<Vec3b>(y,x)[2] = ima.at<uchar>(y,x); //channel 3
                }
            }
            imshow("con comp",output);

the vector<vector<Point>> blobs can create a mask and output the blob in separate images

Wednesday, December 28, 2011

Computing Gini Index of an image (measure of Impurity)

Using my previous posts Class file and this reference:
http://people.revoledu.com/kardi/tutorial/DecisionTree/how-to-measure-impurity.htm

float computeGiniIndex(Mat r, Mat g, Mat b)
    {
        float giniIndex = 0.0;
        float frequency = getFrequencyOfBin(r);
        for( int i = 1; i < histSize; i++ )
        {
            float Hc = abs(getHistogramBinValue(r,i));
            giniIndex += Hc*Hc;
        }
        frequency = getFrequencyOfBin(g);
        for( int i = 1; i < histSize; i++ )
        {
            float Hc = abs(getHistogramBinValue(g,i));
            giniIndex += Hc*Hc;
        }
        frequency = getFrequencyOfBin(b);
        for( int i = 1; i < histSize; i++ )
        {
            float Hc = abs(getHistogramBinValue(b,i));
            giniIndex += Hc*Hc;
        }
        giniIndex = 1 - (giniIndex);
        //cout << giniIndex <<endl;
        return giniIndex;
    }

Friday, December 23, 2011

Computing Entropy of an image (CORRECTED)

entropy is a measure of the uncertainty associated with a random variable.
basically i want to get a single value representing the entropy of an image.

1. Assign 255 bins for the range of values between 0-255
2. separate the image into its 3 channels
3. compute histogram for each channel
4. normalize all 3 channels uniformly
5. for each channel get the bin value (Hc) and use its absolute value (negative log is infinity)
6. compute Hc*log10(Hc)
7. add to entropy and continue with 5 until a single value converges
5. get the frequency of each channel - add all the values of the bin
6. for each bin get a probability -
if bin 1 = 20
bin 2 = 30
then frequency is 50 and probability is 20/50 and 30/50
then compute using shannon formula

 REFERENCE: http://people.revoledu.com/kardi/tutorial/DecisionTree/how-to-measure-impurity.htm




class atsHistogram
{
public:
    cv::Mat DrawHistogram(Mat src)
    {
        /// Separate the image in 3 places ( R, G and B )
         vector<Mat> rgb_planes;
         split( src, rgb_planes );


         /// Establish the number of bins
         int histSize = 255;


         /// Set the ranges ( for R,G,B) )
         float range[] = { 0, 255 } ;
         const float* histRange = { range };


         bool uniform = true; bool accumulate = false;


         Mat r_hist, g_hist, b_hist;


         /// Compute the histograms:
         calcHist( &rgb_planes[0], 1, 0, Mat(), r_hist, 1, &histSize, &histRange, uniform, accumulate );
         calcHist( &rgb_planes[1], 1, 0, Mat(), g_hist, 1, &histSize, &histRange, uniform, accumulate );
         calcHist( &rgb_planes[2], 1, 0, Mat(), b_hist, 1, &histSize, &histRange, uniform, accumulate );


         // Draw the histograms for R, G and B
         int hist_w = 400; int hist_h = 400;
         int bin_w = cvRound( (double) hist_w/histSize );


         Mat histImage( hist_w, hist_h, CV_8UC3, Scalar( 0,0,0) );


         /// Normalize the result to [ 0, histImage.rows ]
         normalize(r_hist, r_hist, 0, histImage.rows, NORM_MINMAX, -1, Mat() );
         normalize(g_hist, g_hist, 0, histImage.rows, NORM_MINMAX, -1, Mat() );
         normalize(b_hist, b_hist, 0, histImage.rows, NORM_MINMAX, -1, Mat() );


         /// Draw for each channel
         /*for( int i = 1; i < histSize; i++ )
           {
             line( histImage, Point( bin_w*(i-1), hist_h - cvRound(r_hist.at<float>(i-1)) ) ,
                              Point( bin_w*(i), hist_h - cvRound(r_hist.at<float>(i)) ),
                              Scalar( 0, 0, 255), 2, 8, 0  );
             line( histImage, Point( bin_w*(i-1), hist_h - cvRound(g_hist.at<float>(i-1)) ) ,
                              Point( bin_w*(i), hist_h - cvRound(g_hist.at<float>(i)) ),
                              Scalar( 0, 255, 0), 2, 8, 0  );
             line( histImage, Point( bin_w*(i-1), hist_h - cvRound(b_hist.at<float>(i-1)) ) ,
                              Point( bin_w*(i), hist_h - cvRound(b_hist.at<float>(i)) ),
                              Scalar( 255, 0, 0), 2, 8, 0  );
            }*/
double entropy = (double)computeShannon(r_hist,g_hist,b_hist);
         //printf("Entropy: %f" , entropy );
         cout << entropy << endl;
         return histImage;
    }
    float getHistogramBinValue(Mat hist, int binNum)
    {
        //return cvRound(hist.at<float>(binNum));
        return hist.at<float>(binNum);
    }
    float computeShannon(Mat r, Mat g, Mat b)
    {
        int histSize = 255; //number of bins
        float entropy = 0.0;
        for( int i = 1; i < histSize; i++ )
        {
            float Hc = abs(getHistogramBinValue(r,i));
            //cout << "Red " << Hc << endl;
            entropy += Hc * log10(Hc);
            //cout << "-" << Hc << " -n- " << entropy << endl;
        }
        for( int i = 1; i < histSize; i++ )
        {
            float Hc = abs(getHistogramBinValue(g,i));
            //cout << "Green " << Hc << endl;
            entropy += Hc * log10(Hc);
        }
        for( int i = 1; i < histSize; i++ )
        {
            float Hc = abs(getHistogramBinValue(b,i));
            //cout << "Blue " << Hc << endl;
            entropy += Hc * log10(Hc);
        }
        entropy = entropy * -1;
        //cout << entropy <<endl;
        return entropy;
    }
private:
};

     float getHistogramBinValue(Mat hist, int binNum)
    {
        return hist.at<float>(binNum);
    }
    float getFrequencyOfBin(Mat channel)
    {
        float frequency = 0.0;
        for( int i = 1; i < histSize; i++ )
        {
            float Hc = abs(getHistogramBinValue(channel,i));
            frequency += Hc;
        }
        return frequency;
    }
    float computeShannonEntropy(Mat r, Mat g, Mat b)
    {
        float entropy = 0.0;
        float frequency = getFrequencyOfBin(r);
        for( int i = 1; i < histSize; i++ )
        {
            float Hc = abs(getHistogramBinValue(r,i));
            entropy += -(Hc/frequency) * log10((Hc/frequency));
        }
        frequency = getFrequencyOfBin(g);
        for( int i = 1; i < histSize; i++ )
        {
            float Hc = abs(getHistogramBinValue(g,i));
            entropy += -(Hc/frequency) * log10((Hc/frequency));
        }
        frequency = getFrequencyOfBin(b);
        for( int i = 1; i < histSize; i++ )
        {
            float Hc = abs(getHistogramBinValue(b,i));
            entropy += -(Hc/frequency) * log10((Hc/frequency));
        }
        entropy = entropy;
        //cout << entropy <<endl;
        return entropy;
    }

Saturday, October 8, 2011

Opencv 2.3.1 Installation

alright it took me a while before posting this. for OPENCV 2.3.1 and Visual studio 2010 Professional on a windows 7 64 bit environment

Download The "super pack" and unzip to "C:\OPENCV"
it seems you can only get the source of opencv and need to compile it with CMAKE.
very straight forward.
download CMAKE and use the command prompt to enter CMAKE CMAKELIST.txt at the opencv directory

Open  "ALL_BUILD" by double clicking - opens up visual studio
Compile it and wait. (took a minute)
go to properties and select the release version and compile again (took another minute)

you're all set with the opencv installation "c:\opencv\build"

create an empty visual studio "win32 console application"

Click on Configuration Properties->VC++ Directories, and edit the Include Directories. Add:

C:\opencv\build\common\tbb30_20110427oss\include (UPDATE)
C:\opencv\build\include

Click on Configuration Properties->VC++ Directories, and edit the Library Directories. Add:
C:\opencv\build\common\tbb30_20110427oss\lib\ia32\vc10 (UPDATE)
C:\opencv\build\x86\vc10\lib

Click on Configuration Properties->Linker->Input, and edit the Additional Dependencies. Add:
opencv_core231d.lib
opencv_imgproc231d.lib
opencv_highgui231d.lib
opencv_ml231d.lib
opencv_video231d.lib
opencv_features2d231d.lib
opencv_calib3d231d.lib
opencv_objdetect231d.lib
opencv_contrib231d.lib
opencv_legacy231d.lib
opencv_flann231d.lib

Click on Configuration Properties->Debugging, and edit the Environment.
 PATH=C:\opencv\build\x86\vc10\bin;C:\opencv\build\common\tbb30_20110427oss\bin\ia32\vc10



You're all set.


(UPDATE)
i realized when trying to compile lkdemo.cpp an error:



even though opencv has a tbb folder in the build/common it does not include the complete library so we need to download the whole version from http://threadingbuildingblocks.org/ver.php?fid=171
and unzip it to "c:\opencv\build\common". there is no need to compile it as its already ready for use.

just make the additional changes to the library and include and bin files for this to work.

Saturday, September 17, 2011

Blobs with opencv (internal function)

There are many open source opencv BLOB libraries that you can use. i have tried several of these, however because of the 64 bit machine that im using recompiling these are very troublesome.

If you have heard of these libraries:
  1. "cvBlobsLib": http://opencv.willowgarage.com/wiki/cvBlobsLib 
  2. "cvBlob": http://code.google.com/p/cvblob/ 
  3. "Bloblib" by Dave Grossman (also referred to as "Blob Analysis Package"):
    Go to http://tech.groups.yahoo.com/group/OpenCV/files/
 You will know that opencv also has a built in function that can help you find blobs using cv::findContour and see some statistics using the cv::moments.
eventually make these functions similar to regionprops Matlab function


The BLOB FINDER CLASS:
class atsBlobFinder
{
public:
    atsBlobFinder(cv::Mat src)
    {
        numBlobs = 0;
        cv::Mat img; //must create a temporary Matrix to hold the gray scale or wont work
        cv::cvtColor(src,img,CV_BGR2GRAY); //Convert image to GrayScale
        img = img > 1; //create the binary image
        ////cv::adaptiveThreshold(src,src,64,ADAPTIVE_THRESH_MEAN_C,THRESH_BINARY,7,13); //create a binary image
       
        findContours( img, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE ); //Find the Contour BLOBS
        vector<Moments> _mu(contours.size() );
        vector<Point2f> _mc( contours.size() );
        for( int i = 0; i < contours.size(); i++ )
        {
            _mu[i] = moments( Mat(contours[i]), false );
            _mc[i] = Point2f( _mu[i].m10/_mu[i].m00 , _mu[i].m01/_mu[i].m00);
        }
        mu = _mu;
        mc = _mc;
        numBlobs = contours.size();
    }
    void Draw(cv::Mat &dst)
    {
        // iterate through all the top-level contours,
        // draw each connected component with its own random color
        for( int i = 0; i < contours.size(); i++ )
        {
            Scalar color(  rng.uniform(0,255),  rng.uniform(0,255),  rng.uniform(0,255) );
            drawContours( dst, contours, i, color, CV_FILLED, 8, hierarchy );
            // drawCross(mc[i],Scalar(0,0,255), 5,dst); //put a cross
            char buff[255];
            sprintf(buff, "%d", i);

            string text = std::string(buff);
            cv::putText(dst,text,mc[i],0,0.5,Scalar(0,0,255),1,8,false);
        }
    }
    int getNumBlobs()
    {
        //need to create a buffer for output or wrong reference
        /*char buff[255];
        sprintf(buff, "%d", numBlobs);*/
        return numBlobs;
    }
private:
    vector<vector<Point> > contours;
    vector<Vec4i> hierarchy;
    vector<Moments> mu;
    vector<Point2f> mc;
    int numBlobs;
};




#include <iostream>

// Include OpenCV
#include <opencv/cv.h>
#include <opencv/highgui.h>

using namespace cv;
// Draws an X-shaped cross of half-size d centred at `center` on `drawing`.
#define drawCross( center, color, d, drawing )                                 \
            line( drawing, Point( center.x - d, center.y - d ),                \
            Point( center.x + d, center.y + d ), color, 2, CV_AA, 0); \
            line( drawing, Point( center.x + d, center.y - d ),                \
            Point( center.x - d, center.y + d ), color, 2, CV_AA, 0 )

// Fixed-seed RNG so per-contour colors are reproducible across runs.
RNG rng(12345);
int main( int argc, char** argv )
{
    Mat src;
    // the first command line parameter must be file name of binary
    // (black-n-white) image
    if(!(src=imread("pic6.png", CV_LOAD_IMAGE_GRAYSCALE)).data)
    {
        printf("OOOPS");
        waitKey(0);
        return -1;
    }



    Mat dst = Mat::zeros(src.rows, src.cols, CV_8UC3);

    src = src > 1;
    //cv::adaptiveThreshold(src,src,64,ADAPTIVE_THRESH_MEAN_C,THRESH_BINARY,7,13);
    namedWindow( "Source", 1 );
    imshow( "Source", src );

    vector<vector<Point> > contours;
    vector<Vec4i> hierarchy;

    findContours( src, contours, hierarchy,
        CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE );

    /// Get the moments
    vector<Moments> mu(contours.size() );
    vector<Point2f> mc( contours.size() );
    for( int i = 0; i < contours.size(); i++ )
        {
            mu[i] = moments( Mat(contours[i]), false );
            mc[i] = Point2f( mu[i].m10/mu[i].m00 , mu[i].m01/mu[i].m00);
        }

    // iterate through all the top-level contours,
    // draw each connected component with its own random color
    for( int i = 0; i < contours.size(); i++ )
    {
        Scalar color(  rng.uniform(0,255),  rng.uniform(0,255),  rng.uniform(0,255) );
        drawContours( dst, contours, i, color, CV_FILLED, 8, hierarchy );
        // drawCross(mc[i],Scalar(0,0,255), 5,dst); //put a cross
         char buff[255];
        sprintf(buff, "%d", i);

         string text = std::string(buff);
        cv::putText(dst,text,mc[i],0,0.5,Scalar(0,0,255),1,8,false);
    }
   

    namedWindow( "Components", 1 );
    imshow( "Components", dst );
    waitKey(0);
}

finding the center of gravity in opencv





Alot of feature detectors such as the Haar classifier will return rectangular shapes presented as a detected feature.

one of the things to do in order to track these, is to find the center of the rectangle.


/// Demo: draw a filled rectangle on each camera frame and mark its center
/// with a red cross. Exit on ESC / 'q' / 'Q'.
/// NOTE(review): atscameraCapture is a project class not shown here --
/// presumably a wrapper over a camera capture; verify.
int main( int argc, char** argv )
{
atscameraCapture movie;
char code = (char)-1;
for(;;)
        {
            //get camera frame
            cv::Mat imgs = movie.ats_getImage();

// Draws an X-shaped cross of half-size d at `center`. The macro is expanded
// once by the preprocessor even though it sits inside the loop body.
#define drawCross( center, color, d, img )                                 \
            line( img, Point( center.x - d, center.y - d ),                \
            Point( center.x + d, center.y + d ), color, 2, CV_AA, 0); \
            line( img, Point( center.x + d, center.y - d ),                \
            Point( center.x - d, center.y + d ), color, 2, CV_AA, 0 )

            cv::Point center;

            //given 2 points representing the rectangle
            cv::Point topLeft(100,100);
            cv::Point bottomRight(200,200);
            cv::rectangle(imgs,topLeft,bottomRight,Scalar( 0, 255, 255 ),-1, 8 );

            //center of the rectangle = midpoint of its diagonal
            center.x = (topLeft.x+bottomRight.x)/2;
            center.y = (topLeft.y+bottomRight.y)/2;
            drawCross(center,Scalar(0,0,255), 5,imgs);

            imshow("camera",  imgs);
            code = (char)waitKey(100);
           
             if( code == 27 || code == 'q' || code == 'Q' )
            break;
        }
 return 0;

}

Another way of finding the center of gravity is using the internal function of OPENCV called cv::moments

// Shared state for the contour/moments camera demo below.
Mat canny_output;                   // Canny edge map, input to findContours
  vector<vector<Point> > contours;  // detected contours
  vector<Vec4i> hierarchy;          // contour hierarchy from findContours
  RNG rng(12345);                   // fixed seed: reproducible contour colors

/** @function main */
/// Camera loop: Canny edges -> contours -> moments/mass centers, drawn with
/// random colors plus per-contour area/length stats on stdout.
/// Exit on ESC / 'q' / 'Q'.
/// NOTE(review): atscameraCapture is a project class not shown here.
int main( int argc, char** argv )
{
atscameraCapture movie;
char code = (char)-1;
for(;;)
        {
            //get camera frame
            cv::Mat src_gray;
            cv::Mat imgs = movie.ats_getImage();

// X-shaped cross marker (defined once by the preprocessor, despite sitting
// inside the loop body).
#define drawCross( center, color, d, img )                                 \
            line( img, Point( center.x - d, center.y - d ),                \
            Point( center.x + d, center.y + d ), color, 2, CV_AA, 0); \
            line( img, Point( center.x + d, center.y - d ),                \
            Point( center.x - d, center.y + d ), color, 2, CV_AA, 0 )

             /// Convert image to gray and blur it
              cvtColor( imgs, src_gray, CV_BGR2GRAY );
              blur( src_gray, src_gray, Size(3,3) );

              /// Detect edges using canny
              Canny( src_gray, canny_output, 10, 50, 3 );
              /// Find contours
              findContours( canny_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );

              /// Get the moments
              vector<Moments> mu(contours.size() );
              for( int i = 0; i < contours.size(); i++ )
                 { mu[i] = moments( Mat(contours[i]), false ); }

              ///  Get the mass centers:
              // NOTE(review): m00 == 0 for a degenerate contour would yield
              // NaN centers here -- confirm inputs cannot produce them.
              vector<Point2f> mc( contours.size() );
              for( int i = 0; i < contours.size(); i++ )
                 { mc[i] = Point2f( mu[i].m10/mu[i].m00 , mu[i].m01/mu[i].m00 );
              }
               Mat drawing = Mat::zeros( canny_output.size(), CV_8UC3 );
              for( int i = 0; i< contours.size(); i++ )
                 {
                   Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255) );
                   drawContours( drawing, contours, i, color, 2, 8, hierarchy, 0, Point() );
                   circle( drawing, mc[i], 4, color, -1, 8, 0 );
            printf(" * Contour[%d] - Area (M_00) = %.2f - Area OpenCV: %.2f - Length: %.2f \n", i, mu[i].m00, contourArea(Mat(contours[i])), arcLength( Mat(contours[i]), true ) );
      

              }

              namedWindow( "Contours", CV_WINDOW_AUTOSIZE );
              imshow( "Contours", drawing );

            //imshow("camera",  src_gray);
            code = (char)waitKey(1000);
           
             if( code == 27 || code == 'q' || code == 'Q' )
            break;
        }
 return 0;

}

Monday, September 12, 2011

2D/3D estimation using solvePnP in opencv (NOT SOLVED)

In opencv "solvePnP" is used to find known points on a known 3D object. doing so the objects orientation relative to the camera coordinate system can be found.
the function is equivalent to finding the extrinsic camera parameters, which makes me believe it's more for planar objects. I need to do a few more experiments to find out why.

im using code from:
http://www.morethantechnical.com/2010/03/19/quick-and-easy-head-pose-estimation-with-opencv-w-code/  




#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/highgui/highgui.hpp>


#include <iostream>
#include <string>


using namespace cv;

#include <vector>

using namespace std;

#include <GL/gl.h>
#include <GL/glu.h>
#include <glut.h>

// Forward declarations for the pose-estimation demo below.
void loadNext();
void loadWithPoints(Mat& ip, Mat& img);

// OpenGL lighting and material constants used by myGLinit().
const GLfloat light_ambient[]  = { 0.0f, 0.0f, 0.0f, 1.0f };
const GLfloat light_diffuse[]  = { 1.0f, 1.0f, 1.0f, 1.0f };
const GLfloat light_specular[] = { 1.0f, 1.0f, 1.0f, 1.0f };
const GLfloat light_position[] = { 2.0f, 5.0f, 5.0f, 0.0f };

const GLfloat mat_ambient[]    = { 0.7f, 0.7f, 0.7f, 1.0f };
const GLfloat mat_diffuse[]    = { 0.8f, 0.8f, 0.8f, 1.0f };
const GLfloat mat_specular[]   = { 1.0f, 1.0f, 1.0f, 1.0f };
const GLfloat high_shininess[] = { 100.0f };

// Pose state shared between the OpenCV solver and the GL renderer:
double rot[9] = {0};          // 3x3 rotation matrix; written by Rodrigues via an aliasing Mat in loadWithPoints
Vec3d eav;                    // Euler angles from decomposeProjectionMatrix
GLuint textureID;             // texture id filled by key(' ')
Mat backPxls;
vector<double> rv(3), tv(3);  // rotation / translation vector storage
Mat rvec(rv),tvec(tv);        // Mat headers over rv/tv, passed to solvePnP
Mat camMatrix;                // camera intrinsics, set in main()

void resize(int width, int height)
{
    const float ar = (float) width / (float) height;

    glViewport(0, 0, width, height);

    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    gluPerspective(40,1.0,0.01,1000.0);

    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity() ;
}

int __w=250,__h=250;

// GLUT keyboard callback.
//  ESC / 'Q' / 'q': currently do nothing but fall through to the redisplay
//                   -- NOTE(review): presumably meant to exit; confirm.
//  space:           solve the next pose (loadNext) and upload backPxls as
//                   the current GL texture.
void key(unsigned char key, int x, int y)
{
    //static int counter = 0;

    switch (key)
    {
    case 27 :
    case 'Q':
    case 'q':
        break;
    case ' ':
        loadNext();

        glBindTexture(GL_TEXTURE_2D, textureID);//This binds the texture to a texture target
        glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR);//set our filter
        glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR);    //set our filter
        glTexImage2D(GL_TEXTURE_2D, 0, 3, backPxls.cols, backPxls.rows, 0, GL_RGB, GL_UNSIGNED_BYTE, backPxls.data);

        break;
    default:
        break;
    }

    glutPostRedisplay();
}

// GLUT idle callback: keep requesting redraws between events.
void idle(void)
{
    glutPostRedisplay();
}



// One-time OpenGL state setup: background color, smooth shading, depth
// testing, one positioned light with material properties, and the texture id
// later filled by the keyboard handler.
void myGLinit() {
    glClearColor(1,1,1,1);

    glShadeModel(GL_SMOOTH);

    glEnable(GL_DEPTH_TEST);
    glDepthFunc(GL_LEQUAL);

    // Lighting: single light source, color tracked from glColor* calls.
    glEnable(GL_LIGHT0);
    glEnable(GL_NORMALIZE);
    glEnable(GL_COLOR_MATERIAL);
    glColorMaterial ( GL_FRONT, GL_AMBIENT_AND_DIFFUSE );

    glLightfv(GL_LIGHT0, GL_AMBIENT,  light_ambient);
    glLightfv(GL_LIGHT0, GL_DIFFUSE,  light_diffuse);
    glLightfv(GL_LIGHT0, GL_SPECULAR, light_specular);
    glLightfv(GL_LIGHT0, GL_POSITION, light_position);

    glMaterialfv(GL_FRONT, GL_AMBIENT,   mat_ambient);
    glMaterialfv(GL_FRONT, GL_DIFFUSE,   mat_diffuse);
    glMaterialfv(GL_FRONT, GL_SPECULAR,  mat_specular);
    glMaterialfv(GL_FRONT, GL_SHININESS, high_shininess);

    glEnable(GL_LIGHTING);

    glGenTextures(1, &textureID); // id used by key(' ') to upload backPxls
}



// GLUT display callback: clears the frame, applies the solvePnP rotation
// (global rot[9]) and draws an RGB axis triad built from cones/tetrahedra.
void display(void)
{
    glClearColor(1.0f, 1.0f, 1.0f, 0.5f);
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);    // Clear Screen And Depth Buffer

    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();

    gluLookAt(0,0,0,0,0,1,0,1,0); // camera at origin looking down +Z, Y up

    //----------Axes
    glPushMatrix();
    glTranslated(0,0,5); // place the triad 5 units in front of the camera

    glPushMatrix();
    // rot[] rows laid into OpenGL's column-major 4x4 matrix.
    // NOTE(review): feeding rows as columns transposes R (i.e. applies the
    // inverse rotation) -- confirm this is the intended convention.
    double _d[16] = {    rot[0],rot[1],rot[2],0,
                        rot[3],rot[4],rot[5],0,
                        rot[6],rot[7],rot[8],0,
                        0,       0,      0        ,1};
    glMultMatrixd(_d);
    glRotated(180,1,0,0);

    //Z = red
    glPushMatrix();
    glRotated(180,0,1,0);
    glColor3d(1,0,0);
    glutSolidCone(0.05,1,15,20);
    glTranslated(0,0,1);
    glScaled(.1,.1,.1);
    glutSolidTetrahedron();
    glPopMatrix();

    //Y = green
    glPushMatrix();
    glRotated(-90,1,0,0);
    glColor3d(0,1,0);
    glutSolidCone(0.05,1,15,20);
    glTranslated(0,0,1);
    glScaled(.1,.1,.1);
    glutSolidTetrahedron();
    glPopMatrix();

    //X = blue
    glPushMatrix();
    glRotated(-90,0,1,0);
    glColor3d(0,0,1);
    glutSolidCone(0.05,1,15,20);
   
    glTranslated(0,0,1);
    glScaled(.1,.1,.1);
    glutSolidTetrahedron();
    glPopMatrix();

    glPopMatrix();
    glPopMatrix();
    //----------End axes

    glutSwapBuffers();
}

// Creates the GLUT window, installs the callbacks defined above, and enters
// the event loop. Note: glutMainLoop does not return under classic GLUT, so
// the trailing return statement is effectively unreachable.
int start_opengl_with_stereo(int argc,char** argv) {
    glutInitWindowSize(250,250);
    glutInitWindowPosition(40,40);
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE | GLUT_DEPTH | GLUT_MULTISAMPLE);
    glutCreateWindow("3D points coordinate");

    myGLinit();

    glutReshapeFunc(resize);
    glutDisplayFunc(display);
    glutKeyboardFunc(key);
    glutIdleFunc(idle);

    glutMainLoop();

    return 1;
}

Mat op;

void loadNext() {
    vector<Point2f > points;

float x,y;
 
  x=282;y=274;
  points.push_back(cv::Point2f(x,y));
  x=397;y=227;
  points.push_back(cv::Point2f(x,y));
  x=577;y=271;
  points.push_back(cv::Point2f(x,y));
  x=462;y=318;
  points.push_back(cv::Point2f(x,y));
  x=270;y=479;
  points.push_back(cv::Point2f(x,y));
  x=450;y=523;
  points.push_back(cv::Point2f(x,y));
  x=566;y=475;
  points.push_back(cv::Point2f(x,y));

    Mat ip(points);

    Mat img =  Mat::zeros( 800, 600, CV_8UC3 );
    for(unsigned int i = 0; i < points.size(); ++i)
    {
    std::cout << points[i] << std::endl;
    cv::circle(img,points[i],2,cv::Scalar(0, 0, 255, 0),1,8,0);
    }
    loadWithPoints(ip,img);

}

// Solves for the pose of the 3-D model points `op` observed at the 2-D image
// points `ip`, updating the globals rvec/tvec/rot/eav, then shows the
// annotated image in the "tmp" window.
// @param ip  Mat of 2-D image points (from loadNext)
// @param img canvas displayed after solving
void loadWithPoints(Mat& ip, Mat& img) {
    double _dc[] = {0,0,0,0}; // zero distortion coefficients
    solvePnP(op,ip,camMatrix,Mat(1,4,CV_64FC1,_dc),rvec,tvec,true); // last arg: use rvec/tvec as the initial guess

    // rotM shares its data buffer with the global rot[9], so Rodrigues writes
    // the 3x3 rotation matrix straight into rot for the GL renderer.
    Mat rotM(3,3,CV_64FC1,rot);
    Rodrigues(rvec,rotM);
    double* _r = rotM.ptr<double>();

    // Recover Euler angles (eav) by decomposing [R|0] as a projection matrix.
    Mat tmp,tmp1,tmp2,tmp3,tmp4,tmp5;
    double _pm[12] = {_r[0],_r[1],_r[2],0,
                      _r[3],_r[4],_r[5],0,
                      _r[6],_r[7],_r[8],0};
    decomposeProjectionMatrix(Mat(3,4,CV_64FC1,_pm),tmp,tmp1,tmp2,tmp3,tmp4,tmp5,eav);   
    imshow("tmp",img);
}


/// Entry point: builds the 3-D model (seven corners of a unit cube), an
/// initial pose guess and the camera matrix, runs one solvePnP pass
/// (loadNext), then enters the OpenGL event loop.
int main(int argc, char** argv)
{

    // Seven corners of a unit cube centred at the origin -- the 3-D model.
    vector<Point3f > modelPoints;
    float x,y,z;
 
  x=.5;y=.5;z=-.5;
  modelPoints.push_back(cv::Point3f(x,y,z));
  x=.5;y=.5;z=.5;
  modelPoints.push_back(cv::Point3f(x,y,z));
  x=-.5;y=.5;z=.5;
  modelPoints.push_back(cv::Point3f(x,y,z));
  x=-.5;y=.5;z=-.5;
  modelPoints.push_back(cv::Point3f(x,y,z));
  x=.5;y=-.5;z=-.5;
  modelPoints.push_back(cv::Point3f(x,y,z));
  x=-.5;y=-.5;z=-.5;
  modelPoints.push_back(cv::Point3f(x,y,z));
  x=-.5;y=-.5;z=.5;
  modelPoints.push_back(cv::Point3f(x,y,z));


    op = Mat(modelPoints);

    // Initial pose guess: a rotation that flips Y and Z, converted to a
    // Rodrigues vector in rvec, plus a unit translation along Z.
    rvec = Mat(rv);
    double _d[9] = {1,0,0,
                    0,-1,0,
                    0,0,-1};
    Rodrigues(Mat(3,3,CV_64FC1,_d),rvec);
    tv[0]=0;tv[1]=0;tv[2]=1;
    tvec = Mat(tv);
    // Camera intrinsics: focal length 40, principal point (400,500).
    // NOTE(review): entry (2,2) would normally be 1, not 40; the author also
    // flags this matrix as a problem -- confirm before relying on it.
    double _cm[9] = { 40, 0, 400,
                      0, 40, 500,
                      0,  0,   40 }; //calibration matrix PROBLEM!?
    camMatrix = Mat(3,3,CV_64FC1,_cm);

    namedWindow("tmp",1);
    loadNext();

    start_opengl_with_stereo(argc,argv);

    return 0;
}