InversePerspectiveMapping.cpp
Go to the documentation of this file.
00001 /**************************************************************************************************
00002  Software License Agreement (BSD License)
00003 
00004  Copyright (c) 2011-2013, LAR toolkit developers - University of Aveiro - http://lars.mec.ua.pt
00005  All rights reserved.
00006 
00007  Redistribution and use in source and binary forms, with or without modification, are permitted
00008  provided that the following conditions are met:
00009 
00010   *Redistributions of source code must retain the above copyright notice, this list of
00011    conditions and the following disclaimer.
00012   *Redistributions in binary form must reproduce the above copyright notice, this list of
00013    conditions and the following disclaimer in the documentation and/or other materials provided
00014    with the distribution.
00015   *Neither the name of the University of Aveiro nor the names of its contributors may be used to
00016    endorse or promote products derived from this software without specific prior written permission.
00017  
00018  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
00019  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
00020  FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
00021  CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
00022  DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
00023  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
00024  IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
00025  OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
00026 ***************************************************************************************************/
00027 /***
00028  * \file InversePerspectiveMapping.cpp
00029  * \author Mohamed Aly <malaa@caltech.edu>
00030  * \date 11/29/2006
00031  */
00032 
00033 #include "InversePerspectiveMapping.hh"
00034 
00035 #include "CameraInfoOpt.h"
00036 
00037 #include <iostream>
00038 #include <math.h>
00039 #include <assert.h>
00040 #include <list>
00041 
00042 using namespace std;
00043 #include <cv.h>
00044 #include <highgui.h>
00045 
00046 namespace LaneDetector
00047 {
00048 
00049 #define VP_PORTION 0.05
00050 
00051 /*
00052  We are assuming the world coordinate frame center is at the camera,
00053  the ground plane is at height -h, the X-axis is going right,
00054  the Y-axis is going forward, the Z-axis is going up. The
00055  camera is looking forward with optical axis in direction of
00056  Y-axis, with possible pitch angle (above or below the Y-axis)
00057  and yaw angle (left or right).
00058  The camera coordinates have the same center as the world, but the Xc-axis goes right,
00059  the  Yc-axis goes down, and the Zc-axis (optical axis) goes forward. The
00060  uv-plane of the image is such that u is horizontal going right, v is
00061  vertical going down.
00062  The image coordinates uv are such that the pixels are at half coordinates
00063  i.e. first pixel is (.5,.5) ...etc where the top-left point is (0,0) i.e.
00064  the tip of the first pixel is (0,0)
00065 */
00066 
/** \brief Computes the Inverse Perspective Mapping (IPM), i.e. a bird's-eye
 * view of the road, by back-projecting a regular grid of ground-plane points
 * into the input image and sampling the image there.
 *
 * \param inImage input image; must be FLOAT_MAT_TYPE or INT_MAT_TYPE and
 *        have the same type as outImage
 * \param outImage preallocated output IPM image; its size determines the
 *        sampling resolution of the ground-plane grid
 * \param ipmInfo in/out: on input supplies the region of interest
 *        (ipmLeft/ipmRight/ipmTop/ipmBottom, clamped in place here),
 *        vpPortion and ipmInterpolation (0 = bilinear, else nearest);
 *        on output xLimits/yLimits, xScale/yScale, width/height are filled
 * \param cameraInfo camera intrinsics and pitch/yaw used for the projection
 * \param outPoints optional (may be NULL): receives IPM pixel coordinates
 *        whose source sample falls near or outside the image ROI
 *
 * On an unsupported image type this prints to cerr and calls exit(1).
 */
void mcvGetIPM(const CvMat* inImage, CvMat* outImage,
               IPMInfo *ipmInfo, const CameraInfo *cameraInfo,
               list<CvPoint> *outPoints)
{

//      cvShowImage("mcvGetLanes", inImage );
//      cvWaitKey(0);


  //check input images types
  //CvMat inMat, outMat;
  //cvGetMat(inImage, &inMat);
  //cvGetMat(outImage, &outMat);
//      cout<<"val: "<<inImage->type<<endl;
//      cout << CV_MAT_TYPE(inImage->type) << " " << CV_MAT_TYPE(FLOAT_MAT_TYPE) <<  " " << CV_MAT_TYPE(INT_MAT_TYPE)<<"\n";
        if (!(CV_ARE_TYPES_EQ(inImage, outImage) &&
                (CV_MAT_TYPE(inImage->type)==CV_MAT_TYPE(FLOAT_MAT_TYPE) ||
                (CV_MAT_TYPE(inImage->type)==CV_MAT_TYPE(INT_MAT_TYPE)))))
        {
                cerr << "Unsupported image types in mcvGetIPM";
                exit(1);
        }

  //get size of input image
  FLOAT u, v;
  v = inImage->height;
  u = inImage->width;

  //get the vanishing point, clamped to lie at or below the top image row
  FLOAT_POINT2D vp;
  vp = mcvGetVanishingPoint(cameraInfo);
  vp.y = MAX(0, vp.y);
  //vp.y = 30;

  //get extent of the image in the xfyf plane: clamp the requested ROI to
  //the image, and keep its top at least eps (vpPortion of the image height)
  //below the vanishing point, since rows at/above the vanishing point
  //project to infinity on the ground plane
  FLOAT_MAT_ELEM_TYPE eps = ipmInfo->vpPortion * v;//VP_PORTION*v;
  ipmInfo->ipmLeft = MAX(0, ipmInfo->ipmLeft);
  ipmInfo->ipmRight = MIN(u-1, ipmInfo->ipmRight);
  ipmInfo->ipmTop = MAX(vp.y+eps, ipmInfo->ipmTop);
  ipmInfo->ipmBottom = MIN(v-1, ipmInfo->ipmBottom);
  //four corners of the ROI trapezoid in image coordinates:
  //row 0 holds the u values, row 1 the matching v values
  FLOAT_MAT_ELEM_TYPE uvLimitsp[] = {vp.x,
    ipmInfo->ipmRight, ipmInfo->ipmLeft, vp.x,
    ipmInfo->ipmTop, ipmInfo->ipmTop,   ipmInfo->ipmTop,  ipmInfo->ipmBottom};
        //{vp.x, u, 0, vp.x,
        //vp.y+eps, vp.y+eps, vp.y+eps, v};
  CvMat uvLimits = cvMat(2, 4, FLOAT_MAT_TYPE, uvLimitsp);

  //get these points on the ground plane
  CvMat * xyLimitsp = cvCreateMat(2, 4, FLOAT_MAT_TYPE);
  CvMat xyLimits = *xyLimitsp;
  mcvTransformImage2Ground(&uvLimits, &xyLimits,cameraInfo);
  //SHOW_MAT(xyLimitsp, "xyLImits");

  //get extent on the ground plane: min/max of the x row and of the y row
  CvMat row1, row2;
  cvGetRow(&xyLimits, &row1, 0);
  cvGetRow(&xyLimits, &row2, 1);
  double xfMax, xfMin, yfMax, yfMin;
  cvMinMaxLoc(&row1, (double*)&xfMin, (double*)&xfMax, 0, 0, 0);
  cvMinMaxLoc(&row2, (double*)&yfMin, (double*)&yfMax, 0, 0, 0);

  INT outRow = outImage->height;
  INT outCol = outImage->width;

  //ground-plane distance covered by one output pixel in each direction
  FLOAT_MAT_ELEM_TYPE stepRow = (yfMax-yfMin)/outRow;
  FLOAT_MAT_ELEM_TYPE stepCol = (xfMax-xfMin)/outCol;

  //construct the grid to sample
  CvMat *xyGrid = cvCreateMat(2, outRow*outCol, FLOAT_MAT_TYPE);
  INT i, j;
  FLOAT_MAT_ELEM_TYPE x, y;
  //fill it with x-y values on the ground plane in world frame, sampling at
  //pixel centers; y decreases with i so the far field maps to the top rows
  //of the output image
  for (i=0, y=yfMax-.5*stepRow; i<outRow; i++, y-=stepRow)
    for (j=0, x=xfMin+.5*stepCol; j<outCol; j++, x+=stepCol)
    {
      CV_MAT_ELEM(*xyGrid, FLOAT_MAT_ELEM_TYPE, 0, i*outCol+j) = x;
      CV_MAT_ELEM(*xyGrid, FLOAT_MAT_ELEM_TYPE, 1, i*outCol+j) = y;
    }
  //get their pixel values in image frame
  CvMat *uvGrid = cvCreateMat(2, outRow*outCol, FLOAT_MAT_TYPE);
  mcvTransformGround2Image(xyGrid, uvGrid, cameraInfo);
  //now loop and find the nearest pixel value for each position
  //that's inside the image, otherwise put it zero
  FLOAT_MAT_ELEM_TYPE ui, vi;
  //get mean of the input image; used as the fill value for samples
  //outside the ROI
  CvScalar means = cvAvg(inImage);
  double mean = means.val[0];
  //generic loop to work for both float and int matrix types; expanded once
  //per element type below.
  //NOTE(review): in the bilinear branch x2/y2 can reach ipmRight+1 /
  //ipmBottom+1, i.e. one pixel past the ROI (and past the image when the
  //ROI touches the border) -- confirm the ROI is always at least one pixel
  //inside the image before relying on that path.
  #define MCV_GET_IPM(type) \
  for (i=0; i<outRow; i++) \
      for (j=0; j<outCol; j++) \
      { \
          /*get pixel coordiantes*/ \
          ui = CV_MAT_ELEM(*uvGrid, FLOAT_MAT_ELEM_TYPE, 0, i*outCol+j); \
          vi = CV_MAT_ELEM(*uvGrid, FLOAT_MAT_ELEM_TYPE, 1, i*outCol+j); \
          /*check if out-of-bounds*/ \
          /*if (ui<0 || ui>u-1 || vi<0 || vi>v-1) \*/ \
          if (ui<ipmInfo->ipmLeft || ui>ipmInfo->ipmRight || \
              vi<ipmInfo->ipmTop || vi>ipmInfo->ipmBottom) \
          { \
              CV_MAT_ELEM(*outImage, type, i, j) = (type)mean; \
          } \
          /*not out of bounds, then get nearest neighbor*/ \
          else \
          { \
              /*Bilinear interpolation*/ \
              if (ipmInfo->ipmInterpolation == 0) \
              { \
                  int x1 = int(ui), x2 = int(ui+1); \
                  int y1 = int(vi), y2 = int(vi+1); \
                  float x = ui - x1, y = vi - y1;   \
                  float val = CV_MAT_ELEM(*inImage, type, y1, x1) * (1-x) * (1-y) + \
                      CV_MAT_ELEM(*inImage, type, y1, x2) * x * (1-y) + \
                      CV_MAT_ELEM(*inImage, type, y2, x1) * (1-x) * y + \
                      CV_MAT_ELEM(*inImage, type, y2, x2) * x * y;   \
                  CV_MAT_ELEM(*outImage, type, i, j) =  (type)val; \
  } \
              /*nearest-neighbor interpolation*/ \
              else \
                  CV_MAT_ELEM(*outImage, type, i, j) = \
                      CV_MAT_ELEM(*inImage, type, int(vi+.5), int(ui+.5)); \
          } \
          if (outPoints && \
              (ui<ipmInfo->ipmLeft+10 || ui>ipmInfo->ipmRight-10 || \
              vi<ipmInfo->ipmTop || vi>ipmInfo->ipmBottom-2) )\
              outPoints->push_back(cvPoint(j, i)); \
      }
  if (CV_MAT_TYPE(inImage->type)==FLOAT_MAT_TYPE)
  {
      MCV_GET_IPM(FLOAT_MAT_ELEM_TYPE)
  }
  else
  {
      MCV_GET_IPM(INT_MAT_ELEM_TYPE)
  }
  //return the ipm info: world-frame limits of the sampled grid
  //(xLimits ascending, yLimits[0] <= yLimits[1]) and pixels-per-meter scales
  ipmInfo->xLimits[0] = CV_MAT_ELEM(*xyGrid, FLOAT_MAT_ELEM_TYPE, 0, 0);
  ipmInfo->xLimits[1] =
    CV_MAT_ELEM(*xyGrid, FLOAT_MAT_ELEM_TYPE, 0, (outRow-1)*outCol+outCol-1);
  ipmInfo->yLimits[1] = CV_MAT_ELEM(*xyGrid, FLOAT_MAT_ELEM_TYPE, 1, 0);
  ipmInfo->yLimits[0] =
    CV_MAT_ELEM(*xyGrid, FLOAT_MAT_ELEM_TYPE, 1, (outRow-1)*outCol+outCol-1);
  ipmInfo->xScale = 1/stepCol;
  ipmInfo->yScale = 1/stepRow;
  ipmInfo->width = outCol;
  ipmInfo->height = outRow;

  //clean
  cvReleaseMat(&xyLimitsp);
  cvReleaseMat(&xyGrid);
  cvReleaseMat(&uvGrid);
}
00231 
00232 
/** \brief Back-projects image points onto the ground plane, which lies at
 * height z = -cameraHeight in the world frame.
 *
 * \param inPoints 2xN matrix of image points: row 0 = u, row 1 = v
 * \param outPoints 2xN matrix receiving the ground points:
 *        row 0 = x, row 1 = y (may alias inPoints)
 * \param cameraInfo camera intrinsics and pitch/yaw
 */
void mcvTransformImage2Ground(const CvMat *inPoints,
                              CvMat *outPoints, const CameraInfo *cameraInfo)
{

  //add two rows to the input points: (u, v, 1) homogeneous coordinates in
  //row 0..2, plus a fourth row that will receive the homogeneous scale
  CvMat *inPoints4 = cvCreateMat(inPoints->rows+2, inPoints->cols,
      cvGetElemType(inPoints));

  //copy inPoints to first two rows; the views below all share inPoints4's
  //storage: rows 0-1, rows 0-2, row 2 (set to 1), and row 3 (the scale)
  CvMat inPoints2, inPoints3, inPointsr4, inPointsr3;
  cvGetRows(inPoints4, &inPoints2, 0, 2);
  cvGetRows(inPoints4, &inPoints3, 0, 3);
  cvGetRow(inPoints4, &inPointsr3, 2);
  cvGetRow(inPoints4, &inPointsr4, 3);
  cvSet(&inPointsr3, cvRealScalar(1));
  cvCopy(inPoints, &inPoints2);
  //create the transformation matrix: inverse projection scaled by the
  //camera height, composed from the intrinsics and the pitch/yaw rotation
  float c1 = cos(cameraInfo->pitch);
  float s1 = sin(cameraInfo->pitch);
  float c2 = cos(cameraInfo->yaw);
  float s2 = sin(cameraInfo->yaw);
  float matp[] = {
    -cameraInfo->cameraHeight*c2/cameraInfo->focalLength.x,
    cameraInfo->cameraHeight*s1*s2/cameraInfo->focalLength.y,
    (cameraInfo->cameraHeight*c2*cameraInfo->opticalCenter.x/
      cameraInfo->focalLength.x)-
      (cameraInfo->cameraHeight *s1*s2* cameraInfo->opticalCenter.y/
      cameraInfo->focalLength.y) - cameraInfo->cameraHeight *c1*s2,

    cameraInfo->cameraHeight *s2 /cameraInfo->focalLength.x,
    cameraInfo->cameraHeight *s1*c2 /cameraInfo->focalLength.y,
    (-cameraInfo->cameraHeight *s2* cameraInfo->opticalCenter.x
      /cameraInfo->focalLength.x)-(cameraInfo->cameraHeight *s1*c2*
      cameraInfo->opticalCenter.y /cameraInfo->focalLength.y) -
      cameraInfo->cameraHeight *c1*c2,

    0,
    cameraInfo->cameraHeight *c1 /cameraInfo->focalLength.y,
    (-cameraInfo->cameraHeight *c1* cameraInfo->opticalCenter.y /
      cameraInfo->focalLength.y) + cameraInfo->cameraHeight *s1,

    0,
    -c1 /cameraInfo->focalLength.y,
    (c1* cameraInfo->opticalCenter.y /cameraInfo->focalLength.y) - s1,
  };
  CvMat mat = cvMat(4, 3, CV_32FC1, matp);
  //multiply: (4x3) * (3xN) -> 4xN
  //NOTE(review): inPoints3 is a view of inPoints4's first three rows, so
  //source and destination overlap; cvGEMM documents no in-place support --
  //confirm this is safe on the OpenCV version in use.
  cvMatMul(&mat, &inPoints3, inPoints4);
  //divide rows 0 and 1 by the homogeneous scale in row 3 of inPoints4
  for (int i=0; i<inPoints->cols; i++)
  {
    float div = CV_MAT_ELEM(inPointsr4, float, 0, i);
    CV_MAT_ELEM(*inPoints4, float, 0, i) =
        CV_MAT_ELEM(*inPoints4, float, 0, i) / div ;
    CV_MAT_ELEM(*inPoints4, float, 1, i) =
        CV_MAT_ELEM(*inPoints4, float, 1, i) / div;
  }
  //put back the result into outPoints
  cvCopy(&inPoints2, outPoints);
  //clear
  cvReleaseMat(&inPoints4);
}
00305 
00306 
/** \brief Projects points on the ground plane (z = -cameraHeight in the
 * world frame) into image coordinates.
 *
 * \param inPoints 2xN matrix of ground points: row 0 = x, row 1 = y
 * \param outPoints 2xN matrix receiving the image points:
 *        row 0 = u, row 1 = v (may alias inPoints)
 * \param cameraInfo camera intrinsics and pitch/yaw
 */
void mcvTransformGround2Image(const CvMat *inPoints,
                              CvMat *outPoints, const CameraInfo *cameraInfo)
{
  //add one row to the input points, making homogeneous 3D world points
  //(x, y, -cameraHeight)
  CvMat *inPoints3 = cvCreateMat(inPoints->rows+1, inPoints->cols,
      cvGetElemType(inPoints));

  //copy inPoints to first two rows; the third row is the ground height
  CvMat inPoints2,  inPointsr3;
  cvGetRows(inPoints3, &inPoints2, 0, 2);
  cvGetRow(inPoints3, &inPointsr3, 2);
  cvSet(&inPointsr3, cvRealScalar(-cameraInfo->cameraHeight));
  cvCopy(inPoints, &inPoints2);
  //create the transformation matrix: intrinsics composed with the
  //world-to-camera rotation built from pitch and yaw
  float c1 = cos(cameraInfo->pitch);
  float s1 = sin(cameraInfo->pitch);
  float c2 = cos(cameraInfo->yaw);
  float s2 = sin(cameraInfo->yaw);
  float matp[] = {
    cameraInfo->focalLength.x * c2 + c1*s2* cameraInfo->opticalCenter.x,
    -cameraInfo->focalLength.x * s2 + c1*c2* cameraInfo->opticalCenter.x,
    - s1 * cameraInfo->opticalCenter.x,

    s2 * (-cameraInfo->focalLength.y * s1 + c1* cameraInfo->opticalCenter.y),
    c2 * (-cameraInfo->focalLength.y * s1 + c1* cameraInfo->opticalCenter.y),
    -cameraInfo->focalLength.y * c1 - s1* cameraInfo->opticalCenter.y,

    c1*s2,
    c1*c2,
    -s1
  };
  CvMat mat = cvMat(3, 3, CV_32FC1, matp);
  //multiply: (3x3) * (3xN) -> 3xN
  //NOTE(review): source and destination are the same matrix here; cvGEMM
  //documents that it cannot run in-place -- confirm this is safe on the
  //OpenCV version in use.
  cvMatMul(&mat, inPoints3, inPoints3);
  //divide rows 0 and 1 by the homogeneous scale in the last row
  for (int i=0; i<inPoints->cols; i++)
  {
    float div = CV_MAT_ELEM(inPointsr3, float, 0, i);
    CV_MAT_ELEM(*inPoints3, float, 0, i) =
        CV_MAT_ELEM(*inPoints3, float, 0, i) / div ;
    CV_MAT_ELEM(*inPoints3, float, 1, i) =
        CV_MAT_ELEM(*inPoints3, float, 1, i) / div;
  }
  //put back the result into outPoints
  cvCopy(&inPoints2, outPoints);
  //clear
  cvReleaseMat(&inPoints3);
}
00364 
00365 
/** \brief Computes the image coordinates of the vanishing point of the
 * ground plane, i.e. the projection of the forward ground direction at
 * infinity, from the camera pitch/yaw and intrinsics.
 *
 * \param cameraInfo camera intrinsics (focal length, optical center) and
 *        extrinsics (pitch, yaw)
 * \return the vanishing point (u, v) in image coordinates
 */
FLOAT_POINT2D mcvGetVanishingPoint(const CameraInfo *cameraInfo)
{
  //get the vp in world coordinates: the forward direction on the ground
  //plane (z = 0) as a direction vector
  FLOAT_MAT_ELEM_TYPE vpp[] = {sin(cameraInfo->yaw)/cos(cameraInfo->pitch),
          cos(cameraInfo->yaw)/cos(cameraInfo->pitch), 0};
  CvMat vp = cvMat(3, 1, FLOAT_MAT_TYPE, vpp);

  //transform from world to camera coordinates
  //
  //rotation matrix for yaw
  FLOAT_MAT_ELEM_TYPE tyawp[] = {cos(cameraInfo->yaw), -sin(cameraInfo->yaw), 0,
                sin(cameraInfo->yaw), cos(cameraInfo->yaw), 0,
                0, 0, 1};
  CvMat tyaw = cvMat(3, 3, FLOAT_MAT_TYPE, tyawp);
  //rotation matrix for pitch; also maps world axes onto the camera frame
  //(Xc right, Yc down, Zc forward)
  FLOAT_MAT_ELEM_TYPE tpitchp[] = {1, 0, 0,
                  0, -sin(cameraInfo->pitch), -cos(cameraInfo->pitch),
                  0, cos(cameraInfo->pitch), -sin(cameraInfo->pitch)};
  CvMat transform = cvMat(3, 3, FLOAT_MAT_TYPE, tpitchp);
  //combined transform
  //NOTE(review): dst aliases src1 here (and in the two products below);
  //cvGEMM documents no in-place support -- confirm this is safe on the
  //OpenCV version in use.
  cvMatMul(&transform, &tyaw, &transform);

  //
  //transformation from (xc, yc) in camera coordinates
  // to (u,v) in image frame
  //
  //matrix to shift optical center and focal length
  FLOAT_MAT_ELEM_TYPE t1p[] = {
    cameraInfo->focalLength.x, 0,
    cameraInfo->opticalCenter.x,
    0, cameraInfo->focalLength.y,
    cameraInfo->opticalCenter.y,
    0, 0, 1};
  CvMat t1 = cvMat(3, 3, FLOAT_MAT_TYPE, t1p);
  //combine transform
  cvMatMul(&t1, &transform, &transform);
  //transform the world direction into pixel coordinates
  cvMatMul(&transform, &vp, &vp);

  //
  //clean and return the first two components (u, v)
  //
  FLOAT_POINT2D ret;
  ret.x = cvGetReal1D(&vp, 0);
  ret.y = cvGetReal1D(&vp, 1);
  return ret;
}
00424 
00425 
00433 void mcvPointImIPM2World(FLOAT_POINT2D *point, const IPMInfo *ipmInfo)
00434 {
00435   //x-direction
00436   point->x /= ipmInfo->xScale;
00437   point->x += ipmInfo->xLimits[0];
00438   //y-direction
00439   point->y /= ipmInfo->yScale;
00440   point->y = ipmInfo->yLimits[1] - point->y;
00441 }
00442 
00443 
00452 void mcvTransformImIPM2Ground(const CvMat *inMat, CvMat* outMat, const IPMInfo *ipmInfo)
00453 {
00454   CvMat *mat;
00455   mat = outMat;
00456   if(inMat != mat)
00457   {
00458     cvCopy(inMat, mat);
00459   }
00460 
00461   //work on the x-direction i.e. first row
00462   CvMat row;
00463   cvGetRow(mat, &row, 0);
00464   cvConvertScale(&row, &row, 1./ipmInfo->xScale, ipmInfo->xLimits[0]);
00465 
00466   //work on y-direction
00467   cvGetRow(mat, &row, 1);
00468   cvConvertScale(&row, &row, -1./ipmInfo->yScale, ipmInfo->yLimits[1]);
00469 }
00470 
00480 void mcvTransformImIPM2Im(const CvMat *inMat, CvMat* outMat, const IPMInfo *ipmInfo,
00481                           const CameraInfo *cameraInfo)
00482 {
00483   //convert to world coordinates
00484   mcvTransformImIPM2Ground(inMat, outMat, ipmInfo);
00485 
00486   //convert to image coordinates
00487   mcvTransformGround2Image(outMat, outMat, cameraInfo);
00488 
00489 }
00490 
00491 
00499 void mcvInitCameraInfo (char * const fileName, CameraInfo *cameraInfo)
00500 {
00501   //parsed camera data
00502   CameraInfoParserInfo camInfo;
00503   //read the data
00504   assert(cameraInfoParser_configfile(fileName, &camInfo, 0, 1, 1)==0);
00505   //init the strucure
00506   cameraInfo->focalLength.x = camInfo.focalLengthX_arg;
00507   cameraInfo->focalLength.y = camInfo.focalLengthY_arg;
00508   cameraInfo->opticalCenter.x = camInfo.opticalCenterX_arg;
00509   cameraInfo->opticalCenter.y = camInfo.opticalCenterY_arg;
00510   cameraInfo->cameraHeight = camInfo.cameraHeight_arg;
00511   cameraInfo->pitch = camInfo.pitch_arg * CV_PI/180;
00512   cameraInfo->yaw = camInfo.yaw_arg * CV_PI/180;
00513   cameraInfo->imageWidth = camInfo.imageWidth_arg;
00514   cameraInfo->imageHeight = camInfo.imageHeight_arg;
00515 }
00516 
00517 
00525  void mcvScaleCameraInfo (CameraInfo *cameraInfo, CvSize size)
00526  {
00527   //compute the scale factor
00528   double scaleX = size.width/cameraInfo->imageWidth;
00529   double scaleY = size.height/cameraInfo->imageHeight;
00530   //scale
00531   cameraInfo->imageWidth = size.width;
00532   cameraInfo->imageHeight = size.height;
00533   cameraInfo->focalLength.x *= scaleX;
00534   cameraInfo->focalLength.y *= scaleY;
00535   cameraInfo->opticalCenter.x *= scaleX;
00536   cameraInfo->opticalCenter.y *= scaleY;
00537  }
00538 
00539 
00548 void mcvGetIPMExtent(const CameraInfo *cameraInfo, IPMInfo *ipmInfo )
00549 {
00550   //get size of input image
00551   FLOAT u, v;
00552   v = cameraInfo->imageHeight;
00553   u = cameraInfo->imageWidth;
00554 
00555   //get the vanishing point
00556   FLOAT_POINT2D vp;
00557   vp = mcvGetVanishingPoint(cameraInfo);
00558   vp.y = MAX(0, vp.y);
00559 
00560   //get extent of the image in the xfyf plane
00561   FLOAT_MAT_ELEM_TYPE eps = VP_PORTION*v;
00562   FLOAT_MAT_ELEM_TYPE uvLimitsp[] = {vp.x, u, 0, vp.x,
00563                       vp.y+eps, vp.y+eps, vp.y+eps, v};
00564   CvMat uvLimits = cvMat(2, 4, FLOAT_MAT_TYPE, uvLimitsp);
00565 
00566   //get these points on the ground plane
00567   CvMat * xyLimitsp = cvCreateMat(2, 4, FLOAT_MAT_TYPE);
00568   CvMat xyLimits = *xyLimitsp;
00569   mcvTransformImage2Ground(&uvLimits, &xyLimits,cameraInfo);
00570   //SHOW_MAT(xyLimitsp, "xyLImits");
00571 
00572   //get extent on the ground plane
00573   CvMat row1, row2;
00574   cvGetRow(&xyLimits, &row1, 0);
00575   cvGetRow(&xyLimits, &row2, 1);
00576   double xfMax, xfMin, yfMax, yfMin;
00577   cvMinMaxLoc(&row1, (double*)&xfMin, (double*)&xfMax, 0, 0, 0);
00578   cvMinMaxLoc(&row2, (double*)&yfMin, (double*)&yfMax, 0, 0, 0);
00579 
00580   //return
00581   ipmInfo->xLimits[0] = xfMin;
00582   ipmInfo->xLimits[1] = xfMax;
00583   ipmInfo->yLimits[1] = yfMax;
00584   ipmInfo->yLimits[0] = yfMin;
00585 
00586 }
00587 
00588 } // namespace LaneDetector


caltech_lanes
Author(s): Ricardo Morais
autogenerated on Thu Nov 20 2014 11:35:20