bazar  1.3.1
fullcalib.cpp
Go to the documentation of this file.
1 
11 #include <iostream>
12 #include "cv.h"
13 #include "highgui.h"
14 
15 #ifdef HAVE_CONFIG_H
16 #include <config.h>
17 #endif
18 
19 #include "calibmodel.h"
20 
21 void show_result(planar_object_recognizer &recognizer, IplImage *video, IplImage *dst);
22 static void augment_scene(CalibModel &model, IplImage *frame, IplImage *display);
25 bool geometric_calibration(CalibModel &model, CvCapture *capture, bool cache);
26 bool photometric_calibration(CalibModel &model, CvCapture *capture,
27  int nbImages, bool cache);
28 
29 void usage(const char *s) {
30  cerr << "usage:\n" << s
31  << "[<cam number>|<video file>] [-m <model image>] [-r]\n"
32  " -m specifies model image\n"
33  " -r do not load any data\n"
34  " -t train a new classifier\n"
35  " -g recompute geometric calibration\n"
36  " -l rebuild irradiance map from scratch\n";
37  exit(1);
38 }
39 
40 int main( int argc, char** argv )
41 {
42  CvCapture* capture = 0;
43 
44  const char *captureSrc = "0";
45  bool redo_geom=false;
46  bool redo_training=false;
47  bool redo_lighting=false;
48 
49  char *modelFile = "model.bmp";
50  // parse command line
51  for (int i=1; i<argc; i++) {
52  if (strcmp(argv[i], "-m") ==0) {
53  if (i==argc-1) usage(argv[0]);
54  modelFile = argv[i+1];
55  i++;
56  } else if (strcmp(argv[i], "-r")==0) {
57  redo_geom=redo_training=redo_lighting=true;
58  } else if (strcmp(argv[i], "-g")==0) {
59  redo_geom=redo_lighting=true;
60  } else if (strcmp(argv[i], "-l")==0) {
61  redo_lighting=true;
62  } else if (strcmp(argv[i], "-t")==0) {
63  redo_training=true;
64  } else if (argv[i][0]=='-') {
65  usage(argv[0]);
66  } else {
67  captureSrc = argv[i];
68  }
69  }
70 
71  if(strlen(captureSrc) == 1 && isdigit(captureSrc[0]))
72  capture = cvCaptureFromCAM( captureSrc[0]-'0');
73  else
74  capture = cvCaptureFromAVI( captureSrc );
75 
76  if( !capture )
77  {
78  cerr <<"Could not initialize capturing from " << captureSrc << " ...\n";
79  return -1;
80  }
81 
82  // Allocate the detector object
83  CalibModel model(modelFile);
84 
85  if (!model.buildCached(capture, !redo_training)) {
86  cout << "model.buildCached() failed.\n";
87  return 1;
88  }
89 
90  cout << "Model build. Starting geometric calibration.\n";
91 
92  if (!geometric_calibration(model,capture, !redo_geom)) {
93  cerr << "Geometric calibration failed.\n";
94  return 2;
95  }
96 
97  cout << "Geometric calibration OK. Calibrating light...\n";
98 
99  photometric_calibration(model, capture, 150, !redo_lighting);
100 }
101 
102 bool geometric_calibration(CalibModel &model, CvCapture *capture, bool cache)
103 {
104 
105  if (cache && model.augm.LoadOptimalStructureFromFile("camera_c.txt", "camera_r_t.txt")) {
106  return true;
107  }
108 
109  const char *win = "BazAR";
110 
111  IplImage*gray=0;
112 
113  cvNamedWindow(win, CV_WINDOW_AUTOSIZE);
114 
116 
117  IplImage* frame = cvQueryFrame(capture);
118  calib.AddCamera(frame->width, frame->height);
119  IplImage* display=cvCloneImage(frame);
120 
121  bool success=false;
122 
123  int nbHomography =0;
124  while (1)
125  {
126  // acquire image
127  frame = cvQueryFrame( capture );
128  if( !frame )
129  break;
130 
131  // convert it to gray levels, if required
132  if (frame->nChannels >1) {
133  if( !gray )
134  gray = cvCreateImage( cvGetSize(frame), IPL_DEPTH_8U, 1 );
135  cvCvtColor(frame, gray, CV_RGB2GRAY);
136  } else {
137  gray = frame;
138  }
139 
140  // run the detector
141  if (model.detector.detect(gray)) {
142  add_detected_homography(model.detector, calib);
143  nbHomography++;
144  cout << nbHomography << " homographies.\n";
145  if (nbHomography >=70) {
146  if (calib.Calibrate(
147  50, // max hom
148  2, // random
149  3,
150  3, // padding ratio 1/2
151  0,
152  0,
153  0.0078125, //alpha
154  0.9, //beta
155  0.001953125,//gamma
156  12, // iter
157  0.05, //eps
158  3 //postfilter eps
159  ))
160  {
162  success=true;
163  break;
164  }
165  }
166  }
167  show_result(model.detector, frame, display);
168  cvShowImage(win, display);
169 
170  int k=cvWaitKey(10);
171  if (k=='q' || k== 27)
172  break;
173  }
174 
175  cvReleaseImage(&display);
176  if (frame->nChannels > 1)
177  cvReleaseImage(&gray);
178  if (success && model.augm.LoadOptimalStructureFromFile("camera_c.txt", "camera_r_t.txt")) {
179  return true;
180  }
181  return false;
182 }
183 
184 bool photometric_calibration(CalibModel &model, CvCapture *capture,
185  int nbImages, bool cache)
186 {
187 
188  if (cache) model.map.load();
189 
190  const char *win = "BazAR";
191 
192  IplImage*gray=0;
193 
194  cvNamedWindow(win, CV_WINDOW_AUTOSIZE);
195  cvNamedWindow("LightMap", CV_WINDOW_AUTOSIZE);
196 
197  IplImage* frame = 0;
198  IplImage* display=cvCloneImage(cvQueryFrame(capture));
199 
200  int nbHomography =0;
201  LightCollector lc(model.map.reflc);
202  IplImage *lightmap = cvCreateImage(cvGetSize(model.map.map.getIm()), IPL_DEPTH_8U,
203  lc.avgChannels);
204  while (1)
205  {
206  // acquire image
207  frame = cvQueryFrame( capture );
208  /*
209  if (frame) cvReleaseImage(&frame);
210  frame = cvLoadImage("model.bmp",1);
211  */
212  if( !frame )
213  break;
214 
215  // convert it to gray levels, if required
216  if (frame->nChannels >1) {
217  if( !gray )
218  gray = cvCreateImage( cvGetSize(frame), IPL_DEPTH_8U, 1 );
219  cvCvtColor(frame, gray, CV_RGB2GRAY);
220  } else {
221  gray = frame;
222  }
223 
224  // run the detector
225  if (model.detector.detect(gray)) {
226  // 2d homography found
227  nbHomography++;
228 
229  // Computes 3D pose and surface normal
230  model.augm.Clear();
231  add_detected_homography(model.detector, model.augm);
232  model.augm.Accomodate(4, 1e-4);
233  CvMat *mat = model.augm.GetObjectToWorld();
234  float normal[3];
235  for (int j=0;j<3;j++) normal[j] = cvGet2D(mat, j, 2).val[0];
236  cvReleaseMat(&mat);
237 
238  // average pixels over triangles
239  lc.averageImage(frame,model.detector.H);
240 
241  // add observations
242  if (!model.map.isReady())
243  model.map.addNormal(normal, lc, 0);
244 
245  if (!model.map.isReady() && nbHomography >= nbImages) {
246  if (model.map.computeLightParams()) {
247  model.map.save();
248  const float *gain = model.map.getGain(0);
249  const float *bias = model.map.getBias(0);
250  cout << "Gain: " << gain[0] << ", " << gain[1] << ", " << gain[2] << endl;
251  cout << "Bias: " << bias[0] << ", " << bias[1] << ", " << bias[2] << endl;
252  }
253  }
254  }
255 
256  if (model.map.isReady()) {
257  double min, max;
258  IplImage *map = model.map.map.getIm();
259  cvSetImageCOI(map, 2);
260  cvMinMaxLoc(map, &min, &max);
261  cvSetImageCOI(map, 0);
262  assert(map->nChannels == lightmap->nChannels);
263  cvConvertScale(map, lightmap, 128, 0);
264  cvShowImage("LightMap", lightmap);
265  augment_scene(model, frame, display);
266  } else {
267  cvCopy(frame,display);
268  if (model.detector.object_is_detected)
269  lc.drawGrid(display, model.detector.H);
270  }
271 
272  cvShowImage(win, display);
273 
274  int k=cvWaitKey(10);
275  if (k=='q' || k== 27)
276  break;
277  }
278 
279  cvReleaseImage(&lightmap);
280  cvReleaseImage(&display);
281  if (frame->nChannels > 1)
282  cvReleaseImage(&gray);
283  return 0;
284 }
285 
286 
287 void show_result(planar_object_recognizer &detector, IplImage *video, IplImage *dst)
288 {
289  cvCopy(video, dst);
290 
291  if (detector.object_is_detected) {
292  for (int i=0; i<detector.match_number; ++i) {
293 
294  image_object_point_match * match = detector.matches+i;
295  if (match->inlier) {
296  cvCircle(dst,
297  cvPoint((int) (PyrImage::convCoordf(match->image_point->u,
298  int(match->image_point->scale), 0)),
299  (int)(PyrImage::convCoordf(match->image_point->v,
300  int(match->image_point->scale), 0))),
301  3, CV_RGB(0,255,0), -1, 8,0);
302  }
303  }
304  }
305 }
306 
307 static void augment_scene(CalibModel &model, IplImage *frame, IplImage *display)
308 {
309  cvCopy(frame, display);
310 
311  if (!model.detector.object_is_detected)
312  return;
313 
314  CvMat *m = model.augm.GetProjectionMatrix(0);
315  if (!m) return;
316 
317  double pts[4][4];
318  double proj[4][4];
319  CvMat ptsMat, projMat;
320  cvInitMatHeader(&ptsMat, 4, 4, CV_64FC1, pts);
321  cvInitMatHeader(&projMat, 3, 4, CV_64FC1, proj);
322  for (int i=0; i<4; i++) {
323  pts[0][i] = model.corners[i].x;
324  pts[1][i] = model.corners[i].y;
325  pts[2][i] = 0;
326  pts[3][i] = 1;
327  }
328  cvMatMul(m, &ptsMat, &projMat);
329  cvReleaseMat(&m);
330 
331  CvPoint projPts[4];
332  for (int i=0;i<4; i++) {
333  projPts[i].x = cvRound(proj[0][i]/proj[2][i]);
334  projPts[i].y = cvRound(proj[1][i]/proj[2][i]);
335  }
336 
337  CvMat *o2w = model.augm.GetObjectToWorld();
338  float normal[3];
339  for (int j=0;j<3;j++)
340  normal[j] = cvGet2D(o2w, j, 2).val[0];
341  cvReleaseMat(&o2w);
342 
343  // we want to relight a color present on the model image
344  // with an irradiance coming from the irradiance map
345  CvScalar color = cvGet2D(model.image, model.image->height/2, model.image->width/2);
346  CvScalar irradiance = model.map.readMap(normal);
347 
348  // the camera has some gain and bias
349  const float *g = model.map.getGain(0);
350  const float *b = model.map.getBias(0);
351 
352  // relight the 3 RGB channels. The bias value expects 0 black 1 white,
353  // but the image are stored with a white value of 255: Conversion is required.
354  for (int i=0; i<3; i++) {
355  color.val[i] = 255.0*(g[i]*(color.val[i]/255.0)*irradiance.val[i] + b[i]);
356  }
357 
358  // draw a filled polygon with the relighted color
359  cvFillConvexPoly(display, projPts, 4, color);
360 }
361 
// NOTE(review): the signature line of this function was lost in the doc
// extraction (original lines 361-362).  From the call site in
// geometric_calibration it is presumably
//   bool add_detected_homography(planar_object_recognizer &detector,
//                                CamCalibration &calib)
// -- confirm against the repository source.
//
// Converts every inlier match of the detector into an (image point,
// object point) correspondence at pyramid level 0 and registers the
// detector's homography with the calibration object.
{
	// Reused across calls to avoid repeated allocation; NOT thread-safe.
	static std::vector<CamCalibration::s_struct_points> pts;
	pts.clear();

	for (int i=0; i<detector.match_number; ++i) {
		image_object_point_match * match = detector.matches+i;
		if (match->inlier) {
			// convCoordf(..., scale, 0) maps pyramid-level coordinates
			// back to the full-resolution image (level 0).
			pts.push_back(CamCalibration::s_struct_points(
				PyrImage::convCoordf(match->image_point->u, int(match->image_point->scale), 0),
				PyrImage::convCoordf(match->image_point->v, int(match->image_point->scale), 0),
				PyrImage::convCoordf((float)match->object_point->M[0], int(match->object_point->scale), 0),
				PyrImage::convCoordf((float)match->object_point->M[1], int(match->object_point->scale), 0)));
		}
	}

	// camera index 0: a single camera is calibrated in this program
	return calib.AddHomography(0, pts, detector.H);
}
380 
// NOTE(review): the signature line of this overload was lost in the doc
// extraction (original line 381).  From the call site in
// photometric_calibration it is presumably
//   bool add_detected_homography(planar_object_recognizer &detector,
//                                augm3d &a)   // 'a' = model.augm's type
// -- confirm against the repository source.
//
// Same correspondence collection as the calibration overload above, but the
// homography is handed to the pose-estimation object instead.
{
	// Reused across calls to avoid repeated allocation; NOT thread-safe.
	static std::vector<CamCalibration::s_struct_points> pts;
	pts.clear();

	for (int i=0; i<detector.match_number; ++i) {
		image_object_point_match * match = detector.matches+i;
		if (match->inlier) {
			// convCoordf(..., scale, 0) maps pyramid-level coordinates
			// back to the full-resolution image (level 0).
			pts.push_back(CamCalibration::s_struct_points(
				PyrImage::convCoordf(match->image_point->u, int(match->image_point->scale), 0),
				PyrImage::convCoordf(match->image_point->v, int(match->image_point->scale), 0),
				PyrImage::convCoordf(match->object_point->M[0], int(match->object_point->scale), 0),
				PyrImage::convCoordf(match->object_point->M[1], int(match->object_point->scale), 0)));
		}
	}

	// NOTE(review): AddHomography's result is discarded and true is returned
	// unconditionally -- callers cannot detect failure here.
	a.AddHomography(pts, detector.H);
	return true;
}