// BazAR 1.3.1 — multicam.cpp
// (file header reconstructed from documentation-page residue)
1 
2 #include <iostream>
3 #include <vector>
4 #include <cv.h>
5 #include <highgui.h>
6 
7 #ifdef HAVE_CONFIG_H
8 #include <config.h>
9 #endif
10 
11 #include "multigrab.h"
12 
13 void show_result(planar_object_recognizer &recognizer, IplImage *video, IplImage *dst);
14 static void augment_scene(int cam, CalibModel &model, IplImage *frame, IplImage *display);
15 bool geometric_calibration(MultiGrab &multi, bool cache);
16 bool photometric_calibration(MultiGrab &multi, int nbImages, bool cache);
17 
18 
19 void usage(const char *s) {
20  cerr << "usage:\n" << s
21  << "[-m <model image>] [-r]\n"
22  " -m specifies model image\n"
23  " -r do not load any data\n"
24  " -t train a new classifier\n"
25  " -g recompute geometric calibration\n"
26  " -l rebuild irradiance map from scratch\n";
27  exit(1);
28 }
29 
30 int main( int argc, char** argv )
31 {
32  bool redo_geom=false;
33  bool redo_training=false;
34  bool redo_lighting=false;
35 
36  char *modelFile = "model.bmp";
37  // parse command line
38  for (int i=1; i<argc; i++) {
39  if (strcmp(argv[i], "-m") ==0) {
40  if (i==argc-1) usage(argv[0]);
41  modelFile = argv[i+1];
42  i++;
43  } else if (strcmp(argv[i], "-r")==0) {
44  redo_geom=redo_training=redo_lighting=true;
45  } else if (strcmp(argv[i], "-g")==0) {
46  redo_geom=redo_lighting=true;
47  } else if (strcmp(argv[i], "-l")==0) {
48  redo_lighting=true;
49  } else if (strcmp(argv[i], "-t")==0) {
50  redo_training=true;
51  } else if (argv[i][0]=='-') {
52  usage(argv[0]);
53  }
54  }
55 
57 
58  if( multi.init(!redo_training) ==0 )
59  {
60  cerr <<"Initialization error.\n";
61  return -1;
62  }
63 
64  cout << "Starting geometric calibration.\n";
65 
66  if (!geometric_calibration(multi, !redo_geom)) {
67  cerr << "Geometric calibration failed.\n";
68  return 2;
69  }
70 
71  cout << "Geometric calibration OK. Calibrating light...\n";
72 
73  // start collecting light measurements
74  multi.allocLightCollector();
75 
76  photometric_calibration(multi, 150, !redo_lighting);
77 }
78 
80 {
81 
82  if (cache && multi.model.augm.LoadOptimalStructureFromFile("camera_c.txt", "camera_r_t.txt")) {
83  return true;
84  }
85 
86  const char *win = "BazAR";
87 
88  cvNamedWindow(win, CV_WINDOW_AUTOSIZE);
89 
90  // construct a CamCalibration object and register all the cameras
92 
93  for (int i=0; i<multi.cams.size(); ++i) {
94  calib.AddCamera(multi.cams[i]->width, multi.cams[i]->height);
95  }
96 
97  IplImage *display=0;
98  bool success=false;
99  bool end=false;
100 
101  int dispCam=0;
102  int nbHomography =0;
103  while (!end)
104  {
105  // acquire images
106  multi.grabFrames();
107 
108  // detect the calibration object in every image
109  // (this loop could be paralelized)
110  int nbdet=0;
111  for (int i=0; i<multi.cams.size(); ++i) {
112  if (multi.cams[i]->detect()) nbdet++;
113  }
114 
115  if (nbdet>0) {
116  for (int i=0; i<multi.cams.size(); ++i) {
117  if (multi.cams[i]->detector.object_is_detected) {
118  add_detected_homography(i, multi.cams[i]->detector, calib);
119  } else {
120  calib.AddHomography(i);
121  }
122  }
123  nbHomography++;
124  }
125 
126  if (nbHomography >=200) {
127  if (calib.Calibrate(
128  120, // max hom
129  (multi.cams.size() > 1 ? 1:2), // padding or random
130  3,
131  .5, // padding ratio 1/2
132  0,
133  0,
134  0.0078125, //alpha
135  0.9, //beta
136  0.001953125,//gamma
137  12, // iter
138  0.05, //eps
139  3 //postfilter eps
140  ))
141  {
143  success=true;
144  break;
145  }
146  }
147  if (display==0) display = cvCreateImage(cvGetSize(multi.cams[dispCam]->frame), IPL_DEPTH_8U, 3);
148  show_result(multi.cams[dispCam]->detector, multi.cams[dispCam]->frame, display);
149  cvShowImage(win, display);
150 
151  int k=cvWaitKey(10);
152  switch (k) {
153  case 'q':
154  case 27: end=true; break;
155  case 'n': if(dispCam < multi.cams.size()-1) {
156  cvReleaseImage(&display);
157  ++dispCam;
158  }
159  cout << "Current cam: " << dispCam << endl;
160  break;
161  case 'p': if(dispCam > 0) {
162  cvReleaseImage(&display);
163  --dispCam;
164  }
165  cout << "Current cam: " << dispCam << endl;
166  break;
167  case -1: break;
168  default: cout << (char)k <<": What ?\n";
169  }
170  }
171 
172  if (display) cvReleaseImage(&display);
173  if (success && multi.model.augm.LoadOptimalStructureFromFile("camera_c.txt", "camera_r_t.txt")) {
174  return true;
175  }
176  return false;
177 }
178 
179 bool photometric_calibration(MultiGrab &multi, int nbImages, bool cache)
180 {
181  CalibModel &model(multi.model);
182 
183  if (cache) model.map.load();
184 
185  const char *win = "BazAR";
186 
187  cvNamedWindow(win, CV_WINDOW_AUTOSIZE);
188 
189  IplImage *display=0;
190  bool success=false;
191  bool end=false;
192 
193  int dispCam=0;
194  int nbLightMeasures =0;
195  while (!end)
196  {
197  // acquire images
198  multi.grabFrames();
199 
200  // detect the calibration object in every image
201  // (this loop could be paralelized)
202  int nbdet=0;
203  for (int i=0; i<multi.cams.size(); ++i) {
204  if (multi.cams[i]->detect()) nbdet++;
205  }
206 
207  bool frameOK=false;
208  if (nbdet>0) {
209  model.augm.Clear();
210  for (int i=0; i<multi.cams.size(); ++i) {
211  if (multi.cams[i]->detector.object_is_detected) {
212  add_detected_homography(i, multi.cams[i]->detector, model.augm);
213  } else {
214  model.augm.AddHomography();
215  }
216  }
217  frameOK = model.augm.Accomodate(4, 1e-4);
218  }
219 
220  if (display==0) display = cvCreateImage(cvGetSize(multi.cams[dispCam]->frame), IPL_DEPTH_8U, 3);
221 
222  if (frameOK) {
223  // fetch surface normal in world coordinates
224  CvMat *mat = model.augm.GetObjectToWorld();
225  float normal[3];
226  for (int j=0;j<3;j++) normal[j] = cvGet2D(mat, j, 2).val[0];
227  cvReleaseMat(&mat);
228 
229  // collect lighting measures
230  for (int i=0; i<multi.cams.size();++i) {
231  if (multi.cams[i]->detector.object_is_detected) {
232  nbLightMeasures++;
233  model.map.addNormal(normal, *multi.cams[i]->lc, i);
234  }
235  }
236  if (!model.map.isReady() && nbLightMeasures > 40) {
237  if (model.map.computeLightParams()) {
238  model.map.save();
239  }
240  }
241 
242  augment_scene(dispCam, model, multi.cams[dispCam]->frame, display);
243  } else {
244  cvCopy( multi.cams[dispCam]->frame, display);
245  }
246  cvShowImage(win, display);
247 
248  int k=cvWaitKey(10);
249  switch (k) {
250  case 'q':
251  case 27: end=true; break;
252  case 'n': if(dispCam < multi.cams.size()-1) {
253  cvReleaseImage(&display);
254  ++dispCam;
255  }
256  cout << "Current cam: " << dispCam << endl;
257  break;
258  case 'p': if(dispCam > 0) {
259  cvReleaseImage(&display);
260  --dispCam;
261  }
262  cout << "Current cam: " << dispCam << endl;
263  break;
264  case -1: break;
265  default: cout << (char)k <<": What ?\n";
266  }
267  }
268 
269  if (display) cvReleaseImage(&display);
270  if (success && model.augm.LoadOptimalStructureFromFile("camera_c.txt", "camera_r_t.txt")) {
271  return true;
272  }
273  return false;
274 
275 
276  return false;
277 }
278 
279 void show_result(planar_object_recognizer &detector, IplImage *video, IplImage *dst)
280 {
281  cvCopy(video, dst);
282 
283  if (detector.object_is_detected) {
284  for (int i=0; i<detector.match_number; ++i) {
285 
286  image_object_point_match * match = detector.matches+i;
287  if (match->inlier) {
288  cvCircle(dst,
289  cvPoint((int) (PyrImage::convCoordf(match->image_point->u,
290  int(match->image_point->scale), 0)),
291  (int)(PyrImage::convCoordf(match->image_point->v,
292  int(match->image_point->scale), 0))),
293  3, CV_RGB(0,255,0), -1, 8,0);
294  }
295  }
296  }
297 }
298 
299 static void augment_scene(int cam, CalibModel &model, IplImage *frame, IplImage *display)
300 {
301  cvCopy(frame, display);
302 
303  CvMat *m = model.augm.GetProjectionMatrix(cam);
304  if (!m) return;
305 
306  double pts[4][4];
307  double proj[4][4];
308  CvMat ptsMat, projMat;
309  cvInitMatHeader(&ptsMat, 4, 4, CV_64FC1, pts);
310  cvInitMatHeader(&projMat, 3, 4, CV_64FC1, proj);
311  for (int i=0; i<4; i++) {
312  pts[0][i] = model.corners[i].x;
313  pts[1][i] = model.corners[i].y;
314  pts[2][i] = 0;
315  pts[3][i] = 1;
316  }
317  cvMatMul(m, &ptsMat, &projMat);
318  cvReleaseMat(&m);
319 
320  CvPoint projPts[4];
321  for (int i=0;i<4; i++) {
322  projPts[i].x = cvRound(proj[0][i]/proj[2][i]);
323  projPts[i].y = cvRound(proj[1][i]/proj[2][i]);
324  }
325 
326  CvScalar color = cvScalar(128,128,128,128);
327 
328  if (model.map.isReady()) {
329  CvMat *o2w = model.augm.GetObjectToWorld();
330  float normal[3];
331  for (int j=0;j<3;j++)
332  normal[j] = cvGet2D(o2w, j, 2).val[0];
333  cvReleaseMat(&o2w);
334 
335  // we want to relight a color present on the model image
336  // with an irradiance coming from the irradiance map
337  color = cvGet2D(model.image, model.image->height/2, model.image->width/2);
338  CvScalar irradiance = model.map.readMap(normal);
339 
340  // the camera has some gain and bias
341  const float *g = model.map.getGain(cam);
342  const float *b = model.map.getBias(cam);
343 
344  // relight the 3 RGB channels. The bias value expects 0 black 1 white,
345  // but the image are stored with a white value of 255: Conversion is required.
346  for (int i=0; i<3; i++) {
347  color.val[i] = 255.0*(g[i]*(color.val[i]/255.0)*irradiance.val[i] + b[i]);
348  }
349  }
350 
351  // draw a filled polygon with the relighted color
352  cvFillConvexPoly(display, projPts, 4, color);
353 }
354