bazar  1.3.1
singlecalib.cpp

#include <iostream>
#include "cv.h"
#include "highgui.h"
#include <garfeild.h>

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

using namespace std;

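// Default model image; can be overridden on the command line with -m.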
const char *modelFile = "model.jpg";

IplImage *acquire_model(CvCapture *capture);
void show_result(planar_object_recognizer &recognizer, IplImage *video, IplImage **dst);
bool add_detected_homography(planar_object_recognizer &detector, CamCalibration &calib);

void usage(const char *s) {
    cerr << "usage:\n" << s
         << " [<cam number>|<video file>] [-m <model image>] [-r]\n"
            "  -m specifies model image\n"
            "  -r do not load cached model image\n";
    exit(1);
}

int main( int argc, char** argv )
{
    CvCapture* capture = 0;

    const char *captureSrc = "0";
    bool relearn = false;

    // parse command line
    for (int i=1; i<argc; i++) {
        if (strcmp(argv[i], "-m") == 0) {
            if (i == argc-1) usage(argv[0]);
            modelFile = argv[i+1];
            i++;
        } else if (strcmp(argv[i], "-r") == 0) {
            relearn = true;
        } else if (argv[i][0] == '-') {
            usage(argv[0]);
        } else {
            captureSrc = argv[i];
        }
    }

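    // A single digit selects a camera index; anything else is treated as a video file path.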
    if (strlen(captureSrc) == 1 && isdigit(captureSrc[0]))
        capture = cvCaptureFromCAM( captureSrc[0]-'0' );
    else
        capture = cvCaptureFromAVI( captureSrc );

    if( !capture )
    {
        cerr << "Could not initialize capturing from " << captureSrc << " ...\n";
        return -1;
    }

    // Allocate the detector object
    planar_object_recognizer detector;

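    // Geometric verification settings: RANSAC inlier distance threshold, maximum
    // number of RANSAC iterations, and the non-linear refinement threshold.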
    detector.ransac_dist_threshold = 5;
    detector.max_ransac_iterations = 800;
    detector.non_linear_refine_threshold = 1.5;

    // Train or load classifier
    if (relearn || !detector.build_with_cache(
            string(modelFile), // model image file name
            400,               // maximum number of keypoints on the model
            32,                // patch size in pixels
            3,                 // yape radius. Use 3, 5 or 7.
            16,                // number of trees for the classifier, somewhere between 12 and 50
            3                  // number of levels in the Gaussian pyramid
            ))
    {
        // interactively acquire a model image
        IplImage *shot = acquire_model(capture);
        cvSaveImage(modelFile, shot);
        detector.build(shot, 400, 32, 3, 16, 3);
        detector.save(string(modelFile)+".classifier");
        cvReleaseImage(&shot);
    }

    // A lower threshold will allow detection in harder conditions, but
    // might lead to false positives.
    detector.match_score_threshold = .03f;

    const char *win = "Bazar";

    IplImage *display = 0;
    IplImage *gray = 0;

    cvNamedWindow(win, 0);

    CamCalibration calib;

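    // Grab one frame up front so the calibration object knows the camera resolution.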
    IplImage *frame = cvQueryFrame(capture);
    calib.AddCamera(frame->width, frame->height);

    int nbHomography = 0;
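    // Main loop: every frame in which the model is detected contributes one
    // homography; once 70 of them have been collected, calibration is attempted.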
    for(;;)
    {
        // acquire image
        frame = cvQueryFrame( capture );
        if( !frame )
            break;

        // convert it to gray levels, if required
        if (frame->nChannels > 1) {
            if( !gray )
                gray = cvCreateImage( cvGetSize(frame), IPL_DEPTH_8U, 1 );
            cvCvtColor(frame, gray, CV_RGB2GRAY);
        } else {
            gray = frame;
        }

        // run the detector
        if (detector.detect(gray)) {
            add_detected_homography(detector, calib);
            nbHomography++;
            cout << nbHomography << " homographies.\n";
            if (nbHomography >= 70) {
                if (calib.Calibrate(
                        50,          // max hom
                        2,           // random
                        3,
                        3,           // padding ratio 1/2
                        0,
                        0,
                        0.0078125,   // alpha
                        0.9,         // beta
                        0.001953125, // gamma
                        12,          // iter
                        0.05,        // eps
                        3            // postfilter eps
                        )) {
                    break;
                }
            }
        }
        show_result(detector, frame, &display);
        cvShowImage(win, display);
        //cvShowImage(win, frame);

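        // Stop when the user presses any key.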
        if( cvWaitKey(10) >= 0 )
            break;
    }

    cvReleaseCapture( &capture );
    cvDestroyWindow(win);

    return 0;
}

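// Draw the detection result: when the model has been detected, paint a small
// green disc at every inlier match, converting keypoint coordinates from their
// pyramid level back to level 0.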
void show_result(planar_object_recognizer &detector, IplImage *video, IplImage **dst)
{
    if (*dst == 0) *dst = cvCloneImage(video);
    else cvCopy(video, *dst);

    if (detector.object_is_detected) {
        for (int i=0; i<detector.match_number; ++i) {

            image_object_point_match *match = detector.matches + i;
            if (match->inlier) {
                cvCircle(*dst,
                         cvPoint((int) PyrImage::convCoordf(match->image_point->u,
                                                            int(match->image_point->scale), 0),
                                 (int) PyrImage::convCoordf(match->image_point->v,
                                                            int(match->image_point->scale), 0)),
                         3, CV_RGB(0,255,0), -1, 8, 0);
            }
        }
    }
}

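// Draw text twice, first with a thick dark stroke and then with a thin light one,
// so the label stays readable on any background.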
static void putText(IplImage *im, const char *text, CvPoint p, CvFont *f1, CvFont *f2)
{
    cvPutText(im, text, p, f2, cvScalarAll(0));
    cvPutText(im, text, p, f1, cvScalarAll(255));
}

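// Let the user shoot a model image interactively: the live stream is displayed
// until space freezes a frame; 'y' or Enter accepts it, 'n' (or space again)
// resumes the preview, and 'q' quits. The accepted frame is returned to the caller.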
IplImage *acquire_model(CvCapture *capture)
{
    const char *win = "Bazar";

    CvFont font, fontbold;

    cvInitFont( &font, CV_FONT_HERSHEY_PLAIN, 1, 1);
    cvInitFont( &fontbold, CV_FONT_HERSHEY_PLAIN, 1, 1, 0, 5);

    cvNamedWindow(win, 0);

    bool pause = false;
    IplImage *frame;
    IplImage *shot = 0, *text = 0;

    bool accepted = false;
    while (!accepted) {

        if (!pause) {
            frame = cvQueryFrame(capture);
            if (!text) text = cvCloneImage(frame);
            else cvCopy(frame, text);
            putText(text, "Please take a frontal view of a", cvPoint(3,20), &font, &fontbold);
            putText(text, "textured planar surface and press space", cvPoint(3,40), &font, &fontbold);
            cvShowImage(win, text);
        }

        char k = cvWaitKey(pause ? 0 : 10);
        switch (k) {
            case 'n': pause = false; break;
            case ' ':
                pause = !pause;
                if (pause) {
                    if (shot) cvCopy(frame, shot);
                    else shot = cvCloneImage(frame);
                    cvCopy(shot, text);
                    putText(text, "Image OK? (y/n)", cvPoint(3,20), &font, &fontbold);
                    cvShowImage(win, text);
                }
                break;
            case 'y':
            case '\n': if (pause && shot) accepted = true; break;
            case 'q': exit(0); break;
            case -1: break;
            default: cerr << k << ": what?\n";
        }
    }

    cvReleaseImage(&text);
    return shot;
}

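// Convert the detector's inlier matches into image/object point correspondences
// at pyramid level 0 and hand them, together with the estimated homography H,
// to the calibration object.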
bool add_detected_homography(planar_object_recognizer &detector, CamCalibration &calib)
{
    static std::vector<CamCalibration::s_struct_points> pts;
    pts.clear();

    for (int i=0; i<detector.match_number; ++i) {
        image_object_point_match *match = detector.matches + i;
        if (match->inlier) {
            pts.push_back(CamCalibration::s_struct_points(
                PyrImage::convCoordf(match->image_point->u, int(match->image_point->scale), 0),
                PyrImage::convCoordf(match->image_point->v, int(match->image_point->scale), 0),
                PyrImage::convCoordf((float)match->object_point->M[0], int(match->object_point->scale), 0),
                PyrImage::convCoordf((float)match->object_point->M[1], int(match->object_point->scale), 0)));
        }
    }

    return calib.AddHomography(0, pts, detector.H);
}