bazar 1.3.1
planar_object_recognizer.cpp
/*
Copyright 2005, 2006 Computer Vision Lab,
Ecole Polytechnique Federale de Lausanne (EPFL), Switzerland.
All rights reserved.

This file is part of BazAR.

BazAR is free software; you can redistribute it and/or modify it under the
terms of the GNU General Public License as published by the Free Software
Foundation; either version 2 of the License, or (at your option) any later
version.

BazAR is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along with
BazAR; if not, write to the Free Software Foundation, Inc., 51 Franklin
Street, Fifth Floor, Boston, MA 02110-1301, USA
*/
#include <sys/stat.h> // for mkdir()
#include <fstream>
#include <algorithm>

#ifdef WIN32
#include <direct.h> // for _mkdir()
#endif

using namespace std;

#include <starter.h>
#include <keypoints/yape.h>
#include "planar_object_recognizer.h"

planar_object_recognizer::planar_object_recognizer(void)
{
  forest = 0;
  model_points = 0;
  object_input_view = 0;
  model_and_input_images = 0;
  point_detector = 0;
  homography_estimator = 0;

  default_settings();

  affine_motion = 0;
  H = 0;
  detected_points = new keypoint[hard_max_detected_pts];
  detected_point_views = new image_class_example[hard_max_detected_pts];
  match_score_threshold = .05f;
  ransac_dist_threshold = 10;
  ransac_stop_support = 50;
  max_ransac_iterations = 500;
  non_linear_refine_threshold = 3;
}

void planar_object_recognizer::default_settings(void)
{
  set_max_depth(10);

  dont_use_bins_when_creating_model_points();
  dont_use_bins_when_detecting_input_image_points();

  index_of_model_point_to_debug = -1;

  sample_number_for_refining = 1000;

  max_detected_pts = 1000;
  views_number = 1000;
  min_view_rate = .4;
  keypoint_distance_threshold = 1.5;
}
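/* How these defaults are used (a reading of the code below, not an original
   source comment): views_number is the number of random synthetic views
   generated during training; min_view_rate is the fraction of those views
   in which a keypoint must be re-detected to be kept as a model point (see
   detect_most_stable_model_points); keypoint_distance_threshold is the
   merge radius, at the point's pyramid level, used when clustering repeated
   detections into one model point; sample_number_for_refining is the number
   of patch batches used to estimate the leaf posteriors in learn(). */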

void planar_object_recognizer::set_max_detected_pts(int max)
{
  if (max > 0 && max < hard_max_detected_pts)
    max_detected_pts = max;
}

void planar_object_recognizer::use_orientation_correction(void)
{
  new_images_generator.set_use_orientation_correction(true);
}

void planar_object_recognizer::dont_use_orientation_correction(void)
{
  new_images_generator.set_use_orientation_correction(false);
}

void planar_object_recognizer::set_range_variation_for_theta(float min_theta, float max_theta)
{
  new_images_generator.set_range_variation_for_theta(min_theta, max_theta);
}

void planar_object_recognizer::set_range_variation_for_phi(float min_phi, float max_phi)
{
  new_images_generator.set_range_variation_for_phi(min_phi, max_phi);
}

void planar_object_recognizer::independent_scaling(float min_lambda1, float max_lambda1,
                                                   float min_lambda2, float max_lambda2)
{
  new_images_generator.independent_scaling(min_lambda1, max_lambda1, min_lambda2, max_lambda2);
}

void planar_object_recognizer::constrained_scaling(float min_lambda1, float max_lambda1,
                                                   float min_lambda2, float max_lambda2,
                                                   float min_l1_l2, float max_l1_l2)
{
  new_images_generator.constrained_scaling(min_lambda1, max_lambda1,
                                           min_lambda2, max_lambda2,
                                           min_l1_l2, max_l1_l2);
}

void planar_object_recognizer::set_use_random_background(bool use_random_background)
{
  new_images_generator.set_use_random_background(use_random_background);
}

void planar_object_recognizer::set_noise_level(int noise_level)
{
  new_images_generator.set_noise_level(noise_level);
}

bool planar_object_recognizer::build(IplImage *model_image,
                                     int max_point_number_on_model, int patch_size,
                                     int yape_radius, int tree_number, int nbLev,
                                     LEARNPROGRESSION LearnProgress, int *roi)
{
  new_images_generator.set_patch_size(patch_size);

  IplImage *model;
  if (model_image->nChannels == 1)
    model = model_image;
  else {
    model = cvCreateImage(cvGetSize(model_image), IPL_DEPTH_8U, 1);
    cvCvtColor(model_image, model, CV_RGB2GRAY);
  }

  if (roi)
    new_images_generator.set_original_image(model,
                                            roi[0], roi[1],
                                            roi[2], roi[3],
                                            roi[4], roi[5],
                                            roi[6], roi[7]);
  else
    new_images_generator.set_original_image(model);

  if (model != model_image) cvReleaseImage(&model);

  new_images_generator.set_level_number(nbLev);
  new_images_generator.set_gaussian_smoothing_kernel_size(yape_radius);

  initialize();

  learn(max_point_number_on_model, patch_size, yape_radius, tree_number, nbLev, LearnProgress);

  return true;
}
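/* Typical use of build() (a sketch; the parameter values below are
   illustrative assumptions, not values mandated by BazAR):

     planar_object_recognizer recognizer;
     IplImage *model = cvLoadImage("model.png", 1);  // hypothetical file
     // 400 model points, 32x32 patches, YAPE radius 7, 20 trees,
     // 5 pyramid levels, no progress callback, full-image ROI:
     recognizer.build(model, 400, 32, 7, 20, 5, 0, 0);
*/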

bool planar_object_recognizer::build_with_cache(string filename,
                                                int max_point_number_on_model, int patch_size,
                                                int yape_radius, int tree_number, int nbLev,
                                                LEARNPROGRESSION LearnProgress)
{
  new_images_generator.set_patch_size(patch_size);

  string dirname(filename + ".classifier");
  if (load(dirname)) return true;

  IplImage * model = mcvLoadImage(filename.data(), 0);
  if (model == 0) return false;

  string roifn(filename + ".roi");
  ifstream roif(roifn.data());
  int u[4] = {-1, -1, -1, -1};
  int v[4] = {-1, -1, -1, -1};
  if (roif.good()) {
    cout << "Reading ROI from file " << roifn << ".\n";
    for (int i = 0; i < 4; i++) {
      roif >> u[i];
      roif >> v[i];
    }
    roif.close();
  }

  new_images_generator.set_original_image(model, u[0], v[0], u[1], v[1], u[2], v[2], u[3], v[3]);
  new_images_generator.set_level_number(nbLev);
  new_images_generator.set_gaussian_smoothing_kernel_size(yape_radius);

  initialize();

  learn(max_point_number_on_model, patch_size, yape_radius, tree_number, nbLev, LearnProgress);

  save(dirname);

  return true;
}

planar_object_recognizer::planar_object_recognizer(string directory_name)
{
  forest = 0;
  model_points = 0;

  load(directory_name);
}

bool planar_object_recognizer::load(string directory_name)
{
  // Read parameters:
  char parameter_filename[1000];
  sprintf(parameter_filename, "%s/parameters.txt", directory_name.data());
  ifstream param_f(parameter_filename);
  int yape_radius;
  int nbLev;

  if (!param_f.good()) return false;
  param_f >> yape_radius;
  param_f >> nbLev;
  param_f.close();

  new_images_generator.set_level_number(nbLev);
  new_images_generator.set_gaussian_smoothing_kernel_size(yape_radius);

  // Read original image:
  char image_name[1000];
  sprintf(image_name, "%s/original_image.bmp", directory_name.data());
  IplImage * original_image = mcvLoadImage(image_name, 0);
  if (original_image == 0) return false;

  // Read object corner positions in this original image:
  char corner_filename[1000];
  sprintf(corner_filename, "%s/corners.txt", directory_name.data());
  ifstream cf(corner_filename);

  if (!cf.good()) return false;
  int u_corner1, v_corner1, u_corner2, v_corner2, u_corner3, v_corner3, u_corner4, v_corner4;
  cf >> u_corner1 >> v_corner1;
  cf >> u_corner2 >> v_corner2;
  cf >> u_corner3 >> v_corner3;
  cf >> u_corner4 >> v_corner4;
  cf.close();
  new_images_generator.set_original_image(original_image,
      u_corner1, v_corner1, u_corner2, v_corner2, u_corner3, v_corner3, u_corner4, v_corner4);
  cvReleaseImage(&original_image);

  // Read keypoints in this original image:
  char point_filename[1000];
  sprintf(point_filename, "%s/keypoints.txt", directory_name.data());
  ifstream pf(point_filename);
  if (!pf.good()) return false;
  pf >> model_point_number;
  model_points = new object_keypoint[model_point_number];

  for(int i = 0; i < model_point_number; i++) {
    pf >> model_points[i].M[0] >> model_points[i].M[1] >> model_points[i].M[2] >> model_points[i].scale;
    model_points[i].class_index = i;
    assert(model_points[i].scale < nbLev);
    if (!pf.good()) {
      cerr << "problem while reading keypoint #" << i << " in " << point_filename << endl;
      return false;
    }
  }
  pf.close();

  new_images_generator.set_object_keypoints(model_points, model_point_number);

  // Read image classification forest for this object:
  forest = new image_classification_forest();

  if ( !forest->load(directory_name) )
  {
    delete forest;
    return false;
  }

  // Match probabilities:
  for(int i = 0; i < hard_max_detected_pts; i++)
  {
    detected_point_views[i].alloc(forest->image_width);
    match_probabilities[i] = new float[model_point_number];
  }

  point_detector = new pyr_yape(new_images_generator.original_image->width, new_images_generator.original_image->height, nbLev);
  point_detector->set_radius(yape_radius);

  initialize();

  return true;
}
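/* On-disk layout of a classifier directory, as read above and written by
   save() below: parameters.txt (YAPE radius and pyramid level count),
   original_image.bmp (the grayscale model image), corners.txt (the four
   object corners), keypoints.txt (point count, then "u v w scale" per
   model point), plus the files written by image_classification_forest::save(). */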

void planar_object_recognizer::initialize(void)
{
  object_input_view = new object_view(new_images_generator.original_image->width,
                                      new_images_generator.original_image->height,
                                      new_images_generator.level_number);

  // For motion estimation:
  affine_motion = new affinity();
  H = new homography();

  homography_estimator = new ls_minimizer2(8);

  // For visualization:
  model_and_input_images = 0;
}

planar_object_recognizer::~planar_object_recognizer(void)
{
  if (object_input_view) delete object_input_view;
  if (point_detector) delete point_detector;

  if (model_points != 0)
    delete [] model_points;

  if (forest != 0)
    delete forest;

  for(int i = 0; i < hard_max_detected_pts; i++) {
    if (match_probabilities[i])
      delete [] match_probabilities[i];
  }

  if (detected_points) delete[] detected_points;
  if (detected_point_views) delete[] detected_point_views;
  if (affine_motion) delete affine_motion;
  if (H) delete H;
  if (homography_estimator) delete homography_estimator;
}

void planar_object_recognizer::learn(int max_point_number_on_model, int patch_size,
                                     int yape_radius, int tree_number, int nbLev,
                                     LEARNPROGRESSION LearnProgress)
{
  if (point_detector) delete point_detector;
  point_detector = new pyr_yape(new_images_generator.original_image->width, new_images_generator.original_image->height, nbLev);
  point_detector->set_radius(yape_radius);
  point_detector->set_use_bins(use_bins_for_model_points);

  detect_most_stable_model_points(max_point_number_on_model, patch_size, views_number, min_view_rate, LearnProgress);

  save_image_of_model_points(patch_size);

  forest = new image_classification_forest(patch_size, patch_size, model_point_number, max_depth, tree_number, LearnProgress);

  // Pick m1 and m2 at random in each node of each tree
  forest->create_trees_at_random();

  // Refine posterior probabilities for each leaf of each tree
  forest->refine(/* example_generator */ &new_images_generator, /* number of calls to generate_random_examples */ sample_number_for_refining);
  forest->test(/* example_generator */ &new_images_generator, /* number of calls to generate_random_examples */ 50);

  for(int i = 0; i < hard_max_detected_pts; i++)
  {
    detected_point_views[i].alloc(patch_size);
    match_probabilities[i] = new float[model_point_number];
  }
}
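/* Training pipeline, as implemented above: (1) keep the keypoints that
   survive re-detection across many randomly warped views of the model
   image; (2) grow randomized classification trees whose node tests are
   built at random (the m1/m2 pixel pairs mentioned above); (3) estimate the
   posterior class probabilities stored in the leaves from freshly generated
   patches. At run time a patch is matched by dropping it through the trees
   and combining the leaf posteriors (see match_points below). */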

void planar_object_recognizer::save(string directory_name)
{
#ifndef WIN32
  mkdir(directory_name.data(), S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IWOTH);
#else
  _mkdir(directory_name.data());
#endif

  char image_name[1000];
  sprintf(image_name, "%s/original_image.bmp", directory_name.data());
  mcvSaveImage(image_name, new_images_generator.original_image);

  char corner_filename[1000];
  sprintf(corner_filename, "%s/corners.txt", directory_name.data());
  ofstream cf(corner_filename);
  cf << new_images_generator.u_corner1 << " " << new_images_generator.v_corner1 << endl;
  cf << new_images_generator.u_corner2 << " " << new_images_generator.v_corner2 << endl;
  cf << new_images_generator.u_corner3 << " " << new_images_generator.v_corner3 << endl;
  cf << new_images_generator.u_corner4 << " " << new_images_generator.v_corner4 << endl;
  cf.close();

  char parameter_filename[1000];
  sprintf(parameter_filename, "%s/parameters.txt", directory_name.data());
  ofstream param_f(parameter_filename);
  param_f << point_detector->get_radius() << endl;
  param_f << new_images_generator.level_number << endl;
  param_f.close();

  char point_filename[1000];
  sprintf(point_filename, "%s/keypoints.txt", directory_name.data());
  ofstream pf(point_filename);
  pf << model_point_number << endl;
  for(int i = 0; i < model_point_number; i++)
    pf << model_points[i].M[0] << " " << model_points[i].M[1]
       << " " << model_points[i].M[2]
       << " " << model_points[i].scale << endl;
  pf.close();

  forest->save(directory_name);
}

void planar_object_recognizer::detect_points(IplImage * input_image)
{
  check_target_size(input_image);
  point_detector->set_use_bins(use_bins_for_input_image);
  point_detector->set_tau(10);

  detected_point_number = point_detector->pyramidBlurDetect(input_image,
                                                            detected_points, max_detected_pts,
                                                            &object_input_view->image);
}

void planar_object_recognizer::preprocess_points(void)
{
  int patch_size = forest->image_width;

  object_input_view->comp_gradient();

  for(int i = 0; i < detected_point_number; i++)
  {
    image_class_example * pv = &(detected_point_views[i]);
    pv->point2d = &(detected_points[i]);
    float u = pv->point2d->u, v = pv->point2d->v;
    int s = int(pv->point2d->scale);

    if (u > (patch_size/2) && u < object_input_view->image[s]->width - (patch_size/2) &&
        v > (patch_size/2) && v < object_input_view->image[s]->height - (patch_size/2))
    {
      new_images_generator.preprocess_point_view(pv, object_input_view);

      if (0) mcvSaveImage("patches/detected_%03d_corrected.bmp", i, pv->preprocessed);
    }
  }
}
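/* Each detected point whose patch fits entirely inside its pyramid level is
   preprocessed (orientation-corrected, when enabled, and smoothed) into
   pv->preprocessed, the canonical patch the classification forest expects.
   Points too close to the image border are skipped here and receive zeroed
   probabilities in match_points below. */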

void planar_object_recognizer::match_points(bool fill_match_struct)
{
  match_number = 0;

  int patch_size = forest->image_width;

  for(int i = 0; i < detected_point_number; i++)
  {
    image_class_example * pv = &(detected_point_views[i]);
    float u = pv->point2d->u, v = pv->point2d->v;
    int s = int(pv->point2d->scale);

    if (u > (patch_size/2) && u < object_input_view->image[s]->width - (patch_size/2) &&
        v > (patch_size/2) && v < object_input_view->image[s]->height - (patch_size/2))
    {
      forest->posterior_probabilities(pv, match_probabilities[i]);

      if (fill_match_struct) {
        int model_point_index = 0;
        float score = match_probabilities[i][model_point_index];
        for(int j = 1; j < model_point_number; j++)
          if (match_probabilities[i][j] > score)
          {
            score = match_probabilities[i][j];
            model_point_index = j;
          }
        image_object_point_match * match = &(matches[match_number]);
        match->image_point = pv->point2d;
        match->object_point = &(model_points[model_point_index]);
        match->score = score;

        match_number++;
      }
    }
    else
      memset(match_probabilities[i], 0, sizeof(float) * model_point_number);
  }
}
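/* Matching is classification: for every detected patch the forest returns a
   posterior probability over the model points, and the argmax becomes the
   candidate match. Every in-bounds point yields a match entry here;
   unreliable ones are filtered later by the score threshold applied in
   three_random_correspondences and by the RANSAC inlier tests. */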

bool planar_object_recognizer::detect(IplImage * input_image)
{
  detect_points(input_image);
  preprocess_points();

  match_points();

  object_is_detected = false;

  object_is_detected = estimate_affine_transformation(); // Estimation of "affine_motion" using RANSAC.

  affine_motion->transform_point(float(new_images_generator.u_corner1),
                                 float(new_images_generator.v_corner1), &detected_u_corner1, &detected_v_corner1);
  affine_motion->transform_point(float(new_images_generator.u_corner2),
                                 float(new_images_generator.v_corner2), &detected_u_corner2, &detected_v_corner2);
  affine_motion->transform_point(float(new_images_generator.u_corner3),
                                 float(new_images_generator.v_corner3), &detected_u_corner3, &detected_v_corner3);
  affine_motion->transform_point(float(new_images_generator.u_corner4),
                                 float(new_images_generator.v_corner4), &detected_u_corner4, &detected_v_corner4);

  if (object_is_detected)
  {
    object_is_detected = estimate_homographic_transformation_nonlinear_method();

    if (object_is_detected)
    {
      H->transform_point(float(new_images_generator.u_corner1),
                         float(new_images_generator.v_corner1), &detected_u_corner1, &detected_v_corner1);
      H->transform_point(float(new_images_generator.u_corner2),
                         float(new_images_generator.v_corner2), &detected_u_corner2, &detected_v_corner2);
      H->transform_point(float(new_images_generator.u_corner3),
                         float(new_images_generator.v_corner3), &detected_u_corner3, &detected_v_corner3);
      H->transform_point(float(new_images_generator.u_corner4),
                         float(new_images_generator.v_corner4), &detected_u_corner4, &detected_v_corner4);

      //cout << "-------------------------" << endl;
      //cout << *H << endl;
    }

    object_is_detected = true;
    /*
    cout << detected_u_corner1 << " x " << detected_v_corner1 << ", "
         << detected_u_corner2 << " x " << detected_v_corner2 << ", "
         << detected_u_corner3 << " x " << detected_v_corner3 << ", "
         << detected_u_corner4 << " x " << detected_v_corner4 << endl;
    */
  }

  return object_is_detected;
}
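/* Full run-time pipeline: YAPE keypoint detection on an image pyramid,
   patch preprocessing, forest-based matching, a RANSAC affine estimate to
   reject outliers, then non-linear refinement of a full homography. The
   detected_*_corner fields always hold the object corners mapped by the
   best transformation found so far (affine if the homography refinement
   did not run, homographic otherwise). */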

//
// ESTIMATING THE MOTION: ///////////////////////////////////////////////////
//

bool planar_object_recognizer::three_random_correspondences(int * n1, int * n2, int * n3)
{
  if (match_number == 0)
    return false;

  int shot = 0;
  do
  {
    *n1 = rand() % match_number;
    shot++; if (shot > 100) return false;
  }
  while(matches[*n1].score < match_score_threshold);

  shot = 0;
  do
  {
    *n2 = rand() % match_number;
    shot++; if (shot > 100) return false;
  }
  while(matches[*n2].score < match_score_threshold || *n2 == *n1);

  shot = 0;
  do
  {
    *n3 = rand() % match_number;
    shot++; if (shot > 100) return false;
  }
  while(matches[*n3].score < match_score_threshold || *n3 == *n1 || *n3 == *n2);

  return true;
}

#define RANSAC_DIST_THRESHOLD ransac_dist_threshold

int planar_object_recognizer::compute_support_for_affine_transformation(affinity * A)
{
  int result = 0;

  if (!valid(A))
  {
    for(int i = 0; i < match_number; i++)
      matches[i].inlier = false;

    return 0;
  }

  for(int i = 0; i < match_number; i++)
  {
    image_object_point_match * match = &(matches[i]);

    float mu = PyrImage::convCoordf(float(match->object_point->M[0]), int(match->object_point->scale), 0);
    float mv = PyrImage::convCoordf(float(match->object_point->M[1]), int(match->object_point->scale), 0);

    float du = PyrImage::convCoordf(match->image_point->u, int(match->image_point->scale), 0);
    float dv = PyrImage::convCoordf(match->image_point->v, int(match->image_point->scale), 0);
    float tu, tv;

    A->transform_point(mu, mv, &tu, &tv);

    if ((du - tu) * (du - tu) + (dv - tv) * (dv - tv) < RANSAC_DIST_THRESHOLD * RANSAC_DIST_THRESHOLD)
    {
      result++;
      match->inlier = true;
    }
    else
      match->inlier = false;
  }

  return result;
}
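/* A note on PyrImage::convCoordf as used here and throughout this file
   (an interpretation of the call pattern, not documented in this file):
   it maps a coordinate from one pyramid level to another, so converting
   from level s to level 0 scales by 2^s. Inlier distances are therefore
   always measured in full-resolution pixels, whatever the level a point
   was detected at. */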

bool planar_object_recognizer::valid(affinity * A)
{
  float det = float(cvmGet(A, 0, 0) * cvmGet(A, 1, 1) - cvmGet(A, 1, 0) * cvmGet(A, 0, 1));

  if (det < 0. || det > 4 * 4)
    return false;

  return true;
}
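/* The determinant of the linear part of the affinity is its area scaling:
   a negative value means a reflection, and a value above 16 means the
   object would appear scaled by more than 16 in area (about 4x linearly)
   relative to the model image. Both are rejected as implausible hypotheses. */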

bool planar_object_recognizer::estimate_affine_transformation(void)
{
  affinity * A = new affinity();
  int iteration = 0;

  int best_support = -1;
  do
  {
    int n1, n2, n3;

    iteration++;

    // Choose three correspondences randomly
    if (!three_random_correspondences(&n1, &n2, &n3))
    {
      //cerr << "No three random correspondences found." << endl;
      break;
    }

    image_object_point_match * m1 = matches + n1;
    image_object_point_match * m2 = matches + n2;
    image_object_point_match * m3 = matches + n3;

    // Estimation of A (affine matrix) from the 3 correspondences:
    A->estimate
      (
       PyrImage::convCoordf(float(m1->object_point->M[0]), int(m1->object_point->scale), 0),
       PyrImage::convCoordf(float(m1->object_point->M[1]), int(m1->object_point->scale), 0),

       PyrImage::convCoordf(float(m1->image_point->u), int(m1->image_point->scale), 0),
       PyrImage::convCoordf(float(m1->image_point->v), int(m1->image_point->scale), 0),

       PyrImage::convCoordf(float(m2->object_point->M[0]), int(m2->object_point->scale), 0),
       PyrImage::convCoordf(float(m2->object_point->M[1]), int(m2->object_point->scale), 0),

       PyrImage::convCoordf(float(m2->image_point->u), int(m2->image_point->scale), 0),
       PyrImage::convCoordf(float(m2->image_point->v), int(m2->image_point->scale), 0),

       PyrImage::convCoordf(float(m3->object_point->M[0]), int(m3->object_point->scale), 0),
       PyrImage::convCoordf(float(m3->object_point->M[1]), int(m3->object_point->scale), 0),

       PyrImage::convCoordf(float(m3->image_point->u), int(m3->image_point->scale), 0),
       PyrImage::convCoordf(float(m3->image_point->v), int(m3->image_point->scale), 0)
      );

    int support = compute_support_for_affine_transformation(A);

    if (support > best_support)
    {
      best_support = support;
      cvmCopy(A, affine_motion);
      if (best_support > ransac_stop_support)
        break;
    }
  } while(iteration < max_ransac_iterations);

  delete A;

  //cout << "Found support of " << best_support << " after " << iteration << " iterations." << endl;

  int support = compute_support_for_affine_transformation(affine_motion);

  return support > 10;
}
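/* Classic RANSAC: sample a minimal set (3 correspondences define an
   affinity), score the hypothesis by its inlier support, keep the best,
   and stop early once support exceeds ransac_stop_support. The final
   compute_support call re-labels the inlier flags for the winning
   transformation, and more than 10 supporting matches are required to
   declare the object present. */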

bool planar_object_recognizer::estimate_homographic_transformation_linear_method(void)
{
  int inlier_number = 0;
  int i;

  for(i = 0; i < match_number; i++)
    if (matches[i].inlier)
      inlier_number++;

  if (inlier_number < 10)
  {
    object_is_detected = false;
    return false;
  }

  H->set_match_number(inlier_number);
  for(i = 0; i < match_number; i++)
    if (matches[i].inlier)
      H->add_match(
          PyrImage::convCoordf(float(matches[i].object_point->M[0]), int(matches[i].object_point->scale), 0),
          PyrImage::convCoordf(float(matches[i].object_point->M[1]), int(matches[i].object_point->scale), 0),
          PyrImage::convCoordf(matches[i].image_point->u, int(matches[i].image_point->scale), 0),
          PyrImage::convCoordf(matches[i].image_point->v, int(matches[i].image_point->scale), 0));

  H->estimate();

  return true;
}
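/* Cost function for the non-linear refinement below. The homography is
   parametrized by its first 8 entries (the bottom-right entry is fixed
   to 1): state = (h11 h12 h13 h21 h22 h23 h31 h32). For a model point
   d = (x, y) the predicted image point is

     b = ( (h11*x + h12*y + h13) / w , (h21*x + h22*y + h23) / w ),
     with w = h31*x + h32*y + 1,

   and J is the 2x8 Jacobian of b with respect to the state, stored
   row-major, which the quotient rule gives directly (note the reuse of
   J[0..2] in the projective terms). */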
static void homography_error(
    const ls_minimizer2::flt_t * state,
    const ls_minimizer2::flt_t * d, int dsize,
    ls_minimizer2::flt_t * b,
    ls_minimizer2::flt_t * J,
    void ** /*user_data*/)
{
  double den = 1. / (state[6] * d[0] + state[7] * d[1] + 1.);

  b[0] = (state[0] * d[0] + state[1] * d[1] + state[2]) * den;
  b[1] = (state[3] * d[0] + state[4] * d[1] + state[5]) * den;

  if (J != 0)
  {
    J[0] = d[0] * den;
    J[1] = d[1] * den;
    J[2] = den;
    J[3] = 0;
    J[4] = 0;
    J[5] = 0;
    J[6] = -b[0] * J[0];
    J[7] = -b[0] * J[1];

    J[8] = 0;
    J[9] = 0;
    J[10] = 0;
    J[11] = J[0];
    J[12] = J[1];
    J[13] = J[2];
    J[14] = -b[1] * J[11];
    J[15] = -b[1] * J[12];
  }
}

bool planar_object_recognizer::estimate_homographic_transformation_nonlinear_method(void)
{
  int inlier_number = 0;

  for(int i = 0; i < match_number; i++)
    if (matches[i].inlier)
      inlier_number++;

  if (inlier_number < 10)
  {
    object_is_detected = false;
    return false;
  }

  homography_estimator->set_verbose_level(0);

  homography_estimator->reset_observations();
  for(int i = 0; i < match_number; i++)
    if (matches[i].inlier) {
      ls_minimizer2::flt_t d[2] = {
        PyrImage::convCoordf(float(matches[i].object_point->M[0]), int(matches[i].object_point->scale), 0),
        PyrImage::convCoordf(float(matches[i].object_point->M[1]), int(matches[i].object_point->scale), 0)};
      ls_minimizer2::flt_t b[2] = {
        PyrImage::convCoordf(matches[i].image_point->u, int(matches[i].image_point->scale), 0),
        PyrImage::convCoordf(matches[i].image_point->v, int(matches[i].image_point->scale), 0)};
      homography_estimator->add_observation_2data_2measures(homography_error, d, b);
    }

  double initial_state[8] = {affine_motion->cvmGet(0, 0), affine_motion->cvmGet(0, 1), affine_motion->cvmGet(0, 2),
                             affine_motion->cvmGet(1, 0), affine_motion->cvmGet(1, 1), affine_motion->cvmGet(1, 2),
                             0, 0};
  homography_estimator->minimize_using_levenberg_marquardt_from(initial_state);

  double * state = homography_estimator->state;

  H->cvmSet(0, 0, state[0]); H->cvmSet(0, 1, state[1]); H->cvmSet(0, 2, state[2]);
  H->cvmSet(1, 0, state[3]); H->cvmSet(1, 1, state[4]); H->cvmSet(1, 2, state[5]);
  H->cvmSet(2, 0, state[6]); H->cvmSet(2, 1, state[7]); H->cvmSet(2, 2, 1.);

  homography_estimator->reset_observations();
  for (int iter = 0; iter < 2; iter++) {
    inlier_number = 0;
    for (int i = 0; i < match_number; i++) {
      double tu = PyrImage::convCoordf(matches[i].image_point->u, int(matches[i].image_point->scale), 0);
      double tv = PyrImage::convCoordf(matches[i].image_point->v, int(matches[i].image_point->scale), 0);
      double mu = PyrImage::convCoordf(float(matches[i].object_point->M[0]), int(matches[i].object_point->scale), 0);
      double mv = PyrImage::convCoordf(float(matches[i].object_point->M[1]), int(matches[i].object_point->scale), 0);
      double hmu, hmv;
      H->transform_point(mu, mv, &hmu, &hmv);
      double du = hmu - tu;
      double dv = hmv - tv;

      if (sqrt(du*du + dv*dv) < non_linear_refine_threshold) {
        matches[i].inlier = true;
        inlier_number++;
        if (iter == 0) {
          ls_minimizer2::flt_t d[2] = { mu, mv };
          ls_minimizer2::flt_t b[2] = { tu, tv };
          homography_estimator->add_observation_2data_2measures(homography_error, d, b);
        }
      } else {
        matches[i].inlier = false;
      }
    }
    if (iter == 0 && inlier_number > 10) {
      homography_estimator->minimize_using_levenberg_marquardt_from(state);
      state = homography_estimator->state;

      H->cvmSet(0, 0, state[0]); H->cvmSet(0, 1, state[1]); H->cvmSet(0, 2, state[2]);
      H->cvmSet(1, 0, state[3]); H->cvmSet(1, 1, state[4]); H->cvmSet(1, 2, state[5]);
      H->cvmSet(2, 0, state[6]); H->cvmSet(2, 1, state[7]); H->cvmSet(2, 2, 1.);
    }
  }
  if (inlier_number < 10) {
    object_is_detected = false;
    return false;
  }

  homography_estimator->reset_observations();

  return true;
}
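/* Refinement strategy above: start Levenberg-Marquardt from the RANSAC
   affinity (with zero projective terms), then run two re-weighting passes.
   Pass 0 re-classifies inliers against the refined homography using the
   tighter non_linear_refine_threshold and re-minimizes over them; pass 1
   only recounts inliers, so the final accept/reject test reflects the
   final estimate. */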

void planar_object_recognizer::save_patch_before_and_after_correction(IplImage * image_before_smoothing,
                                                                      int u, int v, int patch_size,
                                                                      image_class_example * pv,
                                                                      int point_index,
                                                                      int call_number)
{
  char name[1000];

  IplImage * p = mcvGetPatch(image_before_smoothing, u, v, patch_size, patch_size);
  sprintf(name, "patches/kp_%03d_%04d.bmp", point_index, call_number);
  printf("Saving image in %s\n", name);
  mcvSaveImage(name, p);
  cvReleaseImage(&p);

  sprintf(name, "patches/kp_%03d_%04d_corrected.bmp", point_index, call_number);
  mcvSaveImage(name, pv->preprocessed);
}

pair<object_keypoint, int> * planar_object_recognizer::search_for_existing_model_point(
    vector< pair<object_keypoint, int> > * tmp_model_points,
    float cu, float cv, int scale)
{
  for(vector< pair<object_keypoint, int> >::iterator it = tmp_model_points->begin();
      it < tmp_model_points->end();
      it++)
    if (int(it->first.scale) == scale)
    {
      float dist2 =
        (float(it->first.M[0]) - cu) * (float(it->first.M[0]) - cu) +
        (float(it->first.M[1]) - cv) * (float(it->first.M[1]) - cv);

      if (dist2 < keypoint_distance_threshold * keypoint_distance_threshold)
        return &(*it);
    }

  return 0;
}

bool cmp_tmp_model_points(pair<object_keypoint, int> p1, pair<object_keypoint, int> p2)
{
  return p1.second > p2.second;
}

// Selection of the interest points from the image model
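// The idea, as implemented below: detect keypoints in the frontal view,
// then in view_nb randomly warped views; map every detection back into
// model coordinates, merge detections falling within
// keypoint_distance_threshold of an existing cluster, and finally keep
// the clusters seen in at least min_view_rate of the views, most
// frequently detected first.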
void planar_object_recognizer::detect_most_stable_model_points(int max_point_number_on_model,
                                                               int patch_size,
                                                               int view_nb, double min_view_rate,
                                                               LEARNPROGRESSION LearnProgress)
{
  cout << "Determining most stable points:" << endl;

  vector< pair<object_keypoint, int> > tmp_model_points;
  int K = 1;

  keypoint * model_points_2d = new keypoint[K * max_point_number_on_model];

  // First detection of interest points in the image model (frontal view):
  int model_point2d_number = point_detector->pyramidBlurDetect(new_images_generator.original_image, model_points_2d,
                                                               K * max_point_number_on_model);

  // Save an image of the interest points detected in the frontal view
  model_point_number = 0;
  model_points = new object_keypoint[max_point_number_on_model];
  for(int i = 0; i < model_point2d_number; i++)
  {
    keypoint * k = model_points_2d + i;
    if (new_images_generator.inside_roi( // Keep the point only if it lies on the target ROI
          int(PyrImage::convCoordf(k->u, int(k->scale), 0)),
          int(PyrImage::convCoordf(k->v, int(k->scale), 0))))
    {
      model_points[model_point_number].M[0] = k->u;
      model_points[model_point_number].M[1] = k->v;
      model_points[model_point_number].M[2] = 0;
      model_points[model_point_number].scale = k->scale;

      model_point_number++;

      if (model_point_number >= max_point_number_on_model)
        break;
    }
  }

  save_image_of_model_points(patch_size, "initial_model_points.bmp");
  delete [] model_points;

  // Create tmp_model_points (vector) from model_points_2d (array of keypoints);
  // tmp_model_points will be used afterwards.
  for(int i = 0; i < model_point2d_number; i++)
  {
    keypoint * k = model_points_2d + i;

    if (new_images_generator.inside_roi( // Keep the point only if it lies on the target ROI
          int(PyrImage::convCoordf(k->u, int(k->scale), 0)),
          int(PyrImage::convCoordf(k->v, int(k->scale), 0))))
    {
      pair<object_keypoint, int> * mp = search_for_existing_model_point(&tmp_model_points, k->u, k->v, int(k->scale));

      if (mp != 0) // Agglomerate clustered keypoints into one single keypoint
      {
        double n = double(++mp->second);
        mp->first.M[0] = (mp->first.M[0]*(n-1) + k->u)/n;
        mp->first.M[1] = (mp->first.M[1]*(n-1) + k->v)/n;
      }
      else
      {
        object_keypoint op;
        op.M[0] = k->u;
        op.M[1] = k->v;
        op.M[2] = 0;
        op.scale = k->scale;
        tmp_model_points.push_back(pair<object_keypoint, int>(op, 1));
      }
    }
  }

  // Generate random views and run detection on each view to determine the most frequent interest points
  bool use_random_background = new_images_generator.use_random_background;
  new_images_generator.set_use_random_background(false);

  for(int j = 0; j < view_nb; j++)
  {
    if (LearnProgress != 0)
      LearnProgress(DETECT_MODEL_POINTS, j, view_nb);
    else
      cout << "Generating views: " << view_nb - j << " \r" << flush;

    new_images_generator.generate_random_affine_transformation();
    new_images_generator.generate_object_view();

    int current_detected_point_number = point_detector->detect(&(new_images_generator.smoothed_generated_object_view->image),
                                                               model_points_2d, K * max_point_number_on_model);

    for(int i = 0; i < current_detected_point_number; i++)
    {
      keypoint * k = model_points_2d + i;
      float nu, nv;
      new_images_generator.inverse_affine_transformation(PyrImage::convCoordf(k->u, int(k->scale), 0),
                                                         PyrImage::convCoordf(k->v, int(k->scale), 0),
                                                         nu, nv);

      nu = PyrImage::convCoordf(nu, 0, int(k->scale));
      nv = PyrImage::convCoordf(nv, 0, int(k->scale));

      keypoint kd;
      kd.u = nu;
      kd.v = nv;
      kd.scale = k->scale;
      if (new_images_generator.inside_roi( // Keep the point only if it lies on the target ROI
            int(PyrImage::convCoordf(k->u, int(k->scale), 0)),
            int(PyrImage::convCoordf(k->v, int(k->scale), 0))))
      {
        pair<object_keypoint, int> * mp = search_for_existing_model_point(&tmp_model_points, nu, nv, int(k->scale));

        if (mp != 0) // Agglomerate clustered keypoints into one single keypoint
        {
          // Move the keypoint coordinates to the center of gravity of
          // all agglomerated keypoints.
          double n = double(++mp->second);
          mp->first.M[0] = (mp->first.M[0]*(n-1) + nu)/n;
          mp->first.M[1] = (mp->first.M[1]*(n-1) + nv)/n;
        }
        else
        {
          object_keypoint op;
          op.M[0] = nu;
          op.M[1] = nv;
          op.M[2] = 0;
          op.scale = k->scale;
          tmp_model_points.push_back(pair<object_keypoint, int>(op, 1));
        }
      }
    }
  }

  new_images_generator.set_use_random_background(use_random_background);

  sort(tmp_model_points.begin(), tmp_model_points.end(), cmp_tmp_model_points);

  int min_views = int(min_view_rate * (double)view_nb);
  int i;
  model_points = new object_keypoint[max_point_number_on_model];
  vector< pair<object_keypoint, int> >::iterator it;

  for( it = tmp_model_points.begin(), i = 0;
       (it < tmp_model_points.end()) && (i < max_point_number_on_model) && (it->second >= min_views);
       it++, i++)
  {
    model_points[i].M[0] = it->first.M[0];
    model_points[i].M[1] = it->first.M[1];
    model_points[i].M[2] = it->first.M[2];
    model_points[i].scale = it->first.scale;
    model_points[i].class_index = i;

    if (i == 0)
      cout << "Point " << i << " detected " << it->second << " times." << endl;
  }
  if (it != tmp_model_points.end()) // guard: 'it' is past-the-end when every candidate was kept
    cout << "... Point " << i << " detected " << it->second << " times ("
         << view_nb << " generated views, "
         << min_view_rate*100 << "% minimum rate)." << endl;

  model_point_number = i;

  new_images_generator.set_object_keypoints(model_points, model_point_number);

  delete [] model_points_2d;
}
1034 
1036 
1037 // VISUALIZATION: The following functions are useful for visualization only !!!
1038 

void planar_object_recognizer::save_image_of_model_points(int patch_size, char * filename)
{
  IplImage* model_image = mcvGrayToColor(new_images_generator.original_image);

  for(int i = 0; i < model_point_number; i++)
    cvCircle(model_image,
             cvPoint((int)PyrImage::convCoordf(float(model_points[i].M[0]), int(model_points[i].scale), 0),
                     (int)PyrImage::convCoordf(float(model_points[i].M[1]), int(model_points[i].scale), 0)),
             (int)PyrImage::convCoordf(patch_size/2.f, int(model_points[i].scale), 0),
             mcvRainbowColor(int(model_points[i].scale)), 1);

  if (filename == 0)
    mcvSaveImage("model_points.bmp", model_image);
  else
    mcvSaveImage(filename, model_image);

  cvReleaseImage(&model_image);
}

IplImage * planar_object_recognizer::create_result_image(IplImage * input_image,
                                                         bool p_draw_points,
                                                         bool p_draw_matches,
                                                         bool p_draw_object,
                                                         bool p_draw_model_image,
                                                         int line_width)
{
  concat_model_and_input_images(input_image, p_draw_model_image);

  if (p_draw_points) draw_points(line_width);
  if (p_draw_model_image)
  {
    if (p_draw_matches && object_is_detected) draw_matches(line_width);
    if (p_draw_object && object_is_detected) draw_model();
  }
  else
    if (p_draw_object && object_is_detected) draw_model();

  IplImage *im = model_and_input_images;
  model_and_input_images = 0;
  return im;
}

void planar_object_recognizer::draw_model(void)
{
  CvPoint C1 = cvPoint(int(u_input_image + detected_u_corner1), int(v_input_image + detected_v_corner1));
  CvPoint C2 = cvPoint(int(u_input_image + detected_u_corner2), int(v_input_image + detected_v_corner2));
  CvPoint C3 = cvPoint(int(u_input_image + detected_u_corner3), int(v_input_image + detected_v_corner3));
  CvPoint C4 = cvPoint(int(u_input_image + detected_u_corner4), int(v_input_image + detected_v_corner4));
  CvScalar col;

  col = cvScalar(0, 0, 0);
  cvLine(model_and_input_images, C1, C2, col, 6);
  cvLine(model_and_input_images, C2, C3, col, 6);
  cvLine(model_and_input_images, C3, C4, col, 6);
  cvLine(model_and_input_images, C4, C1, col, 6);

  col = cvScalar(255, 255, 255);
  cvLine(model_and_input_images, C1, C2, col, 2);
  cvLine(model_and_input_images, C2, C3, col, 2);
  cvLine(model_and_input_images, C3, C4, col, 2);
  cvLine(model_and_input_images, C4, C1, col, 2);
}

void planar_object_recognizer::save_one_image_per_match_input_to_model(IplImage * input_image, const char * matches_dir)
{
  int patch_size = forest->image_width;

  for(int i = 0; i < detected_point_number; i++)
  {
    concat_model_and_input_images(input_image);
    draw_input_image_points();

    image_class_example * pv = &(detected_point_views[i]);
    float u = pv->point2d->u, v = pv->point2d->v;
    int s = int(pv->point2d->scale);

    if (u > (patch_size/2) && u < object_input_view->image[s]->width - (patch_size/2) &&
        v > (patch_size/2) && v < object_input_view->image[s]->height - (patch_size/2))
    {
      CvPoint ip = cvPoint(u_input_image + int(PyrImage::convCoordf(pv->point2d->u, int(pv->point2d->scale), 0)),
                           v_input_image + int(PyrImage::convCoordf(pv->point2d->v, int(pv->point2d->scale), 0)));

      cvCircle(model_and_input_images, ip,
               int(PyrImage::convCoordf(patch_size/2.f, int(pv->point2d->scale), 0)), cvScalar(255, 255, 255), 1);

      float best_P = match_probabilities[i][0];
      int index_best_P = 0;
      for(int j = 0; j < model_point_number; j++)
      {
        if (match_probabilities[i][j] > 0.05)
        {
          object_keypoint * M = &(model_points[j]);
          int level = MIN(255, int(match_probabilities[i][j] / 0.2 * 255));
          CvScalar col = cvScalar(level, level, level);
          CvPoint mp = cvPoint(int(PyrImage::convCoordf(float(M->M[0]), int(M->scale), 0)),
                               int(PyrImage::convCoordf(float(M->M[1]), int(M->scale), 0)));

          cvCircle(model_and_input_images, mp, (int)PyrImage::convCoordf(patch_size/2.f, int(M->scale), 0), col, 1);

          cvLine(model_and_input_images, ip, mp, col, 1);
        }
        if (best_P < match_probabilities[i][j])
        {
          best_P = match_probabilities[i][j];
          index_best_P = j;
        }
      }

      {
        object_keypoint * M = &(model_points[index_best_P]);
        int level = MIN(255, int(match_probabilities[i][index_best_P] / 0.2 * 255));
        CvScalar col = cvScalar(level, level, level);
        CvPoint mp = cvPoint(int(PyrImage::convCoordf(float(M->M[0]), int(M->scale), 0)),
                             int(PyrImage::convCoordf(float(M->M[1]), int(M->scale), 0)));

        cvCircle(model_and_input_images, mp, int(PyrImage::convCoordf(patch_size/2.f, int(M->scale), 0)), col, 1);

        cvLine(model_and_input_images, ip, mp, col, 1);
      }
    }
    {
      char filename[10000];
      sprintf(filename, "%s/match%04d.bmp", matches_dir, i);
      mcvSaveImage(filename, model_and_input_images);
    }
  }
}

void planar_object_recognizer::save_one_image_per_match_model_to_input(IplImage * input_image, const char * matches_dir)
{
  int patch_size = forest->image_width;

  for(int j = 0; j < model_point_number; j++)
  {
    concat_model_and_input_images(input_image);
    // draw_input_image_points();

    object_keypoint * M = &(model_points[j]);
    CvPoint mp = cvPoint(int(PyrImage::convCoordf(float(M->M[0]), int(M->scale), 0)),
                         int(PyrImage::convCoordf(float(M->M[1]), int(M->scale), 0)));
    cvCircle(model_and_input_images, mp,
             int(PyrImage::convCoordf(patch_size/2.f, int(M->scale), 0)), cvScalar(0, 0, 0), 3);
    cvCircle(model_and_input_images, mp,
             int(PyrImage::convCoordf(patch_size/2.f, int(M->scale), 0)), cvScalar(255, 255, 255), 1);

    float best_P = match_probabilities[0][j];
    int index_best_P = 0;
    for(int i = 0; i < detected_point_number; i++)
    {
      image_class_example * pv = &(detected_point_views[i]);
      float u = pv->point2d->u, v = pv->point2d->v;
      int s = int(pv->point2d->scale);

      if (u > (patch_size / 2) && u < object_input_view->image[s]->width - (patch_size / 2) &&
          v > (patch_size / 2) && v < object_input_view->image[s]->height - (patch_size / 2))
      {
        if (best_P < match_probabilities[i][j])
        {
          best_P = match_probabilities[i][j];
          index_best_P = i;
        }
      }
    }

    {
      image_class_example * pv = &(detected_point_views[index_best_P]);
      float u = pv->point2d->u, v = pv->point2d->v;
      int s = int(pv->point2d->scale);

      CvPoint ip = cvPoint(u_input_image + int(PyrImage::convCoordf(u, s, 0)),
                           v_input_image + int(PyrImage::convCoordf(v, s, 0)));

      cvCircle(model_and_input_images, ip,
               int(PyrImage::convCoordf(patch_size/2.f, int(pv->point2d->scale), 0)),
               cvScalar(0, 0, 0), 3);
      cvLine(model_and_input_images, ip, mp, cvScalar(0, 0, 0), 3);

      cvCircle(model_and_input_images, ip,
               int(PyrImage::convCoordf(patch_size/2.f, int(pv->point2d->scale), 0)),
               cvScalar(255, 255, 255), 2);
      cvLine(model_and_input_images, ip, mp, cvScalar(255, 255, 255), 2);
    }

    char image_name[1000];
    sprintf(image_name, "%s/match%04d.bmp", matches_dir, j);
    mcvSaveImage(image_name, model_and_input_images);
  }
}

void planar_object_recognizer::save_one_image_per_match(IplImage * input_image, const char * matches_dir)
{
  int patch_size = forest->image_width;

  for(int i = 0; i < match_number; i++)
  {
    image_object_point_match * match = &(matches[i]);

    if (!match->inlier)
      continue;

    concat_model_and_input_images(input_image);

    CvPoint M = cvPoint(int(PyrImage::convCoordf(float(match->object_point->M[0]), int(match->object_point->scale), 0)),
                        int(PyrImage::convCoordf(float(match->object_point->M[1]), int(match->object_point->scale), 0)));

    cvCircle(model_and_input_images,
             M, int(PyrImage::convCoordf(patch_size/2.f, int(match->object_point->scale), 0)), cvScalar(0, 0, 0), 3);
    cvCircle(model_and_input_images,
             M, int(PyrImage::convCoordf(patch_size/2.f, int(match->object_point->scale), 0)), cvScalar(255, 255, 255), 1);

    CvPoint m = cvPoint(u_input_image + (int)PyrImage::convCoordf(match->image_point->u, int(match->object_point->scale), 0),
                        v_input_image + (int)PyrImage::convCoordf(match->image_point->v, int(match->object_point->scale), 0));

    cvCircle(model_and_input_images,
             m, int(PyrImage::convCoordf(patch_size/2.f, int(match->object_point->scale), 0)), cvScalar(0, 0, 0), 3);
    cvCircle(model_and_input_images,
             m, int(PyrImage::convCoordf(patch_size/2.f, int(match->object_point->scale), 0)), cvScalar(255, 255, 255), 1);

    char image_name[1000];
    sprintf(image_name, "%s/match%04d.bmp", matches_dir, i);
    mcvSaveImage(image_name, model_and_input_images);
  }
}

void planar_object_recognizer::concat_model_and_input_images(IplImage * input_image, bool draw_model_image)
{
  if (model_and_input_images != 0)
    cvReleaseImage(&model_and_input_images);

  if (draw_model_image)
  {
    model_and_input_images =
      cvCreateImage(cvSize(max(new_images_generator.original_image->width, input_image->width),
                           new_images_generator.original_image->height + input_image->height + 10), IPL_DEPTH_8U, 3);

    cvSet(model_and_input_images, cvScalar(128, 128, 128));

    x0 = 0;
    y0 = 0;
    mcvPut(model_and_input_images, new_images_generator.original_image, x0, y0);

    u_input_image = 0;
    v_input_image = new_images_generator.original_image->height + 10;
    mcvPut(model_and_input_images, input_image, u_input_image, v_input_image);
  }
  else
  {
    model_and_input_images = cvCloneImage(input_image);

    x0 = 0;
    y0 = 0;

    u_input_image = 0;
    v_input_image = 0;
  }
}

void planar_object_recognizer::draw_points(int line_width)
{
  draw_model_points(line_width);
  draw_input_image_points(line_width);
}

void planar_object_recognizer::draw_model_points(int line_width)
{
  int patch_size = forest->image_width;

  for(int i = 0; i < model_point_number; i++)
    cvCircle(model_and_input_images,
             cvPoint(x0 + (int)PyrImage::convCoordf(float(model_points[i].M[0]), int(model_points[i].scale), 0),
                     y0 + (int)PyrImage::convCoordf(float(model_points[i].M[1]), int(model_points[i].scale), 0)),
             (int)PyrImage::convCoordf(patch_size/2.f, int(model_points[i].scale), 0), CV_RGB(0, 0, 255), line_width);
}

void planar_object_recognizer::draw_input_image_points(int line_width)
{
  int patch_size = forest->image_width;

  for(int i = 0; i < detected_point_number; i++)
    cvCircle(model_and_input_images,
             cvPoint(u_input_image + (int)PyrImage::convCoordf(detected_points[i].u, int(detected_points[i].scale), 0),
                     v_input_image + (int)PyrImage::convCoordf(detected_points[i].v, int(detected_points[i].scale), 0)),
             (int)PyrImage::convCoordf(patch_size/2.f, int(detected_points[i].scale), 0),
             mcvRainbowColor(int(detected_points[i].scale)), line_width);
}

void planar_object_recognizer::draw_matches(int line_width)
{
  for(int i = 0; i < match_number; i++)
  {
    image_object_point_match * match = &(matches[i]);

    if (match->inlier)
    {
      CvScalar col = cvScalar(255, 255, 255);

      cvLine(model_and_input_images,
             cvPoint(int(PyrImage::convCoordf(float(match->object_point->M[0]), int(match->object_point->scale), 0)),
                     int(PyrImage::convCoordf(float(match->object_point->M[1]), int(match->object_point->scale), 0))),
             cvPoint(u_input_image + int(PyrImage::convCoordf(match->image_point->u, int(match->image_point->scale), 0)),
                     v_input_image + int(PyrImage::convCoordf(match->image_point->v, int(match->image_point->scale), 0))),
             col, line_width);
    }
  }
}

void planar_object_recognizer::draw_inlier_matches(int line_width)
{
  for(int i = 0; i < match_number; i++)
  {
    image_object_point_match * match = &(matches[i]);

    if (match->inlier)
    {
      cvLine(model_and_input_images,
             cvPoint(u_input_image + int(match->image_point->u), v_input_image + int(match->image_point->v)),
             cvPoint((int)match->object_point->M[0], (int)match->object_point->M[1]),
             CV_RGB(255, 0, 0), line_width);
    }
  }
}
1363 
1368 {
1369  int w = image->width;
1370  int h = image->height;
1371  int nbLev = new_images_generator.level_number;
1372 
1373  if (object_input_view->image[0]->width == w &&
1374  object_input_view->image[0]->height ==h)
1375  // Nothing to do: allocated resources have the right size
1376  return;
1377 
1378  //int u[4] = {-1, -1, -1, -1};
1379  //int v[4] = {-1, -1, -1, -1};
1380 
1381  if (new_images_generator.orientation_corrector != 0)
1382  delete new_images_generator.orientation_corrector;
1383  new_images_generator.orientation_corrector = new keypoint_orientation_corrector(
1384  w,h, new_images_generator.patch_size, nbLev);
1385 
1386  delete object_input_view;
1387  object_input_view = new object_view(w,h,nbLev);
1388  int yape_radius = point_detector->get_radius();
1389  delete point_detector;
1390  point_detector = new pyr_yape(w,h,nbLev);
1391  point_detector->set_radius(yape_radius);
1392 }
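
/* End-to-end usage sketch (illustrative assumptions only: the file name,
   capture source, and parameter values are not prescribed by this file):

     planar_object_recognizer recognizer;
     // model image "model.png"; 400 model points, 32x32 patches,
     // YAPE radius 7, 20 trees, 5 pyramid levels, no progress callback:
     recognizer.build_with_cache("model.png", 400, 32, 7, 20, 5, 0);
     while (IplImage *frame = cvQueryFrame(capture)) {  // 'capture' assumed
       if (recognizer.detect(frame)) {
         // detected_u_corner1..4 / detected_v_corner1..4 now hold the
         // object outline in image coordinates, and H the refined homography.
       }
     }
*/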