/****************************************************************************
 *
 * ViSP, open source Visual Servoing Platform software.
 * Copyright (C) 2005 - 2019 by Inria. All rights reserved.
 *
 * This software is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 * See the file LICENSE.txt at the root directory of this source
 * distribution for additional information about the GNU GPL.
 *
 * For using ViSP with software that can not be combined with the GNU
 * GPL, please contact Inria about acquiring a ViSP Professional
 * Edition License.
 *
 * See http://visp.inria.fr for more information.
 *
 * This software was developed at:
 * Inria Rennes - Bretagne Atlantique
 * Campus Universitaire de Beaulieu
 * 35042 Rennes Cedex
 * France
 *
 * If you have questions regarding the use of this file, please contact
 * Inria at visp@inria.fr
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Description:
 * Key point functionalities.
 *
 * Authors:
 * Souriya Trinh
 *
 *****************************************************************************/

#include <iomanip>
#include <limits>

#include <visp3/core/vpIoTools.h>
#include <visp3/vision/vpKeyPoint.h>

#if (VISP_HAVE_OPENCV_VERSION >= 0x020101)

#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
#include <opencv2/calib3d/calib3d.hpp>
#endif

#include <pugixml.hpp>

namespace
{
// Specific type transformation functions
inline cv::DMatch knnToDMatch(const std::vector<cv::DMatch> &knnMatches)
{
  if (knnMatches.size() > 0) {
    return knnMatches[0];
  }

  return cv::DMatch();
}

inline vpImagePoint matchRansacToVpImage(const std::pair<cv::KeyPoint, cv::Point3f> &pair)
{
  return vpImagePoint(pair.first.pt.y, pair.first.pt.x);
}

}

vpKeyPoint::vpKeyPoint(const vpFeatureDetectorType &detectorType, const vpFeatureDescriptorType &descriptorType,
                       const std::string &matcherName, const vpFilterMatchingType &filterType)
  : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
    m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(), m_detectors(),
    m_extractionTime(0.), m_extractorNames(), m_extractors(), m_filteredMatches(), m_filterType(filterType),
    m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(), m_matcher(),
    m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0), m_matchingRatioThreshold(0.85),
    m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200), m_nbRansacMinInlierCount(100),
    m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(), m_queryFilteredKeyPoints(), m_queryKeyPoints(),
    m_ransacConsensusPercentage(20.0), m_ransacFilterFlag(vpPose::NO_FILTER), m_ransacInliers(), m_ransacOutliers(),
    m_ransacParallel(false), m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0),
    m_ransacThreshold(0.01), m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(),
    m_useAffineDetection(false),
#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
    m_useBruteForceCrossCheck(true),
#endif
    m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
    m_useSingleMatchFilter(true), m_I(), m_maxFeatures(-1)
{
  initFeatureNames();

  m_detectorNames.push_back(m_mapOfDetectorNames[detectorType]);
  m_extractorNames.push_back(m_mapOfDescriptorNames[descriptorType]);

  init();
}
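
/* Illustrative usage sketch (added note, not part of the original file): building
   a matcher from the enumerated types. The enum values below are assumed from the
   vpKeyPoint public API; "BruteForce-Hamming" follows the OpenCV descriptor-matcher
   naming convention, suited to binary descriptors such as ORB.

   vpKeyPoint keypoint(vpKeyPoint::DETECTOR_ORB, vpKeyPoint::DESCRIPTOR_ORB,
                       "BruteForce-Hamming", vpKeyPoint::ratioDistanceThreshold);
*/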

vpKeyPoint::vpKeyPoint(const std::string &detectorName, const std::string &extractorName,
                       const std::string &matcherName, const vpFilterMatchingType &filterType)
  : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
    m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(), m_detectors(),
    m_extractionTime(0.), m_extractorNames(), m_extractors(), m_filteredMatches(), m_filterType(filterType),
    m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(), m_matcher(),
    m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0), m_matchingRatioThreshold(0.85),
    m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200), m_nbRansacMinInlierCount(100),
    m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(), m_queryFilteredKeyPoints(), m_queryKeyPoints(),
    m_ransacConsensusPercentage(20.0), m_ransacFilterFlag(vpPose::NO_FILTER), m_ransacInliers(), m_ransacOutliers(),
    m_ransacParallel(false), m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0),
    m_ransacThreshold(0.01), m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(),
    m_useAffineDetection(false),
#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
    m_useBruteForceCrossCheck(true),
#endif
    m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
    m_useSingleMatchFilter(true), m_I(), m_maxFeatures(-1)
{
  initFeatureNames();

  m_detectorNames.push_back(detectorName);
  m_extractorNames.push_back(extractorName);

  init();
}

vpKeyPoint::vpKeyPoint(const std::vector<std::string> &detectorNames, const std::vector<std::string> &extractorNames,
                       const std::string &matcherName, const vpFilterMatchingType &filterType)
  : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
    m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(detectorNames),
    m_detectors(), m_extractionTime(0.), m_extractorNames(extractorNames), m_extractors(), m_filteredMatches(),
    m_filterType(filterType), m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(),
    m_matcher(), m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0),
    m_matchingRatioThreshold(0.85), m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200),
    m_nbRansacMinInlierCount(100), m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(),
    m_queryFilteredKeyPoints(), m_queryKeyPoints(), m_ransacConsensusPercentage(20.0),
    m_ransacFilterFlag(vpPose::NO_FILTER), m_ransacInliers(), m_ransacOutliers(), m_ransacParallel(false),
    m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0), m_ransacThreshold(0.01),
    m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(), m_useAffineDetection(false),
#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
    m_useBruteForceCrossCheck(true),
#endif
    m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
    m_useSingleMatchFilter(true), m_I(), m_maxFeatures(-1)
{
  initFeatureNames();
  init();
}

void vpKeyPoint::affineSkew(double tilt, double phi, cv::Mat &img, cv::Mat &mask, cv::Mat &Ai)
{
  int h = img.rows;
  int w = img.cols;

  mask = cv::Mat(h, w, CV_8UC1, cv::Scalar(255));

  cv::Mat A = cv::Mat::eye(2, 3, CV_32F);

  // if (phi != 0.0) {
  if (std::fabs(phi) > std::numeric_limits<double>::epsilon()) {
    phi *= M_PI / 180.;
    double s = sin(phi);
    double c = cos(phi);

    A = (cv::Mat_<float>(2, 2) << c, -s, s, c);

    cv::Mat corners = (cv::Mat_<float>(4, 2) << 0, 0, w, 0, w, h, 0, h);
    cv::Mat tcorners = corners * A.t();
    cv::Mat tcorners_x, tcorners_y;
    tcorners.col(0).copyTo(tcorners_x);
    tcorners.col(1).copyTo(tcorners_y);
    std::vector<cv::Mat> channels;
    channels.push_back(tcorners_x);
    channels.push_back(tcorners_y);
    cv::merge(channels, tcorners);

    cv::Rect rect = cv::boundingRect(tcorners);
    A = (cv::Mat_<float>(2, 3) << c, -s, -rect.x, s, c, -rect.y);

    cv::warpAffine(img, img, A, cv::Size(rect.width, rect.height), cv::INTER_LINEAR, cv::BORDER_REPLICATE);
  }
  // if (tilt != 1.0) {
  if (std::fabs(tilt - 1.0) > std::numeric_limits<double>::epsilon()) {
    double s = 0.8 * sqrt(tilt * tilt - 1);
    cv::GaussianBlur(img, img, cv::Size(0, 0), s, 0.01);
    cv::resize(img, img, cv::Size(0, 0), 1.0 / tilt, 1.0, cv::INTER_NEAREST);
    A.row(0) = A.row(0) / tilt;
  }
  // if (tilt != 1.0 || phi != 0.0) {
  if (std::fabs(tilt - 1.0) > std::numeric_limits<double>::epsilon() ||
      std::fabs(phi) > std::numeric_limits<double>::epsilon()) {
    h = img.rows;
    w = img.cols;
    cv::warpAffine(mask, mask, A, cv::Size(w, h), cv::INTER_NEAREST);
  }
  cv::invertAffineTransform(A, Ai);
}
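
/* Note on usage (added, not part of the original file): affineSkew() simulates the
   view of a camera whose optical axis is tilted with respect to the image plane, as
   in the ASIFT scheme of Morel and Yu. detectExtractAffine() is expected to sample a
   range of (tilt, phi) pairs and run detection on each warped image; Ai then maps
   the keypoints found in the warped image back to the original image frame. A
   minimal sketch of such a sampling loop, under those assumptions:

   for (int k = 1; k < 6; k++) {
     double t = std::pow(2.0, 0.5 * k);             // tilt amount
     for (double phi = 0.; phi < 180.; phi += 72.0 / t) {
       cv::Mat timg = img.clone(), mask, Ai;
       affineSkew(t, phi, timg, mask, Ai);          // warp the image, get inverse map
       // ... detect keypoints on timg, then re-project them with Ai ...
     }
   }
*/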

unsigned int vpKeyPoint::buildReference(const vpImage<vpRGBa> &I_color) { return buildReference(I_color, vpRect()); }

unsigned int vpKeyPoint::buildReference(const vpImage<unsigned char> &I, const vpImagePoint &iP,
                                        unsigned int height, unsigned int width)
{
  return buildReference(I, vpRect(iP, width, height));
}

unsigned int vpKeyPoint::buildReference(const vpImage<vpRGBa> &I_color, const vpImagePoint &iP,
                                        unsigned int height, unsigned int width)
{
  return buildReference(I_color, vpRect(iP, width, height));
}

unsigned int vpKeyPoint::buildReference(const vpImage<unsigned char> &I, const vpRect &rectangle)
{
  // Reset variables used when dealing with 3D models.
  // Since no 3D point list is passed, we don't need these variables.
  m_trainPoints.clear();
  m_mapOfImageId.clear();
  m_mapOfImages.clear();
  m_currentImageId = 1;

  if (m_useAffineDetection) {
    std::vector<std::vector<cv::KeyPoint> > listOfTrainKeyPoints;
    std::vector<cv::Mat> listOfTrainDescriptors;

    // Detect keypoints and extract descriptors on multiple images
    detectExtractAffine(I, listOfTrainKeyPoints, listOfTrainDescriptors);

    // Flatten the different train lists
    m_trainKeyPoints.clear();
    for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfTrainKeyPoints.begin();
         it != listOfTrainKeyPoints.end(); ++it) {
      m_trainKeyPoints.insert(m_trainKeyPoints.end(), it->begin(), it->end());
    }

    bool first = true;
    for (std::vector<cv::Mat>::const_iterator it = listOfTrainDescriptors.begin(); it != listOfTrainDescriptors.end(); ++it) {
      if (first) {
        first = false;
        it->copyTo(m_trainDescriptors);
      } else {
        m_trainDescriptors.push_back(*it);
      }
    }
  } else {
    detect(I, m_trainKeyPoints, m_detectionTime, rectangle);
    extract(I, m_trainKeyPoints, m_trainDescriptors, m_extractionTime);
  }

  // Save the correspondence between the keypoint class_id and the training image_id
  // in a map. Used to display the matching with all the training images.
  for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
    m_mapOfImageId[it->class_id] = m_currentImageId;
  }

  // Save the image in a map at a specific image_id
  m_mapOfImages[m_currentImageId] = I;

  // Convert OpenCV type to ViSP type for compatibility
  vpConvert::convertFromOpenCV(m_trainKeyPoints, referenceImagePointsList);

  _reference_computed = true;

  // Add train descriptors in matcher object
  m_matcher->clear();
  m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));

  return static_cast<unsigned int>(m_trainKeyPoints.size());
}
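
/* Illustrative usage sketch (added, not part of the original file): building a
   reference database from a single training image. The file name is hypothetical.

   vpImage<unsigned char> Iref;
   vpImageIo::read(Iref, "reference.png");
   vpKeyPoint keypoint("ORB", "ORB", "BruteForce-Hamming");
   unsigned int nbRefPoints = keypoint.buildReference(Iref);  // whole image
   // or restrict detection to a region of interest:
   // keypoint.buildReference(Iref, vpRect(vpImagePoint(50, 50), 200, 100));
*/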

unsigned int vpKeyPoint::buildReference(const vpImage<vpRGBa> &I_color, const vpRect &rectangle)
{
  vpImageConvert::convert(I_color, m_I);
  return (buildReference(m_I, rectangle));
}

unsigned int vpKeyPoint::buildReference(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &trainKeyPoints,
                                        std::vector<cv::Point3f> &points3f, bool append, int class_id)
{
  cv::Mat trainDescriptors;
  // Copy the input list of keypoints
  std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;

  extract(I, trainKeyPoints, trainDescriptors, m_extractionTime, &points3f);

  if (trainKeyPoints.size() != trainKeyPoints_tmp.size()) {
    // Keypoints have been removed
    // Store the hash of a keypoint as the key and the index of the keypoint
    // as the value
    std::map<size_t, size_t> mapOfKeypointHashes;
    size_t cpt = 0;
    for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end();
         ++it, cpt++) {
      mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
    }

    std::vector<cv::Point3f> trainPoints_tmp;
    for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
      if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
        trainPoints_tmp.push_back(points3f[mapOfKeypointHashes[myKeypointHash(*it)]]);
      }
    }

    // Copy trainPoints_tmp to points3f
    points3f = trainPoints_tmp;
  }

  return (buildReference(I, trainKeyPoints, trainDescriptors, points3f, append, class_id));
}

unsigned int vpKeyPoint::buildReference(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &trainKeyPoints,
                                        std::vector<cv::Point3f> &points3f, bool append, int class_id)
{
  cv::Mat trainDescriptors;
  // Copy the input list of keypoints
  std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;

  extract(I_color, trainKeyPoints, trainDescriptors, m_extractionTime, &points3f);

  if (trainKeyPoints.size() != trainKeyPoints_tmp.size()) {
    // Keypoints have been removed
    // Store the hash of a keypoint as the key and the index of the keypoint
    // as the value
    std::map<size_t, size_t> mapOfKeypointHashes;
    size_t cpt = 0;
    for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end();
         ++it, cpt++) {
      mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
    }

    std::vector<cv::Point3f> trainPoints_tmp;
    for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
      if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
        trainPoints_tmp.push_back(points3f[mapOfKeypointHashes[myKeypointHash(*it)]]);
      }
    }

    // Copy trainPoints_tmp to points3f
    points3f = trainPoints_tmp;
  }

  return (buildReference(I_color, trainKeyPoints, trainDescriptors, points3f, append, class_id));
}

unsigned int vpKeyPoint::buildReference(const vpImage<unsigned char> &I, const std::vector<cv::KeyPoint> &trainKeyPoints,
                                        const cv::Mat &trainDescriptors, const std::vector<cv::Point3f> &points3f,
                                        bool append, int class_id)
{
  if (!append) {
    m_trainPoints.clear();
    m_mapOfImageId.clear();
    m_mapOfImages.clear();
    m_currentImageId = 0;
    m_trainKeyPoints.clear();
  }

  m_currentImageId++;

  std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;
  // Set class_id if != -1
  if (class_id != -1) {
    for (std::vector<cv::KeyPoint>::iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end(); ++it) {
      it->class_id = class_id;
    }
  }

  // Save the correspondence between the keypoint class_id and the training image_id
  // in a map. Used to display the matching with all the training images.
  for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end(); ++it) {
    m_mapOfImageId[it->class_id] = m_currentImageId;
  }

  // Save the image in a map at a specific image_id
  m_mapOfImages[m_currentImageId] = I;

  // Append reference lists
  m_trainKeyPoints.insert(m_trainKeyPoints.end(), trainKeyPoints_tmp.begin(), trainKeyPoints_tmp.end());
  if (!append) {
    trainDescriptors.copyTo(m_trainDescriptors);
  } else {
    m_trainDescriptors.push_back(trainDescriptors);
  }
  this->m_trainPoints.insert(m_trainPoints.end(), points3f.begin(), points3f.end());

  // Convert OpenCV type to ViSP type for compatibility
  vpConvert::convertFromOpenCV(m_trainKeyPoints, referenceImagePointsList);
  vpConvert::convertFromOpenCV(m_trainPoints, m_trainVpPoints);

  // Add train descriptors in matcher object
  m_matcher->clear();
  m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));

  _reference_computed = true;

  return static_cast<unsigned int>(m_trainKeyPoints.size());
}

unsigned int vpKeyPoint::buildReference(const vpImage<vpRGBa> &I_color, const std::vector<cv::KeyPoint> &trainKeyPoints,
                                        const cv::Mat &trainDescriptors, const std::vector<cv::Point3f> &points3f,
                                        bool append, int class_id)
{
  vpImageConvert::convert(I_color, m_I);
  return (buildReference(m_I, trainKeyPoints, trainDescriptors, points3f, append, class_id));
}

void vpKeyPoint::compute3D(const cv::KeyPoint &candidate, const std::vector<vpPoint> &roi,
                           const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo, cv::Point3f &point)
{
  /* compute plane equation */
  std::vector<vpPoint>::const_iterator it_roi = roi.begin();
  vpPoint pts[3];
  pts[0] = *it_roi;
  ++it_roi;
  pts[1] = *it_roi;
  ++it_roi;
  pts[2] = *it_roi;
  vpPlane Po(pts[0], pts[1], pts[2]);
  double xc = 0.0, yc = 0.0;
  vpPixelMeterConversion::convertPoint(cam, candidate.pt.x, candidate.pt.y, xc, yc);
  double Z = -Po.getD() / (Po.getA() * xc + Po.getB() * yc + Po.getC());
  double X = xc * Z;
  double Y = yc * Z;
  vpColVector point_cam(4);
  point_cam[0] = X;
  point_cam[1] = Y;
  point_cam[2] = Z;
  point_cam[3] = 1;
  vpColVector point_obj(4);
  point_obj = cMo.inverse() * point_cam;
  point = cv::Point3f((float)point_obj[0], (float)point_obj[1], (float)point_obj[2]);
}
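
/* Derivation note (added, not part of the original file): the ROI is assumed planar,
   so with the plane expressed in the camera frame as A*X + B*Y + C*Z + D = 0 and the
   keypoint in normalized coordinates (x, y) such that X = x*Z and Y = y*Z,
   substitution gives A*x*Z + B*y*Z + C*Z + D = 0, hence
     Z = -D / (A*x + B*y + C)
   which is the expression used above; the camera-frame point is then transformed
   into the object frame with cMo^-1. */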

void vpKeyPoint::compute3D(const vpImagePoint &candidate, const std::vector<vpPoint> &roi,
                           const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo, vpPoint &point)
{
  /* compute plane equation */
  std::vector<vpPoint>::const_iterator it_roi = roi.begin();
  vpPoint pts[3];
  pts[0] = *it_roi;
  ++it_roi;
  pts[1] = *it_roi;
  ++it_roi;
  pts[2] = *it_roi;
  vpPlane Po(pts[0], pts[1], pts[2]);
  double xc = 0.0, yc = 0.0;
  vpPixelMeterConversion::convertPoint(cam, candidate, xc, yc);
  double Z = -Po.getD() / (Po.getA() * xc + Po.getB() * yc + Po.getC());
  double X = xc * Z;
  double Y = yc * Z;
  vpColVector point_cam(4);
  point_cam[0] = X;
  point_cam[1] = Y;
  point_cam[2] = Z;
  point_cam[3] = 1;
  vpColVector point_obj(4);
  point_obj = cMo.inverse() * point_cam;
  point.setWorldCoordinates(point_obj);
}

void vpKeyPoint::compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam,
                                              std::vector<cv::KeyPoint> &candidates,
                                              const std::vector<vpPolygon> &polygons,
                                              const std::vector<std::vector<vpPoint> > &roisPt,
                                              std::vector<cv::Point3f> &points, cv::Mat *descriptors)
{
  std::vector<cv::KeyPoint> candidatesToCheck = candidates;
  candidates.clear();
  points.clear();
  vpImagePoint imPt;
  cv::Point3f pt;
  cv::Mat desc;

  std::vector<std::pair<cv::KeyPoint, size_t> > pairOfCandidatesToCheck(candidatesToCheck.size());
  for (size_t i = 0; i < candidatesToCheck.size(); i++) {
    pairOfCandidatesToCheck[i] = std::pair<cv::KeyPoint, size_t>(candidatesToCheck[i], i);
  }

  size_t cpt1 = 0;
  std::vector<vpPolygon> polygons_tmp = polygons;
  for (std::vector<vpPolygon>::iterator it1 = polygons_tmp.begin(); it1 != polygons_tmp.end(); ++it1, cpt1++) {
    std::vector<std::pair<cv::KeyPoint, size_t> >::iterator it2 = pairOfCandidatesToCheck.begin();

    while (it2 != pairOfCandidatesToCheck.end()) {
      imPt.set_ij(it2->first.pt.y, it2->first.pt.x);
      if (it1->isInside(imPt)) {
        candidates.push_back(it2->first);
        vpKeyPoint::compute3D(it2->first, roisPt[cpt1], cam, cMo, pt);
        points.push_back(pt);

        if (descriptors != NULL) {
          desc.push_back(descriptors->row((int)it2->second));
        }

        // Remove the candidate keypoint, which lies inside the current polygon
        it2 = pairOfCandidatesToCheck.erase(it2);
      } else {
        ++it2;
      }
    }
  }

  if (descriptors != NULL) {
    desc.copyTo(*descriptors);
  }
}

void vpKeyPoint::compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam,
                                              std::vector<vpImagePoint> &candidates,
                                              const std::vector<vpPolygon> &polygons,
                                              const std::vector<std::vector<vpPoint> > &roisPt,
                                              std::vector<vpPoint> &points, cv::Mat *descriptors)
{
  std::vector<vpImagePoint> candidatesToCheck = candidates;
  candidates.clear();
  points.clear();
  vpPoint pt;
  cv::Mat desc;

  std::vector<std::pair<vpImagePoint, size_t> > pairOfCandidatesToCheck(candidatesToCheck.size());
  for (size_t i = 0; i < candidatesToCheck.size(); i++) {
    pairOfCandidatesToCheck[i] = std::pair<vpImagePoint, size_t>(candidatesToCheck[i], i);
  }

  size_t cpt1 = 0;
  std::vector<vpPolygon> polygons_tmp = polygons;
  for (std::vector<vpPolygon>::iterator it1 = polygons_tmp.begin(); it1 != polygons_tmp.end(); ++it1, cpt1++) {
    std::vector<std::pair<vpImagePoint, size_t> >::iterator it2 = pairOfCandidatesToCheck.begin();

    while (it2 != pairOfCandidatesToCheck.end()) {
      if (it1->isInside(it2->first)) {
        candidates.push_back(it2->first);
        vpKeyPoint::compute3D(it2->first, roisPt[cpt1], cam, cMo, pt);
        points.push_back(pt);

        if (descriptors != NULL) {
          desc.push_back(descriptors->row((int)it2->second));
        }

        // Remove the candidate keypoint, which lies inside the current polygon
        it2 = pairOfCandidatesToCheck.erase(it2);
      } else {
        ++it2;
      }
    }
  }

  // Copy the filtered descriptors back to the caller; this step was missing in
  // the original listing and mirrors the cv::KeyPoint overload above, so that
  // *descriptors stays row-aligned with the filtered candidates
  if (descriptors != NULL) {
    desc.copyTo(*descriptors);
  }
}

void vpKeyPoint::compute3DForPointsOnCylinders(
    const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector<cv::KeyPoint> &candidates,
    const std::vector<vpCylinder> &cylinders,
    const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois, std::vector<cv::Point3f> &points,
    cv::Mat *descriptors)
{
  std::vector<cv::KeyPoint> candidatesToCheck = candidates;
  candidates.clear();
  points.clear();
  cv::Mat desc;

  // Keep only keypoints on cylinders
  size_t cpt_keypoint = 0;
  for (std::vector<cv::KeyPoint>::const_iterator it1 = candidatesToCheck.begin(); it1 != candidatesToCheck.end();
       ++it1, cpt_keypoint++) {
    size_t cpt_cylinder = 0;

    // Iterate through the list of vpCylinders
    for (std::vector<std::vector<std::vector<vpImagePoint> > >::const_iterator it2 = vectorOfCylinderRois.begin();
         it2 != vectorOfCylinderRois.end(); ++it2, cpt_cylinder++) {
      // Iterate through the list of the bounding boxes of the current vpCylinder
      for (std::vector<std::vector<vpImagePoint> >::const_iterator it3 = it2->begin(); it3 != it2->end(); ++it3) {
        if (vpPolygon::isInside(*it3, it1->pt.y, it1->pt.x)) {
          candidates.push_back(*it1);

          // Calculate the 3D coordinates for each keypoint located on a cylinder
          double xm = 0.0, ym = 0.0;
          vpPixelMeterConversion::convertPoint(cam, it1->pt.x, it1->pt.y, xm, ym);
          double Z = cylinders[cpt_cylinder].computeZ(xm, ym);

          if (!vpMath::isNaN(Z) && Z > std::numeric_limits<double>::epsilon()) {
            vpColVector point_cam(4);
            point_cam[0] = xm * Z;
            point_cam[1] = ym * Z;
            point_cam[2] = Z;
            point_cam[3] = 1;
            vpColVector point_obj(4);
            point_obj = cMo.inverse() * point_cam;
            vpPoint pt;
            pt.setWorldCoordinates(point_obj);
            points.push_back(cv::Point3f((float)pt.get_oX(), (float)pt.get_oY(), (float)pt.get_oZ()));

            if (descriptors != NULL) {
              desc.push_back(descriptors->row((int)cpt_keypoint));
            }

            break;
          }
        }
      }
    }
  }

  if (descriptors != NULL) {
    desc.copyTo(*descriptors);
  }
}

void vpKeyPoint::compute3DForPointsOnCylinders(
    const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector<vpImagePoint> &candidates,
    const std::vector<vpCylinder> &cylinders,
    const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois, std::vector<vpPoint> &points,
    cv::Mat *descriptors)
{
  std::vector<vpImagePoint> candidatesToCheck = candidates;
  candidates.clear();
  points.clear();
  cv::Mat desc;

  // Keep only keypoints on cylinders
  size_t cpt_keypoint = 0;
  for (std::vector<vpImagePoint>::const_iterator it1 = candidatesToCheck.begin(); it1 != candidatesToCheck.end();
       ++it1, cpt_keypoint++) {
    size_t cpt_cylinder = 0;

    // Iterate through the list of vpCylinders
    for (std::vector<std::vector<std::vector<vpImagePoint> > >::const_iterator it2 = vectorOfCylinderRois.begin();
         it2 != vectorOfCylinderRois.end(); ++it2, cpt_cylinder++) {
      // Iterate through the list of the bounding boxes of the current vpCylinder
      for (std::vector<std::vector<vpImagePoint> >::const_iterator it3 = it2->begin(); it3 != it2->end(); ++it3) {
        if (vpPolygon::isInside(*it3, it1->get_i(), it1->get_j())) {
          candidates.push_back(*it1);

          // Calculate the 3D coordinates for each keypoint located on a cylinder
          double xm = 0.0, ym = 0.0;
          vpPixelMeterConversion::convertPoint(cam, it1->get_u(), it1->get_v(), xm, ym);
          double Z = cylinders[cpt_cylinder].computeZ(xm, ym);

          if (!vpMath::isNaN(Z) && Z > std::numeric_limits<double>::epsilon()) {
            vpColVector point_cam(4);
            point_cam[0] = xm * Z;
            point_cam[1] = ym * Z;
            point_cam[2] = Z;
            point_cam[3] = 1;
            vpColVector point_obj(4);
            point_obj = cMo.inverse() * point_cam;
            vpPoint pt;
            pt.setWorldCoordinates(point_obj);
            points.push_back(pt);

            if (descriptors != NULL) {
              desc.push_back(descriptors->row((int)cpt_keypoint));
            }

            break;
          }
        }
      }
    }
  }

  if (descriptors != NULL) {
    desc.copyTo(*descriptors);
  }
}

bool vpKeyPoint::computePose(const std::vector<cv::Point2f> &imagePoints, const std::vector<cv::Point3f> &objectPoints,
                             const vpCameraParameters &cam, vpHomogeneousMatrix &cMo, std::vector<int> &inlierIndex,
                             double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &))
{
  double t = vpTime::measureTimeMs();

  if (imagePoints.size() < 4 || objectPoints.size() < 4 || imagePoints.size() != objectPoints.size()) {
    elapsedTime = (vpTime::measureTimeMs() - t);
    std::cerr << "Not enough points to compute the pose (at least 4 points "
                 "are needed)."
              << std::endl;

    return false;
  }

  cv::Mat cameraMatrix =
      (cv::Mat_<double>(3, 3) << cam.get_px(), 0, cam.get_u0(), 0, cam.get_py(), cam.get_v0(), 0, 0, 1);
  cv::Mat rvec, tvec;

  // Bug with OpenCV < 2.4.0 when zero distortion is provided as an empty array
  // (http://code.opencv.org/issues/1700, http://code.opencv.org/issues/1718):
  //   what(): Distortion coefficients must be 1x4, 4x1, 1x5, 5x1, 1x8 or 8x1
  //   floating-point vector in function cvProjectPoints2
  // Fixed in OpenCV 2.4.0 (r7558), so we pass explicit zero coefficients
  // instead of an empty cv::Mat distCoeffs;
  cv::Mat distCoeffs = cv::Mat::zeros(1, 5, CV_64F);

  try {
#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
    // OpenCV 3.0.0 (2014/12/12)
    cv::solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec, false, m_nbRansacIterations,
                       (float)m_ransacReprojectionError,
                       0.99, // confidence=0.99 (default): the probability
                             // that the algorithm produces a useful result.
                       inlierIndex, cv::SOLVEPNP_ITERATIVE);
    // Available PnP flags:
    // - SOLVEPNP_ITERATIVE (default): iterative method based on Levenberg-Marquardt
    //   optimization. It finds the pose that minimizes the reprojection error, that
    //   is the sum of squared distances between the observed projections
    //   (imagePoints) and the projected (using projectPoints()) objectPoints.
    // - SOLVEPNP_P3P: based on the paper of X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang,
    //   "Complete Solution Classification for the Perspective-Three-Point Problem".
    //   Requires exactly four object and image points.
    // - SOLVEPNP_EPNP: introduced by F. Moreno-Noguer, V. Lepetit and P. Fua in the
    //   paper "EPnP: Efficient Perspective-n-Point Camera Pose Estimation".
    // - SOLVEPNP_DLS: based on the paper of Joel A. Hesch and Stergios I. Roumeliotis,
    //   "A Direct Least-Squares (DLS) Method for PnP".
    // - SOLVEPNP_UPNP: based on the paper of A. Penate-Sanchez, J. Andrade-Cetto and
    //   F. Moreno-Noguer, "Exhaustive Linearization for Robust Camera Pose and Focal
    //   Length Estimation". It also estimates the parameters f_x and f_y, assuming
    //   both have the same value, and updates cameraMatrix with the estimated focal
    //   length.
#else
    int nbInlierToReachConsensus = m_nbRansacMinInlierCount;
    if (m_useConsensusPercentage) {
      nbInlierToReachConsensus = (int)(m_ransacConsensusPercentage / 100.0 * (double)m_queryFilteredKeyPoints.size());
    }

    cv::solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec, false, m_nbRansacIterations,
                       (float)m_ransacReprojectionError, nbInlierToReachConsensus, inlierIndex);
#endif
  } catch (cv::Exception &e) {
    std::cerr << e.what() << std::endl;
    elapsedTime = (vpTime::measureTimeMs() - t);
    return false;
  }
  vpTranslationVector translationVec(tvec.at<double>(0), tvec.at<double>(1), tvec.at<double>(2));
  vpThetaUVector thetaUVector(rvec.at<double>(0), rvec.at<double>(1), rvec.at<double>(2));
  cMo = vpHomogeneousMatrix(translationVec, thetaUVector);

  if (func != NULL) {
    // Check the final pose returned by solvePnPRansac to discard
    // solutions which do not respect the pose criterion.
    if (!func(cMo)) {
      elapsedTime = (vpTime::measureTimeMs() - t);
      return false;
    }
  }

  elapsedTime = (vpTime::measureTimeMs() - t);
  return true;
}
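
/* Illustrative usage sketch (added, not part of the original file): calling the
   OpenCV-based RANSAC pose estimation directly. The 2D/3D correspondences are
   assumed to be already matched, in pixels and in the object frame respectively;
   the intrinsics below are hypothetical.

   std::vector<cv::Point2f> imagePoints;   // matched 2D points (pixels)
   std::vector<cv::Point3f> objectPoints;  // corresponding 3D points (object frame)
   vpCameraParameters cam(600, 600, 320, 240);
   vpHomogeneousMatrix cMo;
   std::vector<int> inlierIndex;
   double elapsedTime;
   bool ok = keypoint.computePose(imagePoints, objectPoints, cam, cMo, inlierIndex,
                                  elapsedTime, NULL);  // NULL: no extra pose criterion
*/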

bool vpKeyPoint::computePose(const std::vector<vpPoint> &objectVpPoints, vpHomogeneousMatrix &cMo,
                             std::vector<vpPoint> &inliers, double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &))
{
  std::vector<unsigned int> inlierIndex;
  return computePose(objectVpPoints, cMo, inliers, inlierIndex, elapsedTime, func);
}

bool vpKeyPoint::computePose(const std::vector<vpPoint> &objectVpPoints, vpHomogeneousMatrix &cMo,
                             std::vector<vpPoint> &inliers, std::vector<unsigned int> &inlierIndex, double &elapsedTime,
                             bool (*func)(const vpHomogeneousMatrix &))
{
  double t = vpTime::measureTimeMs();

  if (objectVpPoints.size() < 4) {
    elapsedTime = (vpTime::measureTimeMs() - t);
    // std::cerr << "Not enough points to compute the pose (at least 4 points are needed)." << std::endl;

    return false;
  }

  vpPose pose;

  for (std::vector<vpPoint>::const_iterator it = objectVpPoints.begin(); it != objectVpPoints.end(); ++it) {
    pose.addPoint(*it);
  }

  unsigned int nbInlierToReachConsensus = (unsigned int)m_nbRansacMinInlierCount;
  if (m_useConsensusPercentage) {
    nbInlierToReachConsensus =
        (unsigned int)(m_ransacConsensusPercentage / 100.0 * (double)m_queryFilteredKeyPoints.size());
  }

  pose.setRansacFilterFlag(m_ransacFilterFlag);
  pose.setUseParallelRansac(m_ransacParallel);
  pose.setNbParallelRansacThreads(m_ransacParallelNbThreads);
  pose.setRansacNbInliersToReachConsensus(nbInlierToReachConsensus);
  pose.setRansacThreshold(m_ransacThreshold);
  pose.setRansacMaxTrials(m_nbRansacIterations);

  bool isRansacPoseEstimationOk = false;
  try {
    pose.setCovarianceComputation(m_computeCovariance);
    isRansacPoseEstimationOk = pose.computePose(vpPose::RANSAC, cMo, func);
    inliers = pose.getRansacInliers();
    inlierIndex = pose.getRansacInlierIndex();

    if (m_computeCovariance) {
      m_covarianceMatrix = pose.getCovarianceMatrix();
    }
  } catch (const vpException &e) {
    std::cerr << "e=" << e.what() << std::endl;
    elapsedTime = (vpTime::measureTimeMs() - t);
    return false;
  }

  // if (func != NULL && isRansacPoseEstimationOk) {
  //   // Check the final pose returned by the RANSAC VVS pose estimation, as in
  //   // some rare cases we can converge toward a final cMo that does not respect
  //   // the pose criterion even if the 4 minimal points picked do respect it.
  //   if (!func(&cMo)) {
  //     elapsedTime = (vpTime::measureTimeMs() - t);
  //     return false;
  //   }
  // }

  elapsedTime = (vpTime::measureTimeMs() - t);
  return isRansacPoseEstimationOk;
}
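
/* Tuning sketch (added, not part of the original file): the RANSAC behaviour used
   above is driven by the m_ransac* members, which are expected to be exposed
   through setters such as the ones below (names assumed from the vpKeyPoint API).

   keypoint.setRansacIteration(500);               // maximum number of RANSAC trials
   keypoint.setRansacThreshold(0.005);             // inlier threshold used by vpPose
   keypoint.setUseRansacVVS(true);                 // refine with virtual visual servoing
   keypoint.setUseRansacConsensusPercentage(true); // consensus as a percentage...
   keypoint.setRansacConsensusPercentage(30.0);    // ...of the filtered query points
*/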

double vpKeyPoint::computePoseEstimationError(const std::vector<std::pair<cv::KeyPoint, cv::Point3f> > &matchKeyPoints,
                                              const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo_est)
{
  if (matchKeyPoints.size() == 0) {
    // return std::numeric_limits<double>::max(); // causes a compilation error
    // under Windows, where max is defined as a macro; fixing it would require
    // #undef max, so DBL_MAX is used instead
    return DBL_MAX;
  }

  std::vector<double> errors(matchKeyPoints.size());
  size_t cpt = 0;
  vpPoint pt;
  for (std::vector<std::pair<cv::KeyPoint, cv::Point3f> >::const_iterator it = matchKeyPoints.begin();
       it != matchKeyPoints.end(); ++it, cpt++) {
    pt.set_oX(it->second.x);
    pt.set_oY(it->second.y);
    pt.set_oZ(it->second.z);
    pt.project(cMo_est);
    double u = 0.0, v = 0.0;
    vpMeterPixelConversion::convertPoint(cam, pt.get_x(), pt.get_y(), u, v);
    errors[cpt] = std::sqrt((u - it->first.pt.x) * (u - it->first.pt.x) + (v - it->first.pt.y) * (v - it->first.pt.y));
  }

  return std::accumulate(errors.begin(), errors.end(), 0.0) / errors.size();
}
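
/* Formula note (added, not part of the original file): the value returned above is
   the mean reprojection error over the N matched pairs,
     e = (1/N) * sum_i sqrt((u_i - u'_i)^2 + (v_i - v'_i)^2)
   where (u_i, v_i) is the projection of the i-th 3D point under the estimated pose
   cMo_est and (u'_i, v'_i) is the detected keypoint position. */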

void vpKeyPoint::createImageMatching(vpImage<unsigned char> &IRef, vpImage<unsigned char> &ICurrent,
                                     vpImage<unsigned char> &IMatching)
{
  // Image matching side by side
  unsigned int width = IRef.getWidth() + ICurrent.getWidth();
  // Extra parentheses around std::max protect against the Windows max() macro
  unsigned int height = ((std::max))(IRef.getHeight(), ICurrent.getHeight());

  IMatching = vpImage<unsigned char>(height, width);
}

void vpKeyPoint::createImageMatching(vpImage<unsigned char> &IRef, vpImage<vpRGBa> &ICurrent,
                                     vpImage<vpRGBa> &IMatching)
{
  // Image matching side by side
  unsigned int width = IRef.getWidth() + ICurrent.getWidth();
  unsigned int height = ((std::max))(IRef.getHeight(), ICurrent.getHeight());

  IMatching = vpImage<vpRGBa>(height, width);
}

void vpKeyPoint::createImageMatching(vpImage<unsigned char> &ICurrent, vpImage<unsigned char> &IMatching)
{
  // Number of images in the training database + the current image in which we
  // want to detect the object
  unsigned int nbImg = (unsigned int)(m_mapOfImages.size() + 1);

  if (m_mapOfImages.empty()) {
    std::cerr << "There is no training image loaded!" << std::endl;
    return;
  }

  if (nbImg == 2) {
    // Only one training image, so we display them side by side
    createImageMatching(m_mapOfImages.begin()->second, ICurrent, IMatching);
  } else {
    // Multiple training images, display them as a mosaic image
    // (unsigned int) std::floor(std::sqrt((double) nbImg) + 0.5);
    unsigned int nbImgSqrt = (unsigned int)vpMath::round(std::sqrt((double)nbImg));

    // Number of columns in the mosaic grid
    unsigned int nbWidth = nbImgSqrt;
    // Number of rows in the mosaic grid
    unsigned int nbHeight = nbImgSqrt;

    // Deal with a non-square mosaic grid when the total number of images is
    // not a perfect square
    if (nbImgSqrt * nbImgSqrt < nbImg) {
      nbWidth++;
    }

    unsigned int maxW = ICurrent.getWidth();
    unsigned int maxH = ICurrent.getHeight();
    for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
         ++it) {
      if (maxW < it->second.getWidth()) {
        maxW = it->second.getWidth();
      }

      if (maxH < it->second.getHeight()) {
        maxH = it->second.getHeight();
      }
    }

    IMatching = vpImage<unsigned char>(maxH * nbHeight, maxW * nbWidth);
  }
}
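
/* Worked example (added, not part of the original file): with 4 training images,
   nbImg = 5, so nbImgSqrt = round(sqrt(5)) = 2; since 2 * 2 < 5 the grid is widened
   to 3 columns x 2 rows, and IMatching is allocated as (2 * maxH) x (3 * maxW). */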

void vpKeyPoint::createImageMatching(vpImage<vpRGBa> &ICurrent, vpImage<vpRGBa> &IMatching)
{
  // Number of images in the training database + the current image in which we
  // want to detect the object
  unsigned int nbImg = (unsigned int)(m_mapOfImages.size() + 1);

  if (m_mapOfImages.empty()) {
    std::cerr << "There is no training image loaded!" << std::endl;
    return;
  }

  if (nbImg == 2) {
    // Only one training image, so we display them side by side
    createImageMatching(m_mapOfImages.begin()->second, ICurrent, IMatching);
  } else {
    // Multiple training images, display them as a mosaic image
    // (unsigned int) std::floor(std::sqrt((double) nbImg) + 0.5);
    unsigned int nbImgSqrt = (unsigned int)vpMath::round(std::sqrt((double)nbImg));

    // Number of columns in the mosaic grid
    unsigned int nbWidth = nbImgSqrt;
    // Number of rows in the mosaic grid
    unsigned int nbHeight = nbImgSqrt;

    // Deal with a non-square mosaic grid when the total number of images is
    // not a perfect square
    if (nbImgSqrt * nbImgSqrt < nbImg) {
      nbWidth++;
    }

    unsigned int maxW = ICurrent.getWidth();
    unsigned int maxH = ICurrent.getHeight();
    for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
         ++it) {
      if (maxW < it->second.getWidth()) {
        maxW = it->second.getWidth();
      }

      if (maxH < it->second.getHeight()) {
        maxH = it->second.getHeight();
      }
    }

    IMatching = vpImage<vpRGBa>(maxH * nbHeight, maxW * nbWidth);
  }
}

void vpKeyPoint::detect(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, const vpRect &rectangle)
{
  double elapsedTime;
  detect(I, keyPoints, elapsedTime, rectangle);
}

void vpKeyPoint::detect(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, const vpRect &rectangle)
{
  double elapsedTime;
  detect(I_color, keyPoints, elapsedTime, rectangle);
}

void vpKeyPoint::detect(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, const cv::Mat &mask)
{
  double elapsedTime;
  detect(matImg, keyPoints, elapsedTime, mask);
}

void vpKeyPoint::detect(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, double &elapsedTime,
                        const vpRect &rectangle)
{
  cv::Mat matImg;
  vpImageConvert::convert(I, matImg, false);
  cv::Mat mask = cv::Mat::zeros(matImg.rows, matImg.cols, CV_8U);

  if (rectangle.getWidth() > 0 && rectangle.getHeight() > 0) {
    cv::Point leftTop((int)rectangle.getLeft(), (int)rectangle.getTop()),
        rightBottom((int)rectangle.getRight(), (int)rectangle.getBottom());
    cv::rectangle(mask, leftTop, rightBottom, cv::Scalar(255), CV_FILLED);
  } else {
    mask = cv::Mat::ones(matImg.rows, matImg.cols, CV_8U) * 255;
  }

  detect(matImg, keyPoints, elapsedTime, mask);
}

void vpKeyPoint::detect(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, double &elapsedTime,
                        const vpRect &rectangle)
{
  cv::Mat matImg;
  vpImageConvert::convert(I_color, matImg);
  cv::Mat mask = cv::Mat::zeros(matImg.rows, matImg.cols, CV_8U);

  if (rectangle.getWidth() > 0 && rectangle.getHeight() > 0) {
    cv::Point leftTop((int)rectangle.getLeft(), (int)rectangle.getTop()),
        rightBottom((int)rectangle.getRight(), (int)rectangle.getBottom());
    cv::rectangle(mask, leftTop, rightBottom, cv::Scalar(255), CV_FILLED);
  } else {
    mask = cv::Mat::ones(matImg.rows, matImg.cols, CV_8U) * 255;
  }

  detect(matImg, keyPoints, elapsedTime, mask);
}

void vpKeyPoint::detect(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, double &elapsedTime,
                        const cv::Mat &mask)
{
  double t = vpTime::measureTimeMs();
  keyPoints.clear();

  for (std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
       it != m_detectors.end(); ++it) {
    std::vector<cv::KeyPoint> kp;

    it->second->detect(matImg, kp, mask);
    keyPoints.insert(keyPoints.end(), kp.begin(), kp.end());
  }

  elapsedTime = vpTime::measureTimeMs() - t;
}
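
/* Illustrative usage sketch (added, not part of the original file): detecting
   keypoints inside a rectangular region of interest; the mask built above
   restricts detection to that rectangle. The ROI values are hypothetical.

   vpImage<unsigned char> I;
   // ... acquire or load I ...
   std::vector<cv::KeyPoint> keyPoints;
   vpRect roi(vpImagePoint(10, 10), 320, 240);
   keypoint.detect(I, keyPoints, roi);
*/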

void vpKeyPoint::display(const vpImage<unsigned char> &IRef, const vpImage<unsigned char> &ICurrent, unsigned int size)
{
  std::vector<vpImagePoint> vpQueryImageKeyPoints;
  getQueryKeyPoints(vpQueryImageKeyPoints);
  std::vector<vpImagePoint> vpTrainImageKeyPoints;
  getTrainKeyPoints(vpTrainImageKeyPoints);

  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    vpDisplay::displayCross(IRef, vpTrainImageKeyPoints[(size_t)(it->trainIdx)], size, vpColor::red);
    vpDisplay::displayCross(ICurrent, vpQueryImageKeyPoints[(size_t)(it->queryIdx)], size, vpColor::green);
  }
}

void vpKeyPoint::display(const vpImage<vpRGBa> &IRef, const vpImage<vpRGBa> &ICurrent, unsigned int size)
{
  std::vector<vpImagePoint> vpQueryImageKeyPoints;
  getQueryKeyPoints(vpQueryImageKeyPoints);
  std::vector<vpImagePoint> vpTrainImageKeyPoints;
  getTrainKeyPoints(vpTrainImageKeyPoints);

  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    vpDisplay::displayCross(IRef, vpTrainImageKeyPoints[(size_t)(it->trainIdx)], size, vpColor::red);
    vpDisplay::displayCross(ICurrent, vpQueryImageKeyPoints[(size_t)(it->queryIdx)], size, vpColor::green);
  }
}

void vpKeyPoint::display(const vpImage<unsigned char> &ICurrent, unsigned int size, const vpColor &color)
{
  std::vector<vpImagePoint> vpQueryImageKeyPoints;
  getQueryKeyPoints(vpQueryImageKeyPoints);

  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    vpDisplay::displayCross(ICurrent, vpQueryImageKeyPoints[(size_t)(it->queryIdx)], size, color);
  }
}

void vpKeyPoint::display(const vpImage<vpRGBa> &ICurrent, unsigned int size, const vpColor &color)
{
  std::vector<vpImagePoint> vpQueryImageKeyPoints;
  getQueryKeyPoints(vpQueryImageKeyPoints);

  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    vpDisplay::displayCross(ICurrent, vpQueryImageKeyPoints[(size_t)(it->queryIdx)], size, color);
  }
}

void vpKeyPoint::displayMatching(const vpImage<unsigned char> &IRef, vpImage<unsigned char> &IMatching,
                                 unsigned int crossSize, unsigned int lineThickness, const vpColor &color)
{
  bool randomColor = (color == vpColor::none);
  srand((unsigned int)time(NULL));
  vpColor currentColor = color;

  std::vector<vpImagePoint> queryImageKeyPoints;
  getQueryKeyPoints(queryImageKeyPoints);
  std::vector<vpImagePoint> trainImageKeyPoints;
  getTrainKeyPoints(trainImageKeyPoints);

  vpImagePoint leftPt, rightPt;
  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    if (randomColor) {
      currentColor = vpColor((rand() % 256), (rand() % 256), (rand() % 256));
    }

    leftPt = trainImageKeyPoints[(size_t)(it->trainIdx)];
    rightPt = vpImagePoint(queryImageKeyPoints[(size_t)(it->queryIdx)].get_i(),
                           queryImageKeyPoints[(size_t)it->queryIdx].get_j() + IRef.getWidth());
    vpDisplay::displayCross(IMatching, leftPt, crossSize, currentColor);
    vpDisplay::displayCross(IMatching, rightPt, crossSize, currentColor);
    vpDisplay::displayLine(IMatching, leftPt, rightPt, currentColor, lineThickness);
  }
}

void vpKeyPoint::displayMatching(const vpImage<vpRGBa> &IRef, vpImage<vpRGBa> &IMatching,
                                 unsigned int crossSize, unsigned int lineThickness, const vpColor &color)
{
  bool randomColor = (color == vpColor::none);
  srand((unsigned int)time(NULL));
  vpColor currentColor = color;

  std::vector<vpImagePoint> queryImageKeyPoints;
  getQueryKeyPoints(queryImageKeyPoints);
  std::vector<vpImagePoint> trainImageKeyPoints;
  getTrainKeyPoints(trainImageKeyPoints);

  vpImagePoint leftPt, rightPt;
  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    if (randomColor) {
      currentColor = vpColor((rand() % 256), (rand() % 256), (rand() % 256));
    }

    leftPt = trainImageKeyPoints[(size_t)(it->trainIdx)];
    rightPt = vpImagePoint(queryImageKeyPoints[(size_t)(it->queryIdx)].get_i(),
                           queryImageKeyPoints[(size_t)it->queryIdx].get_j() + IRef.getWidth());
    vpDisplay::displayCross(IMatching, leftPt, crossSize, currentColor);
    vpDisplay::displayCross(IMatching, rightPt, crossSize, currentColor);
    vpDisplay::displayLine(IMatching, leftPt, rightPt, currentColor, lineThickness);
  }
}

void vpKeyPoint::displayMatching(const vpImage<unsigned char> &IRef, vpImage<vpRGBa> &IMatching,
                                 unsigned int crossSize, unsigned int lineThickness, const vpColor &color)
{
  bool randomColor = (color == vpColor::none);
  srand((unsigned int)time(NULL));
  vpColor currentColor = color;

  std::vector<vpImagePoint> queryImageKeyPoints;
  getQueryKeyPoints(queryImageKeyPoints);
  std::vector<vpImagePoint> trainImageKeyPoints;
  getTrainKeyPoints(trainImageKeyPoints);

  vpImagePoint leftPt, rightPt;
  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    if (randomColor) {
      currentColor = vpColor((rand() % 256), (rand() % 256), (rand() % 256));
    }

    leftPt = trainImageKeyPoints[(size_t)(it->trainIdx)];
    rightPt = vpImagePoint(queryImageKeyPoints[(size_t)(it->queryIdx)].get_i(),
                           queryImageKeyPoints[(size_t)it->queryIdx].get_j() + IRef.getWidth());
    vpDisplay::displayCross(IMatching, leftPt, crossSize, currentColor);
    vpDisplay::displayCross(IMatching, rightPt, crossSize, currentColor);
    vpDisplay::displayLine(IMatching, leftPt, rightPt, currentColor, lineThickness);
  }
}

void vpKeyPoint::displayMatching(const vpImage<unsigned char> &ICurrent, vpImage<unsigned char> &IMatching,
                                 const std::vector<vpImagePoint> &ransacInliers, unsigned int crossSize,
                                 unsigned int lineThickness)
{
  if (m_mapOfImages.empty() || m_mapOfImageId.empty()) {
    // No training images so return
    std::cerr << "There is no training image loaded!" << std::endl;
    return;
  }

  // Number of images in the training database + the current image in which we
  // want to detect the object
  int nbImg = (int)(m_mapOfImages.size() + 1);

  if (nbImg == 2) {
    // Only one training image, so we display the matching result side-by-side
    displayMatching(m_mapOfImages.begin()->second, IMatching, crossSize);
  } else {
    // Multiple training images, display them as a mosaic image
    int nbImgSqrt = vpMath::round(std::sqrt((double)nbImg)); // (int) std::floor(std::sqrt((double) nbImg) + 0.5);
    int nbWidth = nbImgSqrt;
    int nbHeight = nbImgSqrt;

    if (nbImgSqrt * nbImgSqrt < nbImg) {
      nbWidth++;
    }

    std::map<int, int> mapOfImageIdIndex;
    int cpt = 0;
    unsigned int maxW = ICurrent.getWidth(), maxH = ICurrent.getHeight();
    for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
         ++it, cpt++) {
      mapOfImageIdIndex[it->first] = cpt;

      if (maxW < it->second.getWidth()) {
        maxW = it->second.getWidth();
      }

      if (maxH < it->second.getHeight()) {
        maxH = it->second.getHeight();
      }
    }

    // Index of the current image in the grid, computed so that the current
    // image is preferably placed at the center of the mosaic
    int medianI = nbHeight / 2;
    int medianJ = nbWidth / 2;
    int medianIndex = medianI * nbWidth + medianJ;
    for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
      vpImagePoint topLeftCorner;
      int current_class_id_index = 0;
      if (mapOfImageIdIndex[m_mapOfImageId[it->class_id]] < medianIndex) {
        current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]];
      } else {
        // Shift by one the index of the training images that come after the
        // current image
        current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]] + 1;
      }

      int indexI = current_class_id_index / nbWidth;
      int indexJ = current_class_id_index - (indexI * nbWidth);
      topLeftCorner.set_ij((int)maxH * indexI, (int)maxW * indexJ);

      // Display cross for keypoints in the learning database
      vpDisplay::displayCross(IMatching, (int)(it->pt.y + topLeftCorner.get_i()),
                              (int)(it->pt.x + topLeftCorner.get_j()), crossSize, vpColor::red);
    }

    vpImagePoint topLeftCorner((int)maxH * medianI, (int)maxW * medianJ);
    for (std::vector<cv::KeyPoint>::const_iterator it = m_queryKeyPoints.begin(); it != m_queryKeyPoints.end(); ++it) {
      // Display cross for keypoints detected in the current image
      vpDisplay::displayCross(IMatching, (int)(it->pt.y + topLeftCorner.get_i()),
                              (int)(it->pt.x + topLeftCorner.get_j()), crossSize, vpColor::red);
    }
    for (std::vector<vpImagePoint>::const_iterator it = ransacInliers.begin(); it != ransacInliers.end(); ++it) {
      // Display green circle for RANSAC inliers
      vpDisplay::displayCircle(IMatching, (int)(it->get_v() + topLeftCorner.get_i()),
                               (int)(it->get_u() + topLeftCorner.get_j()), 4, vpColor::green);
    }
    for (std::vector<vpImagePoint>::const_iterator it = m_ransacOutliers.begin(); it != m_ransacOutliers.end(); ++it) {
      // Display red circle for RANSAC outliers
      vpDisplay::displayCircle(IMatching, (int)(it->get_i() + topLeftCorner.get_i()),
                               (int)(it->get_j() + topLeftCorner.get_j()), 4, vpColor::red);
    }

    for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
      int current_class_id = 0;
      if (mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]] < medianIndex) {
        current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]];
      } else {
        // Shift by one the index of the training images that come after the
        // current image
        current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]] + 1;
      }

      int indexI = current_class_id / nbWidth;
      int indexJ = current_class_id - (indexI * nbWidth);

      vpImagePoint end((int)maxH * indexI + m_trainKeyPoints[(size_t)it->trainIdx].pt.y,
                       (int)maxW * indexJ + m_trainKeyPoints[(size_t)it->trainIdx].pt.x);
      vpImagePoint start((int)maxH * medianI + m_queryFilteredKeyPoints[(size_t)it->queryIdx].pt.y,
                         (int)maxW * medianJ + m_queryFilteredKeyPoints[(size_t)it->queryIdx].pt.x);

      // Draw lines between the matched keypoints detected in the current image
      // and those detected in the training images
      vpDisplay::displayLine(IMatching, start, end, vpColor::green, lineThickness);
    }
  }
}

void vpKeyPoint::displayMatching(const vpImage<vpRGBa> &ICurrent, vpImage<vpRGBa> &IMatching,
                                 const std::vector<vpImagePoint> &ransacInliers, unsigned int crossSize,
                                 unsigned int lineThickness)
{
  if (m_mapOfImages.empty() || m_mapOfImageId.empty()) {
    // No training images so return
    std::cerr << "There is no training image loaded!" << std::endl;
    return;
  }

  // Number of images in the training database + the current image in which we
  // want to detect the object
  int nbImg = (int)(m_mapOfImages.size() + 1);

  if (nbImg == 2) {
    // Only one training image, so we display the matching result side-by-side
    displayMatching(m_mapOfImages.begin()->second, IMatching, crossSize);
  } else {
    // Multiple training images, display them as a mosaic image
    int nbImgSqrt = vpMath::round(std::sqrt((double)nbImg)); // (int) std::floor(std::sqrt((double) nbImg) + 0.5);
    int nbWidth = nbImgSqrt;
    int nbHeight = nbImgSqrt;

    if (nbImgSqrt * nbImgSqrt < nbImg) {
      nbWidth++;
    }

    std::map<int, int> mapOfImageIdIndex;
    int cpt = 0;
    unsigned int maxW = ICurrent.getWidth(), maxH = ICurrent.getHeight();
    for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
         ++it, cpt++) {
      mapOfImageIdIndex[it->first] = cpt;

      if (maxW < it->second.getWidth()) {
        maxW = it->second.getWidth();
      }

      if (maxH < it->second.getHeight()) {
        maxH = it->second.getHeight();
      }
    }

    // Index of the current image in the grid, computed so that the current
    // image is preferably placed at the center of the mosaic
    int medianI = nbHeight / 2;
    int medianJ = nbWidth / 2;
    int medianIndex = medianI * nbWidth + medianJ;
    for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
      vpImagePoint topLeftCorner;
      int current_class_id_index = 0;
      if (mapOfImageIdIndex[m_mapOfImageId[it->class_id]] < medianIndex) {
        current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]];
      } else {
        // Shift by one the index of the training images that come after the
        // current image
        current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]] + 1;
      }

      int indexI = current_class_id_index / nbWidth;
      int indexJ = current_class_id_index - (indexI * nbWidth);
      topLeftCorner.set_ij((int)maxH * indexI, (int)maxW * indexJ);

      // Display cross for keypoints in the learning database
      vpDisplay::displayCross(IMatching, (int)(it->pt.y + topLeftCorner.get_i()),
                              (int)(it->pt.x + topLeftCorner.get_j()), crossSize, vpColor::red);
    }

    vpImagePoint topLeftCorner((int)maxH * medianI, (int)maxW * medianJ);
    for (std::vector<cv::KeyPoint>::const_iterator it = m_queryKeyPoints.begin(); it != m_queryKeyPoints.end(); ++it) {
      // Display cross for keypoints detected in the current image
      vpDisplay::displayCross(IMatching, (int)(it->pt.y + topLeftCorner.get_i()),
                              (int)(it->pt.x + topLeftCorner.get_j()), crossSize, vpColor::red);
    }
    for (std::vector<vpImagePoint>::const_iterator it = ransacInliers.begin(); it != ransacInliers.end(); ++it) {
      // Display green circle for RANSAC inliers
      vpDisplay::displayCircle(IMatching, (int)(it->get_v() + topLeftCorner.get_i()),
                               (int)(it->get_u() + topLeftCorner.get_j()), 4, vpColor::green);
    }
    for (std::vector<vpImagePoint>::const_iterator it = m_ransacOutliers.begin(); it != m_ransacOutliers.end(); ++it) {
      // Display red circle for RANSAC outliers
      vpDisplay::displayCircle(IMatching, (int)(it->get_i() + topLeftCorner.get_i()),
                               (int)(it->get_j() + topLeftCorner.get_j()), 4, vpColor::red);
    }

    for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
      int current_class_id = 0;
      if (mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]] < medianIndex) {
        current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]];
      } else {
        // Shift by one the index of the training images that come after the
        // current image
        current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]] + 1;
      }

      int indexI = current_class_id / nbWidth;
      int indexJ = current_class_id - (indexI * nbWidth);

      vpImagePoint end((int)maxH * indexI + m_trainKeyPoints[(size_t)it->trainIdx].pt.y,
                       (int)maxW * indexJ + m_trainKeyPoints[(size_t)it->trainIdx].pt.x);
      vpImagePoint start((int)maxH * medianI + m_queryFilteredKeyPoints[(size_t)it->queryIdx].pt.y,
                         (int)maxW * medianJ + m_queryFilteredKeyPoints[(size_t)it->queryIdx].pt.x);

      // Draw lines between the matched keypoints detected in the current image
      // and those detected in the training images
      vpDisplay::displayLine(IMatching, start, end, vpColor::green, lineThickness);
    }
  }
}

void vpKeyPoint::extract(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
                         std::vector<cv::Point3f> *trainPoints)
{
  double elapsedTime;
  extract(I, keyPoints, descriptors, elapsedTime, trainPoints);
}

void vpKeyPoint::extract(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
                         std::vector<cv::Point3f> *trainPoints)
{
  double elapsedTime;
  extract(I_color, keyPoints, descriptors, elapsedTime, trainPoints);
}

void vpKeyPoint::extract(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
                         std::vector<cv::Point3f> *trainPoints)
{
  double elapsedTime;
  extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
}

void vpKeyPoint::extract(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
                         double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
{
  cv::Mat matImg;
  vpImageConvert::convert(I, matImg, false);
  extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
}

void vpKeyPoint::extract(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
                         double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
{
  cv::Mat matImg;
  vpImageConvert::convert(I_color, matImg);
  extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
}
1902
1914void vpKeyPoint::extract(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
1915 double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
1916{
1917 double t = vpTime::measureTimeMs();
1918 bool first = true;
1919
1920 for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator itd = m_extractors.begin();
1921 itd != m_extractors.end(); ++itd) {
1922 if (first) {
1923 first = false;
1924 // Check if we have 3D object points information
1925 if (trainPoints != NULL && !trainPoints->empty()) {
1926 // Copy the input list of keypoints; keypoints for which a descriptor
1927 // cannot be computed are removed by the compute function
1928 std::vector<cv::KeyPoint> keyPoints_tmp = keyPoints;
1929
1930 // Extract descriptors for the given list of keypoints
1931 itd->second->compute(matImg, keyPoints, descriptors);
1932
1933 if (keyPoints.size() != keyPoints_tmp.size()) {
1934 // Keypoints have been removed
1935 // Store the hash of a keypoint as the key and the index of the
1936 // keypoint as the value
1937 std::map<size_t, size_t> mapOfKeypointHashes;
1938 size_t cpt = 0;
1939 for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints_tmp.begin(); it != keyPoints_tmp.end();
1940 ++it, cpt++) {
1941 mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
1942 }
1943
1944 std::vector<cv::Point3f> trainPoints_tmp;
1945 for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints.begin(); it != keyPoints.end(); ++it) {
1946 if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
1947 trainPoints_tmp.push_back((*trainPoints)[mapOfKeypointHashes[myKeypointHash(*it)]]);
1948 }
1949 }
1950
1951 // Copy trainPoints_tmp back to the trainPoints output parameter
1952 *trainPoints = trainPoints_tmp;
1953 }
1954 } else {
1955 // Extract descriptors for the given list of keypoints
1956 itd->second->compute(matImg, keyPoints, descriptors);
1957 }
1958 } else {
1959 // Copy the input list of keypoints; keypoints for which a descriptor
1960 // cannot be computed are removed by the compute function
1961 std::vector<cv::KeyPoint> keyPoints_tmp = keyPoints;
1962
1963 cv::Mat desc;
1964 // Extract descriptors for the given list of keypoints
1965 itd->second->compute(matImg, keyPoints, desc);
1966
1967 if (keyPoints.size() != keyPoints_tmp.size()) {
1968 // Keypoints have been removed
1969 // Store the hash of a keypoint as the key and the index of the
1970 // keypoint as the value
1971 std::map<size_t, size_t> mapOfKeypointHashes;
1972 size_t cpt = 0;
1973 for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints_tmp.begin(); it != keyPoints_tmp.end();
1974 ++it, cpt++) {
1975 mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
1976 }
1977
1978 std::vector<cv::Point3f> trainPoints_tmp;
1979 cv::Mat descriptors_tmp;
1980 for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints.begin(); it != keyPoints.end(); ++it) {
1981 if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
1982 if (trainPoints != NULL && !trainPoints->empty()) {
1983 trainPoints_tmp.push_back((*trainPoints)[mapOfKeypointHashes[myKeypointHash(*it)]]);
1984 }
1985
1986 if (!descriptors.empty()) {
1987 descriptors_tmp.push_back(descriptors.row((int)mapOfKeypointHashes[myKeypointHash(*it)]));
1988 }
1989 }
1990 }
1991
1992 if (trainPoints != NULL) {
1993 // Copy trainPoints_tmp back to the trainPoints output parameter
1994 *trainPoints = trainPoints_tmp;
1995 }
1996 // Copy descriptors_tmp to descriptors
1997 descriptors_tmp.copyTo(descriptors);
1998 }
1999
2000 // Merge descriptors horizontally
2001 if (descriptors.empty()) {
2002 desc.copyTo(descriptors);
2003 } else {
2004 cv::hconcat(descriptors, desc, descriptors);
2005 }
2006 }
2007 }
2008
2009 if (keyPoints.size() != (size_t)descriptors.rows) {
2010 std::cerr << "keyPoints.size() != (size_t) descriptors.rows" << std::endl;
2011 }
2012 elapsedTime = vpTime::measureTimeMs() - t;
2013}
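// A minimal sketch of the trainPoints bookkeeping performed above: when a 3D
// object point is known for each input keypoint, extract() prunes the 3D list
// in sync with the keypoints dropped by the descriptor computation. The
// container contents are assumed to come from an earlier detection step:
//
//   std::vector<cv::KeyPoint> keyPoints;  // filled by a detector
//   std::vector<cv::Point3f> points3D;    // one 3D object point per keypoint
//   cv::Mat descriptors;
//   double elapsedTime;
//   keypoint.extract(matImg, keyPoints, descriptors, elapsedTime, &points3D);
//   // Post-condition: keyPoints.size() == points3D.size() == (size_t)descriptors.rows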
2014
2018void vpKeyPoint::filterMatches()
2019{
2020 std::vector<cv::KeyPoint> queryKpts;
2021 std::vector<cv::Point3f> trainPts;
2022 std::vector<cv::DMatch> m;
2023
2024 if (m_useKnn) {
2025 // double max_dist = 0;
2026 // double min_dist = std::numeric_limits<double>::max(); // creates an
2027 // error under Windows; fixing it would require #undef max, hence DBL_MAX
2028 double min_dist = DBL_MAX;
2029 double mean = 0.0;
2030 std::vector<double> distance_vec(m_knnMatches.size());
2031
2032 if (m_filterType == stdAndRatioDistanceThreshold) {
2033 for (size_t i = 0; i < m_knnMatches.size(); i++) {
2034 double dist = m_knnMatches[i][0].distance;
2035 mean += dist;
2036 distance_vec[i] = dist;
2037
2038 if (dist < min_dist) {
2039 min_dist = dist;
2040 }
2041 // if (dist > max_dist) {
2042 // max_dist = dist;
2043 //}
2044 }
2045 mean /= m_queryDescriptors.rows;
2046 }
2047
2048 double sq_sum = std::inner_product(distance_vec.begin(), distance_vec.end(), distance_vec.begin(), 0.0);
2049 double stdev = std::sqrt(sq_sum / distance_vec.size() - mean * mean);
2050 double threshold = min_dist + stdev;
2051
2052 for (size_t i = 0; i < m_knnMatches.size(); i++) {
2053 if (m_knnMatches[i].size() >= 2) {
2054 // Calculate ratio of the descriptor distance between the two nearest
2055 // neighbors of the keypoint
2056 float ratio = m_knnMatches[i][0].distance / m_knnMatches[i][1].distance;
2057 // float ratio = std::sqrt((vecMatches[i][0].distance *
2058 // vecMatches[i][0].distance)
2059 // / (vecMatches[i][1].distance *
2060 // vecMatches[i][1].distance));
2061 double dist = m_knnMatches[i][0].distance;
2062
2063 if (ratio < m_matchingRatioThreshold || (m_filterType == stdAndRatioDistanceThreshold && dist < threshold)) {
2064 m.push_back(cv::DMatch((int)queryKpts.size(), m_knnMatches[i][0].trainIdx, m_knnMatches[i][0].distance));
2065
2066 if (!m_trainPoints.empty()) {
2067 trainPts.push_back(m_trainPoints[(size_t)m_knnMatches[i][0].trainIdx]);
2068 }
2069 queryKpts.push_back(m_queryKeyPoints[(size_t)m_knnMatches[i][0].queryIdx]);
2070 }
2071 }
2072 }
2073 } else {
2074 // double max_dist = 0;
2075 // creates an error under Windows; fixing it would require #undef max:
2076 // double min_dist = std::numeric_limits<double>::max();
2077 double min_dist = DBL_MAX;
2078 double mean = 0.0;
2079 std::vector<double> distance_vec(m_matches.size());
2080 for (size_t i = 0; i < m_matches.size(); i++) {
2081 double dist = m_matches[i].distance;
2082 mean += dist;
2083 distance_vec[i] = dist;
2084
2085 if (dist < min_dist) {
2086 min_dist = dist;
2087 }
2088 // if (dist > max_dist) {
2089 // max_dist = dist;
2090 // }
2091 }
2092 mean /= m_queryDescriptors.rows;
2093
2094 double sq_sum = std::inner_product(distance_vec.begin(), distance_vec.end(), distance_vec.begin(), 0.0);
2095 double stdev = std::sqrt(sq_sum / distance_vec.size() - mean * mean);
2096
2097 // Define a threshold: keep all keypoints whose descriptor
2098 // distance falls below a factor of the minimum descriptor distance (over
2099 // all the query keypoints) or below the minimum descriptor distance +
2100 // the standard deviation (computed over all the query descriptor
2101 // distances)
2102 double threshold =
2103 m_filterType == constantFactorDistanceThreshold ? m_matchingFactorThreshold * min_dist : min_dist + stdev;
2104
2105 for (size_t i = 0; i < m_matches.size(); i++) {
2106 if (m_matches[i].distance <= threshold) {
2107 m.push_back(cv::DMatch((int)queryKpts.size(), m_matches[i].trainIdx, m_matches[i].distance));
2108
2109 if (!m_trainPoints.empty()) {
2110 trainPts.push_back(m_trainPoints[(size_t)m_matches[i].trainIdx]);
2111 }
2112 queryKpts.push_back(m_queryKeyPoints[(size_t)m_matches[i].queryIdx]);
2113 }
2114 }
2115 }
2116
2117 if (m_useSingleMatchFilter) {
2118 // Eliminate matches where multiple query keypoints are matched to the
2119 // same train keypoint
2120 std::vector<cv::DMatch> mTmp;
2121 std::vector<cv::Point3f> trainPtsTmp;
2122 std::vector<cv::KeyPoint> queryKptsTmp;
2123
2124 std::map<int, int> mapOfTrainIdx;
2125 // Count the number of query points matched to the same train point
2126 for (std::vector<cv::DMatch>::const_iterator it = m.begin(); it != m.end(); ++it) {
2127 mapOfTrainIdx[it->trainIdx]++;
2128 }
2129
2130 // Keep matches with only one correspondence
2131 for (std::vector<cv::DMatch>::const_iterator it = m.begin(); it != m.end(); ++it) {
2132 if (mapOfTrainIdx[it->trainIdx] == 1) {
2133 mTmp.push_back(cv::DMatch((int)queryKptsTmp.size(), it->trainIdx, it->distance));
2134
2135 if (!m_trainPoints.empty()) {
2136 trainPtsTmp.push_back(m_trainPoints[(size_t)it->trainIdx]);
2137 }
2138 queryKptsTmp.push_back(queryKpts[(size_t)it->queryIdx]);
2139 }
2140 }
2141
2142 m_filteredMatches = mTmp;
2143 m_objectFilteredPoints = trainPtsTmp;
2144 m_queryFilteredKeyPoints = queryKptsTmp;
2145 } else {
2146 m_filteredMatches = m;
2147 m_objectFilteredPoints = trainPts;
2148 m_queryFilteredKeyPoints = queryKpts;
2149 }
2150}
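// Sketch of how the filtering strategies implemented above are selected,
// assuming the filter type is chosen through the constructor (enum values as
// used in filterMatches(); detector/descriptor/matcher names are illustrative):
//
//   // Keep a match only if d1/d2 < m_matchingRatioThreshold (0.85 by default)
//   vpKeyPoint kpRatio(vpKeyPoint::DETECTOR_ORB, vpKeyPoint::DESCRIPTOR_ORB,
//                      "BruteForce-Hamming", vpKeyPoint::ratioDistanceThreshold);
//   // Keep a match only if its distance <= m_matchingFactorThreshold * min_dist (2.0 by default)
//   vpKeyPoint kpFactor(vpKeyPoint::DETECTOR_ORB, vpKeyPoint::DESCRIPTOR_ORB,
//                       "BruteForce-Hamming", vpKeyPoint::constantFactorDistanceThreshold);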
2151
2159void vpKeyPoint::getObjectPoints(std::vector<cv::Point3f> &objectPoints) const
2160{
2161 objectPoints = m_objectFilteredPoints;
2162}
2163
2171void vpKeyPoint::getObjectPoints(std::vector<vpPoint> &objectPoints) const
2172{
2173 vpConvert::convertFromOpenCV(m_objectFilteredPoints, objectPoints);
2174}
2175
2184void vpKeyPoint::getQueryKeyPoints(std::vector<cv::KeyPoint> &keyPoints, bool matches) const
2185{
2186 if (matches) {
2187 keyPoints = m_queryFilteredKeyPoints;
2188 }
2189 else {
2190 keyPoints = m_queryKeyPoints;
2191 }
2192}
2193
2202void vpKeyPoint::getQueryKeyPoints(std::vector<vpImagePoint> &keyPoints, bool matches) const
2203{
2204 if (matches) {
2205 keyPoints = currentImagePointsList;
2206 }
2207 else {
2208 vpConvert::convertFromOpenCV(m_queryKeyPoints, keyPoints);
2209 }
2210}
2211
2217void vpKeyPoint::getTrainKeyPoints(std::vector<cv::KeyPoint> &keyPoints) const { keyPoints = m_trainKeyPoints; }
2218
2224void vpKeyPoint::getTrainKeyPoints(std::vector<vpImagePoint> &keyPoints) const { keyPoints = referenceImagePointsList; }
2225
2232void vpKeyPoint::getTrainPoints(std::vector<cv::Point3f> &points) const { points = m_trainPoints; }
2233
2240void vpKeyPoint::getTrainPoints(std::vector<vpPoint> &points) const { points = m_trainVpPoints; }
2241
2246void vpKeyPoint::init()
2247{
2248// Requires 2.4.0 <= OpenCV < 3.0.0
2249#if defined(VISP_HAVE_OPENCV_NONFREE) && (VISP_HAVE_OPENCV_VERSION >= 0x020400) && (VISP_HAVE_OPENCV_VERSION < 0x030000)
2250 // The following line must be called in order to use SIFT or SURF
2251 if (!cv::initModule_nonfree()) {
2252 std::cerr << "Cannot init the nonfree module; SIFT and SURF cannot be used." << std::endl;
2253 }
2254#endif
2255
2256 // Use k-nearest neighbors (knn) to retrieve the two best matches for a
2257 // keypoint, so this is only useful for the ratio distance threshold methods
2258 if (m_filterType == ratioDistanceThreshold || m_filterType == stdAndRatioDistanceThreshold) {
2259 m_useKnn = true;
2260 }
2261
2262 initDetectors(m_detectorNames);
2263 initExtractors(m_extractorNames);
2264 initMatcher(m_matcherName);
2265}
2266
2272void vpKeyPoint::initDetector(const std::string &detectorName)
2273{
2274#if (VISP_HAVE_OPENCV_VERSION < 0x030000)
2275 m_detectors[detectorName] = cv::FeatureDetector::create(detectorName);
2276
2277 if (m_detectors[detectorName] == NULL) {
2278 std::stringstream ss_msg;
2279 ss_msg << "Failed to initialize the detector: " << detectorName
2280 << " or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION << ".";
2281 throw vpException(vpException::fatalError, ss_msg.str());
2282 }
2283#else
2284 std::string detectorNameTmp = detectorName;
2285 std::string pyramid = "Pyramid";
2286 std::size_t pos = detectorName.find(pyramid);
2287 bool usePyramid = false;
2288 if (pos != std::string::npos) {
2289 detectorNameTmp = detectorName.substr(pos + pyramid.size());
2290 usePyramid = true;
2291 }
2292
2293 if (detectorNameTmp == "SIFT") {
2294#if (VISP_HAVE_OPENCV_VERSION >= 0x040500) // OpenCV >= 4.5.0
2295 cv::Ptr<cv::FeatureDetector> siftDetector = cv::SiftFeatureDetector::create();
2296 if (!usePyramid) {
2297 m_detectors[detectorNameTmp] = siftDetector;
2298 } else {
2299 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(siftDetector);
2300 }
2301#else
2302#if defined(VISP_HAVE_OPENCV_XFEATURES2D) || \
2303 (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2304 // SIFT is no longer patented since 09/03/2020
2305 cv::Ptr<cv::FeatureDetector> siftDetector;
2306 if (m_maxFeatures > 0) {
2307#if (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2308 siftDetector = cv::SIFT::create(m_maxFeatures);
2309#else
2310 siftDetector = cv::xfeatures2d::SIFT::create(m_maxFeatures);
2311#endif
2312 } else {
2313#if (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2314 siftDetector = cv::SIFT::create();
2315#else
2316 siftDetector = cv::xfeatures2d::SIFT::create();
2317#endif
2318 }
2319 if (!usePyramid) {
2320 m_detectors[detectorNameTmp] = siftDetector;
2321 } else {
2322 std::cerr << "You should not use SIFT with Pyramid feature detection!" << std::endl;
2323 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(siftDetector);
2324 }
2325#else
2326 std::stringstream ss_msg;
2327 ss_msg << "Failed to initialize the detector: SIFT. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2328 << " was not built with the xFeatures2d module.";
2329 throw vpException(vpException::fatalError, ss_msg.str());
2330#endif
2331#endif
2332 } else if (detectorNameTmp == "SURF") {
2333#ifdef VISP_HAVE_OPENCV_XFEATURES2D
2334 cv::Ptr<cv::FeatureDetector> surfDetector = cv::xfeatures2d::SURF::create();
2335 if (!usePyramid) {
2336 m_detectors[detectorNameTmp] = surfDetector;
2337 } else {
2338 std::cerr << "You should not use SURF with Pyramid feature detection!" << std::endl;
2339 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(surfDetector);
2340 }
2341#else
2342 std::stringstream ss_msg;
2343 ss_msg << "Failed to initialize the detector: SURF. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2344 << " was not built with the xFeatures2d module.";
2345 throw vpException(vpException::fatalError, ss_msg.str());
2346#endif
2347 } else if (detectorNameTmp == "FAST") {
2348 cv::Ptr<cv::FeatureDetector> fastDetector = cv::FastFeatureDetector::create();
2349 if (!usePyramid) {
2350 m_detectors[detectorNameTmp] = fastDetector;
2351 } else {
2352 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(fastDetector);
2353 }
2354 } else if (detectorNameTmp == "MSER") {
2355 cv::Ptr<cv::FeatureDetector> fastDetector = cv::MSER::create();
2356 if (!usePyramid) {
2357 m_detectors[detectorNameTmp] = fastDetector;
2358 } else {
2359 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(fastDetector);
2360 }
2361 } else if (detectorNameTmp == "ORB") {
2362 cv::Ptr<cv::FeatureDetector> orbDetector;
2363 if (m_maxFeatures > 0) {
2364 orbDetector = cv::ORB::create(m_maxFeatures);
2365 }
2366 else {
2367 orbDetector = cv::ORB::create();
2368 }
2369 if (!usePyramid) {
2370 m_detectors[detectorNameTmp] = orbDetector;
2371 } else {
2372 std::cerr << "You should not use ORB with Pyramid feature detection!" << std::endl;
2373 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(orbDetector);
2374 }
2375 } else if (detectorNameTmp == "BRISK") {
2376 cv::Ptr<cv::FeatureDetector> briskDetector = cv::BRISK::create();
2377 if (!usePyramid) {
2378 m_detectors[detectorNameTmp] = briskDetector;
2379 } else {
2380 std::cerr << "You should not use BRISK with Pyramid feature detection!" << std::endl;
2381 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(briskDetector);
2382 }
2383 } else if (detectorNameTmp == "KAZE") {
2384 cv::Ptr<cv::FeatureDetector> kazeDetector = cv::KAZE::create();
2385 if (!usePyramid) {
2386 m_detectors[detectorNameTmp] = kazeDetector;
2387 } else {
2388 std::cerr << "You should not use KAZE with Pyramid feature detection!" << std::endl;
2389 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(kazeDetector);
2390 }
2391 } else if (detectorNameTmp == "AKAZE") {
2392 cv::Ptr<cv::FeatureDetector> akazeDetector = cv::AKAZE::create();
2393 if (!usePyramid) {
2394 m_detectors[detectorNameTmp] = akazeDetector;
2395 } else {
2396 std::cerr << "You should not use AKAZE with Pyramid feature detection!" << std::endl;
2397 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(akazeDetector);
2398 }
2399 } else if (detectorNameTmp == "GFTT") {
2400 cv::Ptr<cv::FeatureDetector> gfttDetector = cv::GFTTDetector::create();
2401 if (!usePyramid) {
2402 m_detectors[detectorNameTmp] = gfttDetector;
2403 } else {
2404 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(gfttDetector);
2405 }
2406 } else if (detectorNameTmp == "SimpleBlob") {
2407 cv::Ptr<cv::FeatureDetector> simpleBlobDetector = cv::SimpleBlobDetector::create();
2408 if (!usePyramid) {
2409 m_detectors[detectorNameTmp] = simpleBlobDetector;
2410 } else {
2411 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(simpleBlobDetector);
2412 }
2413 } else if (detectorNameTmp == "STAR") {
2414#ifdef VISP_HAVE_OPENCV_XFEATURES2D
2415 cv::Ptr<cv::FeatureDetector> starDetector = cv::xfeatures2d::StarDetector::create();
2416 if (!usePyramid) {
2417 m_detectors[detectorNameTmp] = starDetector;
2418 } else {
2419 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(starDetector);
2420 }
2421#else
2422 std::stringstream ss_msg;
2423 ss_msg << "Failed to initialize the detector: STAR. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2424 << " was not built with the xFeatures2d module.";
2425 throw vpException(vpException::fatalError, ss_msg.str());
2426#endif
2427 } else if (detectorNameTmp == "AGAST") {
2428 cv::Ptr<cv::FeatureDetector> agastDetector = cv::AgastFeatureDetector::create();
2429 if (!usePyramid) {
2430 m_detectors[detectorNameTmp] = agastDetector;
2431 } else {
2432 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(agastDetector);
2433 }
2434 } else if (detectorNameTmp == "MSD") {
2435#if (VISP_HAVE_OPENCV_VERSION >= 0x030100)
2436#if defined(VISP_HAVE_OPENCV_XFEATURES2D)
2437 cv::Ptr<cv::FeatureDetector> msdDetector = cv::xfeatures2d::MSDDetector::create();
2438 if (!usePyramid) {
2439 m_detectors[detectorNameTmp] = msdDetector;
2440 } else {
2441 std::cerr << "You should not use MSD with Pyramid feature detection!" << std::endl;
2442 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(msdDetector);
2443 }
2444#else
2445 std::stringstream ss_msg;
2446 ss_msg << "Failed to initialize the detector: MSD. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2447 << " was not built with the xFeatures2d module.";
2448 throw vpException(vpException::fatalError, ss_msg.str());
2449#endif
2450#else
2451 std::stringstream ss_msg;
2452 ss_msg << "Feature " << detectorName << " is not available in OpenCV version: " << std::hex
2453 << VISP_HAVE_OPENCV_VERSION << " (requires at least OpenCV 3.1).";
 throw vpException(vpException::fatalError, ss_msg.str());
2454#endif
2455 } else {
2456 std::cerr << "The detector: " << detectorNameTmp << " is not available." << std::endl;
2457 }
2458
2459 bool detectorInitialized = false;
2460 if (!usePyramid) {
2461 // Check for non-null; empty() is used to avoid warning C4800: forcing value to bool 'true' or 'false' (performance warning)
2462 detectorInitialized = !m_detectors[detectorNameTmp].empty();
2463 } else {
2464 // Check for non-null; empty() is used to avoid warning C4800: forcing value to bool 'true' or 'false' (performance warning)
2465 detectorInitialized = !m_detectors[detectorName].empty();
2466 }
2467
2468 if (!detectorInitialized) {
2469 std::stringstream ss_msg;
2470 ss_msg << "Failed to initialize the detector: " << detectorNameTmp
2471 << " or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION << ".";
2472 throw vpException(vpException::fatalError, ss_msg.str());
2473 }
2474#endif
2475}
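// As parsed above, a detector name may carry the "Pyramid" prefix (e.g.
// "PyramidFAST") to wrap the underlying detector in a
// PyramidAdaptedFeatureDetector. A sketch, assuming the string-based
// setDetector() setter declared elsewhere in this class:
//
//   keypoint.setDetector("PyramidFAST"); // FAST run on an image pyramid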
2476
2483void vpKeyPoint::initDetectors(const std::vector<std::string> &detectorNames)
2484{
2485 for (std::vector<std::string>::const_iterator it = detectorNames.begin(); it != detectorNames.end(); ++it) {
2486 initDetector(*it);
2487 }
2488}
2489
2495void vpKeyPoint::initExtractor(const std::string &extractorName)
2496{
2497#if (VISP_HAVE_OPENCV_VERSION < 0x030000)
2498 m_extractors[extractorName] = cv::DescriptorExtractor::create(extractorName);
2499#else
2500 if (extractorName == "SIFT") {
2501#if (VISP_HAVE_OPENCV_VERSION >= 0x040500) // OpenCV >= 4.5.0
2502 m_extractors[extractorName] = cv::SIFT::create();
2503#else
2504#if defined(VISP_HAVE_OPENCV_XFEATURES2D) || \
2505 (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2506 // SIFT is no longer patented since 09/03/2020
2507#if (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2508 m_extractors[extractorName] = cv::SIFT::create();
2509#else
2510 m_extractors[extractorName] = cv::xfeatures2d::SIFT::create();
2511#endif
2512#else
2513 std::stringstream ss_msg;
2514 ss_msg << "Failed to initialize the extractor: SIFT. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2515 << " was not built with the xFeatures2d module.";
2516 throw vpException(vpException::fatalError, ss_msg.str());
2517#endif
2518#endif
2519 } else if (extractorName == "SURF") {
2520#ifdef VISP_HAVE_OPENCV_XFEATURES2D
2521 // Use extended set of SURF descriptors (128 instead of 64)
2522 m_extractors[extractorName] = cv::xfeatures2d::SURF::create(100, 4, 3, true);
2523#else
2524 std::stringstream ss_msg;
2525 ss_msg << "Failed to initialize the extractor: SURF. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2526 << " was not built with the xFeatures2d module.";
2527 throw vpException(vpException::fatalError, ss_msg.str());
2528#endif
2529 } else if (extractorName == "ORB") {
2530 m_extractors[extractorName] = cv::ORB::create();
2531 } else if (extractorName == "BRISK") {
2532 m_extractors[extractorName] = cv::BRISK::create();
2533 } else if (extractorName == "FREAK") {
2534#ifdef VISP_HAVE_OPENCV_XFEATURES2D
2535 m_extractors[extractorName] = cv::xfeatures2d::FREAK::create();
2536#else
2537 std::stringstream ss_msg;
2538 ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2539 << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2540 throw vpException(vpException::fatalError, ss_msg.str());
2541#endif
2542 } else if (extractorName == "BRIEF") {
2543#ifdef VISP_HAVE_OPENCV_XFEATURES2D
2544 m_extractors[extractorName] = cv::xfeatures2d::BriefDescriptorExtractor::create();
2545#else
2546 std::stringstream ss_msg;
2547 ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2548 << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2549 throw vpException(vpException::fatalError, ss_msg.str());
2550#endif
2551 } else if (extractorName == "KAZE") {
2552 m_extractors[extractorName] = cv::KAZE::create();
2553 } else if (extractorName == "AKAZE") {
2554 m_extractors[extractorName] = cv::AKAZE::create();
2555 } else if (extractorName == "DAISY") {
2556#ifdef VISP_HAVE_OPENCV_XFEATURES2D
2557 m_extractors[extractorName] = cv::xfeatures2d::DAISY::create();
2558#else
2559 std::stringstream ss_msg;
2560 ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2561 << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2562 throw vpException(vpException::fatalError, ss_msg.str());
2563#endif
2564 } else if (extractorName == "LATCH") {
2565#ifdef VISP_HAVE_OPENCV_XFEATURES2D
2566 m_extractors[extractorName] = cv::xfeatures2d::LATCH::create();
2567#else
2568 std::stringstream ss_msg;
2569 ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2570 << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2571 throw vpException(vpException::fatalError, ss_msg.str());
2572#endif
2573 } else if (extractorName == "LUCID") {
2574#ifdef VISP_HAVE_OPENCV_XFEATURES2D
2575 // m_extractors[extractorName] = cv::xfeatures2d::LUCID::create(1, 2);
2576 // Not possible currently, need a color image
2577 throw vpException(vpException::badValue, "Not possible currently as it needs a color image.");
2578#else
2579 std::stringstream ss_msg;
2580 ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2581 << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2582 throw vpException(vpException::fatalError, ss_msg.str());
2583#endif
2584 } else if (extractorName == "VGG") {
2585#if (VISP_HAVE_OPENCV_VERSION >= 0x030200)
2586#if defined(VISP_HAVE_OPENCV_XFEATURES2D)
2587 m_extractors[extractorName] = cv::xfeatures2d::VGG::create();
2588#else
2589 std::stringstream ss_msg;
2590 ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2591 << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2592 throw vpException(vpException::fatalError, ss_msg.str());
2593#endif
2594#else
2595 std::stringstream ss_msg;
2596 ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version is " << std::hex
2597 << VISP_HAVE_OPENCV_VERSION << " but at least OpenCV 3.2 is required.";
2598 throw vpException(vpException::fatalError, ss_msg.str());
2599#endif
2600 } else if (extractorName == "BoostDesc") {
2601#if (VISP_HAVE_OPENCV_VERSION >= 0x030200)
2602#if defined(VISP_HAVE_OPENCV_XFEATURES2D)
2603 m_extractors[extractorName] = cv::xfeatures2d::BoostDesc::create();
2604#else
2605 std::stringstream ss_msg;
2606 ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2607 << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2608 throw vpException(vpException::fatalError, ss_msg.str());
2609#endif
2610#else
2611 std::stringstream ss_msg;
2612 ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version is " << std::hex
2613 << VISP_HAVE_OPENCV_VERSION << " but at least OpenCV 3.2 is required.";
2614 throw vpException(vpException::fatalError, ss_msg.str());
2615#endif
2616 } else {
2617 std::cerr << "The extractor: " << extractorName << " is not available." << std::endl;
2618 }
2619#endif
2620
2621 if (!m_extractors[extractorName]) { //if null
2622 std::stringstream ss_msg;
2623 ss_msg << "Failed to initialize the extractor: " << extractorName
2624 << " or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION << ".";
2625 throw vpException(vpException::fatalError, ss_msg.str());
2626 }
2627
2628#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
2629 if (extractorName == "SURF") {
2630 // Use extended set of SURF descriptors (128 instead of 64)
2631 m_extractors[extractorName]->set("extended", 1);
2632 }
2633#endif
2634}
2635
2642void vpKeyPoint::initExtractors(const std::vector<std::string> &extractorNames)
2643{
2644 for (std::vector<std::string>::const_iterator it = extractorNames.begin(); it != extractorNames.end(); ++it) {
2645 initExtractor(*it);
2646 }
2647
2648 int descriptorType = CV_32F;
2649 bool firstIteration = true;
2650 for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator it = m_extractors.begin();
2651 it != m_extractors.end(); ++it) {
2652 if (firstIteration) {
2653 firstIteration = false;
2654 descriptorType = it->second->descriptorType();
2655 } else {
2656 if (descriptorType != it->second->descriptorType()) {
2657 throw vpException(vpException::fatalError, "All the descriptors must have the same type!");
2658 }
2659 }
2660 }
2661}
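// The type check above rejects heterogeneous descriptor configurations: ORB
// produces binary CV_8U descriptors while SIFT produces floating-point CV_32F
// ones, so initializing both together throws. A sketch of such a failing
// configuration (from inside the class):
//
//   std::vector<std::string> names;
//   names.push_back("ORB");  // CV_8U descriptor
//   names.push_back("SIFT"); // CV_32F descriptor
//   initExtractors(names);   // throws vpException::fatalError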
2662
2663void vpKeyPoint::initFeatureNames()
2664{
2665// Create the maps from enum values to string names
2666#if (VISP_HAVE_OPENCV_VERSION >= 0x020403)
2667 m_mapOfDetectorNames[DETECTOR_FAST] = "FAST";
2668 m_mapOfDetectorNames[DETECTOR_MSER] = "MSER";
2669 m_mapOfDetectorNames[DETECTOR_ORB] = "ORB";
2670 m_mapOfDetectorNames[DETECTOR_BRISK] = "BRISK";
2671 m_mapOfDetectorNames[DETECTOR_GFTT] = "GFTT";
2672 m_mapOfDetectorNames[DETECTOR_SimpleBlob] = "SimpleBlob";
2673#if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D))
2674 m_mapOfDetectorNames[DETECTOR_STAR] = "STAR";
2675#endif
2676#if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D) || \
2677 (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2678 m_mapOfDetectorNames[DETECTOR_SIFT] = "SIFT";
2679#endif
2680#if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)
2681 m_mapOfDetectorNames[DETECTOR_SURF] = "SURF";
2682#endif
2683#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2684 m_mapOfDetectorNames[DETECTOR_KAZE] = "KAZE";
2685 m_mapOfDetectorNames[DETECTOR_AKAZE] = "AKAZE";
2686 m_mapOfDetectorNames[DETECTOR_AGAST] = "AGAST";
2687#endif
2688#if (VISP_HAVE_OPENCV_VERSION >= 0x030100) && defined(VISP_HAVE_OPENCV_XFEATURES2D)
2689 m_mapOfDetectorNames[DETECTOR_MSD] = "MSD";
2690#endif
2691#endif
2692
2693#if (VISP_HAVE_OPENCV_VERSION >= 0x020403)
2694 m_mapOfDescriptorNames[DESCRIPTOR_ORB] = "ORB";
2695 m_mapOfDescriptorNames[DESCRIPTOR_BRISK] = "BRISK";
2696#if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D))
2697 m_mapOfDescriptorNames[DESCRIPTOR_FREAK] = "FREAK";
2698 m_mapOfDescriptorNames[DESCRIPTOR_BRIEF] = "BRIEF";
2699#endif
2700#if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D) || \
2701 (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2702 m_mapOfDescriptorNames[DESCRIPTOR_SIFT] = "SIFT";
2703#endif
2704#if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)
2705 m_mapOfDescriptorNames[DESCRIPTOR_SURF] = "SURF";
2706#endif
2707#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2708 m_mapOfDescriptorNames[DESCRIPTOR_KAZE] = "KAZE";
2709 m_mapOfDescriptorNames[DESCRIPTOR_AKAZE] = "AKAZE";
2710#if defined(VISP_HAVE_OPENCV_XFEATURES2D)
2711 m_mapOfDescriptorNames[DESCRIPTOR_DAISY] = "DAISY";
2712 m_mapOfDescriptorNames[DESCRIPTOR_LATCH] = "LATCH";
2713#endif
2714#endif
2715#if (VISP_HAVE_OPENCV_VERSION >= 0x030200) && defined(VISP_HAVE_OPENCV_XFEATURES2D)
2716 m_mapOfDescriptorNames[DESCRIPTOR_VGG] = "VGG";
2717 m_mapOfDescriptorNames[DESCRIPTOR_BoostDesc] = "BoostDesc";
2718#endif
2719#endif
2720}
2721
2727void vpKeyPoint::initMatcher(const std::string &matcherName)
2728{
2729 int descriptorType = CV_32F;
2730 bool firstIteration = true;
2731 for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator it = m_extractors.begin();
2732 it != m_extractors.end(); ++it) {
2733 if (firstIteration) {
2734 firstIteration = false;
2735 descriptorType = it->second->descriptorType();
2736 } else {
2737 if (descriptorType != it->second->descriptorType()) {
2738 throw vpException(vpException::fatalError, "All the descriptors must have the same type!");
2739 }
2740 }
2741 }
2742
2743 if (matcherName == "FlannBased") {
2744 if (m_extractors.empty()) {
2745 std::cout << "Warning: No extractor initialized; using floating-point "
2746 "values (CV_32F) "
2747 "for the descriptor type by default!"
2748 << std::endl;
2749 }
2750
2751 if (descriptorType == CV_8U) {
2752#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2753 m_matcher = cv::makePtr<cv::FlannBasedMatcher>(cv::makePtr<cv::flann::LshIndexParams>(12, 20, 2));
2754#else
2755 m_matcher = new cv::FlannBasedMatcher(new cv::flann::LshIndexParams(12, 20, 2));
2756#endif
2757 } else {
2758#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2759 m_matcher = cv::makePtr<cv::FlannBasedMatcher>(cv::makePtr<cv::flann::KDTreeIndexParams>());
2760#else
2761 m_matcher = new cv::FlannBasedMatcher(new cv::flann::KDTreeIndexParams());
2762#endif
2763 }
2764 } else {
2765 m_matcher = cv::DescriptorMatcher::create(matcherName);
2766 }
2767
2768#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
2769 if (m_matcher != NULL && !m_useKnn && matcherName == "BruteForce") {
2770 m_matcher->set("crossCheck", m_useBruteForceCrossCheck);
2771 }
2772#endif
2773
2774 if (!m_matcher) { //if null
2775 std::stringstream ss_msg;
2776 ss_msg << "Failed to initialize the matcher: " << matcherName
2777 << " or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION << ".";
2778 throw vpException(vpException::fatalError, ss_msg.str());
2779 }
2780}
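// With the "FlannBased" matcher above, binary descriptors (CV_8U, e.g. ORB or
// BRISK) are indexed with LSH while floating-point descriptors use a KD-tree.
// A sketch of the two typical pairings (constructor arguments are illustrative):
//
//   vpKeyPoint kpBinary(vpKeyPoint::DETECTOR_ORB, vpKeyPoint::DESCRIPTOR_ORB, "FlannBased");  // LSH index
//   vpKeyPoint kpFloat(vpKeyPoint::DETECTOR_SIFT, vpKeyPoint::DESCRIPTOR_SIFT, "FlannBased"); // KD-tree index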
2781
2790void vpKeyPoint::insertImageMatching(const vpImage<unsigned char> &IRef, const vpImage<unsigned char> &ICurrent,
2791 vpImage<unsigned char> &IMatching)
2792{
2793 vpImagePoint topLeftCorner(0, 0);
2794 IMatching.insert(IRef, topLeftCorner);
2795 topLeftCorner = vpImagePoint(0, IRef.getWidth());
2796 IMatching.insert(ICurrent, topLeftCorner);
2797}
2798
2807void vpKeyPoint::insertImageMatching(const vpImage<vpRGBa> &IRef, const vpImage<vpRGBa> &ICurrent,
2808 vpImage<vpRGBa> &IMatching)
2809{
2810 vpImagePoint topLeftCorner(0, 0);
2811 IMatching.insert(IRef, topLeftCorner);
2812 topLeftCorner = vpImagePoint(0, IRef.getWidth());
2813 IMatching.insert(ICurrent, topLeftCorner);
2814}
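// IMatching must be allocated by the caller before calling the two overloads
// above: for this side-by-side layout it needs at least the cumulated width of
// the two images. A minimal sketch, assuming both images have the same height:
//
//   vpImage<unsigned char> IMatching(IRef.getHeight(), IRef.getWidth() + ICurrent.getWidth());
//   keypoint.insertImageMatching(IRef, ICurrent, IMatching);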
2815
2823void vpKeyPoint::insertImageMatching(const vpImage<unsigned char> &ICurrent, vpImage<unsigned char> &IMatching)
2824{
2825 // Number of images in the training database + the current image in which
2826 // we want to detect the object
2827 int nbImg = (int)(m_mapOfImages.size() + 1);
2828
2829 if (m_mapOfImages.empty()) {
2830 std::cerr << "There is no training image loaded!" << std::endl;
2831 return;
2832 }
2833
2834 if (nbImg == 2) {
2835 // Only one training image, so display it side by side with the current image
2836 insertImageMatching(m_mapOfImages.begin()->second, ICurrent, IMatching);
2837 } else {
2838 // Multiple training images, display them as a mosaic image
2839 int nbImgSqrt = vpMath::round(std::sqrt((double)nbImg)); //(int) std::floor(std::sqrt((double) nbImg) + 0.5);
2840 int nbWidth = nbImgSqrt;
2841 int nbHeight = nbImgSqrt;
2842
2843 if (nbImgSqrt * nbImgSqrt < nbImg) {
2844 nbWidth++;
2845 }
2846
2847 unsigned int maxW = ICurrent.getWidth(), maxH = ICurrent.getHeight();
2848 for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2849 ++it) {
2850 if (maxW < it->second.getWidth()) {
2851 maxW = it->second.getWidth();
2852 }
2853
2854 if (maxH < it->second.getHeight()) {
2855 maxH = it->second.getHeight();
2856 }
2857 }
2858
2859 // Index of the current image in the grid, computed so that the image
2860 // lies in the center square of the mosaic grid
2861 int medianI = nbHeight / 2;
2862 int medianJ = nbWidth / 2;
2863 int medianIndex = medianI * nbWidth + medianJ;
2864
2865 int cpt = 0;
2866 for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2867 ++it, cpt++) {
2868 int local_cpt = cpt;
2869 if (cpt >= medianIndex) {
2870 // Shift by one the index of the training images that come after
2871 // the current image
2872 local_cpt++;
2873 }
2874 int indexI = local_cpt / nbWidth;
2875 int indexJ = local_cpt - (indexI * nbWidth);
2876 vpImagePoint topLeftCorner((int)maxH * indexI, (int)maxW * indexJ);
2877
2878 IMatching.insert(it->second, topLeftCorner);
2879 }
2880
2881 vpImagePoint topLeftCorner((int)maxH * medianI, (int)maxW * medianJ);
2882 IMatching.insert(ICurrent, topLeftCorner);
2883 }
2884}
2885
2893void vpKeyPoint::insertImageMatching(const vpImage<vpRGBa> &ICurrent, vpImage<vpRGBa> &IMatching)
2894{
2895 // Number of images in the training database + the current image in which
2896 // we want to detect the object
2897 int nbImg = (int)(m_mapOfImages.size() + 1);
2898
2899 if (m_mapOfImages.empty()) {
2900 std::cerr << "There is no training image loaded!" << std::endl;
2901 return;
2902 }
2903
2904 if (nbImg == 2) {
2905 // Only one training image, so display it side by side with the current image
2906 vpImage<vpRGBa> IRef;
2907 vpImageConvert::convert(m_mapOfImages.begin()->second, IRef);
2908 insertImageMatching(IRef, ICurrent, IMatching);
2909 } else {
2910 // Multiple training images, display them as a mosaic image
2911 int nbImgSqrt = vpMath::round(std::sqrt((double)nbImg)); //(int) std::floor(std::sqrt((double) nbImg) + 0.5);
2912 int nbWidth = nbImgSqrt;
2913 int nbHeight = nbImgSqrt;
2914
2915 if (nbImgSqrt * nbImgSqrt < nbImg) {
2916 nbWidth++;
2917 }
2918
2919 unsigned int maxW = ICurrent.getWidth(), maxH = ICurrent.getHeight();
2920 for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2921 ++it) {
2922 if (maxW < it->second.getWidth()) {
2923 maxW = it->second.getWidth();
2924 }
2925
2926 if (maxH < it->second.getHeight()) {
2927 maxH = it->second.getHeight();
2928 }
2929 }
2930
2931 // Index of the current image in the grid, computed so that the image
2932 // lies in the center square of the mosaic grid
2933 int medianI = nbHeight / 2;
2934 int medianJ = nbWidth / 2;
2935 int medianIndex = medianI * nbWidth + medianJ;
2936
2937 int cpt = 0;
2938 for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2939 ++it, cpt++) {
2940 int local_cpt = cpt;
2941 if (cpt >= medianIndex) {
2942 // Shift by one the index of the training images that come after
2943 // the current image
2944 local_cpt++;
2945 }
2946 int indexI = local_cpt / nbWidth;
2947 int indexJ = local_cpt - (indexI * nbWidth);
2948 vpImagePoint topLeftCorner((int)maxH * indexI, (int)maxW * indexJ);
2949
2950 vpImage<vpRGBa> IRef;
2951 vpImageConvert::convert(it->second, IRef);
2952 IMatching.insert(IRef, topLeftCorner);
2953 }
2954
2955 vpImagePoint topLeftCorner((int)maxH * medianI, (int)maxW * medianJ);
2956 IMatching.insert(ICurrent, topLeftCorner);
2957 }
2958}
2959
2965void vpKeyPoint::loadConfigFile(const std::string &configFile)
2966{
2967 vpXmlConfigParserKeyPoint xmlp;
2968
2969 try {
2970 // Reset detector and extractor
2971 m_detectorNames.clear();
2972 m_extractorNames.clear();
2973 m_detectors.clear();
2974 m_extractors.clear();
2975
2976 std::cout << " *********** Parsing XML configuration for vpKeyPoint "
2977 "************ "
2978 << std::endl;
2979 xmlp.parse(configFile);
2980
2981 m_detectorNames.push_back(xmlp.getDetectorName());
2982 m_extractorNames.push_back(xmlp.getExtractorName());
2983 m_matcherName = xmlp.getMatcherName();
2984
2985 switch (xmlp.getMatchingMethod()) {
2986 case vpXmlConfigParserKeyPoint::constantFactorDistanceThreshold:
2987 m_filterType = constantFactorDistanceThreshold;
2988 break;
2989
2990 case vpXmlConfigParserKeyPoint::stdDistanceThreshold:
2991 m_filterType = stdDistanceThreshold;
2992 break;
2993
2994 case vpXmlConfigParserKeyPoint::ratioDistanceThreshold:
2995 m_filterType = ratioDistanceThreshold;
2996 break;
2997
2998 case vpXmlConfigParserKeyPoint::stdAndRatioDistanceThreshold:
2999 m_filterType = stdAndRatioDistanceThreshold;
3000 break;
3001
3002 case vpXmlConfigParserKeyPoint::noFilterMatching:
3003 m_filterType = noFilterMatching;
3004 break;
3005
3006 default:
3007 break;
3008 }
3009
3010 m_matchingFactorThreshold = xmlp.getMatchingFactorThreshold();
3011 m_matchingRatioThreshold = xmlp.getMatchingRatioThreshold();
3012
3013 m_useRansacVVS = xmlp.getUseRansacVVSPoseEstimation();
3014 m_useConsensusPercentage = xmlp.getUseRansacConsensusPercentage();
3015 m_nbRansacIterations = xmlp.getNbRansacIterations();
3016 m_ransacReprojectionError = xmlp.getRansacReprojectionError();
3017 m_nbRansacMinInlierCount = xmlp.getNbRansacMinInlierCount();
3018 m_ransacThreshold = xmlp.getRansacThreshold();
3019 m_ransacConsensusPercentage = xmlp.getRansacConsensusPercentage();
3020
3021 if (m_filterType == ratioDistanceThreshold || m_filterType == stdAndRatioDistanceThreshold) {
3022 m_useKnn = true;
3023 } else {
3024 m_useKnn = false;
3025 }
3026
3027 init();
3028 } catch (...) {
3029 throw vpException(vpException::ioError, "Can't open XML file \"%s\"\n ", configFile.c_str());
3030 }
3031}
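// Usage sketch for the XML configuration path above, assuming a configuration
// file written for vpXmlConfigParserKeyPoint (the file name is illustrative):
//
//   vpKeyPoint keypoint(vpKeyPoint::DETECTOR_ORB, vpKeyPoint::DESCRIPTOR_ORB, "BruteForce-Hamming");
//   keypoint.loadConfigFile("detection-config.xml"); // replaces detector/extractor/matcher settings and re-inits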
3032
3041void vpKeyPoint::loadLearningData(const std::string &filename, bool binaryMode, bool append)
3042{
3043 int startClassId = 0;
3044 int startImageId = 0;
3045 if (!append) {
3046 m_trainKeyPoints.clear();
3047 m_trainPoints.clear();
3048 m_mapOfImageId.clear();
3049 m_mapOfImages.clear();
3050 } else {
3051 // In append mode, find the max index of the keypoint class ids
3052 for (std::map<int, int>::const_iterator it = m_mapOfImageId.begin(); it != m_mapOfImageId.end(); ++it) {
3053 if (startClassId < it->first) {
3054 startClassId = it->first;
3055 }
3056 }
3057
3058 // In append mode, find the max index of the image ids
3059 for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
3060 ++it) {
3061 if (startImageId < it->first) {
3062 startImageId = it->first;
3063 }
3064 }
3065 }
3066
3067 // Get parent directory
3068 std::string parent = vpIoTools::getParent(filename);
3069 if (!parent.empty()) {
3070 parent += "/";
3071 }
3072
3073 if (binaryMode) {
3074 std::ifstream file(filename.c_str(), std::ifstream::binary);
3075 if (!file.is_open()) {
3076 throw vpException(vpException::ioError, "Cannot open the file.");
3077 }
3078
3079 // Read info about training images
3080 int nbImgs = 0;
3081 vpIoTools::readBinaryValueLE(file, nbImgs);
3082
3083#if !defined(VISP_HAVE_MODULE_IO)
3084 if (nbImgs > 0) {
3085 std::cout << "Warning: The learning file contains image data that will "
3086 "not be loaded as visp_io module "
3087 "is not available !"
3088 << std::endl;
3089 }
3090#endif
3091
3092 for (int i = 0; i < nbImgs; i++) {
3093 // Read image_id
3094 int id = 0;
3095 vpIoTools::readBinaryValueLE(file, id);
3096
3097 int length = 0;
3098 vpIoTools::readBinaryValueLE(file, length);
3099 // Will contain the path to the training images
3100 char *path = new char[length + 1]; // char path[length + 1];
3101
3102 for (int cpt = 0; cpt < length; cpt++) {
3103 char c;
3104 file.read((char *)(&c), sizeof(c));
3105 path[cpt] = c;
3106 }
3107 path[length] = '\0';
3108
3109 vpImage<unsigned char> I;
3110#ifdef VISP_HAVE_MODULE_IO
3111 if (vpIoTools::isAbsolutePathname(std::string(path))) {
3112 vpImageIo::read(I, path);
3113 } else {
3114 vpImageIo::read(I, parent + path);
3115 }
3116
3117 // Add the image previously loaded only if VISP_HAVE_MODULE_IO
3118 m_mapOfImages[id + startImageId] = I;
3119#endif
3120
3121 // Delete path
3122 delete[] path;
3123 }
3124
3125 // Read whether 3D point information is saved or not
3126 int have3DInfoInt = 0;
3127 vpIoTools::readBinaryValueLE(file, have3DInfoInt);
3128 bool have3DInfo = have3DInfoInt != 0;
3129
3130 // Read the number of descriptors
3131 int nRows = 0;
3132 vpIoTools::readBinaryValueLE(file, nRows);
3133
3134 // Read the size of the descriptor
3135 int nCols = 0;
3136 vpIoTools::readBinaryValueLE(file, nCols);
3137
3138 // Read the type of the descriptor
3139 int descriptorType = 5; // CV_32F
3140 vpIoTools::readBinaryValueLE(file, descriptorType);
3141
3142 cv::Mat trainDescriptorsTmp = cv::Mat(nRows, nCols, descriptorType);
3143 for (int i = 0; i < nRows; i++) {
3144 // Read information about keyPoint
3145 float u, v, size, angle, response;
3146 int octave, class_id, image_id;
3147 vpIoTools::readBinaryValueLE(file, u);
3148 vpIoTools::readBinaryValueLE(file, v);
3149 vpIoTools::readBinaryValueLE(file, size);
3150 vpIoTools::readBinaryValueLE(file, angle);
3151 vpIoTools::readBinaryValueLE(file, response);
3152 vpIoTools::readBinaryValueLE(file, octave);
3153 vpIoTools::readBinaryValueLE(file, class_id);
3154 vpIoTools::readBinaryValueLE(file, image_id);
3155 cv::KeyPoint keyPoint(cv::Point2f(u, v), size, angle, response, octave, (class_id + startClassId));
3156 m_trainKeyPoints.push_back(keyPoint);
3157
3158 if (image_id != -1) {
3159#ifdef VISP_HAVE_MODULE_IO
3160 // No training images if image_id == -1
3161 m_mapOfImageId[m_trainKeyPoints.back().class_id] = image_id + startImageId;
3162#endif
3163 }
3164
3165 if (have3DInfo) {
3166 // Read oX, oY, oZ
3167 float oX, oY, oZ;
3168 vpIoTools::readBinaryValueLE(file, oX);
3169 vpIoTools::readBinaryValueLE(file, oY);
3170 vpIoTools::readBinaryValueLE(file, oZ);
3171 m_trainPoints.push_back(cv::Point3f(oX, oY, oZ));
3172 }
3173
3174 for (int j = 0; j < nCols; j++) {
3175 // Read the value of the descriptor
3176 switch (descriptorType) {
3177 case CV_8U: {
3178 unsigned char value;
3179 file.read((char *)(&value), sizeof(value));
3180 trainDescriptorsTmp.at<unsigned char>(i, j) = value;
3181 } break;
3182
3183 case CV_8S: {
3184 char value;
3185 file.read((char *)(&value), sizeof(value));
3186 trainDescriptorsTmp.at<char>(i, j) = value;
3187 } break;
3188
3189 case CV_16U: {
3190 unsigned short int value;
3191 vpIoTools::readBinaryValueLE(file, value);
3192 trainDescriptorsTmp.at<unsigned short int>(i, j) = value;
3193 } break;
3194
3195 case CV_16S: {
3196 short int value;
3197 vpIoTools::readBinaryValueLE(file, value);
3198 trainDescriptorsTmp.at<short int>(i, j) = value;
3199 } break;
3200
3201 case CV_32S: {
3202 int value;
3203 vpIoTools::readBinaryValueLE(file, value);
3204 trainDescriptorsTmp.at<int>(i, j) = value;
3205 } break;
3206
3207 case CV_32F: {
3208 float value;
3209 vpIoTools::readBinaryValueLE(file, value);
3210 trainDescriptorsTmp.at<float>(i, j) = value;
3211 } break;
3212
3213 case CV_64F: {
3214 double value;
3215 vpIoTools::readBinaryValueLE(file, value);
3216 trainDescriptorsTmp.at<double>(i, j) = value;
3217 } break;
3218
3219 default: {
3220 float value;
3221 vpIoTools::readBinaryValueLE(file, value);
3222 trainDescriptorsTmp.at<float>(i, j) = value;
3223 } break;
3224 }
3225 }
3226 }
3227
3228 if (!append || m_trainDescriptors.empty()) {
3229 trainDescriptorsTmp.copyTo(m_trainDescriptors);
3230 } else {
3231 cv::vconcat(m_trainDescriptors, trainDescriptorsTmp, m_trainDescriptors);
3232 }
3233
3234 file.close();
3235 } else {
3236 pugi::xml_document doc;
3237
3238 /* Parse the file and get the DOM */
3239 if (!doc.load_file(filename.c_str())) {
3240 throw vpException(vpException::ioError, "Error with file: %s", filename.c_str());
3241 }
3242
3243 pugi::xml_node root_element = doc.document_element();
3244
3245 int descriptorType = CV_32F; // float
3246 int nRows = 0, nCols = 0;
3247 int i = 0, j = 0;
3248
3249 cv::Mat trainDescriptorsTmp;
3250
3251 for (pugi::xml_node first_level_node = root_element.first_child(); first_level_node; first_level_node = first_level_node.next_sibling()) {
3252
3253 std::string name(first_level_node.name());
3254 if (first_level_node.type() == pugi::node_element && name == "TrainingImageInfo") {
3255
3256 for (pugi::xml_node image_info_node = first_level_node.first_child(); image_info_node; image_info_node = image_info_node.next_sibling()) {
3257 name = std::string(image_info_node.name());
3258
3259 if (name == "trainImg") {
3260 // Read image_id
3261 int id = image_info_node.attribute("image_id").as_int();
3262
3263 vpImage<unsigned char> I;
3264#ifdef VISP_HAVE_MODULE_IO
3265 std::string path(image_info_node.text().as_string());
3266 // Read path to the training images
3267 if (vpIoTools::isAbsolutePathname(std::string(path))) {
3268 vpImageIo::read(I, path);
3269 } else {
3270 vpImageIo::read(I, parent + path);
3271 }
3272
3273 // Add the image previously loaded only if VISP_HAVE_MODULE_IO
3274 m_mapOfImages[id + startImageId] = I;
3275#endif
3276 }
3277 }
3278 } else if (first_level_node.type() == pugi::node_element && name == "DescriptorsInfo") {
3279 for (pugi::xml_node descriptors_info_node = first_level_node.first_child(); descriptors_info_node;
3280 descriptors_info_node = descriptors_info_node.next_sibling()) {
3281 if (descriptors_info_node.type() == pugi::node_element) {
3282 name = std::string(descriptors_info_node.name());
3283
3284 if (name == "nrows") {
3285 nRows = descriptors_info_node.text().as_int();
3286 } else if (name == "ncols") {
3287 nCols = descriptors_info_node.text().as_int();
3288 } else if (name == "type") {
3289 descriptorType = descriptors_info_node.text().as_int();
3290 }
3291 }
3292 }
3293
3294 trainDescriptorsTmp = cv::Mat(nRows, nCols, descriptorType);
3295 } else if (first_level_node.type() == pugi::node_element && name == "DescriptorInfo") {
3296 double u = 0.0, v = 0.0, size = 0.0, angle = 0.0, response = 0.0;
3297 int octave = 0, class_id = 0, image_id = 0;
3298 double oX = 0.0, oY = 0.0, oZ = 0.0;
3299
3300 std::stringstream ss;
3301
3302 for (pugi::xml_node point_node = first_level_node.first_child(); point_node; point_node = point_node.next_sibling()) {
3303 if (point_node.type() == pugi::node_element) {
3304 name = std::string(point_node.name());
3305
3306 // Read information about keypoints
3307 if (name == "u") {
3308 u = point_node.text().as_double();
3309 } else if (name == "v") {
3310 v = point_node.text().as_double();
3311 } else if (name == "size") {
3312 size = point_node.text().as_double();
3313 } else if (name == "angle") {
3314 angle = point_node.text().as_double();
3315 } else if (name == "response") {
3316 response = point_node.text().as_double();
3317 } else if (name == "octave") {
3318 octave = point_node.text().as_int();
3319 } else if (name == "class_id") {
3320 class_id = point_node.text().as_int();
3321 cv::KeyPoint keyPoint(cv::Point2f((float)u, (float)v), (float)size, (float)angle, (float)response, octave,
3322 (class_id + startClassId));
3323 m_trainKeyPoints.push_back(keyPoint);
3324 } else if (name == "image_id") {
3325 image_id = point_node.text().as_int();
3326 if (image_id != -1) {
3327#ifdef VISP_HAVE_MODULE_IO
3328 // No training images if image_id == -1
3329 m_mapOfImageId[m_trainKeyPoints.back().class_id] = image_id + startImageId;
3330#endif
3331 }
3332 } else if (name == "oX") {
3333 oX = point_node.text().as_double();
3334 } else if (name == "oY") {
3335 oY = point_node.text().as_double();
3336 } else if (name == "oZ") {
3337 oZ = point_node.text().as_double();
3338 m_trainPoints.push_back(cv::Point3f((float)oX, (float)oY, (float)oZ));
3339 } else if (name == "desc") {
3340 j = 0;
3341
3342 for (pugi::xml_node descriptor_value_node = point_node.first_child(); descriptor_value_node;
3343 descriptor_value_node = descriptor_value_node.next_sibling()) {
3344
3345 if (descriptor_value_node.type() == pugi::node_element) {
3346 // Read descriptors values
3347 std::string parseStr(descriptor_value_node.text().as_string());
3348 ss.clear();
3349 ss.str(parseStr);
3350
3351 if (!ss.fail()) {
3352 switch (descriptorType) {
3353 case CV_8U: {
3354 // Parse the numeric value [0 ; 255] to an int
3355 int parseValue;
3356 ss >> parseValue;
3357 trainDescriptorsTmp.at<unsigned char>(i, j) = (unsigned char)parseValue;
3358 } break;
3359
3360 case CV_8S:
3361 // Parse the numeric value [-128 ; 127] to an int
3362 int parseValue;
3363 ss >> parseValue;
3364 trainDescriptorsTmp.at<char>(i, j) = (char)parseValue;
3365 break;
3366
3367 case CV_16U:
3368 ss >> trainDescriptorsTmp.at<unsigned short int>(i, j);
3369 break;
3370
3371 case CV_16S:
3372 ss >> trainDescriptorsTmp.at<short int>(i, j);
3373 break;
3374
3375 case CV_32S:
3376 ss >> trainDescriptorsTmp.at<int>(i, j);
3377 break;
3378
3379 case CV_32F:
3380 ss >> trainDescriptorsTmp.at<float>(i, j);
3381 break;
3382
3383 case CV_64F:
3384 ss >> trainDescriptorsTmp.at<double>(i, j);
3385 break;
3386
3387 default:
3388 ss >> trainDescriptorsTmp.at<float>(i, j);
3389 break;
3390 }
3391 } else {
3392 std::cerr << "Error when converting: " << ss.str() << std::endl;
3393 }
3394
3395 j++;
3396 }
3397 }
3398 }
3399 }
3400 }
3401 i++;
3402 }
3403 }
3404
3405 if (!append || m_trainDescriptors.empty()) {
3406 trainDescriptorsTmp.copyTo(m_trainDescriptors);
3407 } else {
3408 cv::vconcat(m_trainDescriptors, trainDescriptorsTmp, m_trainDescriptors);
3409 }
3410 }
3411
3412 // Convert OpenCV type to ViSP type for compatibility
3413 vpConvert::convertFromOpenCV(m_trainKeyPoints, referenceImagePointsList);
3414 vpConvert::convertFromOpenCV(this->m_trainPoints, m_trainVpPoints);
3415
3416 // Add train descriptors in matcher object
3417 m_matcher->clear();
3418 m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));
3419
3420 // Set _reference_computed to true as we load a learning file
3421 _reference_computed = true;
3422
3423 // Set m_currentImageId
3424 m_currentImageId = (int)m_mapOfImages.size();
3425}
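// Usage sketch for reloading a previously saved learning database, assuming it
// was written by the corresponding save routine of this class (the file name
// is illustrative):
//
//   keypoint.loadLearningData("object-db.bin", true);     // binary mode, replace the current reference
//   unsigned int nbMatches = keypoint.matchPoint(Iquery); // match a query image against the loaded data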
3426
3435void vpKeyPoint::match(const cv::Mat &trainDescriptors, const cv::Mat &queryDescriptors,
3436 std::vector<cv::DMatch> &matches, double &elapsedTime)
3437{
3438 double t = vpTime::measureTimeMs();
3439
3440 if (m_useKnn) {
3441 m_knnMatches.clear();
3442
3443 if (m_useMatchTrainToQuery) {
3444 std::vector<std::vector<cv::DMatch> > knnMatchesTmp;
3445
3446 // Match train descriptors to query descriptors
3447 cv::Ptr<cv::DescriptorMatcher> matcherTmp = m_matcher->clone(true);
3448 matcherTmp->knnMatch(trainDescriptors, queryDescriptors, knnMatchesTmp, 2);
3449
3450 for (std::vector<std::vector<cv::DMatch> >::const_iterator it1 = knnMatchesTmp.begin();
3451 it1 != knnMatchesTmp.end(); ++it1) {
3452 std::vector<cv::DMatch> tmp;
3453 for (std::vector<cv::DMatch>::const_iterator it2 = it1->begin(); it2 != it1->end(); ++it2) {
3454 tmp.push_back(cv::DMatch(it2->trainIdx, it2->queryIdx, it2->distance));
3455 }
3456 m_knnMatches.push_back(tmp);
3457 }
3458
3459 matches.resize(m_knnMatches.size());
3460 std::transform(m_knnMatches.begin(), m_knnMatches.end(), matches.begin(), knnToDMatch);
3461 } else {
3462 // Match query descriptors to train descriptors
3463 m_matcher->knnMatch(queryDescriptors, m_knnMatches, 2);
3464 matches.resize(m_knnMatches.size());
3465 std::transform(m_knnMatches.begin(), m_knnMatches.end(), matches.begin(), knnToDMatch);
3466 }
3467 } else {
3468 matches.clear();
3469
3470 if (m_useMatchTrainToQuery) {
3471 std::vector<cv::DMatch> matchesTmp;
3472 // Match train descriptors to query descriptors
3473 cv::Ptr<cv::DescriptorMatcher> matcherTmp = m_matcher->clone(true);
3474 matcherTmp->match(trainDescriptors, queryDescriptors, matchesTmp);
3475
3476 for (std::vector<cv::DMatch>::const_iterator it = matchesTmp.begin(); it != matchesTmp.end(); ++it) {
3477 matches.push_back(cv::DMatch(it->trainIdx, it->queryIdx, it->distance));
3478 }
3479 } else {
3480 // Match query descriptors to train descriptors
3481 m_matcher->match(queryDescriptors, matches);
3482 }
3483 }
3484 elapsedTime = vpTime::measureTimeMs() - t;
3485}
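// Whatever the value of m_useMatchTrainToQuery, the cv::DMatch elements
// returned above keep the usual OpenCV convention: queryIdx indexes the query
// descriptors and trainIdx the train descriptors. A sketch, assuming this
// overload is accessible to the caller:
//
//   std::vector<cv::DMatch> matches;
//   double elapsedTime;
//   keypoint.match(trainDescriptors, queryDescriptors, matches, elapsedTime);
//   // matches[i].queryIdx -> row of queryDescriptors, matches[i].trainIdx -> row of trainDescriptors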
3486
3494unsigned int vpKeyPoint::matchPoint(const vpImage<unsigned char> &I) { return matchPoint(I, vpRect()); }
3495
3503unsigned int vpKeyPoint::matchPoint(const vpImage<vpRGBa> &I_color) { return matchPoint(I_color, vpRect()); }
3504
3515unsigned int vpKeyPoint::matchPoint(const vpImage<unsigned char> &I, const vpImagePoint &iP, unsigned int height,
3516 unsigned int width)
3517{
3518 return matchPoint(I, vpRect(iP, width, height));
3519}
3520
3531unsigned int vpKeyPoint::matchPoint(const vpImage<vpRGBa> &I_color, const vpImagePoint &iP, unsigned int height,
3532 unsigned int width)
3533{
3534 return matchPoint(I_color, vpRect(iP, width, height));
3535}
3536
3545unsigned int vpKeyPoint::matchPoint(const vpImage<unsigned char> &I, const vpRect &rectangle)
3546{
3547 if (m_trainDescriptors.empty()) {
3548 std::cerr << "Reference is empty." << std::endl;
3549 if (!_reference_computed) {
3550 std::cerr << "Reference is not computed." << std::endl;
3551 }
3552 std::cerr << "Matching is not possible." << std::endl;
3553
3554 return 0;
3555 }
3556
3557 if (m_useAffineDetection) {
3558 std::vector<std::vector<cv::KeyPoint> > listOfQueryKeyPoints;
3559 std::vector<cv::Mat> listOfQueryDescriptors;
3560
3561 // Detect keypoints and extract descriptors on multiple images
3562 detectExtractAffine(I, listOfQueryKeyPoints, listOfQueryDescriptors);
3563
3564 // Flatten the different query keypoint lists
3565 m_queryKeyPoints.clear();
3566 for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfQueryKeyPoints.begin();
3567 it != listOfQueryKeyPoints.end(); ++it) {
3568 m_queryKeyPoints.insert(m_queryKeyPoints.end(), it->begin(), it->end());
3569 }
3570
3571 bool first = true;
3572 for (std::vector<cv::Mat>::const_iterator it = listOfQueryDescriptors.begin(); it != listOfQueryDescriptors.end();
3573 ++it) {
3574 if (first) {
3575 first = false;
3576 it->copyTo(m_queryDescriptors);
3577 } else {
3578 m_queryDescriptors.push_back(*it);
3579 }
3580 }
3581 } else {
3582 detect(I, m_queryKeyPoints, m_detectionTime, rectangle);
3583 extract(I, m_queryKeyPoints, m_queryDescriptors, m_extractionTime);
3584 }
3585
3586 return matchPoint(m_queryKeyPoints, m_queryDescriptors);
3587}
3588
3597unsigned int vpKeyPoint::matchPoint(const std::vector<cv::KeyPoint> &queryKeyPoints, const cv::Mat &queryDescriptors)
3598{
3599 m_queryKeyPoints = queryKeyPoints;
3600 m_queryDescriptors = queryDescriptors;
3601
3602 match(m_trainDescriptors, m_queryDescriptors, m_matches, m_matchingTime);
3603
3604 if (m_filterType != noFilterMatching) {
3605 m_queryFilteredKeyPoints.clear();
3606 m_objectFilteredPoints.clear();
3607 m_filteredMatches.clear();
3608
3609 filterMatches();
3610 } else {
3611 if (m_useMatchTrainToQuery) {
3612 // Add only query keypoints matched with a train keypoint
3613 m_queryFilteredKeyPoints.clear();
3614 m_filteredMatches.clear();
3615 for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3616 m_filteredMatches.push_back(cv::DMatch((int)m_queryFilteredKeyPoints.size(), it->trainIdx, it->distance));
3617 m_queryFilteredKeyPoints.push_back(m_queryKeyPoints[(size_t)it->queryIdx]);
3618 }
3619 } else {
3620 m_queryFilteredKeyPoints = m_queryKeyPoints;
3621 m_filteredMatches = m_matches;
3622 }
3623
3624 if (!m_trainPoints.empty()) {
3625 m_objectFilteredPoints.clear();
3626 // Add the 3D object points so that the same index in
3627 // m_queryFilteredKeyPoints and in m_objectFilteredPoints
3628 // refers to the same train object point
3629 for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3630 // m_matches is normally ordered following the queryDescriptor index
3631 m_objectFilteredPoints.push_back(m_trainPoints[(size_t)it->trainIdx]);
3632 }
3633 }
3634 }
3635
3636 // Convert OpenCV type to ViSP type for compatibility
3637 vpConvert::convertFromOpenCV(m_queryFilteredKeyPoints, currentImagePointsList);
3638 vpConvert::convertFromOpenCV(m_filteredMatches, matchedReferencePoints);
3639
3640 return static_cast<unsigned int>(m_filteredMatches.size());
3641}
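
// Usage sketch (illustrative, not part of the build, assuming OpenCV >= 3):
// keypoints/descriptors computed by an external pipeline can be matched
// against the reference without the internal detect/extract step. `keypoint`
// and the cv::Mat image `matImg` are assumed to exist.
#if 0
cv::Ptr<cv::ORB> orb = cv::ORB::create();
std::vector<cv::KeyPoint> queryKeyPoints;
cv::Mat queryDescriptors;
orb->detectAndCompute(matImg, cv::noArray(), queryKeyPoints, queryDescriptors);
unsigned int nbMatches = keypoint.matchPoint(queryKeyPoints, queryDescriptors);
#endif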
3642
3651unsigned int vpKeyPoint::matchPoint(const vpImage<vpRGBa> &I_color, const vpRect &rectangle)
3652{
3653 vpImageConvert::convert(I_color, m_I);
3654 return matchPoint(m_I, rectangle);
3655}
3656
3669 bool vpKeyPoint::matchPoint(const vpImage<unsigned char> &I, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
3670 bool (*func)(const vpHomogeneousMatrix &), const vpRect &rectangle)
3671{
3672 double error, elapsedTime;
3673 return matchPoint(I, cam, cMo, error, elapsedTime, func, rectangle);
3674}
3675
3688 bool vpKeyPoint::matchPoint(const vpImage<vpRGBa> &I_color, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
3689 bool (*func)(const vpHomogeneousMatrix &), const vpRect &rectangle)
3690{
3691 double error, elapsedTime;
3692 return matchPoint(I_color, cam, cMo, error, elapsedTime, func, rectangle);
3693}
3694
3710 bool vpKeyPoint::matchPoint(const vpImage<unsigned char> &I, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
3711 double &error, double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &),
3712 const vpRect &rectangle)
3713{
3714 // Check if we have training descriptors
3715 if (m_trainDescriptors.empty()) {
3716 std::cerr << "Reference is empty." << std::endl;
3717 if (!_reference_computed) {
3718 std::cerr << "Reference is not computed." << std::endl;
3719 }
3720 std::cerr << "Matching is not possible." << std::endl;
3721
3722 return false;
3723 }
3724
3725 if (m_useAffineDetection) {
3726 std::vector<std::vector<cv::KeyPoint> > listOfQueryKeyPoints;
3727 std::vector<cv::Mat> listOfQueryDescriptors;
3728
3729 // Detect keypoints and extract descriptors on multiple affine-transformed images
3730 detectExtractAffine(I, listOfQueryKeyPoints, listOfQueryDescriptors);
3731
3732 // Flatten the lists of keypoints detected on each affine view
3733 m_queryKeyPoints.clear();
3734 for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfQueryKeyPoints.begin();
3735 it != listOfQueryKeyPoints.end(); ++it) {
3736 m_queryKeyPoints.insert(m_queryKeyPoints.end(), it->begin(), it->end());
3737 }
3738
3739 bool first = true;
3740 for (std::vector<cv::Mat>::const_iterator it = listOfQueryDescriptors.begin(); it != listOfQueryDescriptors.end();
3741 ++it) {
3742 if (first) {
3743 first = false;
3744 it->copyTo(m_queryDescriptors);
3745 } else {
3746 m_queryDescriptors.push_back(*it);
3747 }
3748 }
3749 } else {
3750 detect(I, m_queryKeyPoints, m_detectionTime, rectangle);
3751 extract(I, m_queryKeyPoints, m_queryDescriptors, m_extractionTime);
3752 }
3753
3754 match(m_trainDescriptors, m_queryDescriptors, m_matches, m_matchingTime);
3755
3756 elapsedTime = m_detectionTime + m_extractionTime + m_matchingTime;
3757
3758 if (m_filterType != noFilterMatching) {
3759 m_queryFilteredKeyPoints.clear();
3760 m_objectFilteredPoints.clear();
3761 m_filteredMatches.clear();
3762
3763 filterMatches();
3764 } else {
3765 if (m_useMatchTrainToQuery) {
3766 // Add only the query keypoints matched with a train keypoint
3767 m_queryFilteredKeyPoints.clear();
3768 m_filteredMatches.clear();
3769 for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3770 m_filteredMatches.push_back(cv::DMatch((int)m_queryFilteredKeyPoints.size(), it->trainIdx, it->distance));
3771 m_queryFilteredKeyPoints.push_back(m_queryKeyPoints[(size_t)it->queryIdx]);
3772 }
3773 } else {
3774 m_queryFilteredKeyPoints = m_queryKeyPoints;
3775 m_filteredMatches = m_matches;
3776 }
3777
3778 if (!m_trainPoints.empty()) {
3779 m_objectFilteredPoints.clear();
3780 // Add the 3D object points so that the same index in
3781 // m_queryFilteredKeyPoints and in m_objectFilteredPoints
3782 // refers to the same train object point
3783 for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3784 // m_matches is normally ordered following the queryDescriptor index
3785 m_objectFilteredPoints.push_back(m_trainPoints[(size_t)it->trainIdx]);
3786 }
3787 }
3788 }
3789
3790 // Convert OpenCV type to ViSP type for compatibility
3791 vpConvert::convertFromOpenCV(m_queryFilteredKeyPoints, currentImagePointsList);
3792 vpConvert::convertFromOpenCV(m_filteredMatches, matchedReferencePoints);
3793
3794 // error = std::numeric_limits<double>::max(); // creates a compilation error
3795 // under Windows, where max is defined as a macro. To fix it we would have to add #undef max
3796 error = DBL_MAX;
3797 m_ransacInliers.clear();
3798 m_ransacOutliers.clear();
3799
3800 if (m_useRansacVVS) {
3801 std::vector<vpPoint> objectVpPoints(m_objectFilteredPoints.size());
3802 size_t cpt = 0;
3803 // Create a list of vpPoint with 2D coordinates (current keypoint
3804 // location) + 3D coordinates (world/object coordinates)
3805 for (std::vector<cv::Point3f>::const_iterator it = m_objectFilteredPoints.begin();
3806 it != m_objectFilteredPoints.end(); ++it, cpt++) {
3807 vpPoint pt;
3808 pt.setWorldCoordinates(it->x, it->y, it->z);
3809
3810 vpImagePoint imP(m_queryFilteredKeyPoints[cpt].pt.y, m_queryFilteredKeyPoints[cpt].pt.x);
3811
3812 double x = 0.0, y = 0.0;
3813 vpPixelMeterConversion::convertPoint(cam, imP, x, y);
3814 pt.set_x(x);
3815 pt.set_y(y);
3816
3817 objectVpPoints[cpt] = pt;
3818 }
3819
3820 std::vector<vpPoint> inliers;
3821 std::vector<unsigned int> inlierIndex;
3822
3823 bool res = computePose(objectVpPoints, cMo, inliers, inlierIndex, m_poseTime, func);
3824
3825 std::map<unsigned int, bool> mapOfInlierIndex;
3826 m_matchRansacKeyPointsToPoints.clear();
3827
3828 for (std::vector<unsigned int>::const_iterator it = inlierIndex.begin(); it != inlierIndex.end(); ++it) {
3829 m_matchRansacKeyPointsToPoints.push_back(std::pair<cv::KeyPoint, cv::Point3f>(
3830 m_queryFilteredKeyPoints[(size_t)(*it)], m_objectFilteredPoints[(size_t)(*it)]));
3831 mapOfInlierIndex[*it] = true;
3832 }
3833
3834 for (size_t i = 0; i < m_queryFilteredKeyPoints.size(); i++) {
3835 if (mapOfInlierIndex.find((unsigned int)i) == mapOfInlierIndex.end()) {
3836 m_ransacOutliers.push_back(vpImagePoint(m_queryFilteredKeyPoints[i].pt.y, m_queryFilteredKeyPoints[i].pt.x));
3837 }
3838 }
3839
3840 error = computePoseEstimationError(m_matchRansacKeyPointsToPoints, cam, cMo);
3841
3842 m_ransacInliers.resize(m_matchRansacKeyPointsToPoints.size());
3843 std::transform(m_matchRansacKeyPointsToPoints.begin(), m_matchRansacKeyPointsToPoints.end(),
3844 m_ransacInliers.begin(), matchRansacToVpImage);
3845
3846 elapsedTime += m_poseTime;
3847
3848 return res;
3849 } else {
3850 std::vector<cv::Point2f> imageFilteredPoints;
3851 cv::KeyPoint::convert(m_queryFilteredKeyPoints, imageFilteredPoints);
3852 std::vector<int> inlierIndex;
3853 bool res = computePose(imageFilteredPoints, m_objectFilteredPoints, cam, cMo, inlierIndex, m_poseTime);
3854
3855 std::map<int, bool> mapOfInlierIndex;
3856 m_matchRansacKeyPointsToPoints.clear();
3857
3858 for (std::vector<int>::const_iterator it = inlierIndex.begin(); it != inlierIndex.end(); ++it) {
3859 m_matchRansacKeyPointsToPoints.push_back(std::pair<cv::KeyPoint, cv::Point3f>(
3860 m_queryFilteredKeyPoints[(size_t)(*it)], m_objectFilteredPoints[(size_t)(*it)]));
3861 mapOfInlierIndex[*it] = true;
3862 }
3863
3864 for (size_t i = 0; i < m_queryFilteredKeyPoints.size(); i++) {
3865 if (mapOfInlierIndex.find((int)i) == mapOfInlierIndex.end()) {
3866 m_ransacOutliers.push_back(vpImagePoint(m_queryFilteredKeyPoints[i].pt.y, m_queryFilteredKeyPoints[i].pt.x));
3867 }
3868 }
3869
3870 error = computePoseEstimationError(m_matchRansacKeyPointsToPoints, cam, cMo);
3871
3872 m_ransacInliers.resize(m_matchRansacKeyPointsToPoints.size());
3873 std::transform(m_matchRansacKeyPointsToPoints.begin(), m_matchRansacKeyPointsToPoints.end(),
3874 m_ransacInliers.begin(), matchRansacToVpImage);
3875
3876 elapsedTime += m_poseTime;
3877
3878 return res;
3879 }
3880}
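
// Usage sketch (illustrative, not part of the build): matching followed by
// RANSAC pose estimation; requires a reference built with 3D information.
// The intrinsics below are illustrative values.
#if 0
vpCameraParameters cam(600.0, 600.0, 320.0, 240.0); // px, py, u0, v0
vpHomogeneousMatrix cMo;
double error = 0., elapsedTime = 0.;
if (keypoint.matchPoint(I, cam, cMo, error, elapsedTime)) {
  std::cout << "cMo:\n" << cMo << "\nresidual error: " << error << std::endl;
}
#endif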
3881
3897 bool vpKeyPoint::matchPoint(const vpImage<vpRGBa> &I_color, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
3898 double &error, double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &),
3899 const vpRect &rectangle)
3900{
3901 vpImageConvert::convert(I_color, m_I);
3902 return (matchPoint(m_I, cam, cMo, error, elapsedTime, func, rectangle));
3903}
3904
3905
3926 bool vpKeyPoint::matchPointAndDetect(const vpImage<unsigned char> &I, vpRect &boundingBox,
3927 vpImagePoint &centerOfGravity, const bool isPlanarObject,
3928 std::vector<vpImagePoint> *imPts1, std::vector<vpImagePoint> *imPts2,
3929 double *meanDescriptorDistance, double *detection_score, const vpRect &rectangle)
3930{
3931 if (imPts1 != NULL && imPts2 != NULL) {
3932 imPts1->clear();
3933 imPts2->clear();
3934 }
3935
3936 matchPoint(I, rectangle);
3937
3938 double meanDescriptorDistanceTmp = 0.0;
3939 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
3940 meanDescriptorDistanceTmp += (double)it->distance;
3941 }
3942
3943 meanDescriptorDistanceTmp /= (double)m_filteredMatches.size();
3944 double score = (double)m_filteredMatches.size() / meanDescriptorDistanceTmp;
3945
3946 if (meanDescriptorDistance != NULL) {
3947 *meanDescriptorDistance = meanDescriptorDistanceTmp;
3948 }
3949 if (detection_score != NULL) {
3950 *detection_score = score;
3951 }
3952
3953 if (m_filteredMatches.size() >= 4) {
3954 // Training / Reference 2D points
3955 std::vector<cv::Point2f> points1(m_filteredMatches.size());
3956 // Query / Current 2D points
3957 std::vector<cv::Point2f> points2(m_filteredMatches.size());
3958
3959 for (size_t i = 0; i < m_filteredMatches.size(); i++) {
3960 points1[i] = cv::Point2f(m_trainKeyPoints[(size_t)m_filteredMatches[i].trainIdx].pt);
3961 points2[i] = cv::Point2f(m_queryFilteredKeyPoints[(size_t)m_filteredMatches[i].queryIdx].pt);
3962 }
3963
3964 std::vector<vpImagePoint> inliers;
3965 if (isPlanarObject) {
3966#if (VISP_HAVE_OPENCV_VERSION < 0x030000)
3967 cv::Mat homographyMatrix = cv::findHomography(points1, points2, CV_RANSAC);
3968#else
3969 cv::Mat homographyMatrix = cv::findHomography(points1, points2, cv::RANSAC);
3970#endif
3971
3972 for (size_t i = 0; i < m_filteredMatches.size(); i++) {
3973 // Compute reprojection error
3974 cv::Mat realPoint = cv::Mat(3, 1, CV_64F);
3975 realPoint.at<double>(0, 0) = points1[i].x;
3976 realPoint.at<double>(1, 0) = points1[i].y;
3977 realPoint.at<double>(2, 0) = 1.0;
3978
3979 cv::Mat reprojectedPoint = homographyMatrix * realPoint;
3980 double err_x = (reprojectedPoint.at<double>(0, 0) / reprojectedPoint.at<double>(2, 0)) - points2[i].x;
3981 double err_y = (reprojectedPoint.at<double>(1, 0) / reprojectedPoint.at<double>(2, 0)) - points2[i].y;
3982 double reprojectionError = std::sqrt(err_x * err_x + err_y * err_y);
3983
3984 if (reprojectionError < 6.0) {
3985 inliers.push_back(vpImagePoint((double)points2[i].y, (double)points2[i].x));
3986 if (imPts1 != NULL) {
3987 imPts1->push_back(vpImagePoint((double)points1[i].y, (double)points1[i].x));
3988 }
3989
3990 if (imPts2 != NULL) {
3991 imPts2->push_back(vpImagePoint((double)points2[i].y, (double)points2[i].x));
3992 }
3993 }
3994 }
3995 } else if (m_filteredMatches.size() >= 8) {
3996 cv::Mat fundamentalInliers;
3997 cv::Mat fundamentalMatrix = cv::findFundamentalMat(points1, points2, cv::FM_RANSAC, 3, 0.99, fundamentalInliers);
3998
3999 for (size_t i = 0; i < (size_t)fundamentalInliers.rows; i++) {
4000 if (fundamentalInliers.at<uchar>((int)i, 0)) {
4001 inliers.push_back(vpImagePoint((double)points2[i].y, (double)points2[i].x));
4002
4003 if (imPts1 != NULL) {
4004 imPts1->push_back(vpImagePoint((double)points1[i].y, (double)points1[i].x));
4005 }
4006
4007 if (imPts2 != NULL) {
4008 imPts2->push_back(vpImagePoint((double)points2[i].y, (double)points2[i].x));
4009 }
4010 }
4011 }
4012 }
4013
4014 if (!inliers.empty()) {
4015 // Build a polygon with the list of inlier keypoints detected in the
4016 // current image to get the bounding box
4017 vpPolygon polygon(inliers);
4018 boundingBox = polygon.getBoundingBox();
4019
4020 // Compute the center of gravity
4021 double meanU = 0.0, meanV = 0.0;
4022 for (std::vector<vpImagePoint>::const_iterator it = inliers.begin(); it != inliers.end(); ++it) {
4023 meanU += it->get_u();
4024 meanV += it->get_v();
4025 }
4026
4027 meanU /= (double)inliers.size();
4028 meanV /= (double)inliers.size();
4029
4030 centerOfGravity.set_u(meanU);
4031 centerOfGravity.set_v(meanV);
4032 }
4033 } else {
4034 // Too few matches
4035 return false;
4036 }
4037
4038 if (m_detectionMethod == detectionThreshold) {
4039 return meanDescriptorDistanceTmp < m_detectionThreshold;
4040 } else {
4041 return score > m_detectionScore;
4042 }
4043}
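
// Usage sketch (illustrative, not part of the build): detect the learned
// object and recover its bounding box and center of gravity.
#if 0
vpRect boundingBox;
vpImagePoint cog;
if (keypoint.matchPointAndDetect(I, boundingBox, cog)) {
  std::cout << "object detected at " << cog << ", bbox: " << boundingBox.getWidth()
            << "x" << boundingBox.getHeight() << std::endl;
}
#endif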
4044
4064 bool vpKeyPoint::matchPointAndDetect(const vpImage<unsigned char> &I, const vpCameraParameters &cam,
4065 vpHomogeneousMatrix &cMo, double &error, double &elapsedTime, vpRect &boundingBox,
4066 vpImagePoint &centerOfGravity, bool (*func)(const vpHomogeneousMatrix &),
4067 const vpRect &rectangle)
4068{
4069 bool isMatchOk = matchPoint(I, cam, cMo, error, elapsedTime, func, rectangle);
4070 if (isMatchOk) {
4071 // Use the pose estimated to project the model points in the image
4072 vpPoint pt;
4073 vpImagePoint imPt;
4074 std::vector<vpImagePoint> modelImagePoints(m_trainVpPoints.size());
4075 size_t cpt = 0;
4076 for (std::vector<vpPoint>::const_iterator it = m_trainVpPoints.begin(); it != m_trainVpPoints.end(); ++it, cpt++) {
4077 pt = *it;
4078 pt.project(cMo);
4079 vpMeterPixelConversion::convertPoint(cam, pt.get_x(), pt.get_y(), imPt);
4080 modelImagePoints[cpt] = imPt;
4081 }
4082
4083 // Build a polygon with the list of model image points to get the bounding
4084 // box
4085 vpPolygon polygon(modelImagePoints);
4086 boundingBox = polygon.getBoundingBox();
4087
4088 // Compute the center of gravity of the current inlier keypoints
4089 double meanU = 0.0, meanV = 0.0;
4090 for (std::vector<vpImagePoint>::const_iterator it = m_ransacInliers.begin(); it != m_ransacInliers.end(); ++it) {
4091 meanU += it->get_u();
4092 meanV += it->get_v();
4093 }
4094
4095 meanU /= (double)m_ransacInliers.size();
4096 meanV /= (double)m_ransacInliers.size();
4097
4098 centerOfGravity.set_u(meanU);
4099 centerOfGravity.set_v(meanV);
4100 }
4101
4102 return isMatchOk;
4103}
4104
4119 void vpKeyPoint::detectExtractAffine(const vpImage<unsigned char> &I,
4120 std::vector<std::vector<cv::KeyPoint> > &listOfKeypoints,
4121 std::vector<cv::Mat> &listOfDescriptors,
4122 std::vector<vpImage<unsigned char> > *listOfAffineI)
4123{
4124#if 0
4125 cv::Mat img;
4126 vpImageConvert::convert(I, img);
4127 listOfKeypoints.clear();
4128 listOfDescriptors.clear();
4129
4130 for (int tl = 1; tl < 6; tl++) {
4131 double t = pow(2, 0.5 * tl);
4132 for (int phi = 0; phi < 180; phi += (int)(72.0 / t)) {
4133 std::vector<cv::KeyPoint> keypoints;
4134 cv::Mat descriptors;
4135
4136 cv::Mat timg, mask, Ai;
4137 img.copyTo(timg);
4138
4139 affineSkew(t, phi, timg, mask, Ai);
4140
4141
4142 if(listOfAffineI != NULL) {
4143 cv::Mat img_disp;
4144 bitwise_and(mask, timg, img_disp);
4145 vpImage<unsigned char> tI;
4146 vpImageConvert::convert(img_disp, tI);
4147 listOfAffineI->push_back(tI);
4148 }
4149#if 0
4150 cv::Mat img_disp;
4151 cv::bitwise_and(mask, timg, img_disp);
4152 cv::namedWindow( "Skew", cv::WINDOW_AUTOSIZE ); // Create a window for display.
4153 cv::imshow( "Skew", img_disp );
4154 cv::waitKey(0);
4155#endif
4156
4157 for(std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
4158 it != m_detectors.end(); ++it) {
4159 std::vector<cv::KeyPoint> kp;
4160 it->second->detect(timg, kp, mask);
4161 keypoints.insert(keypoints.end(), kp.begin(), kp.end());
4162 }
4163
4164 double elapsedTime;
4165 extract(timg, keypoints, descriptors, elapsedTime);
4166
4167 for(unsigned int i = 0; i < keypoints.size(); i++) {
4168 cv::Point3f kpt(keypoints[i].pt.x, keypoints[i].pt.y, 1.f);
4169 cv::Mat kpt_t = Ai * cv::Mat(kpt);
4170 keypoints[i].pt.x = kpt_t.at<float>(0, 0);
4171 keypoints[i].pt.y = kpt_t.at<float>(1, 0);
4172 }
4173
4174 listOfKeypoints.push_back(keypoints);
4175 listOfDescriptors.push_back(descriptors);
4176 }
4177 }
4178
4179#else
4180 cv::Mat img;
4181 vpImageConvert::convert(I, img);
4182
4183 // Create a vector for storing the affine skew parameters
4184 std::vector<std::pair<double, int> > listOfAffineParams;
4185 for (int tl = 1; tl < 6; tl++) {
4186 double t = pow(2, 0.5 * tl);
4187 for (int phi = 0; phi < 180; phi += (int)(72.0 / t)) {
4188 listOfAffineParams.push_back(std::pair<double, int>(t, phi));
4189 }
4190 }
4191
4192 listOfKeypoints.resize(listOfAffineParams.size());
4193 listOfDescriptors.resize(listOfAffineParams.size());
4194
4195 if (listOfAffineI != NULL) {
4196 listOfAffineI->resize(listOfAffineParams.size());
4197 }
4198
4199#ifdef VISP_HAVE_OPENMP
4200#pragma omp parallel for
4201#endif
4202 for (int cpt = 0; cpt < static_cast<int>(listOfAffineParams.size()); cpt++) {
4203 std::vector<cv::KeyPoint> keypoints;
4204 cv::Mat descriptors;
4205
4206 cv::Mat timg, mask, Ai;
4207 img.copyTo(timg);
4208
4209 affineSkew(listOfAffineParams[(size_t)cpt].first, listOfAffineParams[(size_t)cpt].second, timg, mask, Ai);
4210
4211 if (listOfAffineI != NULL) {
4212 cv::Mat img_disp;
4213 bitwise_and(mask, timg, img_disp);
4214 vpImage<unsigned char> tI;
4215 vpImageConvert::convert(img_disp, tI);
4216 (*listOfAffineI)[(size_t)cpt] = tI;
4217 }
4218
4219#if 0
4220 cv::Mat img_disp;
4221 cv::bitwise_and(mask, timg, img_disp);
4222 cv::namedWindow( "Skew", cv::WINDOW_AUTOSIZE ); // Create a window for display.
4223 cv::imshow( "Skew", img_disp );
4224 cv::waitKey(0);
4225#endif
4226
4227 for (std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
4228 it != m_detectors.end(); ++it) {
4229 std::vector<cv::KeyPoint> kp;
4230 it->second->detect(timg, kp, mask);
4231 keypoints.insert(keypoints.end(), kp.begin(), kp.end());
4232 }
4233
4234 double elapsedTime;
4235 extract(timg, keypoints, descriptors, elapsedTime);
4236
4237 for (size_t i = 0; i < keypoints.size(); i++) {
4238 cv::Point3f kpt(keypoints[i].pt.x, keypoints[i].pt.y, 1.f);
4239 cv::Mat kpt_t = Ai * cv::Mat(kpt);
4240 keypoints[i].pt.x = kpt_t.at<float>(0, 0);
4241 keypoints[i].pt.y = kpt_t.at<float>(1, 0);
4242 }
4243
4244 listOfKeypoints[(size_t)cpt] = keypoints;
4245 listOfDescriptors[(size_t)cpt] = descriptors;
4246 }
4247#endif
4248}
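
// Usage sketch (illustrative, not part of the build): the ASIFT-style affine
// simulation can be run directly; each element of the output lists corresponds
// to one simulated (tilt, rotation) view.
#if 0
std::vector<std::vector<cv::KeyPoint> > listOfKeypoints;
std::vector<cv::Mat> listOfDescriptors;
keypoint.detectExtractAffine(I, listOfKeypoints, listOfDescriptors);
std::cout << listOfKeypoints.size() << " affine views simulated" << std::endl;
#endif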
4249
4253 void vpKeyPoint::reset()
4254 {
4255 // vpBasicKeyPoint class
4256 referenceImagePointsList.clear();
4257 currentImagePointsList.clear();
4258 matchedReferencePoints.clear();
4259 _reference_computed = false;
4260
4261 m_computeCovariance = false;
4262 m_covarianceMatrix = vpMatrix();
4263 m_currentImageId = 0;
4264 m_detectionMethod = detectionScore;
4265 m_detectionScore = 0.15;
4266 m_detectionThreshold = 100.0;
4267 m_detectionTime = 0.0;
4268 m_detectorNames.clear();
4269 m_detectors.clear();
4270 m_extractionTime = 0.0;
4271 m_extractorNames.clear();
4272 m_extractors.clear();
4273 m_filteredMatches.clear();
4274 m_filterType = ratioDistanceThreshold;
4275 m_imageFormat = jpgImageFormat;
4276 m_knnMatches.clear();
4277 m_mapOfImageId.clear();
4278 m_mapOfImages.clear();
4279 m_matcher = cv::Ptr<cv::DescriptorMatcher>();
4280 m_matcherName = "BruteForce-Hamming";
4281 m_matches.clear();
4282 m_matchingFactorThreshold = 2.0;
4283 m_matchingRatioThreshold = 0.85;
4284 m_matchingTime = 0.0;
4285 m_matchRansacKeyPointsToPoints.clear();
4286 m_nbRansacIterations = 200;
4287 m_nbRansacMinInlierCount = 100;
4288 m_objectFilteredPoints.clear();
4289 m_poseTime = 0.0;
4290 m_queryDescriptors = cv::Mat();
4291 m_queryFilteredKeyPoints.clear();
4292 m_queryKeyPoints.clear();
4293 m_ransacConsensusPercentage = 20.0;
4294 m_ransacFilterFlag = vpPose::NO_FILTER;
4295 m_ransacInliers.clear();
4296 m_ransacOutliers.clear();
4297 m_ransacParallel = true;
4298 m_ransacParallelNbThreads = 0;
4299 m_ransacReprojectionError = 6.0;
4300 m_ransacThreshold = 0.01;
4301 m_trainDescriptors = cv::Mat();
4302 m_trainKeyPoints.clear();
4303 m_trainPoints.clear();
4304 m_trainVpPoints.clear();
4305 m_useAffineDetection = false;
4306#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
4307 m_useBruteForceCrossCheck = true;
4308#endif
4309 m_useConsensusPercentage = false;
4310 m_useKnn = true; // as m_filterType == ratioDistanceThreshold
4311 m_useMatchTrainToQuery = false;
4312 m_useRansacVVS = true;
4313 m_useSingleMatchFilter = true;
4314
4315 m_detectorNames.push_back("ORB");
4316 m_extractorNames.push_back("ORB");
4317
4318 init();
4319}
4320
4329void vpKeyPoint::saveLearningData(const std::string &filename, bool binaryMode, bool saveTrainingImages)
4330{
4331 std::string parent = vpIoTools::getParent(filename);
4332 if (!parent.empty()) {
4333 vpIoTools::makeDirectory(parent);
4334 }
4335
4336 std::map<int, std::string> mapOfImgPath;
4337 if (saveTrainingImages) {
4338#ifdef VISP_HAVE_MODULE_IO
4339 // Save the training image files in the same directory
4340 unsigned int cpt = 0;
4341
4342 for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
4343 ++it, cpt++) {
4344 if (cpt > 999) {
4345 throw vpException(vpException::fatalError, "The number of training images to save is too big !");
4346 }
4347
4348 std::stringstream ss;
4349 ss << "train_image_" << std::setfill('0') << std::setw(3) << cpt;
4350
4351 switch (m_imageFormat) {
4352 case jpgImageFormat:
4353 ss << ".jpg";
4354 break;
4355
4356 case pngImageFormat:
4357 ss << ".png";
4358 break;
4359
4360 case ppmImageFormat:
4361 ss << ".ppm";
4362 break;
4363
4364 case pgmImageFormat:
4365 ss << ".pgm";
4366 break;
4367
4368 default:
4369 ss << ".png";
4370 break;
4371 }
4372
4373 std::string imgFilename = ss.str();
4374 mapOfImgPath[it->first] = imgFilename;
4375 vpImageIo::write(it->second, parent + (!parent.empty() ? "/" : "") + imgFilename);
4376 }
4377#else
4378 std::cout << "Warning: in vpKeyPoint::saveLearningData() training images "
4379 "are not saved because "
4380 "visp_io module is not available !"
4381 << std::endl;
4382#endif
4383 }
4384
4385 bool have3DInfo = m_trainPoints.size() > 0;
4386 if (have3DInfo && m_trainPoints.size() != m_trainKeyPoints.size()) {
4387 throw vpException(vpException::fatalError, "List of keypoints and list of 3D points have different size !");
4388 }
4389
4390 if (binaryMode) {
4391 // Save the learning data into a little endian binary file.
4392 std::ofstream file(filename.c_str(), std::ofstream::binary);
4393 if (!file.is_open()) {
4394 throw vpException(vpException::ioError, "Cannot create the file.");
4395 }
4396
4397 // Write info about training images
4398 int nbImgs = (int)mapOfImgPath.size();
4399 vpIoTools::writeBinaryValueLE(file, nbImgs);
4400
4401#ifdef VISP_HAVE_MODULE_IO
4402 for (std::map<int, std::string>::const_iterator it = mapOfImgPath.begin(); it != mapOfImgPath.end(); ++it) {
4403 // Write image_id
4404 int id = it->first;
4405 vpIoTools::writeBinaryValueLE(file, id);
4406
4407 // Write image path
4408 std::string path = it->second;
4409 int length = (int)path.length();
4410 vpIoTools::writeBinaryValueLE(file, length);
4411
4412 for (int cpt = 0; cpt < length; cpt++) {
4413 file.write((char *)(&path[(size_t)cpt]), sizeof(path[(size_t)cpt]));
4414 }
4415 }
4416#endif
4417
4418 // Write if we have 3D point information
4419 int have3DInfoInt = have3DInfo ? 1 : 0;
4420 vpIoTools::writeBinaryValueLE(file, have3DInfoInt);
4421
4422 int nRows = m_trainDescriptors.rows, nCols = m_trainDescriptors.cols;
4423 int descriptorType = m_trainDescriptors.type();
4424
4425 // Write the number of descriptors
4426 vpIoTools::writeBinaryValueLE(file, nRows);
4427
4428 // Write the size of the descriptor
4429 vpIoTools::writeBinaryValueLE(file, nCols);
4430
4431 // Write the type of the descriptor
4432 vpIoTools::writeBinaryValueLE(file, descriptorType);
4433
4434 for (int i = 0; i < nRows; i++) {
4435 unsigned int i_ = (unsigned int)i;
4436 // Write u
4437 float u = m_trainKeyPoints[i_].pt.x;
4438 vpIoTools::writeBinaryValueLE(file, u);
4439
4440 // Write v
4441 float v = m_trainKeyPoints[i_].pt.y;
4442 vpIoTools::writeBinaryValueLE(file, v);
4443
4444 // Write size
4445 float size = m_trainKeyPoints[i_].size;
4446 vpIoTools::writeBinaryValueLE(file, size);
4447
4448 // Write angle
4449 float angle = m_trainKeyPoints[i_].angle;
4450 vpIoTools::writeBinaryValueLE(file, angle);
4451
4452 // Write response
4453 float response = m_trainKeyPoints[i_].response;
4454 vpIoTools::writeBinaryValueLE(file, response);
4455
4456 // Write octave
4457 int octave = m_trainKeyPoints[i_].octave;
4458 vpIoTools::writeBinaryValueLE(file, octave);
4459
4460 // Write class_id
4461 int class_id = m_trainKeyPoints[i_].class_id;
4462 vpIoTools::writeBinaryValueLE(file, class_id);
4463
4464// Write image_id
4465#ifdef VISP_HAVE_MODULE_IO
4466 std::map<int, int>::const_iterator it_findImgId = m_mapOfImageId.find(m_trainKeyPoints[i_].class_id);
4467 int image_id = (saveTrainingImages && it_findImgId != m_mapOfImageId.end()) ? it_findImgId->second : -1;
4468 vpIoTools::writeBinaryValueLE(file, image_id);
4469#else
4470 int image_id = -1;
4471 // file.write((char *)(&image_id), sizeof(image_id));
4472 vpIoTools::writeBinaryValueLE(file, image_id);
4473#endif
4474
4475 if (have3DInfo) {
4476 float oX = m_trainPoints[i_].x, oY = m_trainPoints[i_].y, oZ = m_trainPoints[i_].z;
4477 // Write oX
4478 vpIoTools::writeBinaryValueLE(file, oX);
4479
4480 // Write oY
4481 vpIoTools::writeBinaryValueLE(file, oY);
4482
4483 // Write oZ
4484 vpIoTools::writeBinaryValueLE(file, oZ);
4485 }
4486
4487 for (int j = 0; j < nCols; j++) {
4488 // Write the descriptor value
4489 switch (descriptorType) {
4490 case CV_8U:
4491 file.write((char *)(&m_trainDescriptors.at<unsigned char>(i, j)),
4492 sizeof(m_trainDescriptors.at<unsigned char>(i, j)));
4493 break;
4494
4495 case CV_8S:
4496 file.write((char *)(&m_trainDescriptors.at<char>(i, j)), sizeof(m_trainDescriptors.at<char>(i, j)));
4497 break;
4498
4499 case CV_16U:
4500 vpIoTools::writeBinaryValueLE(file, m_trainDescriptors.at<unsigned short int>(i, j));
4501 break;
4502
4503 case CV_16S:
4504 vpIoTools::writeBinaryValueLE(file, m_trainDescriptors.at<short int>(i, j));
4505 break;
4506
4507 case CV_32S:
4508 vpIoTools::writeBinaryValueLE(file, m_trainDescriptors.at<int>(i, j));
4509 break;
4510
4511 case CV_32F:
4512 vpIoTools::writeBinaryValueLE(file, m_trainDescriptors.at<float>(i, j));
4513 break;
4514
4515 case CV_64F:
4516 vpIoTools::writeBinaryValueLE(file, m_trainDescriptors.at<double>(i, j));
4517 break;
4518
4519 default:
4520 throw vpException(vpException::fatalError, "Problem with the data type of descriptors !");
4521 break;
4522 }
4523 }
4524 }
4525
4526 file.close();
4527 } else {
4528 pugi::xml_document doc;
4529 pugi::xml_node node = doc.append_child(pugi::node_declaration);
4530 node.append_attribute("version") = "1.0";
4531 node.append_attribute("encoding") = "UTF-8";
4532
4533 if (!doc) {
4534 throw vpException(vpException::ioError, "Error with file: ", filename.c_str());
4535 }
4536
4537 pugi::xml_node root_node = doc.append_child("LearningData");
4538
4539 // Write the training images info
4540 pugi::xml_node image_node = root_node.append_child("TrainingImageInfo");
4541
4542#ifdef VISP_HAVE_MODULE_IO
4543 for (std::map<int, std::string>::const_iterator it = mapOfImgPath.begin(); it != mapOfImgPath.end(); ++it) {
4544 pugi::xml_node image_info_node = image_node.append_child("trainImg");
4545 image_info_node.append_child(pugi::node_pcdata).set_value(it->second.c_str());
4546 std::stringstream ss;
4547 ss << it->first;
4548 image_info_node.append_attribute("image_id") = ss.str().c_str();
4549 }
4550#endif
4551
4552 // Write information about descriptors
4553 pugi::xml_node descriptors_info_node = root_node.append_child("DescriptorsInfo");
4554
4555 int nRows = m_trainDescriptors.rows, nCols = m_trainDescriptors.cols;
4556 int descriptorType = m_trainDescriptors.type();
4557
4558 // Write the number of rows
4559 descriptors_info_node.append_child("nrows").append_child(pugi::node_pcdata).text() = nRows;
4560
4561 // Write the number of cols
4562 descriptors_info_node.append_child("ncols").append_child(pugi::node_pcdata).text() = nCols;
4563
4564 // Write the descriptors type
4565 descriptors_info_node.append_child("type").append_child(pugi::node_pcdata).text() = descriptorType;
4566
4567 for (int i = 0; i < nRows; i++) {
4568 unsigned int i_ = (unsigned int)i;
4569 pugi::xml_node descriptor_node = root_node.append_child("DescriptorInfo");
4570
4571 descriptor_node.append_child("u").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].pt.x;
4572 descriptor_node.append_child("v").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].pt.y;
4573 descriptor_node.append_child("size").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].size;
4574 descriptor_node.append_child("angle").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].angle;
4575 descriptor_node.append_child("response").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].response;
4576 descriptor_node.append_child("octave").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].octave;
4577 descriptor_node.append_child("class_id").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].class_id;
4578
4579#ifdef VISP_HAVE_MODULE_IO
4580 std::map<int, int>::const_iterator it_findImgId = m_mapOfImageId.find(m_trainKeyPoints[i_].class_id);
4581 descriptor_node.append_child("image_id").append_child(pugi::node_pcdata).text() =
4582 ((saveTrainingImages && it_findImgId != m_mapOfImageId.end()) ? it_findImgId->second : -1);
4583#else
4584 descriptor_node.append_child("image_id").append_child(pugi::node_pcdata).text() = -1;
4585#endif
4586
4587 if (have3DInfo) {
4588 descriptor_node.append_child("oX").append_child(pugi::node_pcdata).text() = m_trainPoints[i_].x;
4589 descriptor_node.append_child("oY").append_child(pugi::node_pcdata).text() = m_trainPoints[i_].y;
4590 descriptor_node.append_child("oZ").append_child(pugi::node_pcdata).text() = m_trainPoints[i_].z;
4591 }
4592
4593 pugi::xml_node desc_node = descriptor_node.append_child("desc");
4594
4595 for (int j = 0; j < nCols; j++) {
4596 switch (descriptorType) {
4597 case CV_8U: {
4598 // Promote an unsigned char to an int
4599 // val_tmp holds the numeric value that will be written
4600 // We save the value in numeric form, otherwise the xml library will not be
4601 // able to parse it. A better solution might be possible
4602 int val_tmp = m_trainDescriptors.at<unsigned char>(i, j);
4603 desc_node.append_child("val").append_child(pugi::node_pcdata).text() = val_tmp;
4604 } break;
4605
4606 case CV_8S: {
4607 // Promote a char to an int
4608 // val_tmp holds the numeric value that will be written
4609 // We save the value in numeric form, otherwise the xml library will not be
4610 // able to parse it. A better solution might be possible
4611 int val_tmp = m_trainDescriptors.at<char>(i, j);
4612 desc_node.append_child("val").append_child(pugi::node_pcdata).text() = val_tmp;
4613 } break;
4614
4615 case CV_16U:
4616 desc_node.append_child("val").append_child(pugi::node_pcdata).text() =
4617 m_trainDescriptors.at<unsigned short int>(i, j);
4618 break;
4619
4620 case CV_16S:
4621 desc_node.append_child("val").append_child(pugi::node_pcdata).text() =
4622 m_trainDescriptors.at<short int>(i, j);
4623 break;
4624
4625 case CV_32S:
4626 desc_node.append_child("val").append_child(pugi::node_pcdata).text() =
4627 m_trainDescriptors.at<int>(i, j);
4628 break;
4629
4630 case CV_32F:
4631 desc_node.append_child("val").append_child(pugi::node_pcdata).text() =
4632 m_trainDescriptors.at<float>(i, j);
4633 break;
4634
4635 case CV_64F:
4636 desc_node.append_child("val").append_child(pugi::node_pcdata).text() =
4637 m_trainDescriptors.at<double>(i, j);
4638 break;
4639
4640 default:
4641 throw vpException(vpException::fatalError, "Problem with the data type of descriptors !");
4642 break;
4643 }
4644 }
4645 }
4646
4647 doc.save_file(filename.c_str(), PUGIXML_TEXT(" "), pugi::format_default, pugi::encoding_utf8);
4648 }
4649}
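
// Usage sketch (illustrative, not part of the build): persist the learning
// data (keypoints, descriptors, optional 3D points and training images) and
// reload it later to skip the training stage. The file name is a placeholder.
#if 0
keypoint.saveLearningData("learning_data.bin", true /*binaryMode*/, true /*saveTrainingImages*/);
// ... later, possibly in another process:
vpKeyPoint keypoint2;
keypoint2.loadLearningData("learning_data.bin", true /*binaryMode*/);
#endif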
4650
4651#if defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x030000)
4652#ifndef DOXYGEN_SHOULD_SKIP_THIS
4653// From OpenCV 2.4.11 source code.
4654struct KeypointResponseGreaterThanThreshold {
4655 KeypointResponseGreaterThanThreshold(float _value) : value(_value) {}
4656 inline bool operator()(const cv::KeyPoint &kpt) const { return kpt.response >= value; }
4657 float value;
4658};
4659
4660struct KeypointResponseGreater {
4661 inline bool operator()(const cv::KeyPoint &kp1, const cv::KeyPoint &kp2) const { return kp1.response > kp2.response; }
4662};
4663
4664// takes keypoints and culls them by the response
4665void vpKeyPoint::KeyPointsFilter::retainBest(std::vector<cv::KeyPoint> &keypoints, int n_points)
4666{
4667 // this is only necessary if the keypoints size is greater than the number
4668 // of desired points.
4669 if (n_points >= 0 && keypoints.size() > (size_t)n_points) {
4670 if (n_points == 0) {
4671 keypoints.clear();
4672 return;
4673 }
4674 // first use nth element to partition the keypoints into the best and
4675 // worst.
4676 std::nth_element(keypoints.begin(), keypoints.begin() + n_points, keypoints.end(), KeypointResponseGreater());
4677 // this is the boundary response, and in the case of FAST may be ambiguous
4678 float ambiguous_response = keypoints[(size_t)(n_points - 1)].response;
4679 // use std::partition to grab all of the keypoints with the boundary
4680 // response.
4681 std::vector<cv::KeyPoint>::const_iterator new_end = std::partition(
4682 keypoints.begin() + n_points, keypoints.end(), KeypointResponseGreaterThanThreshold(ambiguous_response));
4683 // resize the keypoints, given this new end point. nth_element and
4684 // partition reordered the points in place
4685 keypoints.resize((size_t)(new_end - keypoints.begin()));
4686 }
4687}
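
// Usage sketch (illustrative, not part of the build): keep only the n
// strongest keypoints by response; ties at the boundary response are all kept.
#if 0
std::vector<cv::KeyPoint> keypoints; // filled by some detector beforehand
vpKeyPoint::KeyPointsFilter::retainBest(keypoints, 500);
#endif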
4688
4689struct RoiPredicate {
4690 RoiPredicate(const cv::Rect &_r) : r(_r) {}
4691
4692 bool operator()(const cv::KeyPoint &keyPt) const { return !r.contains(keyPt.pt); }
4693
4694 cv::Rect r;
4695};
4696
4697void vpKeyPoint::KeyPointsFilter::runByImageBorder(std::vector<cv::KeyPoint> &keypoints, cv::Size imageSize,
4698 int borderSize)
4699{
4700 if (borderSize > 0) {
4701 if (imageSize.height <= borderSize * 2 || imageSize.width <= borderSize * 2)
4702 keypoints.clear();
4703 else
4704 keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(),
4705 RoiPredicate(cv::Rect(
4706 cv::Point(borderSize, borderSize),
4707 cv::Point(imageSize.width - borderSize, imageSize.height - borderSize)))),
4708 keypoints.end());
4709 }
4710}
4711
4712struct SizePredicate {
4713 SizePredicate(float _minSize, float _maxSize) : minSize(_minSize), maxSize(_maxSize) {}
4714
4715 bool operator()(const cv::KeyPoint &keyPt) const
4716 {
4717 float size = keyPt.size;
4718 return (size < minSize) || (size > maxSize);
4719 }
4720
4721 float minSize, maxSize;
4722};
4723
4724void vpKeyPoint::KeyPointsFilter::runByKeypointSize(std::vector<cv::KeyPoint> &keypoints, float minSize, float maxSize)
4725{
4726 CV_Assert(minSize >= 0);
4727 CV_Assert(maxSize >= 0);
4728 CV_Assert(minSize <= maxSize);
4729
4730 keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(), SizePredicate(minSize, maxSize)), keypoints.end());
4731}
4732
4733class MaskPredicate
4734{
4735public:
4736 MaskPredicate(const cv::Mat &_mask) : mask(_mask) {}
4737 bool operator()(const cv::KeyPoint &key_pt) const
4738 {
4739 return mask.at<uchar>((int)(key_pt.pt.y + 0.5f), (int)(key_pt.pt.x + 0.5f)) == 0;
4740 }
4741
4742private:
4743 const cv::Mat mask;
4744};
4745
4746void vpKeyPoint::KeyPointsFilter::runByPixelsMask(std::vector<cv::KeyPoint> &keypoints, const cv::Mat &mask)
4747{
4748 if (mask.empty())
4749 return;
4750
4751 keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(), MaskPredicate(mask)), keypoints.end());
4752}
4753
4754struct KeyPoint_LessThan {
4755 KeyPoint_LessThan(const std::vector<cv::KeyPoint> &_kp) : kp(&_kp) {}
4756 bool operator()(/*int i, int j*/ size_t i, size_t j) const
4757 {
4758 const cv::KeyPoint &kp1 = (*kp)[/*(size_t)*/ i];
4759 const cv::KeyPoint &kp2 = (*kp)[/*(size_t)*/ j];
4760 if (!vpMath::equal(kp1.pt.x, kp2.pt.x,
4761 std::numeric_limits<float>::epsilon())) { // if (kp1.pt.x !=
4762 // kp2.pt.x) {
4763 return kp1.pt.x < kp2.pt.x;
4764 }
4765
4766 if (!vpMath::equal(kp1.pt.y, kp2.pt.y,
4767 std::numeric_limits<float>::epsilon())) { // if (kp1.pt.y !=
4768 // kp2.pt.y) {
4769 return kp1.pt.y < kp2.pt.y;
4770 }
4771
4772 if (!vpMath::equal(kp1.size, kp2.size,
4773 std::numeric_limits<float>::epsilon())) { // if (kp1.size !=
4774 // kp2.size) {
4775 return kp1.size > kp2.size;
4776 }
4777
4778 if (!vpMath::equal(kp1.angle, kp2.angle,
4779 std::numeric_limits<float>::epsilon())) { // if (kp1.angle !=
4780 // kp2.angle) {
4781 return kp1.angle < kp2.angle;
4782 }
4783
4784 if (!vpMath::equal(kp1.response, kp2.response,
4785 std::numeric_limits<float>::epsilon())) { // if (kp1.response !=
4786 // kp2.response) {
4787 return kp1.response > kp2.response;
4788 }
4789
4790 if (kp1.octave != kp2.octave) {
4791 return kp1.octave > kp2.octave;
4792 }
4793
4794 if (kp1.class_id != kp2.class_id) {
4795 return kp1.class_id > kp2.class_id;
4796 }
4797
4798 return i < j;
4799 }
4800 const std::vector<cv::KeyPoint> *kp;
4801};
4802
4803void vpKeyPoint::KeyPointsFilter::removeDuplicated(std::vector<cv::KeyPoint> &keypoints)
4804{
4805 size_t i, j, n = keypoints.size();
4806 std::vector<size_t> kpidx(n);
4807 std::vector<uchar> mask(n, (uchar)1);
4808
4809 for (i = 0; i < n; i++) {
4810 kpidx[i] = i;
4811 }
4812 std::sort(kpidx.begin(), kpidx.end(), KeyPoint_LessThan(keypoints));
4813 for (i = 1, j = 0; i < n; i++) {
4814 cv::KeyPoint &kp1 = keypoints[kpidx[i]];
4815 cv::KeyPoint &kp2 = keypoints[kpidx[j]];
4816 // if (kp1.pt.x != kp2.pt.x || kp1.pt.y != kp2.pt.y || kp1.size !=
4817 // kp2.size || kp1.angle != kp2.angle) {
4818 if (!vpMath::equal(kp1.pt.x, kp2.pt.x, std::numeric_limits<float>::epsilon()) ||
4819 !vpMath::equal(kp1.pt.y, kp2.pt.y, std::numeric_limits<float>::epsilon()) ||
4820 !vpMath::equal(kp1.size, kp2.size, std::numeric_limits<float>::epsilon()) ||
4821 !vpMath::equal(kp1.angle, kp2.angle, std::numeric_limits<float>::epsilon())) {
4822 j = i;
4823 } else {
4824 mask[kpidx[i]] = 0;
4825 }
4826 }
4827
4828 for (i = j = 0; i < n; i++) {
4829 if (mask[i]) {
4830 if (i != j) {
4831 keypoints[j] = keypoints[i];
4832 }
4833 j++;
4834 }
4835 }
4836 keypoints.resize(j);
4837}
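
// Usage sketch (illustrative, not part of the build): after concatenating the
// output of several detectors, drop keypoints that coincide in position, size
// and angle up to float epsilon.
#if 0
std::vector<cv::KeyPoint> keypoints; // concatenation of several detect() outputs
vpKeyPoint::KeyPointsFilter::removeDuplicated(keypoints);
#endif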
4838
4839/*
4840 * PyramidAdaptedFeatureDetector
4841 */
4842vpKeyPoint::PyramidAdaptedFeatureDetector::PyramidAdaptedFeatureDetector(const cv::Ptr<cv::FeatureDetector> &_detector,
4843 int _maxLevel)
4844 : detector(_detector), maxLevel(_maxLevel)
4845{
4846}
4847
4848bool vpKeyPoint::PyramidAdaptedFeatureDetector::empty() const
4849{
4850 return detector.empty() || (cv::FeatureDetector *)detector->empty();
4851}
4852
4853void vpKeyPoint::PyramidAdaptedFeatureDetector::detect(cv::InputArray image,
4854 CV_OUT std::vector<cv::KeyPoint> &keypoints, cv::InputArray mask)
4855{
4856 detectImpl(image.getMat(), keypoints, mask.getMat());
4857}
4858
4859void vpKeyPoint::PyramidAdaptedFeatureDetector::detectImpl(const cv::Mat &image, std::vector<cv::KeyPoint> &keypoints,
4860 const cv::Mat &mask) const
4861{
4862 cv::Mat src = image;
4863 cv::Mat src_mask = mask;
4864
4865 cv::Mat dilated_mask;
4866 if (!mask.empty()) {
4867 cv::dilate(mask, dilated_mask, cv::Mat());
4868 cv::Mat mask255(mask.size(), CV_8UC1, cv::Scalar(0));
4869 mask255.setTo(cv::Scalar(255), dilated_mask != 0);
4870 dilated_mask = mask255;
4871 }
4872
4873 for (int l = 0, multiplier = 1; l <= maxLevel; ++l, multiplier *= 2) {
4874 // Detect on current level of the pyramid
4875 std::vector<cv::KeyPoint> new_pts;
4876 detector->detect(src, new_pts, src_mask);
4877 std::vector<cv::KeyPoint>::iterator it = new_pts.begin(), end = new_pts.end();
4878 for (; it != end; ++it) {
4879 it->pt.x *= multiplier;
4880 it->pt.y *= multiplier;
4881 it->size *= multiplier;
4882 it->octave = l;
4883 }
4884 keypoints.insert(keypoints.end(), new_pts.begin(), new_pts.end());
4885
4886 // Downsample
4887 if (l < maxLevel) {
4888 cv::Mat dst;
4889 pyrDown(src, dst);
4890 src = dst;
4891
4892 if (!mask.empty())
4893 resize(dilated_mask, src_mask, src.size(), 0, 0, CV_INTER_AREA);
4894 }
4895 }
4896
4897 if (!mask.empty())
4898 vpKeyPoint::KeyPointsFilter::runByPixelsMask(keypoints, mask);
4899}
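
// Usage sketch (illustrative, not part of the build, assuming OpenCV >= 3):
// wrap a detector so that detection also runs on downsampled pyramid levels,
// with keypoint coordinates and sizes rescaled to the full resolution.
#if 0
cv::Ptr<cv::FeatureDetector> fast = cv::FastFeatureDetector::create();
cv::Ptr<cv::FeatureDetector> pyramid =
    cv::makePtr<vpKeyPoint::PyramidAdaptedFeatureDetector>(fast, 3 /*maxLevel*/);
std::vector<cv::KeyPoint> keypoints;
pyramid->detect(matImg, keypoints); // matImg: cv::Mat image, assumed to exist
#endif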
4900#endif
4901#endif
4902
4903#elif !defined(VISP_BUILD_SHARED_LIBS)
4904// Work around to avoid warning: libvisp_vision.a(vpKeyPoint.cpp.o) has no
4905// symbols
4906void dummy_vpKeyPoint(){};
4907#endif