Visual Servoing Platform version 3.5.0
vpKeyPoint.h
1/****************************************************************************
2 *
3 * ViSP, open source Visual Servoing Platform software.
4 * Copyright (C) 2005 - 2019 by Inria. All rights reserved.
5 *
6 * This software is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 * See the file LICENSE.txt at the root directory of this source
11 * distribution for additional information about the GNU GPL.
12 *
13 * For using ViSP with software that can not be combined with the GNU
14 * GPL, please contact Inria about acquiring a ViSP Professional
15 * Edition License.
16 *
17 * See http://visp.inria.fr for more information.
18 *
19 * This software was developed at:
20 * Inria Rennes - Bretagne Atlantique
21 * Campus Universitaire de Beaulieu
22 * 35042 Rennes Cedex
23 * France
24 *
25 * If you have questions regarding the use of this file, please contact
26 * Inria at visp@inria.fr
27 *
28 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
29 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
30 *
31 * Description:
32 * Key point functionalities.
33 *
34 * Authors:
35 * Souriya Trinh
36 *
37 *****************************************************************************/
38#ifndef _vpKeyPoint_h_
39#define _vpKeyPoint_h_
40
41#include <algorithm> // std::transform
42#include <float.h> // DBL_MAX
43#include <fstream> // std::ofstream
44#include <limits>
45#include <map> // std::map
46#include <numeric> // std::accumulate
47#include <stdlib.h> // srand, rand
48#include <time.h> // time
49#include <vector> // std::vector
50
51#include <visp3/core/vpConfig.h>
52#include <visp3/core/vpDisplay.h>
53#include <visp3/core/vpImageConvert.h>
54#include <visp3/core/vpPixelMeterConversion.h>
55#include <visp3/core/vpPlane.h>
56#include <visp3/core/vpPoint.h>
57#include <visp3/vision/vpBasicKeyPoint.h>
58#include <visp3/vision/vpPose.h>
59#ifdef VISP_HAVE_MODULE_IO
60# include <visp3/io/vpImageIo.h>
61#endif
62#include <visp3/core/vpConvert.h>
63#include <visp3/core/vpCylinder.h>
64#include <visp3/core/vpMeterPixelConversion.h>
65#include <visp3/core/vpPolygon.h>
66#include <visp3/vision/vpXmlConfigParserKeyPoint.h>
67
68// Require at least OpenCV >= 2.1.1
69#if (VISP_HAVE_OPENCV_VERSION >= 0x020101)
70
71# include <opencv2/calib3d/calib3d.hpp>
72# include <opencv2/features2d/features2d.hpp>
73# include <opencv2/imgproc/imgproc.hpp>
74
75# if (VISP_HAVE_OPENCV_VERSION >= 0x040000) // Require opencv >= 4.0.0
76# include <opencv2/imgproc/imgproc_c.h>
77# include <opencv2/imgproc.hpp>
78# endif
79
80# if defined(VISP_HAVE_OPENCV_XFEATURES2D) // OpenCV >= 3.0.0
81# include <opencv2/xfeatures2d.hpp>
82# elif defined(VISP_HAVE_OPENCV_NONFREE) && (VISP_HAVE_OPENCV_VERSION >= 0x020400) && \
83 (VISP_HAVE_OPENCV_VERSION < 0x030000)
84# include <opencv2/nonfree/nonfree.hpp>
85# endif
86
222class VISP_EXPORT vpKeyPoint : public vpBasicKeyPoint
223{
224
225public:
227 enum vpFilterMatchingType {
228 constantFactorDistanceThreshold, /*!< Keep all the matches below a constant factor threshold. */
230 stdDistanceThreshold, /*!< Keep all the matches below the minimal distance plus the standard deviation. */
232 ratioDistanceThreshold, /*!< Keep only the matches whose ratio between the two best distances is below a threshold. */
235 stdAndRatioDistanceThreshold, /*!< Keep the matches that satisfy both the std and the ratio conditions. */
237 noFilterMatching /*!< No filtering. */
238 };
239
241 enum vpDetectionMethodType {
242 detectionThreshold,
244 detectionScore
247 };
248
250 typedef enum {
251 jpgImageFormat, /*!< Save training images in JPG format. */
252 pngImageFormat, /*!< Save training images in PNG format. */
253 ppmImageFormat, /*!< Save training images in PPM format. */
254 pgmImageFormat /*!< Save training images in PGM format. */
255 } vpImageFormatType;
256
258 enum vpFeatureDetectorType {
259#if (VISP_HAVE_OPENCV_VERSION >= 0x020403)
260 DETECTOR_FAST,
261 DETECTOR_MSER,
262 DETECTOR_ORB,
263 DETECTOR_BRISK,
264 DETECTOR_GFTT,
265 DETECTOR_SimpleBlob,
266#if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D))
267 DETECTOR_STAR,
268#endif
269#if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D) || \
270 (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
271 DETECTOR_SIFT,
272#endif
273#if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)
274 DETECTOR_SURF,
275#endif
276#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
277 DETECTOR_KAZE,
278 DETECTOR_AKAZE,
279 DETECTOR_AGAST,
280#endif
281#if (VISP_HAVE_OPENCV_VERSION >= 0x030100) && defined(VISP_HAVE_OPENCV_XFEATURES2D)
282 DETECTOR_MSD,
283#endif
284#endif
285 DETECTOR_TYPE_SIZE
286 };
287
289 enum vpFeatureDescriptorType {
290#if (VISP_HAVE_OPENCV_VERSION >= 0x020403)
291 DESCRIPTOR_ORB,
292 DESCRIPTOR_BRISK,
293#if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D))
294 DESCRIPTOR_FREAK,
295 DESCRIPTOR_BRIEF,
296#endif
297#if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D) || \
298 (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
299 DESCRIPTOR_SIFT,
300#endif
301#if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)
302 DESCRIPTOR_SURF,
303#endif
304#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
305 DESCRIPTOR_KAZE,
306 DESCRIPTOR_AKAZE,
307#if defined(VISP_HAVE_OPENCV_XFEATURES2D)
308 DESCRIPTOR_DAISY,
309 DESCRIPTOR_LATCH,
310#endif
311#endif
312#if (VISP_HAVE_OPENCV_VERSION >= 0x030200) && defined(VISP_HAVE_OPENCV_XFEATURES2D)
313 DESCRIPTOR_VGG,
314 DESCRIPTOR_BoostDesc,
315#endif
316#endif
317 DESCRIPTOR_TYPE_SIZE
318 };
319
320 vpKeyPoint(const vpFeatureDetectorType &detectorType, const vpFeatureDescriptorType &descriptorType,
321 const std::string &matcherName, const vpFilterMatchingType &filterType = ratioDistanceThreshold);
322 vpKeyPoint(const std::string &detectorName = "ORB", const std::string &extractorName = "ORB",
323 const std::string &matcherName = "BruteForce-Hamming",
324 const vpFilterMatchingType &filterType = ratioDistanceThreshold);
325 vpKeyPoint(const std::vector<std::string> &detectorNames, const std::vector<std::string> &extractorNames,
326 const std::string &matcherName = "BruteForce",
327 const vpFilterMatchingType &filterType = ratioDistanceThreshold);
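  /*
   * A minimal construction sketch, assuming an ORB detector/descriptor is
   * available in the installed OpenCV version; the detector, extractor and
   * matcher names below are purely illustrative and can be replaced by any
   * other supported combination.
   *
   *   // name-based constructor
   *   vpKeyPoint keypoint("ORB", "ORB", "BruteForce-Hamming",
   *                       vpKeyPoint::ratioDistanceThreshold);
   *   // equivalent enum-based constructor
   *   vpKeyPoint keypoint_enum(vpKeyPoint::DETECTOR_ORB, vpKeyPoint::DESCRIPTOR_ORB,
   *                            "BruteForce-Hamming");
   */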
328
329 unsigned int buildReference(const vpImage<unsigned char> &I);
330 unsigned int buildReference(const vpImage<unsigned char> &I, const vpImagePoint &iP, unsigned int height,
331 unsigned int width);
332 unsigned int buildReference(const vpImage<unsigned char> &I, const vpRect &rectangle);
333
334 unsigned int buildReference(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &trainKeyPoints,
335 std::vector<cv::Point3f> &points3f, bool append = false, int class_id = -1);
336 unsigned int buildReference(const vpImage<unsigned char> &I, const std::vector<cv::KeyPoint> &trainKeyPoints,
337 const cv::Mat &trainDescriptors, const std::vector<cv::Point3f> &points3f,
338 bool append = false, int class_id = -1);
339
340 unsigned int buildReference(const vpImage<vpRGBa> &I_color);
341 unsigned int buildReference(const vpImage<vpRGBa> &I_color, const vpImagePoint &iP, unsigned int height,
342 unsigned int width);
343 unsigned int buildReference(const vpImage<vpRGBa> &I_color, const vpRect &rectangle);
344
345 unsigned int buildReference(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &trainKeyPoints,
346 std::vector<cv::Point3f> &points3f, bool append = false, int class_id = -1);
347 unsigned int buildReference(const vpImage<vpRGBa> &I, const std::vector<cv::KeyPoint> &trainKeyPoints,
348 const cv::Mat &trainDescriptors, const std::vector<cv::Point3f> &points3f,
349 bool append = false, int class_id = -1);
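  /*
   * A minimal sketch of building the reference (train) keypoints, assuming a
   * previously loaded grayscale image I and the keypoint object constructed
   * above; the region-of-interest values are arbitrary.
   *
   *   vpImage<unsigned char> I;   // reference image (assumed loaded)
   *   unsigned int nb_ref = keypoint.buildReference(I);
   *   // or restrict the reference to a rectangular region of interest
   *   unsigned int nb_roi = keypoint.buildReference(I, vpRect(vpImagePoint(20, 20), 200, 150));
   */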
350
351 static void compute3D(const cv::KeyPoint &candidate, const std::vector<vpPoint> &roi, const vpCameraParameters &cam,
352 const vpHomogeneousMatrix &cMo, cv::Point3f &point);
353
354 static void compute3D(const vpImagePoint &candidate, const std::vector<vpPoint> &roi, const vpCameraParameters &cam,
355 const vpHomogeneousMatrix &cMo, vpPoint &point);
356
357 static void compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam,
358 std::vector<cv::KeyPoint> &candidates,
359 const std::vector<vpPolygon> &polygons,
360 const std::vector<std::vector<vpPoint> > &roisPt,
361 std::vector<cv::Point3f> &points, cv::Mat *descriptors = NULL);
362
363 static void compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam,
364 std::vector<vpImagePoint> &candidates,
365 const std::vector<vpPolygon> &polygons,
366 const std::vector<std::vector<vpPoint> > &roisPt,
367 std::vector<vpPoint> &points, cv::Mat *descriptors = NULL);
368
369 static void
370 compute3DForPointsOnCylinders(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam,
371 std::vector<cv::KeyPoint> &candidates, const std::vector<vpCylinder> &cylinders,
372 const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois,
373 std::vector<cv::Point3f> &points, cv::Mat *descriptors = NULL);
374
375 static void
376 compute3DForPointsOnCylinders(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam,
377 std::vector<vpImagePoint> &candidates, const std::vector<vpCylinder> &cylinders,
378 const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois,
379 std::vector<vpPoint> &points, cv::Mat *descriptors = NULL);
380
381 bool computePose(const std::vector<cv::Point2f> &imagePoints, const std::vector<cv::Point3f> &objectPoints,
382 const vpCameraParameters &cam, vpHomogeneousMatrix &cMo, std::vector<int> &inlierIndex,
383 double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &) = NULL);
384
385 bool computePose(const std::vector<vpPoint> &objectVpPoints, vpHomogeneousMatrix &cMo, std::vector<vpPoint> &inliers,
386 double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &) = NULL);
387
388 bool computePose(const std::vector<vpPoint> &objectVpPoints, vpHomogeneousMatrix &cMo, std::vector<vpPoint> &inliers,
389 std::vector<unsigned int> &inlierIndex, double &elapsedTime,
390 bool (*func)(const vpHomogeneousMatrix &) = NULL);
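  /*
   * A minimal sketch of the RANSAC pose computation from already matched
   * 2D/3D correspondences; the point lists and camera parameters are
   * placeholders to be filled by the caller.
   *
   *   std::vector<cv::Point2f> imagePoints;    // matched 2D points (to fill)
   *   std::vector<cv::Point3f> objectPoints;   // corresponding 3D model points (to fill)
   *   vpCameraParameters cam;                  // camera intrinsics (to fill)
   *   vpHomogeneousMatrix cMo;
   *   std::vector<int> inlierIndex;
   *   double elapsedTime;
   *   bool ok = keypoint.computePose(imagePoints, objectPoints, cam, cMo, inlierIndex, elapsedTime);
   */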
391
392 void createImageMatching(vpImage<unsigned char> &IRef, vpImage<unsigned char> &ICurrent,
393 vpImage<unsigned char> &IMatching);
394 void createImageMatching(vpImage<unsigned char> &ICurrent, vpImage<unsigned char> &IMatching);
395
396 void createImageMatching(vpImage<unsigned char> &IRef, vpImage<vpRGBa> &ICurrent,
397 vpImage<vpRGBa> &IMatching);
398 void createImageMatching(vpImage<vpRGBa> &ICurrent, vpImage<vpRGBa> &IMatching);
399
400 void detect(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints,
401 const vpRect &rectangle = vpRect());
402 void detect(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints,
403 const vpRect &rectangle = vpRect());
404 void detect(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, const cv::Mat &mask = cv::Mat());
405 void detect(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, double &elapsedTime,
406 const vpRect &rectangle = vpRect());
407 void detect(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, double &elapsedTime,
408 const vpRect &rectangle = vpRect());
409 void detect(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, double &elapsedTime,
410 const cv::Mat &mask = cv::Mat());
411
412 void detectExtractAffine(const vpImage<unsigned char> &I, std::vector<std::vector<cv::KeyPoint> > &listOfKeypoints,
413 std::vector<cv::Mat> &listOfDescriptors,
414 std::vector<vpImage<unsigned char> > *listOfAffineI = NULL);
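  /*
   * A sketch of the affine-simulation detection path (ASIFT-like): the method
   * above can be called directly, or affine detection can be enabled globally
   * with setUseAffineDetection(true) so that the matching functions use it
   * internally; ICurrent is an assumed current image.
   *
   *   keypoint.setUseAffineDetection(true);
   *   unsigned int nb_matches = keypoint.matchPoint(ICurrent);
   */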
415
416 void display(const vpImage<unsigned char> &IRef, const vpImage<unsigned char> &ICurrent, unsigned int size = 3);
417 void display(const vpImage<unsigned char> &ICurrent, unsigned int size = 3, const vpColor &color = vpColor::green);
418 void display(const vpImage<vpRGBa> &IRef, const vpImage<vpRGBa> &ICurrent, unsigned int size = 3);
419 void display(const vpImage<vpRGBa> &ICurrent, unsigned int size = 3, const vpColor &color = vpColor::green);
420
421 void displayMatching(const vpImage<unsigned char> &IRef, vpImage<unsigned char> &IMatching, unsigned int crossSize,
422 unsigned int lineThickness = 1, const vpColor &color = vpColor::green);
423 void displayMatching(const vpImage<unsigned char> &ICurrent, vpImage<unsigned char> &IMatching,
424 const std::vector<vpImagePoint> &ransacInliers = std::vector<vpImagePoint>(),
425 unsigned int crossSize = 3, unsigned int lineThickness = 1);
426 void displayMatching(const vpImage<unsigned char> &IRef, vpImage<vpRGBa> &IMatching, unsigned int crossSize,
427 unsigned int lineThickness = 1, const vpColor &color = vpColor::green);
428 void displayMatching(const vpImage<vpRGBa> &IRef, vpImage<vpRGBa> &IMatching, unsigned int crossSize,
429 unsigned int lineThickness = 1, const vpColor &color = vpColor::green);
430 void displayMatching(const vpImage<vpRGBa> &ICurrent, vpImage<vpRGBa> &IMatching,
431 const std::vector<vpImagePoint> &ransacInliers = std::vector<vpImagePoint>(),
432 unsigned int crossSize = 3, unsigned int lineThickness = 1);
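  /*
   * A sketch of building a side-by-side matching image and drawing the
   * matches, assuming IRef and ICurrent are available and matchPoint() has
   * already been called; a display must be attached to IMatching for the
   * drawing calls to be visible.
   *
   *   vpImage<unsigned char> IMatching;
   *   keypoint.createImageMatching(IRef, ICurrent, IMatching);
   *   keypoint.insertImageMatching(IRef, ICurrent, IMatching);
   *   keypoint.displayMatching(IRef, IMatching, 3);
   */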
433
434 void extract(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
435 std::vector<cv::Point3f> *trainPoints = NULL);
436 void extract(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
437 std::vector<cv::Point3f> *trainPoints = NULL);
438 void extract(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
439 std::vector<cv::Point3f> *trainPoints = NULL);
440 void extract(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
441 double &elapsedTime, std::vector<cv::Point3f> *trainPoints = NULL);
442 void extract(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
443 double &elapsedTime, std::vector<cv::Point3f> *trainPoints = NULL);
444 void extract(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors, double &elapsedTime,
445 std::vector<cv::Point3f> *trainPoints = NULL);
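  /*
   * A sketch of the low-level detect/extract pipeline, useful when keypoints
   * and descriptors are needed without the full matching machinery; I is an
   * assumed input image.
   *
   *   std::vector<cv::KeyPoint> keyPoints;
   *   cv::Mat descriptors;
   *   keypoint.detect(I, keyPoints);
   *   keypoint.extract(I, keyPoints, descriptors);
   */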
446
456 inline vpMatrix getCovarianceMatrix() const
457 {
458 if (!m_computeCovariance) {
459 std::cout << "Warning: The covariance matrix has not been computed. "
460 "See setCovarianceComputation() to do it."
461 << std::endl;
462 return vpMatrix();
463 }
464
465 if (m_computeCovariance && !m_useRansacVVS) {
466 std::cout << "Warning: The covariance matrix can only be computed "
467 "with a Virtual Visual Servoing approach."
468 << std::endl
469 << "Use setUseRansacVVS(true) to choose to use a pose "
470 "estimation method based on a Virtual Visual Servoing "
471 "approach."
472 << std::endl;
473 return vpMatrix();
474 }
475
476 return m_covarianceMatrix;
477 }
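  /*
   * A sketch of retrieving the pose covariance: both the covariance
   * computation and the RANSAC virtual visual servoing pose estimation have
   * to be enabled first, otherwise an empty matrix is returned with a warning.
   *
   *   keypoint.setUseRansacVVS(true);
   *   keypoint.setCovarianceComputation(true);
   *   // ... after a successful pose estimation:
   *   vpMatrix cov = keypoint.getCovarianceMatrix();
   */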
478
484 inline double getDetectionTime() const { return m_detectionTime; }
485
493 inline cv::Ptr<cv::FeatureDetector> getDetector(const vpFeatureDetectorType &type) const
494 {
495 std::map<vpFeatureDetectorType, std::string>::const_iterator it_name = m_mapOfDetectorNames.find(type);
496 if (it_name == m_mapOfDetectorNames.end()) {
497 std::cerr << "Internal problem with the feature type and the "
498 "corresponding name!"
499 << std::endl;
500 }
501
502 std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator findDetector =
503 m_detectors.find(it_name->second);
504 if (findDetector != m_detectors.end()) {
505 return findDetector->second;
506 }
507
508 std::cerr << "Cannot find: " << it_name->second << std::endl;
509 return cv::Ptr<cv::FeatureDetector>();
510 }
511
519 inline cv::Ptr<cv::FeatureDetector> getDetector(const std::string &name) const
520 {
521 std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator findDetector = m_detectors.find(name);
522 if (findDetector != m_detectors.end()) {
523 return findDetector->second;
524 }
525
526 std::cerr << "Cannot find: " << name << std::endl;
527 return cv::Ptr<cv::FeatureDetector>();
528 }
529
533 inline std::map<vpFeatureDetectorType, std::string> getDetectorNames() const { return m_mapOfDetectorNames; }
534
540 inline double getExtractionTime() const { return m_extractionTime; }
541
549 inline cv::Ptr<cv::DescriptorExtractor> getExtractor(const vpFeatureDescriptorType &type) const
550 {
551 std::map<vpFeatureDescriptorType, std::string>::const_iterator it_name = m_mapOfDescriptorNames.find(type);
552 if (it_name == m_mapOfDescriptorNames.end()) {
553 std::cerr << "Internal problem with the feature type and the "
554 "corresponding name!"
555 << std::endl;
556 }
557
558 std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator findExtractor =
559 m_extractors.find(it_name->second);
560 if (findExtractor != m_extractors.end()) {
561 return findExtractor->second;
562 }
563
564 std::cerr << "Cannot find: " << it_name->second << std::endl;
565 return cv::Ptr<cv::DescriptorExtractor>();
566 }
567
575 inline cv::Ptr<cv::DescriptorExtractor> getExtractor(const std::string &name) const
576 {
577 std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator findExtractor = m_extractors.find(name);
578 if (findExtractor != m_extractors.end()) {
579 return findExtractor->second;
580 }
581
582 std::cerr << "Cannot find: " << name << std::endl;
583 return cv::Ptr<cv::DescriptorExtractor>();
584 }
585
589 inline std::map<vpFeatureDescriptorType, std::string> getExtractorNames() const { return m_mapOfDescriptorNames; }
590
596 inline vpImageFormatType getImageFormat() const { return m_imageFormat; }
597
603 inline double getMatchingTime() const { return m_matchingTime; }
604
610 inline cv::Ptr<cv::DescriptorMatcher> getMatcher() const { return m_matcher; }
611
618 inline std::vector<cv::DMatch> getMatches() const { return m_filteredMatches; }
619
627 inline std::vector<std::pair<cv::KeyPoint, cv::KeyPoint> > getMatchQueryToTrainKeyPoints() const
628 {
629 std::vector<std::pair<cv::KeyPoint, cv::KeyPoint> > matchQueryToTrainKeyPoints(m_filteredMatches.size());
630 for (size_t i = 0; i < m_filteredMatches.size(); i++) {
631 matchQueryToTrainKeyPoints[i] =
632 std::pair<cv::KeyPoint, cv::KeyPoint>(m_queryFilteredKeyPoints[(size_t)m_filteredMatches[i].queryIdx],
633 m_trainKeyPoints[(size_t)m_filteredMatches[i].trainIdx]);
634 }
635 return matchQueryToTrainKeyPoints;
636 }
637
643 inline unsigned int getNbImages() const {
644 return static_cast<unsigned int>(m_mapOfImages.size());
645 }
646
647 void getObjectPoints(std::vector<cv::Point3f> &objectPoints) const;
648 void getObjectPoints(std::vector<vpPoint> &objectPoints) const;
649
655 inline double getPoseTime() const { return m_poseTime; }
656
663 inline cv::Mat getQueryDescriptors() const { return m_queryDescriptors; }
664
665 void getQueryKeyPoints(std::vector<cv::KeyPoint> &keyPoints, bool matches = true) const;
666 void getQueryKeyPoints(std::vector<vpImagePoint> &keyPoints, bool matches = true) const;
667
673 inline std::vector<vpImagePoint> getRansacInliers() const { return m_ransacInliers; }
674
680 inline std::vector<vpImagePoint> getRansacOutliers() const { return m_ransacOutliers; }
681
688 inline cv::Mat getTrainDescriptors() const { return m_trainDescriptors; }
689
690 void getTrainKeyPoints(std::vector<cv::KeyPoint> &keyPoints) const;
691 void getTrainKeyPoints(std::vector<vpImagePoint> &keyPoints) const;
692
693 void getTrainPoints(std::vector<cv::Point3f> &points) const;
694 void getTrainPoints(std::vector<vpPoint> &points) const;
695
696 void initMatcher(const std::string &matcherName);
697
698 void insertImageMatching(const vpImage<unsigned char> &IRef, const vpImage<unsigned char> &ICurrent,
699 vpImage<unsigned char> &IMatching);
700 void insertImageMatching(const vpImage<unsigned char> &ICurrent, vpImage<unsigned char> &IMatching);
701
702 void insertImageMatching(const vpImage<vpRGBa> &IRef, const vpImage<vpRGBa> &ICurrent,
703 vpImage<vpRGBa> &IMatching);
704 void insertImageMatching(const vpImage<vpRGBa> &ICurrent, vpImage<vpRGBa> &IMatching);
705
706 void loadConfigFile(const std::string &configFile);
707
708 void loadLearningData(const std::string &filename, bool binaryMode = false, bool append = false);
709
710 void match(const cv::Mat &trainDescriptors, const cv::Mat &queryDescriptors, std::vector<cv::DMatch> &matches,
711 double &elapsedTime);
712
713 unsigned int matchPoint(const vpImage<unsigned char> &I);
714 unsigned int matchPoint(const vpImage<unsigned char> &I, const vpImagePoint &iP, unsigned int height,
715 unsigned int width);
716 unsigned int matchPoint(const vpImage<unsigned char> &I, const vpRect &rectangle);
717
718 unsigned int matchPoint(const std::vector<cv::KeyPoint> &queryKeyPoints, const cv::Mat &queryDescriptors);
719 bool matchPoint(const vpImage<unsigned char> &I, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
720 bool (*func)(const vpHomogeneousMatrix &) = NULL, const vpRect &rectangle = vpRect());
721 bool matchPoint(const vpImage<unsigned char> &I, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
722 double &error, double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &) = NULL,
723 const vpRect &rectangle = vpRect());
724
725 bool matchPointAndDetect(const vpImage<unsigned char> &I, vpRect &boundingBox, vpImagePoint &centerOfGravity,
726 const bool isPlanarObject = true, std::vector<vpImagePoint> *imPts1 = NULL,
727 std::vector<vpImagePoint> *imPts2 = NULL, double *meanDescriptorDistance = NULL,
728 double *detectionScore = NULL, const vpRect &rectangle = vpRect());
729
730 bool matchPointAndDetect(const vpImage<unsigned char> &I, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
731 double &error, double &elapsedTime, vpRect &boundingBox, vpImagePoint &centerOfGravity,
732 bool (*func)(const vpHomogeneousMatrix &) = NULL, const vpRect &rectangle = vpRect());
733
734 unsigned int matchPoint(const vpImage<vpRGBa> &I_color);
735 unsigned int matchPoint(const vpImage<vpRGBa> &I_color, const vpImagePoint &iP, unsigned int height,
736 unsigned int width);
737 unsigned int matchPoint(const vpImage<vpRGBa> &I_color, const vpRect &rectangle);
738
739 bool matchPoint(const vpImage<vpRGBa> &I_color, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
740 bool (*func)(const vpHomogeneousMatrix &) = NULL, const vpRect &rectangle = vpRect());
741 bool matchPoint(const vpImage<vpRGBa> &I_color, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
742 double &error, double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &) = NULL,
743 const vpRect &rectangle = vpRect());
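  /*
   * A minimal matching and pose estimation sketch, assuming the reference has
   * been built with associated 3D training points (e.g. buildReference() with
   * points3f, or loadLearningData()) and the camera is calibrated.
   *
   *   vpCameraParameters cam;   // intrinsics (assumed calibrated)
   *   vpHomogeneousMatrix cMo;
   *   double error, elapsedTime;
   *   if (keypoint.matchPoint(ICurrent, cam, cMo, error, elapsedTime)) {
   *     // cMo holds the estimated pose of the object frame in the camera frame
   *   }
   */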
744
745 void reset();
746
747 void saveLearningData(const std::string &filename, bool binaryMode = false,
748 bool saveTrainingImages = true);
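  /*
   * A sketch of persisting the learned reference to a file and reloading it
   * later, to avoid rebuilding the reference at each run; the file name is an
   * arbitrary example.
   *
   *   keypoint.saveLearningData("learning_data.bin", true);   // binary mode
   *   // ... later, or in another program:
   *   vpKeyPoint keypoint_reloaded;
   *   keypoint_reloaded.loadLearningData("learning_data.bin", true);
   */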
749
756 inline void setCovarianceComputation(const bool &flag)
757 {
758 m_computeCovariance = flag;
759 if (!m_useRansacVVS) {
760 std::cout << "Warning : The covariance matrix can only be computed "
761 "with a Virtual Visual Servoing approach."
762 << std::endl
763 << "Use setUseRansacVVS(true) to choose to use a pose "
764 "estimation method based on a Virtual "
765 "Visual Servoing approach."
766 << std::endl;
767 }
768 }
769
775 inline void setDetectionMethod(const vpDetectionMethodType &method) { m_detectionMethod = method; }
776
782 inline void setDetector(const vpFeatureDetectorType &detectorType)
783 {
784 m_detectorNames.clear();
785 m_detectorNames.push_back(m_mapOfDetectorNames[detectorType]);
786 m_detectors.clear();
787 initDetector(m_mapOfDetectorNames[detectorType]);
788 }
789
795 inline void setDetector(const std::string &detectorName)
796 {
797 m_detectorNames.clear();
798 m_detectorNames.push_back(detectorName);
799 m_detectors.clear();
800 initDetector(detectorName);
801 }
802
803#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
812 template <typename T1, typename T2, typename T3>
813 inline void setDetectorParameter(const T1 detectorName, const T2 parameterName, const T3 value)
814 {
815 if (m_detectors.find(detectorName) != m_detectors.end()) {
816 m_detectors[detectorName]->set(parameterName, value);
817 }
818 }
819#endif
820
827 inline void setDetectors(const std::vector<std::string> &detectorNames)
828 {
829 m_detectorNames.clear();
830 m_detectors.clear();
831 m_detectorNames = detectorNames;
832 initDetectors(m_detectorNames);
833 }
834
840 inline void setExtractor(const vpFeatureDescriptorType &extractorType)
841 {
842 m_extractorNames.clear();
843 m_extractorNames.push_back(m_mapOfDescriptorNames[extractorType]);
844 m_extractors.clear();
845 initExtractor(m_mapOfDescriptorNames[extractorType]);
846 }
847
854 inline void setExtractor(const std::string &extractorName)
855 {
856 m_extractorNames.clear();
857 m_extractorNames.push_back(extractorName);
858 m_extractors.clear();
859 initExtractor(extractorName);
860 }
861
862#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
871 template <typename T1, typename T2, typename T3>
872 inline void setExtractorParameter(const T1 extractorName, const T2 parameterName, const T3 value)
873 {
874 if (m_extractors.find(extractorName) != m_extractors.end()) {
875 m_extractors[extractorName]->set(parameterName, value);
876 }
877 }
878#endif
879
886 inline void setExtractors(const std::vector<std::string> &extractorNames)
887 {
888 m_extractorNames.clear();
889 m_extractorNames = extractorNames;
890 m_extractors.clear();
891 initExtractors(m_extractorNames);
892 }
893
899 inline void setImageFormat(const vpImageFormatType &imageFormat) { m_imageFormat = imageFormat; }
900
916 inline void setMatcher(const std::string &matcherName)
917 {
918 m_matcherName = matcherName;
919 initMatcher(m_matcherName);
920 }
921
927 void setMaxFeatures(int maxFeatures)
928 {
929 m_maxFeatures = maxFeatures;
930 }
931
947 inline void setFilterMatchingType(const vpFilterMatchingType &filterType)
948 {
949 m_filterType = filterType;
950
951 // Use k-nearest neighbors (knn) to retrieve the two best matches for a
952 // keypoint, so this is only useful for the ratio-based filtering methods
953 if (filterType == ratioDistanceThreshold || filterType == stdAndRatioDistanceThreshold) {
954 m_useKnn = true;
955
956#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
957 if (m_matcher != NULL && m_matcherName == "BruteForce") {
958 // if a matcher is already initialized, disable the crossCheck
959 // because it will not work with knnMatch
960 m_matcher->set("crossCheck", false);
961 }
962#endif
963 } else {
964 m_useKnn = false;
965
966#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
967 if (m_matcher != NULL && m_matcherName == "BruteForce") {
968 // if a matcher is already initialized, set the crossCheck mode if
969 // necessary
970 m_matcher->set("crossCheck", m_useBruteForceCrossCheck);
971 }
972#endif
973 }
974 }
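  /*
   * A sketch combining the filter selection above with its associated
   * threshold setter; the ratio value is illustrative, not a recommendation.
   *
   *   keypoint.setFilterMatchingType(vpKeyPoint::ratioDistanceThreshold);
   *   keypoint.setMatchingRatioThreshold(0.85);
   */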
975
982 inline void setMatchingFactorThreshold(const double factor)
983 {
984 if (factor > 0.0) {
985 m_matchingFactorThreshold = factor;
986 } else {
987 throw vpException(vpException::badValue, "The factor must be positive.");
988 }
989 }
990
996 inline void setMatchingRatioThreshold(double ratio)
997 {
998 if (ratio > 0.0 && (ratio < 1.0 || std::fabs(ratio - 1.0) < std::numeric_limits<double>::epsilon())) {
999 m_matchingRatioThreshold = ratio;
1000 } else {
1001 throw vpException(vpException::badValue, "The ratio must be in the interval ]0 ; 1].");
1002 }
1003 }
1004
1011 inline void setRansacConsensusPercentage(double percentage)
1012 {
1013 if (percentage > 0.0 &&
1014 (percentage < 100.0 || std::fabs(percentage - 100.0) < std::numeric_limits<double>::epsilon())) {
1015 m_ransacConsensusPercentage = percentage;
1016 } else {
1017 throw vpException(vpException::badValue, "The percentage must be in the interval ]0 ; 100].");
1018 }
1019 }
1020
1024 inline void setRansacFilterFlag(const vpPose::RANSAC_FILTER_FLAGS &flag)
1025 {
1026 m_ransacFilterFlag = flag;
1027 }
1028
1035 inline void setRansacIteration(int nbIter)
1036 {
1037 if (nbIter > 0) {
1038 m_nbRansacIterations = nbIter;
1039 } else {
1040 throw vpException(vpException::badValue, "The number of iterations must be greater than zero.");
1041 }
1042 }
1043
1049 inline void setRansacParallel(bool parallel)
1050 {
1051 m_ransacParallel = parallel;
1052 }
1053
1060 inline void setRansacParallelNbThreads(unsigned int nthreads)
1061 {
1062 m_ransacParallelNbThreads = nthreads;
1063 }
1064
1072 inline void setRansacReprojectionError(double reprojectionError)
1073 {
1074 if (reprojectionError > 0.0) {
1075 m_ransacReprojectionError = reprojectionError;
1076 } else {
1077 throw vpException(vpException::badValue, "The Ransac reprojection "
1078 "threshold must be positive "
1079 "as we deal with distance.");
1080 }
1081 }
1082
1088 inline void setRansacMinInlierCount(int minCount)
1089 {
1090 if (minCount > 0) {
1091 m_nbRansacMinInlierCount = minCount;
1092 } else {
1093 throw vpException(vpException::badValue, "The minimum number of inliers must be greater than zero.");
1094 }
1095 }
1096
1103 inline void setRansacThreshold(double threshold)
1104 {
1105 if (threshold > 0.0) {
1106 m_ransacThreshold = threshold;
1107 } else {
1108 throw vpException(vpException::badValue, "The Ransac threshold must be positive as we deal with distance.");
1109 }
1110 }
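  /*
   * A sketch of a typical RANSAC configuration using the setters above; the
   * numeric values are placeholders to adapt to the application (the
   * reprojection error is used by the OpenCV-based pose, the threshold by the
   * virtual visual servoing pose).
   *
   *   keypoint.setUseRansacVVS(true);
   *   keypoint.setRansacIteration(200);
   *   keypoint.setRansacReprojectionError(6.0);
   *   keypoint.setRansacThreshold(0.001);
   *   keypoint.setRansacMinInlierCount(100);
   */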
1111
1119 inline void setUseAffineDetection(bool useAffine) { m_useAffineDetection = useAffine; }
1120
1121#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
1128 inline void setUseBruteForceCrossCheck(bool useCrossCheck)
1129 {
1130 // Only available with BruteForce and with k=1 (i.e not used with a
1131 // ratioDistanceThreshold method)
1132 if (m_matcher != NULL && !m_useKnn && m_matcherName == "BruteForce") {
1133 m_matcher->set("crossCheck", useCrossCheck);
1134 } else if (m_matcher != NULL && m_useKnn && m_matcherName == "BruteForce") {
1135 std::cout << "Warning, you are trying to set the crossCheck parameter with a "
1136 "BruteForce matcher but knn is enabled";
1137 std::cout << " (the filtering method uses a ratio constraint)" << std::endl;
1138 }
1139 }
1140#endif
1141
1148 inline void setUseMatchTrainToQuery(bool useMatchTrainToQuery)
1149 {
1150 m_useMatchTrainToQuery = useMatchTrainToQuery;
1151 }
1152
1160 inline void setUseRansacConsensusPercentage(bool usePercentage) { m_useConsensusPercentage = usePercentage; }
1161
1169 inline void setUseRansacVVS(bool ransacVVS) { m_useRansacVVS = ransacVVS; }
1170
1177 inline void setUseSingleMatchFilter(bool singleMatchFilter) { m_useSingleMatchFilter = singleMatchFilter; }
1178
1179private:
1182 bool m_computeCovariance;
1184 vpMatrix m_covarianceMatrix;
1186 int m_currentImageId;
1189 vpDetectionMethodType m_detectionMethod;
1191 double m_detectionScore;
1194 double m_detectionThreshold;
1196 double m_detectionTime;
1198 std::vector<std::string> m_detectorNames;
1201 // Map of detectors, with a key based upon the detector name.
1202 std::map<std::string, cv::Ptr<cv::FeatureDetector> > m_detectors;
1204 double m_extractionTime;
1206 std::vector<std::string> m_extractorNames;
1209 // Map of descriptor extractors, with a key based upon the extractor name.
1210 std::map<std::string, cv::Ptr<cv::DescriptorExtractor> > m_extractors;
1212 std::vector<cv::DMatch> m_filteredMatches;
1214 vpFilterMatchingType m_filterType;
1216 vpImageFormatType m_imageFormat;
1219 std::vector<std::vector<cv::DMatch> > m_knnMatches;
1221 std::map<vpFeatureDescriptorType, std::string> m_mapOfDescriptorNames;
1223 std::map<vpFeatureDetectorType, std::string> m_mapOfDetectorNames;
1226 std::map<int, int> m_mapOfImageId;
1229 std::map<int, vpImage<unsigned char> > m_mapOfImages;
1232 cv::Ptr<cv::DescriptorMatcher> m_matcher;
1234 std::string m_matcherName;
1236 std::vector<cv::DMatch> m_matches;
1238 double m_matchingFactorThreshold;
1240 double m_matchingRatioThreshold;
1242 double m_matchingTime;
1244 std::vector<std::pair<cv::KeyPoint, cv::Point3f> > m_matchRansacKeyPointsToPoints;
1246 int m_nbRansacIterations;
1248 int m_nbRansacMinInlierCount;
1251 std::vector<cv::Point3f> m_objectFilteredPoints;
1253 double m_poseTime;
1256 cv::Mat m_queryDescriptors;
1258 std::vector<cv::KeyPoint> m_queryFilteredKeyPoints;
1260 std::vector<cv::KeyPoint> m_queryKeyPoints;
1263 double m_ransacConsensusPercentage;
1265 vpPose::RANSAC_FILTER_FLAGS m_ransacFilterFlag;
1267 std::vector<vpImagePoint> m_ransacInliers;
1269 std::vector<vpImagePoint> m_ransacOutliers;
1271 bool m_ransacParallel;
1273 unsigned int m_ransacParallelNbThreads;
1276 double m_ransacReprojectionError;
1279 double m_ransacThreshold;
1282 // Train descriptors (i.e. descriptors of the keypoints detected in the train images).
1283 cv::Mat m_trainDescriptors;
1285 std::vector<cv::KeyPoint> m_trainKeyPoints;
1288 std::vector<cv::Point3f> m_trainPoints;
1291 std::vector<vpPoint> m_trainVpPoints;
1294 bool m_useAffineDetection;
1295#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
1299 bool m_useBruteForceCrossCheck;
1300#endif
1303 bool m_useConsensusPercentage;
1305 bool m_useKnn;
1310 bool m_useMatchTrainToQuery;
1312 bool m_useRansacVVS;
1315 bool m_useSingleMatchFilter;
1319 int m_maxFeatures;
1320
1321 void affineSkew(double tilt, double phi, cv::Mat &img, cv::Mat &mask, cv::Mat &Ai);
1322
1323 double computePoseEstimationError(const std::vector<std::pair<cv::KeyPoint, cv::Point3f> > &matchKeyPoints,
1324 const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo_est);
1325
1326 void filterMatches();
1327
1328 void init();
1329 void initDetector(const std::string &detectorNames);
1330 void initDetectors(const std::vector<std::string> &detectorNames);
1331
1332 void initExtractor(const std::string &extractorName);
1333 void initExtractors(const std::vector<std::string> &extractorNames);
1334
1335 void initFeatureNames();
1336
1337 inline size_t myKeypointHash(const cv::KeyPoint &kp)
1338 {
1339 size_t _Val = 2166136261U, scale = 16777619U;
1340 Cv32suf u;
1341 u.f = kp.pt.x;
1342 _Val = (scale * _Val) ^ u.u;
1343 u.f = kp.pt.y;
1344 _Val = (scale * _Val) ^ u.u;
1345 u.f = kp.size;
1346 _Val = (scale * _Val) ^ u.u;
1347 // As the keypoint angle is only computed for certain types of keypoints
1348 // when extracting the corresponding descriptor, the angle field is not
1349 // taken into account for the hash
1350 // u.f = kp.angle; _Val = (scale * _Val) ^ u.u;
1351 u.f = kp.response;
1352 _Val = (scale * _Val) ^ u.u;
1353 _Val = (scale * _Val) ^ ((size_t)kp.octave);
1354 _Val = (scale * _Val) ^ ((size_t)kp.class_id);
1355 return _Val;
1356 }
1357
1358#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
1359 /*
1360 * Adapts a detector to detect points over multiple levels of a Gaussian
1361 * pyramid. Useful for detectors that are not inherently scaled.
1362 * From OpenCV 2.4.11 source code.
1363 */
1364 class PyramidAdaptedFeatureDetector : public cv::FeatureDetector
1365 {
1366 public:
1367 // maxLevel - The 0-based index of the last pyramid layer
1368 PyramidAdaptedFeatureDetector(const cv::Ptr<cv::FeatureDetector> &detector, int maxLevel = 2);
1369
1370 // TODO implement read/write
1371 virtual bool empty() const;
1372
1373 protected:
1374 virtual void detect(cv::InputArray image, CV_OUT std::vector<cv::KeyPoint> &keypoints,
1375 cv::InputArray mask = cv::noArray());
1376 virtual void detectImpl(const cv::Mat &image, std::vector<cv::KeyPoint> &keypoints,
1377 const cv::Mat &mask = cv::Mat()) const;
1378
1379 cv::Ptr<cv::FeatureDetector> detector;
1380 int maxLevel;
1381 };
1382
1383 /*
1384 * A class that filters a vector of keypoints.
1385 * Because it is currently difficult to provide a convenient interface for
1386 * all usage scenarios of the keypoints filter class, it only provides the
1387 * static methods needed for now.
1388 */
1389 class KeyPointsFilter
1390 {
1391 public:
1392 KeyPointsFilter() {}
1393
1394 /*
1395 * Remove keypoints within borderPixels of an image edge.
1396 */
1397 static void runByImageBorder(std::vector<cv::KeyPoint> &keypoints, cv::Size imageSize, int borderSize);
1398 /*
1399 * Remove keypoints of sizes out of range.
1400 */
1401 static void runByKeypointSize(std::vector<cv::KeyPoint> &keypoints, float minSize, float maxSize = FLT_MAX);
1402 /*
1403 * Remove keypoints from some image by mask for pixels of this image.
1404 */
1405 static void runByPixelsMask(std::vector<cv::KeyPoint> &keypoints, const cv::Mat &mask);
1406 /*
1407 * Remove duplicated keypoints.
1408 */
1409 static void removeDuplicated(std::vector<cv::KeyPoint> &keypoints);
1410
1411 /*
1412 * Retain the specified number of the best keypoints (according to the
1413 * response)
1414 */
1415 static void retainBest(std::vector<cv::KeyPoint> &keypoints, int npoints);
1416 };
1417
1418#endif
1419};
1420
1421#endif
1422#endif