2#include <visp3/core/vpConfig.h>
3#ifdef VISP_HAVE_MODULE_SENSOR
4#include <visp3/sensor/vpV4l2Grabber.h>
5#include <visp3/sensor/vp1394CMUGrabber.h>
6#include <visp3/sensor/vp1394TwoGrabber.h>
7#include <visp3/sensor/vpFlyCaptureGrabber.h>
8#include <visp3/sensor/vpRealSense2.h>
10#include <visp3/core/vpIoTools.h>
11#include <visp3/core/vpXmlParserCamera.h>
12#include <visp3/gui/vpDisplayGDI.h>
13#include <visp3/gui/vpDisplayOpenCV.h>
14#include <visp3/gui/vpDisplayX.h>
15#include <visp3/io/vpImageIo.h>
16#include <visp3/vision/vpKeyPoint.h>
18#include <visp3/mbt/vpMbGenericTracker.h>
30int main(
int argc,
char **argv)
32#if defined(VISP_HAVE_OPENCV) && \
33 (defined(VISP_HAVE_V4L2) || defined(VISP_HAVE_DC1394) || defined(VISP_HAVE_CMU1394) || (VISP_HAVE_OPENCV_VERSION >= 0x020100) || \
34 defined(VISP_HAVE_FLYCAPTURE) || defined(VISP_HAVE_REALSENSE2) )
37 std::string opt_modelname =
"model/teabox/teabox.cao";
40 double opt_proj_error_threshold = 30.;
41 bool opt_use_ogre =
false;
42 bool opt_use_scanline =
false;
43 bool opt_display_projection_error =
false;
44 bool opt_learn =
false;
45 bool opt_auto_init =
false;
46 std::string opt_learning_data =
"learning/data-learned.bin";
47 std::string opt_intrinsic_file =
"";
48 std::string opt_camera_name =
"";
50 for (
int i = 0; i < argc; i++) {
51 if (std::string(argv[i]) ==
"--model") {
52 opt_modelname = std::string(argv[i + 1]);
54 else if (std::string(argv[i]) ==
"--tracker") {
55 opt_tracker = atoi(argv[i + 1]);
57 else if (std::string(argv[i]) ==
"--camera_device" && i + 1 < argc) {
58 opt_device = atoi(argv[i + 1]);
60 else if (std::string(argv[i]) ==
"--max_proj_error") {
61 opt_proj_error_threshold = atof(argv[i + 1]);
62 }
else if (std::string(argv[i]) ==
"--use_ogre") {
64 }
else if (std::string(argv[i]) ==
"--use_scanline") {
65 opt_use_scanline =
true;
66 }
else if (std::string(argv[i]) ==
"--learn") {
68 }
else if (std::string(argv[i]) ==
"--learning_data" && i+1 < argc) {
69 opt_learning_data = argv[i+1];
70 }
else if (std::string(argv[i]) ==
"--auto_init") {
72 }
else if (std::string(argv[i]) ==
"--display_proj_error") {
73 opt_display_projection_error =
true;
74 }
else if (std::string(argv[i]) ==
"--intrinsic" && i + 1 < argc) {
75 opt_intrinsic_file = std::string(argv[i + 1]);
76 }
else if (std::string(argv[i]) ==
"--camera_name" && i + 1 < argc) {
77 opt_camera_name = std::string(argv[i + 1]);
79 else if (std::string(argv[i]) ==
"--help" || std::string(argv[i]) ==
"-h") {
80 std::cout <<
"\nUsage: " << argv[0]
81 <<
" [--camera_device <camera device> (default: 0)]"
82 <<
" [--intrinsic <intrinsic file> (default: empty)]"
83 <<
" [--camera_name <camera name> (default: empty)]"
84 <<
" [--model <model name> (default: teabox)]"
85 <<
" [--tracker <0=egde|1=keypoint|2=hybrid> (default: 2)]"
86 <<
" [--use_ogre] [--use_scanline]"
87 <<
" [--max_proj_error <allowed projection error> (default: 30)]"
88 <<
" [--learn] [--auto_init] [--learning_data <data-learned.bin> (default: learning/data-learned.bin)]"
89 <<
" [--display_proj_error]"
98 if (!parentname.empty())
99 objectname = parentname +
"/" + objectname;
101 std::cout <<
"Tracker requested config files: " << objectname <<
".[init, cao]" << std::endl;
102 std::cout <<
"Tracker optional config files: " << objectname <<
".[ppm]" << std::endl;
104 std::cout <<
"Tracked features: " << std::endl;
105 std::cout <<
" Use edges : " << (opt_tracker == 0 || opt_tracker == 2) << std::endl;
106 std::cout <<
" Use klt : " << (opt_tracker == 1 || opt_tracker == 2) << std::endl;
107 std::cout <<
"Tracker options: " << std::endl;
108 std::cout <<
" Use ogre : " << opt_use_ogre << std::endl;
109 std::cout <<
" Use scanline: " << opt_use_scanline << std::endl;
110 std::cout <<
" Proj. error : " << opt_proj_error_threshold << std::endl;
111 std::cout <<
" Display proj. error: " << opt_display_projection_error << std::endl;
112 std::cout <<
"Config files: " << std::endl;
113 std::cout <<
" Config file : " <<
"\"" << objectname +
".xml" <<
"\"" << std::endl;
114 std::cout <<
" Model file : " <<
"\"" << objectname +
".cao" <<
"\"" << std::endl;
115 std::cout <<
" Init file : " <<
"\"" << objectname +
".init" <<
"\"" << std::endl;
116 std::cout <<
"Learning options : " << std::endl;
117 std::cout <<
" Learn : " << opt_learn << std::endl;
118 std::cout <<
" Auto init : " << opt_auto_init << std::endl;
119 std::cout <<
" Learning data: " << opt_learning_data << std::endl;
122#if VISP_VERSION_INT > VP_VERSION_INT(3, 2, 0)
134 if (!opt_intrinsic_file.empty() && !opt_camera_name.empty())
142#if defined(VISP_HAVE_V4L2)
144 std::ostringstream device;
145 device <<
"/dev/video" << opt_device;
146 std::cout <<
"Use Video 4 Linux grabber on device " << device.str() << std::endl;
150#elif defined(VISP_HAVE_DC1394)
152 std::cout <<
"Use DC1394 grabber" << std::endl;
155#elif defined(VISP_HAVE_CMU1394)
157 std::cout <<
"Use CMU1394 grabber" << std::endl;
160#elif defined(VISP_HAVE_FLYCAPTURE)
162 std::cout <<
"Use FlyCapture grabber" << std::endl;
165#elif defined(VISP_HAVE_REALSENSE2)
167 std::cout <<
"Use Realsense 2 grabber" << std::endl;
170 config.disable_stream(RS2_STREAM_DEPTH);
171 config.disable_stream(RS2_STREAM_INFRARED);
172 config.enable_stream(RS2_STREAM_COLOR, 640, 480, RS2_FORMAT_RGBA8, 30);
176 std::cout <<
"Read camera parameters from Realsense device" << std::endl;
179#elif defined(VISP_HAVE_OPENCV)
180 std::cout <<
"Use OpenCV grabber on device " << opt_device << std::endl;
181 cv::VideoCapture g(opt_device);
183 std::cout <<
"Failed to open the camera" << std::endl;
193#if defined(VISP_HAVE_X11)
195#elif defined(VISP_HAVE_GDI)
200 display->init(I, 100, 100,
"Model-based tracker");
203#if defined(VISP_HAVE_V4L2) || defined(VISP_HAVE_DC1394) || defined(VISP_HAVE_CMU1394) || defined(VISP_HAVE_FLYCAPTURE) || defined(VISP_HAVE_REALSENSE2)
205#elif defined(VISP_HAVE_OPENCV)
221 if (opt_tracker == 0)
223#if defined(VISP_HAVE_MODULE_KLT) && defined(VISP_HAVE_OPENCV)
224 else if (opt_tracker == 1)
230# if !defined(VISP_HAVE_MODULE_KLT)
231 std::cout <<
"klt and hybrid model-based tracker are not available since visp_klt module is not available. "
232 "In CMakeGUI turn visp_klt module ON, configure and build ViSP again."
235 std::cout <<
"Hybrid tracking is impossible since OpenCV is not enabled. "
236 <<
"Install OpenCV, configure and build ViSP again to run this tutorial."
254 if (opt_tracker == 0 || opt_tracker == 2) {
268#ifdef VISP_HAVE_MODULE_KLT
269 if (opt_tracker == 1 || opt_tracker == 2) {
304#if (defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)) || \
305 (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
306 std::string detectorName =
"SIFT";
307 std::string extractorName =
"SIFT";
308 std::string matcherName =
"BruteForce";
310 std::string detectorName =
"FAST";
311 std::string extractorName =
"ORB";
312 std::string matcherName =
"BruteForce-Hamming";
315 if (opt_learn || opt_auto_init) {
319#if !(defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D))
320# if (VISP_HAVE_OPENCV_VERSION < 0x030000)
321 keypoint.setDetectorParameter(
"ORB",
"nLevels", 1);
323 cv::Ptr<cv::ORB> orb_detector = keypoint.
getDetector(
"ORB").dynamicCast<cv::ORB>();
325 orb_detector->setNLevels(1);
333 std::cout <<
"Cannot enable auto detection. Learning file \"" << opt_learning_data <<
"\" doesn't exist" << std::endl;
339 tracker.
initClick(I, objectname +
".init",
true);
342 bool learn_position =
false;
343 bool run_auto_init =
false;
345 run_auto_init =
true;
350 unsigned int learn_cpt = 0;
352 bool tracking_failed =
false;
356#if defined(VISP_HAVE_V4L2) || defined(VISP_HAVE_DC1394) || defined(VISP_HAVE_CMU1394) || defined(VISP_HAVE_FLYCAPTURE) || defined(VISP_HAVE_REALSENSE2)
358#elif defined(VISP_HAVE_OPENCV)
366 tracking_failed =
false;
368 std::cout <<
"Auto init succeed" << std::endl;
375 else if (tracking_failed) {
377 tracking_failed =
false;
378 tracker.
initClick(I, objectname +
".init",
true);
387 run_auto_init =
false;
392 tracking_failed =
true;
394 std::cout <<
"Tracker needs to restart (tracking exception)" << std::endl;
395 run_auto_init =
true;
399 if (! tracking_failed) {
400 double proj_error = 0;
410 if (proj_error > opt_proj_error_threshold) {
411 std::cout <<
"Tracker needs to restart (projection error detected: " << proj_error <<
")" << std::endl;
413 run_auto_init =
true;
415 tracking_failed =
true;
419 if (! tracking_failed) {
432 std::stringstream ss;
433 ss <<
"Translation: " << std::setprecision(5) << pose[0] <<
" " << pose[1] <<
" " << pose[2] <<
" [m]";
440 std::stringstream ss;
446 if (learn_position) {
449 std::vector<cv::KeyPoint> trainKeyPoints;
450 keypoint.
detect(I, trainKeyPoints);
453 std::vector<vpPolygon> polygons;
454 std::vector<std::vector<vpPoint> > roisPt;
455 std::pair<std::vector<vpPolygon>, std::vector<std::vector<vpPoint> > > pair = tracker.
getPolygonFaces();
456 polygons = pair.first;
457 roisPt = pair.second;
460 std::vector<cv::Point3f> points3f;
464 keypoint.
buildReference(I, trainKeyPoints, points3f,
true, learn_id++);
467 for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
470 learn_position =
false;
471 std::cout <<
"Data learned" << std::endl;
474 std::stringstream ss;
479 else if (opt_auto_init)
489 learn_position =
true;
491 run_auto_init =
true;
497 if (opt_learn && learn_cpt) {
498 std::cout <<
"Save learning from " << learn_cpt <<
" images in file: " << opt_learning_data << std::endl;
506 std::cout <<
"Catch a ViSP exception: " << e << std::endl;
508#elif defined(VISP_HAVE_OPENCV)
511 std::cout <<
"Install a 3rd party dedicated to frame grabbing (dc1394, cmu1394, v4l2, OpenCV, FlyCapture, Realsense2), configure and build ViSP again to use this example" << std::endl;
515 std::cout <<
"Install OpenCV 3rd party, configure and build ViSP again to use this example" << std::endl;
Firewire cameras video capture based on CMU 1394 Digital Camera SDK.
void open(vpImage< unsigned char > &I)
Class for firewire ieee1394 video devices using libdc1394-2.x api.
void open(vpImage< unsigned char > &I)
Generic class defining intrinsic camera parameters.
void initPersProjWithoutDistortion(double px, double py, double u0, double v0)
@ perspectiveProjWithoutDistortion
static const vpColor none
static const vpColor yellow
static const vpColor green
Display for Windows using GDI (available on any Windows 32 platform).
The vpDisplayOpenCV allows to display image using the OpenCV library. Thus to enable this class OpenC...
Use the X11 console to display images on unix-like OS. Thus to enable this class X11 should be instal...
Class that defines generic functionalities for display.
static bool getClick(const vpImage< unsigned char > &I, bool blocking=true)
static void display(const vpImage< unsigned char > &I)
static void displayCross(const vpImage< unsigned char > &I, const vpImagePoint &ip, unsigned int size, const vpColor &color, unsigned int thickness=1)
static void flush(const vpImage< unsigned char > &I)
static void displayFrame(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, double size, const vpColor &color=vpColor::none, unsigned int thickness=1, const vpImagePoint &offset=vpImagePoint(0, 0))
static void displayText(const vpImage< unsigned char > &I, const vpImagePoint &ip, const std::string &s, const vpColor &color)
Error that can be emitted by ViSP classes.
const std::string & getStringMessage() const
Send a reference (constant) related to the error message (can be empty).
void open(vpImage< unsigned char > &I)
Implementation of an homogeneous matrix and operations on such kind of matrices.
static void convert(const vpImage< unsigned char > &src, vpImage< vpRGBa > &dest)
Class that allows keypoints detection (and descriptors extraction) and matching thanks to OpenCV libr...
unsigned int matchPoint(const vpImage< unsigned char > &I)
void setExtractor(const vpFeatureDescriptorType &extractorType)
void loadLearningData(const std::string &filename, bool binaryMode=false, bool append=false)
void detect(const vpImage< unsigned char > &I, std::vector< cv::KeyPoint > &keyPoints, const vpRect &rectangle=vpRect())
void setMatcher(const std::string &matcherName)
static void compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector< cv::KeyPoint > &candidates, const std::vector< vpPolygon > &polygons, const std::vector< std::vector< vpPoint > > &roisPt, std::vector< cv::Point3f > &points, cv::Mat *descriptors=NULL)
void saveLearningData(const std::string &filename, bool binaryMode=false, bool saveTrainingImages=true)
void setDetector(const vpFeatureDetectorType &detectorType)
cv::Ptr< cv::FeatureDetector > getDetector(const vpFeatureDetectorType &type) const
unsigned int buildReference(const vpImage< unsigned char > &I)
Wrapper for the KLT (Kanade-Lucas-Tomasi) feature tracker implemented in OpenCV. Thus to enable this ...
void setBlockSize(int blockSize)
void setQuality(double qualityLevel)
void setHarrisFreeParameter(double harris_k)
void setMaxFeatures(int maxCount)
void setMinDistance(double minDistance)
void setWindowSize(int winSize)
void setPyramidLevels(int pyrMaxLevel)
static double deg(double rad)
Real-time 6D object pose tracking using its CAD model.
virtual void setCameraParameters(const vpCameraParameters &camera)
virtual void getPose(vpHomogeneousMatrix &cMo) const
virtual void setDisplayFeatures(bool displayF)
virtual int getTrackerType() const
virtual void setKltMaskBorder(const unsigned int &e)
virtual void setProjectionErrorComputation(const bool &flag)
virtual unsigned int getNbFeaturesEdge() const
virtual void initFromPose(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &cMo)
virtual unsigned int getNbFeaturesKlt() const
virtual void getCameraParameters(vpCameraParameters &camera) const
virtual void setMovingEdge(const vpMe &me)
virtual void setScanLineVisibilityTest(const bool &v)
virtual void setKltOpencv(const vpKltOpencv &t)
virtual std::pair< std::vector< vpPolygon >, std::vector< std::vector< vpPoint > > > getPolygonFaces(bool orderPolygons=true, bool useVisibility=true, bool clipPolygon=false)
virtual void setProjectionErrorDisplay(bool display)
virtual void setTrackerType(int type)
virtual void initClick(const vpImage< unsigned char > &I1, const vpImage< unsigned char > &I2, const std::string &initFile1, const std::string &initFile2, bool displayHelp=false, const vpHomogeneousMatrix &T1=vpHomogeneousMatrix(), const vpHomogeneousMatrix &T2=vpHomogeneousMatrix())
virtual double computeCurrentProjectionError(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &_cMo, const vpCameraParameters &_cam)
virtual void loadConfigFile(const std::string &configFile, bool verbose=true)
virtual void setOgreVisibilityTest(const bool &v)
virtual void loadModel(const std::string &modelFile, bool verbose=false, const vpHomogeneousMatrix &T=vpHomogeneousMatrix())
virtual void display(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, const vpColor &col, unsigned int thickness=1, bool displayFullModel=false)
virtual void track(const vpImage< unsigned char > &I)
virtual double getProjectionError() const
void setMu1(const double &mu_1)
void setSampleStep(const double &s)
void setRange(const unsigned int &r)
void setMaskSize(const unsigned int &a)
void setMu2(const double &mu_2)
void setMaskNumber(const unsigned int &a)
void setThreshold(const double &t)
Implementation of a pose vector and operations on poses.
void acquire(vpImage< unsigned char > &grey, double *ts=NULL)
vpCameraParameters getCameraParameters(const rs2_stream &stream, vpCameraParameters::vpCameraParametersProjType type=vpCameraParameters::perspectiveProjWithDistortion, int index=-1) const
bool open(const rs2::config &cfg=rs2::config())
Class that is a wrapper over the Video4Linux2 (V4L2) driver.
void open(vpImage< unsigned char > &I)
void setScale(unsigned scale=vpV4l2Grabber::DEFAULT_SCALE)
void setDevice(const std::string &devname)
XML parser to load and save intrinsic camera parameters.
int parse(vpCameraParameters &cam, const std::string &filename, const std::string &camera_name, const vpCameraParameters::vpCameraParametersProjType &projModel, unsigned int image_width=0, unsigned int image_height=0)
VISP_EXPORT double measureTimeMs()