Visual Servoing Platform version 3.5.0
servoAfma6Points2DCamVelocityEyeToHand.cpp
/****************************************************************************
 *
 * ViSP, open source Visual Servoing Platform software.
 * Copyright (C) 2005 - 2019 by Inria. All rights reserved.
 *
 * This software is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 * See the file LICENSE.txt at the root directory of this source
 * distribution for additional information about the GNU GPL.
 *
 * For using ViSP with software that can not be combined with the GNU
 * GPL, please contact Inria about acquiring a ViSP Professional
 * Edition License.
 *
 * See http://visp.inria.fr for more information.
 *
 * This software was developed at:
 * Inria Rennes - Bretagne Atlantique
 * Campus Universitaire de Beaulieu
 * 35042 Rennes Cedex
 * France
 *
 * If you have questions regarding the use of this file, please contact
 * Inria at visp@inria.fr
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Description:
 * tests the control law
 * eye-to-hand control
 * velocity computed in the camera frame
 *
 * Authors:
 * Eric Marchand
 *
 *****************************************************************************/

#include <cmath>   // std::fabs
#include <fstream> // std::ifstream, std::ofstream
#include <iostream>
#include <limits> // numeric_limits
#include <list>
#include <stdio.h>  // sprintf, FILENAME_MAX
#include <stdlib.h>
#include <visp3/core/vpConfig.h>
#include <visp3/core/vpDebug.h> // Debug trace
#if (defined(VISP_HAVE_AFMA6) && defined(VISP_HAVE_DC1394))

#define SAVE 0

#include <visp3/blob/vpDot.h>
#include <visp3/core/vpDisplay.h>
#include <visp3/core/vpException.h>
#include <visp3/core/vpHomogeneousMatrix.h>
#include <visp3/core/vpImage.h>
#include <visp3/core/vpImagePoint.h>
#include <visp3/core/vpMath.h>
#include <visp3/core/vpPixelMeterConversion.h>
#include <visp3/core/vpPoint.h>
#include <visp3/core/vpRGBa.h>
#include <visp3/gui/vpDisplayGTK.h>
#include <visp3/gui/vpDisplayOpenCV.h>
#include <visp3/gui/vpDisplayX.h>
#include <visp3/io/vpImageIo.h>
#include <visp3/robot/vpRobotAfma6.h>
#include <visp3/sensor/vp1394TwoGrabber.h>
#include <visp3/vision/vpPose.h>
#include <visp3/visual_features/vpFeatureBuilder.h>
#include <visp3/visual_features/vpFeaturePoint.h>
#include <visp3/vs/vpServo.h>
#include <visp3/vs/vpServoDisplay.h>

// L: characteristic size of the dot target (in meter), D: y coordinate of the
// dots in the object frame
#define L 0.006
#define D 0

int main()
{
  try {
    vpServo task;

    vpCameraParameters cam;
    vpImage<unsigned char> I;
    int i;

    // Firewire grabber; video mode and framerate may need to be adapted to
    // the camera actually mounted on the setup
    vp1394TwoGrabber g;
    g.setVideoMode(vp1394TwoGrabber::vpVIDEO_MODE_640x480_MONO8);
    g.setFramerate(vp1394TwoGrabber::vpFRAMERATE_60);
    g.open(I);

    g.acquire(I);

#ifdef VISP_HAVE_X11
    vpDisplayX display(I, 100, 100, "Current image");
#elif defined(VISP_HAVE_OPENCV)
    vpDisplayOpenCV display(I, 100, 100, "Current image");
#elif defined(VISP_HAVE_GTK)
    vpDisplayGTK display(I, 100, 100, "Current image");
#endif

    vpDisplay::display(I);
    vpDisplay::flush(I);

    std::cout << std::endl;
    std::cout << "-------------------------------------------------------" << std::endl;
    std::cout << " Test program for vpServo " << std::endl;
    std::cout << " Eye-to-hand task control" << std::endl;
    std::cout << " Robot : Afma6 " << std::endl;
    std::cout << " task : servo 7 points " << std::endl;
    std::cout << "-------------------------------------------------------" << std::endl;
    std::cout << std::endl;

    // Seven dots are tracked on the object held by the robot
    const int nbPoint = 7;

    vpDot dot[nbPoint];
    vpImagePoint cog;

    // Initialize the tracker of each dot: click on the dot in the image
    for (i = 0; i < nbPoint; i++) {
      dot[i].initTracking(I);
      dot[i].setGraphics(true);
      dot[i].track(I);
      vpDisplay::flush(I);
      dot[i].setGraphics(false);
    }

    // 3D model of the dot pattern, used to compute the pose of the object
    vpPoint point[nbPoint];
    point[0].setWorldCoordinates(-2 * L, D, -3 * L);
    point[1].setWorldCoordinates(0, D, -3 * L);
    point[2].setWorldCoordinates(2 * L, D, -3 * L);

    point[3].setWorldCoordinates(-L, D, -L);
    point[4].setWorldCoordinates(L, D, -L);
    point[5].setWorldCoordinates(L, D, L);
    point[6].setWorldCoordinates(-L, D, L);

    vpRobotAfma6 robot;
    // Update camera parameters
    robot.getCameraParameters(cam, I);

    vpHomogeneousMatrix cMo, cdMo;
    vpPose pose;
    pose.clearPoint();
    for (i = 0; i < nbPoint; i++) {
      cog = dot[i].getCog();
      double x = 0, y = 0;
      vpPixelMeterConversion::convertPoint(cam, cog.get_u(), cog.get_v(), x, y);
      point[i].set_x(x);
      point[i].set_y(y);
      pose.addPoint(point[i]);
    }

    // Compute the initial pose using the Dementhon method followed by a
    // non-linear minimization method
    pose.computePose(vpPose::DEMENTHON_LOWE, cMo);

    std::cout << cMo << std::endl;
    cMo.print();

    /*------------------------------------------------------------------
      -- Learning the desired position
      -- or reading the desired position
      ------------------------------------------------------------------
    */
    std::cout << " Learning the desired position? (1: learn and save it, 0: read it from cdMo.dat) " << std::endl;
    char name[FILENAME_MAX];
    sprintf(name, "cdMo.dat");
    int learning;
    std::cin >> learning;
    if (learning == 1) {
      // Save the current object pose: it becomes the desired one
      vpTRACE("Save the location of the object in a file cdMo.dat");
      std::ofstream f(name);
      cMo.save(f);
      f.close();
      return EXIT_SUCCESS;
    }

    {
      vpTRACE("Loading desired location from cdMo.dat");
      std::ifstream f("cdMo.dat");
      cdMo.load(f);
      f.close();
    }

    vpFeaturePoint p[nbPoint], pd[nbPoint];

    // Set the desired position of the points by forward projection using
    // the pose cdMo
    for (i = 0; i < nbPoint; i++) {
      vpColVector cP, p2d;
      point[i].changeFrame(cdMo, cP);
      point[i].projection(cP, p2d);

      pd[i].set_x(p2d[0]);
      pd[i].set_y(p2d[1]);
    }

    //------------------------------------------------------------------

    vpTRACE("define the task");
    vpTRACE("\t we want an eye-to-hand control law");
    vpTRACE("\t robot is controlled in the camera frame");
    task.setServo(vpServo::EYETOHAND_L_cVe_eJe);
    task.setInteractionMatrixType(vpServo::CURRENT);

    for (i = 0; i < nbPoint; i++) {
      task.addFeature(p[i], pd[i]);
    }

    vpTRACE("Display task information ");
    task.print();

    //------------------------------------------------------------------

    double convergence_threshold = 0.00; // a typical non-zero value is 0.00025
    vpDisplay::getClick(I);              // wait for a click before moving the robot

    //-------------------------------------------------------------
    double error = 1;
    unsigned int iter = 0;
    vpTRACE("\t loop");
    robot.setRobotState(vpRobot::STATE_VELOCITY_CONTROL);
    vpColVector v; // computed robot velocity

    // position of the object in the effector frame
    vpHomogeneousMatrix oMcamrobot;
    oMcamrobot[0][3] = -0.05;

    vpImage<vpRGBa> Ic; // color snapshot, only used when SAVE is enabled
    int it = 0;

    double lambda_av = 0.1;
    double alpha = 1;
    double beta = 3;

    std::cout << "alpha (suggested value: 0.7): ";
    std::cin >> alpha;
    std::cout << "beta (suggested value: 5): ";
    std::cin >> beta;
    std::list<vpImagePoint> Lcog;
    vpImagePoint ip;
    while (error > convergence_threshold) {
      std::cout << "---------------------------------------------" << iter++ << std::endl;

      g.acquire(I);
      vpDisplay::display(I);
      ip.set_i(265);
      ip.set_j(150);
      vpDisplay::displayText(I, ip, "Eye-To-Hand Visual Servoing", vpColor::green);
      ip.set_i(280);
      ip.set_j(150);
      vpDisplay::displayText(I, ip, "IRISA-INRIA Rennes, Lagadic project", vpColor::green);
      try {
        for (i = 0; i < nbPoint; i++) {
          dot[i].track(I);
          Lcog.push_back(dot[i].getCog());
        }
      } catch (...) {
        vpTRACE("Error detected while tracking visual features");
        robot.stopMotion();
        exit(1);
      }

      // Update the current pose cMo using a non-linear minimization method
      pose.clearPoint();

      for (i = 0; i < nbPoint; i++) {
        double x = 0, y = 0;
        cog = dot[i].getCog();
        vpPixelMeterConversion::convertPoint(cam, cog.get_u(), cog.get_v(), x, y);
        point[i].set_x(x);
        point[i].set_y(y);

        vpColVector cP;
        point[i].changeFrame(cdMo, cP);

        p[i].set_x(x);
        p[i].set_y(y);
        p[i].set_Z(cP[2]); // depth approximated at the desired pose cdMo

        pose.addPoint(point[i]);

        point[i].display(I, cMo, cam, vpColor::green);
        point[i].display(I, cdMo, cam, vpColor::blue);
      }
      pose.computePose(vpPose::LOWE, cMo);

      // Build cMe, the pose of the robot end-effector in the observing camera
      // frame, by chaining cMo with the constant transforms oMcamrobot and
      // camrobotMe (the latter returned by get_cMe())
      vpHomogeneousMatrix cMe, camrobotMe;
      robot.get_cMe(camrobotMe);
      cMe = cMo * oMcamrobot * camrobotMe;

      task.set_cVe(cMe);

      vpMatrix eJe;
      robot.get_eJe(eJe);
      task.set_eJe(eJe);

      // Compute the adaptive gain (speeds up the convergence):
      // gain = alpha * exp(-beta * ||e||^2) + lambda_av
      double gain;
      if (iter > 2) {
        if (std::fabs(alpha) <= std::numeric_limits<double>::epsilon())
          gain = lambda_av;
        else {
          gain = alpha * exp(-beta * (task.getError()).sumSquare()) + lambda_av;
        }
      } else
        gain = lambda_av;
      if (SAVE == 1)
        gain = gain / 5;

      vpTRACE("%f %f %f %f %f", alpha, beta, lambda_av, (task.getError()).sumSquare(), gain);
      task.setLambda(gain);

      v = task.computeControlLaw();

      // display the points trajectory
      for (std::list<vpImagePoint>::const_iterator it_cog = Lcog.begin(); it_cog != Lcog.end(); ++it_cog) {
        vpDisplay::displayPoint(I, *it_cog, vpColor::red);
      }
      vpServoDisplay::display(task, cam, I);
      vpDisplay::flush(I);

      // Send the computed velocity to the robot; as stated in the file header,
      // the velocity is expressed in the camera frame
      robot.setVelocity(vpRobot::CAMERA_FRAME, v);

      error = (task.getError()).sumSquare();
      std::cout << "|| s - s* || = " << error << std::endl;

      if (error > 7) {
        vpTRACE("Error detected while tracking visual features");
        robot.stopMotion();
        exit(1);
      }

      // display the current and desired poses
      // pose.display(I, cMo, cam, 0.04, vpColor::red);
      // pose.display(I, cdMo, cam, 0.04, vpColor::blue);
      if ((SAVE == 1) && (iter % 3 == 0)) {
        vpDisplay::getImage(I, Ic);
        sprintf(name, "/tmp/marchand/image.%04d.ppm", it++);
        vpImageIo::write(Ic, name);
      }
    }
    // Convergence reached: stop the robot by sending a zero velocity
    v = 0;
    robot.setVelocity(vpRobot::CAMERA_FRAME, v);
    return EXIT_SUCCESS;
  }
  catch (const vpException &e) {
    std::cout << "Test failed with exception: " << e << std::endl;
    return EXIT_FAILURE;
  }
}

#else
int main()
{
  std::cout << "You do not have an afma6 robot connected to your computer..." << std::endl;
  return EXIT_SUCCESS;
}

#endif
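The eye-to-hand specific part of the servo loop above boils down to refreshing the camera-to-end-effector transform and the robot Jacobian before each call to computeControlLaw(). The condensed sketch below isolates that step; the helper name eyeToHandStep and the combined transform oMe are illustrative only and not part of the example.

#include <visp3/core/vpColVector.h>
#include <visp3/core/vpHomogeneousMatrix.h>
#include <visp3/core/vpMatrix.h>
#include <visp3/robot/vpRobotAfma6.h>
#include <visp3/vs/vpServo.h>

// One iteration of the eye-to-hand update performed in the loop above.
// cMo is the current pose of the object in the fixed camera frame and oMe the
// constant object-to-end-effector transform (oMcamrobot * camrobotMe in the
// example). The task is assumed to be configured with EYETOHAND_L_cVe_eJe and
// the point features already added.
vpColVector eyeToHandStep(vpServo &task, vpRobotAfma6 &robot,
                          const vpHomogeneousMatrix &cMo, const vpHomogeneousMatrix &oMe)
{
  vpHomogeneousMatrix cMe = cMo * oMe; // fixed camera -> end-effector
  task.set_cVe(cMe);                   // velocity twist cVe deduced from cMe

  vpMatrix eJe;
  robot.get_eJe(eJe); // robot Jacobian expressed in the end-effector frame
  task.set_eJe(eJe);

  // The returned velocity is then sent to the robot with robot.setVelocity(),
  // exactly as in the servo loop above
  return task.computeControlLaw();
}

Everything else in the loop (dot tracking, pose update with vpPose, adaptive gain, display) only feeds or monitors this step.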