/*
 * Credits: Julian Oliver, 2008-2009 <julian@julianoliver.com>.
 *
 * This program is free software: you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation, either version 3 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * This code builds upon BazAR, in particular 'multigl.cpp'. It has been
 * modified to support texture and video-texture mapping to an OpenGL plane over
 * the ROI. The ROI in the model image is now read in from a file generated by
 * the training process. Pose estimation stabilisation, augmentation fades,
 * fonts, mouse input hooks, augmentation over archival video and other bits and
 * pieces have been added also.
 *
 * I've fixed a bug in BazAR's planar_object_recognizer::build_with_cache where
 * corner values for the ROI were only being set immediately after training, not
 * when loading from a cache.
 *
 * There are four ways to use Artvertiser.
 *
 * With video substitution of the ROI:
 *
 *      ./artvertiser -m <model file> -a <avi file>
 *
 * With video substitution of the ROI and capture from an AVI file:
 *
 *      ./artvertiser -m <model file> -a <avi file> -b <avi file>
 *
 * With image substitution of the ROI and capture from a v4l device:
 *
 *      ./artvertiser -m <model file>
 *
 * See defines below for setting capture window size and V4L device index.
 */
48 #include "multigrab.h"
51 #include <stdio.h> /* Standard input/output definitions */
53 #include <stdint.h> /* Standard types */
54 #include <string.h> /* String function definitions */
55 #include <unistd.h> /* UNIX standard function definitions */
56 #include <errno.h> /* Error number definitions */
57 #include <fcntl.h> /* File control definitions */
58 #include <errno.h> /* Error number definitions */
59 #include <fcntl.h> /* File control definitions */
60 #include <termios.h> /* POSIX terminal control definitions */
63 #include <sstream> // for conv int->str
65 #include <opencv/cv.h>
76 #include <calib/camera.h>
79 #define HAVE_APPLE_OPENGL_FRAMEWORK
81 #ifdef HAVE_APPLE_OPENGL_FRAMEWORK
82 #include <GLUT/glut.h>
87 #include "/usr/include/freetype2/freetype/config/ftconfig.h"
88 #include <FTGL/ftgl.h>
90 #include "FProfiler/FProfiler.h"
93 #include "framerate.h"
99 #include "MatrixTracker/MatrixTracker.h"
#define IsRGB(s) ((s[0] == 'R') && (s[1] == 'G') && (s[2] == 'B'))
#define IsBGR(s) ((s[0] == 'B') && (s[1] == 'G') && (s[2] == 'R'))

#ifndef GL_CLAMP_TO_BORDER
#define GL_CLAMP_TO_BORDER 0x812D
#endif
#define GL_MIRROR_CLAMP_EXT 0x8742

#define DEFAULT_WIDTH 640
#define DEFAULT_HEIGHT 480
#define DEFAULT_V4LDEVICE 0

#define NUMARTVERTS 5
// buttons via arduino
// button_state is bitmapped so as to handle multiple button presses at once
char button_state = 0;
bool button_state_changed = false;
const char BUTTON_RED   = 0x01;
const char BUTTON_GREEN = 0x02;
const char BUTTON_BLUE  = 0x04;
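// A simultaneous red+green press shows up as both bits set, i.e.
// (button_state & BUTTON_RED) && (button_state & BUTTON_GREEN). The serial
// thread and the keyboard handlers both set/clear these bits, and
// button_state_changed flags the edge for the menu code.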
int serialport_init(const char* serialport, int baud);
int serialport_read_until(int fd, char* buf, char until);
bool serial_thread_should_exit = false;
bool serial_thread_is_running  = false;
pthread_t serial_thread;
void startSerialThread();
void shutdownSerialThread();
void* serialThreadFunc( void* );

// running on binoculars?
bool running_on_binoculars = false;
bool no_fullscreen = false;
// keep showing the augmentation for SECONDS_LOST_TRACK seconds after losing the
// target, then fade it out over SECONDS_LOST_FADE seconds
static const float SECONDS_LOST_TRACK = 0.5f;
static const float SECONDS_LOST_FADE  = 1.0f;
static const float MAX_FADE_SHOW      = 0.9f;
static const float MAX_FADE_NORMAL    = 1.0f;
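// (in draw(), fade ramps up towards MAX_FADE_SHOW / MAX_FADE_NORMAL at a rate of
// 1/SECONDS_LOST_FADE per second while frames are being caught, and ramps back
// down at the same rate once no frame has been caught for SECONDS_LOST_TRACK)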
static const int DEFAULT_CAPTURE_FPS = 20;
CamCalibration *calib = 0;

IplTexture *raw_frame_texture = 0;
FTime raw_frame_timestamp;

CvCapture *capture     = 0;
CvCapture *avi_capture = 0;
IplImage  *avi_image   = 0;
IplImage  *avi_frame   = 0;
//IplImage *model_image = 0;
IplImage  *this_frame  = 0;
IplImage  *last_frame  = 0;

IplImage  *bit_frame   = 0;

int v4l_device    = DEFAULT_V4LDEVICE;
int video_width   = DEFAULT_WIDTH;
int video_height  = DEFAULT_HEIGHT;
int detect_width  = DEFAULT_WIDTH;
int detect_height = DEFAULT_HEIGHT;
int desired_capture_fps = DEFAULT_CAPTURE_FPS;
// load some images. hard-coded for now until I get the path parsing together.
IplImage *image1 = cvLoadImage("artvert1.png");
IplImage *image2 = cvLoadImage("artvert2.png");
IplImage *image3 = cvLoadImage("artvert3.png");
IplImage *image4 = cvLoadImage("artvert4.png");
IplImage *image5 = cvLoadImage("artvert5.png");
IplImage *fallback_artvert_image = cvLoadImage("artvert1.png");
MatrixTracker matrix_tracker;

// define a container struct for each artvert
// (field order matches the brace initialisers in init() below)
struct artvert_struct
{
    const char *artvert;
    IplImage   *image;
    const char *date;
    const char *author;
    const char *advert;
    const char *street;
};

typedef vector<artvert_struct> artverts_list;
artverts_list artverts(5);

// create a vector for the images and initialise it.
typedef vector<IplImage> imgVec;
bool cache_light   = false;
bool dynamic_light = false;
bool sphere_object = false;

bool avi_play_init = false;
bool lbutton_down  = false;

bool track_kalman = false;
bool delay_video  = true;
// how many frames to delay the video
static const int VIDEO_DELAY_FRAMES = 7;
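// (the idle callback keeps a ring buffer of VIDEO_DELAY_FRAMES captured frames
// and always draws the oldest one, so the displayed video lags capture by this
// many frames)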
double old_a_proj[3][4];

FTime last_frame_caught_time;

int nb_light_measures = 0;
int geom_calib_nb_homography;

CvPoint2D32f *c1 = new CvPoint2D32f[4];
vector<int> artvert_roi_vec;
// container class for a single artvert: the model (billboard) image it is
// trained against, the substitute image or movie, and some descriptive metadata
class Artvert
{
public:
    Artvert()
    {
        model_file         = "model.bmp";
        artvert_image_file = "artvert1.png";
        artvert_is_movie   = false;
        artist             = "unknown artist";
        advert             = "unknown advert";
        name               = "unnamed artvert";

        avi_play_init = false;
    }

    ~Artvert()
    {
        cvReleaseImage( &artvert_image );

        cvReleaseCapture( &avi_capture );

        cvReleaseImage( &avi_image );
    }
    IplImage* getArtvertImage()
    {
        if ( artvert_is_movie )
        {
            avi_capture = cvCaptureFromAVI( artvert_movie_file.c_str() );
            avi_play_init = false;

            IplImage *avi_frame = 0;
            avi_frame = cvQueryFrame( avi_capture );
            if ( avi_frame == 0 )
                return fallback_artvert_image;
            if ( avi_image == 0 )
                avi_image = cvCreateImage( cvGetSize(avi_frame), avi_frame->depth, avi_frame->nChannels );
            cvCopy( avi_frame, avi_image );
            avi_image->origin = avi_frame->origin;
            GLenum format = IsBGR(avi_image->channelSeq) ? GL_BGR_EXT : GL_RGBA;

            glGenTextures(1, &imageID);
            glBindTexture(GL_TEXTURE_2D, imageID);
            glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
        }

        if ( !artvert_image )
        {
            printf("loading artvert image '%s'\n", artvert_image_file.c_str() );
            artvert_image = cvLoadImage( artvert_image_file.c_str() );
        }
        if ( !artvert_image )
        {
            fprintf(stderr, "couldn't load artvert image '%s'\n", artvert_image_file.c_str() );
            artvert_image = fallback_artvert_image;
        }
        return artvert_image;
    }
    string artvert_image_file;
    string model_file;
    string advert;
    string name;
    string artist;

    bool artvert_is_movie;
    string artvert_movie_file;

    CvCapture* avi_capture;

    IplImage* artvert_image;
};

vector< Artvert > artvert_list;
bool new_artvert_switching_in_progress = false;
int  current_artvert_index             = -1;
bool new_artvert_requested             = false;
int  new_artvert_requested_index       = 0;
vector< bool > model_file_needs_training;

#include "ofxXmlSettings/ofxXmlSettings.h"
pthread_t detection_thread;
double detection_fps = 0.0;
static void shutdownDetectionThread();
static void startDetectionThread( int thread_priority = 0 /* only takes effect if root */ );
static void* detectionThreadFunc( void* _data );
bool detection_thread_should_exit = false;
bool detection_thread_running     = false;

static void geomCalibStart(bool cache);
// initialise a couple of fonts.
CvFont font, fontbold;

// GL format for texturing
static FTFont *ftglFont;

bool show_status          = false;
bool show_profile_results = false;
string status_string      = "";

#define MENU_HIDE_TIME 5.0f
bool menu_show = false;
bool menu_is_showing = false;
/* use this to read paths from the file system */
string getExtension(const string &file)
{
    string::size_type dot = file.rfind('.');
    string lcpath = file;
    string suffix;
    transform(lcpath.begin(), lcpath.end(), lcpath.begin(), tolower);
    if(dot != string::npos)
        suffix = lcpath.substr(dot + 1);
    return suffix;
}
string getSettingsString()
{
    planar_object_recognizer &detector(multi->cams[current_cam]->detector);

    static char detector_settings_string[2048];
    sprintf( detector_settings_string,
             "1.ransac dist %4.2f 2.iter %i detected points %i match count %i,\n"
             "3.refine %6.4f 4.score %6.4f 5.best_support thresh %2i 6.tau %2i\n"
             "smoothing: 7.position %5.3f 8.position_z %5.3f \n frames back: 9.raw %2i 0.returned %2i",
             detector.ransac_dist_threshold_ui,
             detector.max_ransac_iterations_ui,
             detector.detected_point_number,
             detector.match_number,
             detector.non_linear_refine_threshold_ui,
             detector.match_score_threshold_ui,
             detector.best_support_thresh_ui,
             detector.point_detector_tau_ui,
             matrix_tracker.getPositionSmoothing(),
             matrix_tracker.getPositionZSmoothing(),
             matrix_tracker.getFramesBackRaw(),
             matrix_tracker.getFramesBackReturned() );

    return detector_settings_string;
}
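// (the digit prefixes in the settings string appear to correspond to the tuning
// keys handled in keyboard() below, which nudge the matching detector and
// tracker parameters up and down while the status display is visible)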
std::string date(int now)
{
    time_t rawtime;
    struct tm *timeinfo;
    char tBuffer[80];
    time( &rawtime );
    timeinfo = localtime ( &rawtime );
    strftime (tBuffer, 80, "%I:%M:%S:%p, %d %b %Y", timeinfo);

    stringstream _timeStr;
    _timeStr << tBuffer;
    string timeStr = _timeStr.str();
    return timeStr;
}
// mouse input using GLUT.
void entry(int state)
{
    if (state == GLUT_ENTERED)
        cout << "Mouse Entered" << endl;
    else
        cout << "Mouse Left" << endl;
}

void mouse(int button, int state, int x, int y)
{
    if (button == GLUT_RIGHT_BUTTON)
    {
        if (state == GLUT_DOWN)
        {
        }
    }
    else if (button == GLUT_LEFT_BUTTON)
    {
        if (state == GLUT_DOWN)
        {
            if (cnt >= NUMARTVERTS-1)
            {
            }
        }
        lbutton_down = false;
        cout << "we are on image " << cnt << endl;
    }
}
// text drawing function
static void drawText(IplImage *img, const char *text, CvPoint point, CvFont *font, CvScalar colour, double size)
{
    cvInitFont( font, CV_FONT_HERSHEY_DUPLEX, size, size, 0, 1, CV_AA );
    //cvInitFont( font, CV_FONT_HERSHEY_PLAIN, size, size, 0, 1, CV_AA);
    cvPutText(img, text, point, font, colour);
}
// read in ROI coords from txt file into vector.
static vector<int> readROI(const char *filename)
{
    cout << filename << endl;

    vector<int> v;
    ifstream roi(filename);
    if (roi.is_open())
    {
        string l;
        char s[512];
        while (getline(roi, l))
        {
            strcpy(s, l.c_str());
            char *s1 = strtok(s, " ,");
            while (s1 != NULL)
            {
                //roi_vec.push_back(atoi(s1));
                v.push_back(atoi(s1));
                s1 = strtok(NULL, " ,");
            }
        }
    }
    else
    {
        cout << "roi file not found" << endl;
    }
    return v;
}
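// (the .roi / .artvertroi files are plain text: integer pixel coordinates
// separated by spaces or commas, eight values giving the four ROI corners as
// x,y pairs; see how roi_vec and artvert_roi_vec are consumed in loadOrTrain())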
//! GLUT callback on window size change
static void reshape(int width, int height)
{
    //GLfloat h = (GLfloat) height / (GLfloat) width;
    int winWidth  = video_width;
    int winHeight = video_height;
    glViewport(0, 0, winWidth, winHeight);
}
//! Print a command line help and exit.
static void usage(const char *s)
{
    cerr << "usage:\n" << s
         << " [-m <model image>] [-m <model image>] ... \n "
            " [-ml <model images file .xml>] [-r] [-t] [-g] [-a <path>] [-l] [-vd <num>] [-vs <width> <height>]\n"
            " [-ds <width> <height>] [-fps <fps>] [-binoc [-nofullscreen]]\n\n"
            //" -a <path> specify path to AVI (instead of v4l device)\n"
            " -b <path> specify path to AVI (instead of v4l device), ignores -vs\n"
            " -m specify model image (may be used multiple times)\n"
            " -ml <path> load model images from <path> (xml) (respects additional -m paths)\n"
            " -r do not load any data\n"
            " -t train a new classifier\n"
            " -g recompute geometric calibration\n"
            " -a <path> load an AVI movie as an artvert\n"
            " -i <path> load an image as an artvert\n"
            " -l rebuild irradiance map from scratch\n"
            " -vd <num> V4L video device number (0-n)\n"
            " -vs <width> <height> video width and height (default 640x480)\n"
            " -ds <width> <height> frame size at which to run the detector (default to video width/height)\n"
            " -fps <fps> desired fps at which to run the image capture\n"
            " -binoc run as if operating on binoculars (necessary for osx training)\n"
            " -nofullscreen don't try to run fullscreen in -binoc mode\n\n";
}
551 printf("in exit_handler\n");
552 // shutdown detection thread
553 if ( detection_thread_running
)
555 printf("stopping detection\n");
556 shutdownDetectionThread();
559 if ( serial_thread_is_running
)
561 printf("stopping serial\n");
562 shutdownSerialThread();
564 // shutdown binocular training
565 if ( multi
&& multi
->model
.isInteractiveTrainBinocularsRunning() )
567 printf("stopping interactive train binoculars\n");
568 multi
->model
.abortInteractiveTrainBinoculars();
574 printf("stopping multithread capture\n");
577 multi
->cams
[0]->shutdownMultiThreadCapture();
582 printf("deleteing multi\n");
int serialport_init(const char* serialport, int baud)
{
    struct termios toptions;
    int fd;

    //fprintf(stderr,"init_serialport: opening port %s @ %d bps\n",
    //        serialport, baud);

    fd = open(serialport, O_RDWR | O_NOCTTY | O_NDELAY);
    if (fd == -1)
    {
        perror("init_serialport: Unable to open port ");
        return -1;
    }

    if (tcgetattr(fd, &toptions) < 0) {
        perror("init_serialport: Couldn't get term attributes");
        return -1;
    }
    speed_t brate = baud; // lets you override the switch below if needed
    switch(baud) {
        case 4800:   brate=B4800;   break;
        case 9600:   brate=B9600;   break;
#ifdef B14400
        case 14400:  brate=B14400;  break;
#endif
        case 19200:  brate=B19200;  break;
#ifdef B28800
        case 28800:  brate=B28800;  break;
#endif
        case 38400:  brate=B38400;  break;
        case 57600:  brate=B57600;  break;
        case 115200: brate=B115200; break;
    }
    cfsetispeed(&toptions, brate);
    cfsetospeed(&toptions, brate);

    // 8N1
    toptions.c_cflag &= ~PARENB;
    toptions.c_cflag &= ~CSTOPB;
    toptions.c_cflag &= ~CSIZE;
    toptions.c_cflag |= CS8;
    // no flow control
    toptions.c_cflag &= ~CRTSCTS;

    toptions.c_cflag |= CREAD | CLOCAL;          // turn on READ & ignore ctrl lines
    toptions.c_iflag &= ~(IXON | IXOFF | IXANY); // turn off s/w flow ctrl

    toptions.c_lflag &= ~(ICANON | ECHO | ECHOE | ISIG); // make raw
    toptions.c_oflag &= ~OPOST;                          // make raw

    // see: http://unixwiz.net/techtips/termios-vmin-vtime.html
    toptions.c_cc[VMIN]  = 0;
    toptions.c_cc[VTIME] = 20;

    if( tcsetattr(fd, TCSANOW, &toptions) < 0) {
        perror("init_serialport: Couldn't set term attributes");
        return -1;
    }

    return fd;
}
// arduino serial port read
int serialport_read_until(int fd, char* buf, char until)
{
    char b[1];
    int i = 0;
    int timeout = 1000*1000;
    do {
        int n = read(fd, b, 1); // read a char at a time
        if ( n == -1 ) return -1;
        if ( n == 0 )
        {
            usleep( 100 ); // wait 100 usec try again
            timeout -= 100;
            continue;
        }
        buf[i] = b[0];
        i++;
    } while( b[0] != until && timeout > 0 );

    if ( timeout <= 0 )
    {
        fprintf(stderr, "serialport_read_until timed out\n");
        return -1;
    }
    buf[i] = 0; // null terminate the string
    return 0;
}
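// The serial thread below expects each Arduino line to be three ASCII '0'/'1'
// characters for the red, green and blue buttons followed by a newline
// (e.g. "101\n" means red and blue are pressed); see serialThreadFunc().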
bool loadOrTrain( int new_index )
{
    if ( new_index < 0 || new_index >= artvert_list.size() )
    {
        fprintf(stderr, "loadOrTrain: invalid index %i (artvert_list has %i members)\n",
                new_index, (int)artvert_list.size() );
        return false;
    }

    new_artvert_switching_in_progress = true;
    bool wants_training = model_file_needs_training[new_index];
    string model_file   = artvert_list[new_index].model_file;
    // do we need to switch, or can we use the same model file?
    if ( ( current_artvert_index < 0 || current_artvert_index >= artvert_list.size() ) ||
         model_file != artvert_list[current_artvert_index].model_file )
    {
        bool trained = multi->loadOrTrainCache( wants_training, model_file.c_str(), running_on_binoculars );
        if ( !trained )
        {
            current_artvert_index = -1;
            new_artvert_switching_in_progress = false;
            return false;
        }

        // copy char model_file before munging with strcat
        char s[1024];
        strcpy (s, model_file.c_str());
        roi_vec = readROI(s);

        strcpy( s, model_file.c_str() );
        strcat(s, ".artvertroi");
        artvert_roi_vec = readROI(s);
        if ( artvert_roi_vec.empty() )
            artvert_roi_vec.insert( artvert_roi_vec.begin(), roi_vec.begin(), roi_vec.end() );

        // load model_image for use with diffing, later
        // model_image = cvLoadImage(model_file.c_str());

        c1[0].x = roi_vec[0];
        c1[0].y = roi_vec[1];
        c1[1].x = roi_vec[2];
        c1[1].y = roi_vec[3];
        c1[2].x = roi_vec[4];
        c1[2].y = roi_vec[5];
        c1[3].x = roi_vec[6];
        c1[3].y = roi_vec[7];
    }

    // update current index
    current_artvert_index = new_index;

    // no longer switching
    new_artvert_switching_in_progress = false;

    return true;
}
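// c1 now holds the four ROI corners of the model image in model-image pixel
// coordinates; the (currently commented-out) occlusion-mask prototype in
// drawAugmentation() uses them together with the detected corner positions
// to warp the camera frame back into the model view.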
/*!\brief Initialize everything
 *
 * - Parse the command line
 * - Initialize all the cameras
 * - Load or interactively build a model, with its classifier.
 * - Set the GLUT callbacks for geometric calibration or, if already done, for photometric calibration.
 */
static bool init( int argc, char** argv )
{
    // register exit function
    atexit( &exit_handler );

    // dump opencv version
    printf("built with opencv version %s\n", CV_VERSION );

    // more from before init should be moved here
    bool redo_lighting = false;
    bool redo_training = false;
    bool redo_geom     = false;
    char *avi_bg_path  = (char*)"";
    bool got_fps = false;
    bool video_source_is_avi = false;
    char *model_file_list_file = NULL;

    // parse command line
    for (int i=1; i<argc; i++)
    {
        if (strcmp(argv[i], "-m") == 0)
        {
            Artvert a;
            a.model_file = argv[i+1];
            a.advert     = "cmdline " + a.model_file;
            artvert_list.push_back( a );
            printf(" -m: adding model image '%s'\n", argv[i+1] );
        }
        else if ( strcmp(argv[i], "-binoc") == 0 )
        {
            running_on_binoculars = true;
            printf(" -binoc: running on binoculars\n");
        }
        else if ( strcmp(argv[i], "-nofullscreen") == 0 )
        {
            no_fullscreen = true;
            printf(" -nofullscreen: won't go fullscreen\n");
        }
        else if ( strcmp(argv[i], "-ml" ) == 0 )
        {
            model_file_list_file = argv[i+1];
            printf(" -ml: loading model image list from '%s'\n", argv[i+1] );
        }
        else if (strcmp(argv[i], "-r") == 0)
        {
            redo_geom = redo_training = redo_lighting = true;
        }
        else if (strcmp(argv[i], "-g") == 0)
        {
            redo_geom = true;
        }
        else if (strcmp(argv[i], "-l") == 0)
        {
            redo_lighting = true;
        }
        else if (strcmp(argv[i], "-t") == 0)
        {
            redo_training = true;
            printf( "-t: redoing training\n");
        }
        else if (strcmp(argv[i], "-a") == 0)
        {
            avi_capture = cvCaptureFromAVI(argv[i+1]);
        }
        else if (strcmp(argv[i], "-i") == 0)
        {
            IplImage *image1 = cvLoadImage(argv[i+1]);
        }
        else if (strcmp(argv[i], "-b") == 0)
        {
            video_source_is_avi = true;
            avi_bg_path = argv[i+1];
            printf(" -b: loading from avi '%s'\n", avi_bg_path);
        }
        else if (strcmp(argv[i], "-i") == 0)
        {
            image_path = argv[i+1];
        }
        else if ( strcmp(argv[i], "-fps") == 0 )
        {
            desired_capture_fps = atoi(argv[i+1]);
            got_fps = true;
        }
        else if (strcmp(argv[i], "-vd") == 0)
        {
            v4l_device = atoi(argv[i+1]);
            printf(" -vd: using v4l device %i\n", v4l_device);
        }
        else if (strcmp(argv[i], "-vs") == 0)
        {
            video_width  = atoi(argv[i+1]);
            video_height = atoi(argv[i+2]);
            // also set detect size (if not already set)
            detect_width  = video_width;
            detect_height = video_height;
            printf(" -vs: video size is %ix%i\n", video_width, video_height);
            if ( video_width == 0 || video_height == 0 )
                usage(argv[0]);
        }
        else if ( strcmp(argv[i], "-ds") == 0 )
        {
            detect_width  = atoi(argv[i+1]);
            detect_height = atoi(argv[i+2]);
            printf(" -ds: detection frame size is %ix%i\n", detect_width, detect_height);
            if ( detect_width == 0 || detect_height == 0 )
                usage(argv[0]);
        }
        else if (argv[i][0] == '-')
        {
            usage(argv[0]);
        }
    }

    // read model files from model_file_list_file
    if ( model_file_list_file != NULL )
    {
        ofxXmlSettings data;
        data.loadFile( model_file_list_file );
        if ( data.getNumTags( "artverts" ) == 1 )
        {
            data.pushTag( "artverts" );
            int num_filenames = data.getNumTags( "advert" );
            printf(" -ml: opened %s, %i adverts\n", model_file_list_file, num_filenames );
            for ( int i=0; i<num_filenames; i++ )
            {
                data.pushTag("advert", i);
                Artvert a;
                a.model_file = data.getValue( "model_filename", "model.bmp" );
                a.advert     = data.getValue( "advert", "unknown advert" );
                int num_artverts = data.getNumTags( "artvert" );
                printf(" -ml: got advert, model file '%s', advert '%s', %i artverts\n",
                       a.model_file.c_str(), a.advert.c_str(), num_artverts );
                for ( int j=0; j<num_artverts; j++ )
                {
                    data.pushTag("artvert", j);
                    a.name   = data.getValue( "name", "unnamed" );
                    a.artist = data.getValue( "artist", "unknown artist" );
                    if ( data.getNumTags("movie_filename") != 0 )
                    {
                        a.artvert_is_movie   = true;
                        a.artvert_movie_file = data.getValue("movie_filename", "artvertmovie1.mp4" );
                    }
                    else
                    {
                        a.artvert_image_file = data.getValue( "image_filename", "artvert1.png" );
                    }
                    printf(" %i: %s:%s:%s\n", j, a.name.c_str(), a.artist.c_str(),
                           a.artvert_is_movie ? (a.artvert_movie_file+"( movie)").c_str() : a.artvert_image_file.c_str() );
                    artvert_list.push_back( a );
                    data.popTag();
                }
                data.popTag();
            }
        }
        else
        {
            printf(" -ml: error reading '%s': couldn't find 'artverts' tag\n", model_file_list_file );
        }
    }

    // check if model file list is empty
    if ( artvert_list.empty() )
    {
        Artvert a;
        artvert_list.push_back( a );
    }

    // set up training flags
    for ( int i=0; i<artvert_list.size(); i++ )
        model_file_needs_training.push_back( redo_training );

    // check for video size arg if necessary
    if ( video_source_is_avi )
    {
        // try to read from video
        CvCapture* temp_cap = cvCaptureFromAVI(avi_bg_path);
        video_width  = (int)cvGetCaptureProperty( temp_cap, CV_CAP_PROP_FRAME_WIDTH );
        video_height = (int)cvGetCaptureProperty( temp_cap, CV_CAP_PROP_FRAME_HEIGHT );
        int video_fps = (int)cvGetCaptureProperty( temp_cap, CV_CAP_PROP_FPS );
        printf(" -b: read video width/height %i/%i from avi (ignoring -vs)\n", video_width, video_height );
        detect_width  = video_width;
        detect_height = video_height;
        if ( !got_fps )
            desired_capture_fps = video_fps;
        cvReleaseCapture(&temp_cap);
    }

    //cout << avi_bg_path << endl;
    cache_light = !redo_lighting;

    glutReshapeWindow( video_width, video_height );

    multi = new MultiGrab();

    if( multi->init(avi_bg_path, video_width, video_height, v4l_device,
                    detect_width, detect_height, desired_capture_fps ) == 0)
    {
        cerr << "Initialization error.\n";
        return false;
    }

    artvert_struct artvert1 = {"Arrebato, 1980", image1, "Feb, 2009", "Iván Zulueta", "Polo", "Madrid, Spain"};
    artvert_struct artvert2 = {"name2", image2, "2008", "simon innings", "Helmut Lang", "Parlance Avenue"};
    artvert_struct artvert3 = {"name3", image3, "2008", "simon innings", "Loreal", "Parlance Avenue"};
    artvert_struct artvert4 = {"name4", image4, "2008", "simon innings", "Hugo Boss", "Parlance Avenue"};
    artvert_struct artvert5 = {"name5", image5, "2008", "simon innings", "Burger King", "Parlance Avenue"};

    artverts[0] = artvert1;
    artverts[1] = artvert2;
    artverts[2] = artvert3;
    artverts[3] = artvert4;
    artverts[4] = artvert5;

    // try to load geom cache + start the run loop
    geomCalibStart(!redo_geom);

    startDetectionThread( 1 /* priority, only if running as root */ );

    last_frame_caught_time.SetNow();
    frame_timer.SetNow();

    startSerialThread();

    printf("init() finished\n");
    return true;
}
/*! The keyboard callback: reacts to '+' and '-' to change the viewed cam, 'q' exits.
 * 'd' turns on/off the dynamic lightmap update.
 * 'f' goes fullscreen.
 */
static void keyboard(unsigned char c, int x, int y)
{
    char old_button_state = button_state;
    const char* filename;

        if (current_cam < multi->cams.size()-1)

        if (current_cam >= 1)

        dynamic_light = !dynamic_light;

        delay_video = !delay_video;

        if (avi_play == true)

        track_kalman = !track_kalman;

        show_status = !show_status;

        show_profile_results = true;

        if (cnt >= NUMARTVERTS-1)

        cout << "we are on image " << cnt << endl;

        button_state |= BUTTON_RED;

        button_state |= BUTTON_GREEN;

        button_state |= BUTTON_BLUE;

    if ( multi && show_status )
    {
        planar_object_recognizer &detector(multi->cams[current_cam]->detector);
        bool something = true;

        // detector settings
            detector.ransac_dist_threshold_ui *= 1.02f;
            detector.ransac_dist_threshold_ui /= 1.02f;
            detector.max_ransac_iterations_ui += 10;
            detector.max_ransac_iterations_ui -= 10;
            detector.non_linear_refine_threshold_ui *= 1.02f;
            detector.non_linear_refine_threshold_ui /= 1.02f;
            detector.match_score_threshold_ui *= 1.02f;
            detector.match_score_threshold_ui /= 1.02f;
            detector.best_support_thresh_ui++;
            detector.best_support_thresh_ui--;
            detector.point_detector_tau_ui++;
            detector.point_detector_tau_ui--;
            matrix_tracker.increasePositionSmoothing();
            matrix_tracker.decreasePositionSmoothing();
            matrix_tracker.increasePositionZSmoothing();
            matrix_tracker.decreasePositionZSmoothing();
            matrix_tracker.increaseFramesBackRaw();
            matrix_tracker.decreaseFramesBackRaw();
            matrix_tracker.increaseFramesBackReturned();
            matrix_tracker.decreaseFramesBackReturned();

        printf("%s\n", getSettingsString().c_str());
    }

    glutPostRedisplay();

    if ( old_button_state != button_state )
        button_state_changed = true;
}

static void keyboardReleased(unsigned char c, int x, int y)
{
    char old_button_state = button_state;

        button_state &= (BUTTON_GREEN | BUTTON_BLUE);

        button_state &= (BUTTON_RED | BUTTON_BLUE);

        button_state &= (BUTTON_GREEN | BUTTON_RED);

    if ( old_button_state != button_state )
        button_state_changed = true;
}

static void emptyWindow()
{
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
}
int main(int argc, char *argv[])
{
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_RGB | GLUT_DEPTH | GLUT_DOUBLE);
    //glutInitWindowSize(video_width,video_height); // hard set the init window size
    //glutInitWindowSize(800,450); // hard set the init window size
    glutDisplayFunc(emptyWindow);

    bool start_fullscreen = false;

    // if on apple, don't ever go fullscreen

    // look for -binoc flag on commandline
    // also look for -nofullscreen flag
    for ( int i=0; i<argc; i++ )
    {
        if ( strcmp( argv[i], "-binoc" ) == 0 )
        {
            // if found, start_fullscreen is true
            start_fullscreen = true;
        }
        // if found, -nofullscreen cancels fullscreen
        if ( strcmp( argv[i], "-nofullscreen" ) == 0 )
        {
            start_fullscreen = false;
        }
    }

    if ( !start_fullscreen )
    {
        glutReshapeFunc(reshape);
        glutCreateWindow("The Artvertiser 0.4");
    }
    glutMouseFunc(mouse);
    glutEntryFunc(entry);

    if ( start_fullscreen )
    {
        glutGameModeString("1024x768:16@60");
        glutEnterGameMode();
        glutSetCursor(GLUT_CURSOR_NONE);
    }

    if (!init(argc,argv)) return -1;

    //ftglFont = new FTBufferFont("/usr/share/fonts/truetype/freefont/FreeMono.ttf");
    ftglFont = new FTBufferFont("fonts/FreeSans.ttf");
    ftglFont->FaceSize(12);
    ftglFont->CharMap(ft_encoding_unicode);

    //cvDestroyAllWindows();

    glutKeyboardFunc(keyboard);
    glutKeyboardUpFunc(keyboardReleased);

    glutMainLoop();

    glutLeaveGameMode();
    return 0; /* ANSI C requires main to return int. */
}
//!\brief Draw a frame contained in an IplTexture object on an OpenGL viewport.
static bool drawBackground(IplTexture *tex)
{
    //printf("draw background\n");
    if (!tex || !tex->getIm()) return false;
    //printf("drawBackground: drawing frame with timestamp %f\n", raw_frame_timestamp.ToSeconds() );

    IplImage *im = tex->getIm();
    int w = im->width-1;
    int h = im->height-1;

    glMatrixMode(GL_PROJECTION);
    glMatrixMode(GL_MODELVIEW);

    glDisable(GL_BLEND);
    glDisable(GL_DEPTH_TEST);

    glTexCoord2f(tex->u(0), tex->v(0));
    glTexCoord2f(tex->u(w), tex->v(0));
    glTexCoord2f(tex->u(w), tex->v(h));
    glTexCoord2f(tex->u(0), tex->v(h));

    tex->disableTexture();
    return true;
}
/*! \brief Draw all the points
 */
static void drawDetectedPoints(int frame_width, int frame_height)
{
    planar_object_recognizer &detector(multi->cams[current_cam]->detector);

    glMatrixMode(GL_PROJECTION);
    glOrtho(0, frame_width-1, frame_height-1, 0, -1, 1);

    glMatrixMode(GL_MODELVIEW);

    glDisable(GL_BLEND);
    glDisable(GL_LIGHTING);
    glDisable(GL_DEPTH_TEST);

    // draw all detected points
    for ( int i=0; detector.isReady() && i<detector.detected_point_number; ++i )
    {
        keypoint& kp = detector.detected_points[i];
        float x = PyrImage::convCoordf(kp.u, s, 0);
        float y = PyrImage::convCoordf(kp.v, s, 0);
    }

    // draw matching points red
    if ( detector.object_is_detected )
    {
        glColor4f( 1, 0, 0, 1 );
        for (int i=0; i<detector.match_number; ++i)
        {
            image_object_point_match* match = detector.matches + i;
            int s = (int)(match->image_point->scale);
            float x = PyrImage::convCoordf(match->image_point->u, s, 0);
            float y = PyrImage::convCoordf(match->image_point->v, s, 0);
        }
    }
}
/*! \brief A draw callback during camera calibration
 *
 * GLUT calls that function during camera calibration when repainting the
 * window is required.
 * During geometric calibration, no 3D is known: we just plot 2d points
 * where some feature points have been recognized.
 */
static void geomCalibDraw(void)
{
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

    glDisable(GL_LIGHTING);
    drawBackground(raw_frame_texture);

    IplImage *im = multi->cams[current_cam]->getLastProcessedFrame();
    planar_object_recognizer &detector(multi->cams[current_cam]->detector);

    if (!detector.isReady()) return;

    glMatrixMode(GL_PROJECTION);
    glOrtho(0, im->width-1, im->height-1, 0, -1, 1);

    glMatrixMode(GL_MODELVIEW);

    glDisable(GL_BLEND);
    glDisable(GL_LIGHTING);
    glDisable(GL_DEPTH_TEST);

    if (detector.object_is_detected)
    {
        for (int i=0; i<detector.match_number; ++i)
        {
            image_object_point_match* match = detector.matches + i;
            int s = (int)(match->image_point->scale);
            float x = PyrImage::convCoordf(match->image_point->u, s, 0);
            float y = PyrImage::convCoordf(match->image_point->v, s, 0);
        }
    }
}
/*!\brief Called when geometric calibration ends. It makes
 * sure that the CamAugmentation object is ready to work.
 */
static void geomCalibEnd()
{
    if (!multi->model.augm.LoadOptimalStructureFromFile((char*)"camera_c.txt", (char*)"camera_r_t.txt"))
    {
        cout << "failed to load camera calibration.\n";
    }
    //glutDisplayFunc(0);
}
/*! Called by GLUT during geometric calibration when there's nothing else to do.
 * This function grabs frames from camera(s), runs the 2D detection on every image,
 * and keeps the result in memory for calibration. When enough homographies have
 * been detected, it tries to actually calibrate the cameras.
 */
static void geomCalibIdle(void)
{
    // detect the calibration object in every image
    // (this loop could be parallelized)
    int nbdet = 0;
    bool dummy;
    for (int i=0; i<multi->cams.size(); ++i)
    {
        if (multi->cams[i]->detect(dummy, dummy)) nbdet++;
    }

    if(!raw_frame_texture) raw_frame_texture = new IplTexture;
    IplImage* raw_frame = raw_frame_texture->getImage();
    multi->cams[current_cam]->getLastDrawFrame( &raw_frame );
    raw_frame_texture->setImage(raw_frame);
    //raw_frame_texture->setImage(multi->cams[current_cam]->frame);

    for (int i=0; i<multi->cams.size(); ++i)
    {
        if (multi->cams[i]->detector.object_is_detected)
        {
            add_detected_homography(i, multi->cams[i]->detector, *calib);
        }
        else
        {
            calib->AddHomography(i);
        }
    }
    geom_calib_nb_homography++;

    printf("geom calib: %.2f%%\n", 100.0f*geom_calib_nb_homography/150.0f);

    if (geom_calib_nb_homography >= 150)
    {
        if (calib->Calibrate(
                    (multi->cams.size() > 1 ? 1:2), // padding or random
                    (multi->cams.size() > 1 ? 0:3),
                    1, // padding ratio 1/2

            calib->PrintOptimizedResultsToFile1();
    }

    glutPostRedisplay();
}
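// (geometric calibration gathers 150 homographies before calling Calibrate();
// the optimised camera structure is written out via PrintOptimizedResultsToFile1()
// and read back with LoadOptimalStructureFromFile("camera_c.txt", "camera_r_t.txt")
// in geomCalibEnd() and geomCalibStart())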
/*!\brief Start geometric calibration. If the calibration can be loaded from disk,
 * continue directly with photometric calibration.
 */
static void geomCalibStart(bool cache)
{
    if (cache && multi->model.augm.LoadOptimalStructureFromFile((char*)"camera_c.txt", (char*)"camera_r_t.txt"))
    {
        return;
    }

    // construct a CamCalibration object and register all the cameras
    calib = new CamCalibration();

    for (int i=0; i<multi->cams.size(); ++i)
    {
        calib->AddCamera(multi->cams[i]->width, multi->cams[i]->height);
    }

    geom_calib_nb_homography = 0;
    glutDisplayFunc(geomCalibDraw);
    glutIdleFunc(geomCalibIdle);
}
static void drawAugmentation()
{
    // we know that im is not NULL already
    // IplImage *im = multi->model.image;

    //for ( int tracked_or_raw=0; tracked_or_raw<2; tracked_or_raw++ )

    // Fetch object -> image, world->image and world -> object matrices
    /*if ( tracked_or_raw == 1 )
    {
        // fetch from model:
        world = multi->model.augm.GetObjectToWorld();
    }*/

    // or fetch interpolated position
    CvMat* world = cvCreateMat( 3, 4, CV_64FC1 );

    //printf(". now we want interpolated pose for %f\n", raw_frame_timestamp.ToSeconds() );
    if ( track_kalman )
        matrix_tracker.getInterpolatedPoseKalman( world,
                multi->cams[0]->getFrameIndexForTime( raw_frame_timestamp ) );
    else
        matrix_tracker.getInterpolatedPose( world, raw_frame_timestamp );

    /*// apply a scale factor
    float scalef = 1.0f;
    for ( int i=0; i<3; i++ )
        cvmSet(world, i, i, scalef*cvmGet( world, i, i ));*/

    /*CvMat *proj = multi->model.augm.GetProjectionMatrix(current_cam);
    CvMat *old_proj = multi->model.augm.GetProjectionMatrix(current_cam);*/
    // we make our own project matrix:
    // fetch the pre-projection matrix from model.augm
    CvMat* proj = multi->model.augm.GetPreProjectionMatrix(current_cam);
    // multiply by the object-to-world matrix
    CamCalibration::Mat3x4Mul( proj, world, proj );

    Mat3x4 moveObject, rot, obj2World, movedRT_;
    moveObject.setTranslate( multi->model.getImageWidth()/2, multi->model.getImageHeight()/2,
    rot.setRotate(Vec3(1,0,0), 2*M_PI*180.0/360.0);
    //rot.setIdentity();
    moveObject.mul(rot);

    CvMat cvMoveObject = cvMat(3,4,CV_64FC1, moveObject.m);
    CvMat movedRT      = cvMat(3,4,CV_64FC1, movedRT_.m);

    // pose only during movement
    //if (pixel_shift >= 200 || !have_proj)
    {
        // memcpy or vectorisation speedup?
        for( int i = 0; i < 3; i++ )
        {
            for( int j = 0; j < 4; j++ )
            {
                a_proj[i][j]      = cvmGet( proj, i, j );
                obj2World.m[i][j] = cvmGet(world, i, j);
            }
        }
        memcpy(old_a_proj, a_proj, sizeof(a_proj));
    }
    else // copy last known good projection over current
    {
        memcpy(a_proj, old_a_proj, sizeof(old_a_proj));
    }

    printf("found matrix: %8.4f %8.4f %8.4f %8.4f\n"
           "              %8.4f %8.4f %8.4f %8.4f\n"
           "              %8.4f %8.4f %8.4f %8.4f\n",
           a_proj[0][0], a_proj[0][1], a_proj[0][2], a_proj[0][3],
           a_proj[1][0], a_proj[1][1], a_proj[1][2], a_proj[1][3],
           a_proj[2][0], a_proj[2][1], a_proj[2][2], a_proj[2][3]
           );

    CamCalibration::Mat3x4Mul( world, &cvMoveObject, &movedRT );
    // translate into OpenGL PROJECTION and MODELVIEW matrices
    PerspectiveCamera c;
    //c.loadTdir(a_proj, multi->cams[0]->frame->width, multi->cams[0]->frame->height);
    c.loadTdir(a_proj, multi->cams[0]->detect_width, multi->cams[0]->detect_height);
    c.setPlanes(100,1000000); // near/far clip planes
    cvReleaseMat(&proj);

    // must set the model view after drawing the background.
    c.setGlProjection();
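    // at this point the augmentation pose is in place: the interpolated
    // object-to-world matrix from matrix_tracker has been multiplied with the
    // camera pre-projection matrix, loaded into a PerspectiveCamera sized to the
    // detection frame, and pushed into the OpenGL projection state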
    /*! this is the beginning of prototype code for an occlusion mask built
     * by comparison of the tracked plane with that of the model image

    // create a copy of frame texture and warp to model image perspective
    CvPoint2D32f *c2 = new CvPoint2D32f[4];
    // update corner points of ROI in pixel space
    c2[0].x = cvRound(multi->cams[0]->detector.detected_u_corner1);
    c2[0].y = cvRound(multi->cams[0]->detector.detected_v_corner1);
    c2[1].x = cvRound(multi->cams[0]->detector.detected_u_corner2);
    c2[1].y = cvRound(multi->cams[0]->detector.detected_v_corner2);
    c2[2].x = cvRound(multi->cams[0]->detector.detected_u_corner3);
    c2[2].y = cvRound(multi->cams[0]->detector.detected_v_corner3);
    c2[3].x = cvRound(multi->cams[0]->detector.detected_u_corner4);
    c2[3].y = cvRound(multi->cams[0]->detector.detected_v_corner4);

    CvMat* mmat = cvCreateMat(3,3, CV_32FC1);
    IplImage *warped = cvCreateImage(cvSize(model_image->width, model_image->height), 8, 3);
    mmat = cvGetPerspectiveTransform(c2, c1, mmat);
    cvWarpPerspective(raw_frame_texture->getIm(), warped, mmat);
    cvReleaseMat(&mmat);

    // find difference between model image and frame
    IplImage *i1=cvCreateImage(cvSize(im->width,im->height),im->depth,1);
    IplImage *i2=cvCreateImage(cvSize(im->width,im->height),im->depth,1);
    IplImage *diff=cvCreateImage(cvSize(im->width,im->height),im->depth,1);
    IplImage *display=cvCreateImage(cvSize(im->width,im->height),im->depth,1);

    cvCvtColor(im, i1,CV_BGR2GRAY);
    cvCvtColor(warped, i2,CV_BGR2GRAY);
    cvAbsDiff(i2,i1,diff);
    cvThreshold(diff, display, 35, 255, CV_THRESH_BINARY);

    cvReleaseImage(&warped);
    cvSaveImage("checkdiff.png", display);
    */

    /* circles at corners of ROI. useful for debugging.
    cvCircle(raw_frame_texture->getIm(), c1, 10, CV_RGB(255, 0, 0), 2);
    cvCircle(raw_frame_texture->getIm(), c2, 10, CV_RGB(255, 0, 0), 2);
    cvCircle(raw_frame_texture->getIm(), c3, 10, CV_RGB(255, 0, 0), 2);
    cvCircle(raw_frame_texture->getIm(), c4, 10, CV_RGB(255, 0, 0), 2);

    tex = new IplTexture;
    tex->setImage(raw_frame_texture->getIm());
    drawBackground(tex);
    */
#ifndef DEBUG_SHADER
    glEnable(GL_DEPTH_TEST);
    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
    glDisable(GL_CULL_FACE);
    // multiply texture colour by surface colour of poly
    glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE);
    glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST);

    if (avi_play == true)
    {
        IplImage *avi_frame = 0;
        IplImage *avi_image = 0;
        avi_frame = cvQueryFrame( avi_capture );
        avi_image = cvCreateImage(cvSize(video_width/2, video_height/2), 8, 3);
        cvResize(avi_frame, avi_image, 0);
        avi_image->origin = avi_frame->origin;
        GLenum format = IsBGR(avi_image->channelSeq) ? GL_BGR_EXT : GL_RGBA;

        if (!avi_play_init)
        {
            glGenTextures(1, &imageID);
            glBindTexture(GL_TEXTURE_2D, imageID);
            glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
            glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, avi_image->width, avi_image->height, 0, format, GL_UNSIGNED_BYTE, avi_image->imageData);
            avi_play_init = true;
        }
        else
        {
            glBindTexture(GL_TEXTURE_2D, imageID);
            glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, avi_image->width, avi_image->height, format, GL_UNSIGNED_BYTE, avi_image->imageData);
        }
    }

    if ( current_artvert_index >= 0 &&
         current_artvert_index < artvert_list.size() )
    {
        glBindTexture(GL_TEXTURE_2D, imageID);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
        IplImage* image = artvert_list.at(current_artvert_index).getArtvertImage();
        GLenum format = IsBGR(image->channelSeq) ? GL_BGR_EXT : GL_RGBA;
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, image->width, image->height, 0, format, GL_UNSIGNED_BYTE, image->imageData);
    }

    glEnable(GL_TEXTURE_2D);

    glHint(GL_POLYGON_SMOOTH, GL_NICEST);
    glEnable(GL_POLYGON_SMOOTH);

#ifndef DEBUG_SHADER
    // apply the object transformation matrix
    Mat3x4 w2e(c.getWorldToEyeMat());
    w2e.mul(moveObject);
    c.setWorldToEyeMat(w2e);

    if (multi->model.map.isReady())
    {
        glDisable(GL_LIGHTING);
        multi->model.map.enableShader(current_cam, world);
        multi->model.map.enableShader(current_cam, &movedRT);
    }

    glColor4f(1.0, 1.0, 1.0, fade);

    glVertex3f(artvert_roi_vec[0], artvert_roi_vec[1], 0);
    glVertex3f(artvert_roi_vec[2], artvert_roi_vec[3], 0);
    glVertex3f(artvert_roi_vec[4], artvert_roi_vec[5], 0);
    glVertex3f(artvert_roi_vec[6], artvert_roi_vec[7], 0);

    glDisable(GL_TEXTURE_2D);
    /*! 'label' is a boolean set by the right mouse button and toggles the
     * in-scene artvert label.
     */
        glBegin(GL_LINE_LOOP);
        glColor3f(0.0, 1.0, 0.0);
        glVertex3f(roi_vec[0]-10, roi_vec[1]-10, 0);
        glVertex3f(roi_vec[2]+10, roi_vec[3]-10, 0);
        glVertex3f(roi_vec[4]+10, roi_vec[5]+10, 0);
        glVertex3f(roi_vec[6]-10, roi_vec[7]+10, 0);
        glVertex3f(roi_vec[0]-10, roi_vec[1]-10, 0);

        glTranslatef(roi_vec[2]+12, roi_vec[3], 0);
        glRotatef(180, 1.0, 0.0, 0.0);
        glRotatef(-45, 0.0, 1.0, 0.0);
        glColor4f(0.0, 1.0, 0.0, 1);

        glBegin(GL_LINE_LOOP);
        glVertex3f(0, 10, -.2);
        glVertex3f(150, 10, -.2);
        glVertex3f(150, -60, -.2);
        glVertex3f(0, -60, -.2);

        glColor4f(0.0, 1.0, 0.0, .5);

        glVertex3f(0, 10, -.2);
        glVertex3f(150, 10, -.2);
        glVertex3f(150, -60, -.2);
        glVertex3f(0, -60, -.2);

        // render the text in the label
        glColor4f(1.0, 1.0, 1.0, 1);
        ftglFont->Render(artverts[cnt].artvert);
        glTranslatef(0, -12, 0);
        ftglFont->Render(artverts[cnt].date);
        glTranslatef(0, -12, 0);
        ftglFont->Render(artverts[cnt].author);
        glTranslatef(0, -12, 0);
        ftglFont->Render(artverts[cnt].advert);
        glTranslatef(0, -12, 0);
        ftglFont->Render(artverts[cnt].street);

    /*cvReleaseMat(&world);

    CvScalar c = cvGet2D(multi->model.image, multi->model.image->height/2, multi->model.image->width/2);
    glColor3d(c.val[2], c.val[1], c.val[0]);

    if (multi->model.map.isReady())
        multi->model.map.disableShader();
    glDisable(GL_LIGHTING);*/

    if ( avi_play == true )
    {
        cvReleaseImage(&avi_image);
        cvReleaseImage(&avi_frame);
    }
}
//#define DEBUG_SHADER
/*! The paint callback during photometric calibration and augmentation. In this
 * case, we have access to 3D data. Thus, we can augment the calibration target.
 */
static void draw()
{
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    glDisable(GL_LIGHTING);

    if ( multi->model.isInteractiveTrainBinocularsRunning() )
        multi->model.interactiveTrainBinocularsDraw();

    drawBackground(raw_frame_texture);

    stringstream cnt_out;
    cnt_out << cnt;
    cnt_str = cnt_out.str();

    //IplImage *pre_mask = cvCreateImage(cvSize(WIDTH, HEIGHT), 8, 3);

    int now = glutGet(GLUT_ELAPSED_TIME);
    //cout << now/1000.0 << endl;

    double elapsed = frame_timer.Update();

    if ( frame_ok )
    {
        last_frame_caught_time.SetNow();

        if ( fade < (show_status ? MAX_FADE_SHOW : MAX_FADE_NORMAL) )
        {
            fade += (1.0f/SECONDS_LOST_FADE)*elapsed;
        }
        else
        {
            fade = show_status ? MAX_FADE_SHOW : MAX_FADE_NORMAL;
            //printf("frame_ok: fade %f\n", fade );
        }
    }
    else
    {
        double elapsed_since_last_caught = (now - last_frame_caught_time).ToSeconds();
        if ( elapsed_since_last_caught > SECONDS_LOST_TRACK )
        {
            fade -= (1.0f/SECONDS_LOST_FADE)*elapsed;
        }
        //printf("frame_ok: fade %f, elapsed since last caught %f\n", fade, elapsed_since_last_caught );
    }
    //printf("frame %s, lost_count %f -> fade pct %4.2f, fade %4.2f\n", frame_ok?"y":"n", frame_lost_count, fade_pct, fade );

    // draw augmentation
    if ( fade > 0 && augment == 1)
    {
        drawAugmentation();
    }

    draw_fps = (draw_fps*7.0 + 1.0/elapsed)/8.0f;

    // we need to setup a new projection matrix for the title font.
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
    glTranslatef(-.98, 0.9, 0.0);
    glScalef(.003, .003, .003);
    ftglFont->FaceSize(32);
    glColor4f(1.0, 1.0, 1.0, 1);
    ftglFont->Render("the artvertiser 0.4");
    glTranslatef(0, -(video_height+30), 0);
    glColor4f(1.0, 1.0, 1.0, .6);
    //ftglFont->FaceSize(16);
    //ftglFont->Render(date(now).c_str());
    if (frame_ok == 1 and (now/1000)%2 == 0)
    {
        glTranslatef(video_width-295, video_height+35, 0);
        glColor4f(0.0, 1.0, 0.0, .8);
        glBegin(GL_TRIANGLES);
        glVertex3f(140, 0, 0);
        glVertex3f(150, 10, 0);
        glVertex3f(140, 20, 0);
        glEnd();
        glTranslatef(70, 5, 0);
        ftglFont->FaceSize(16);
        ftglFont->Render("tracking");
    }

    // reset the ftgl font size for next pass
    ftglFont->FaceSize(12);

    if ( show_status )
    {
        //printf("draw status\n");
        drawDetectedPoints( raw_frame_texture->getIm()->width, raw_frame_texture->getIm()->height );

        char detect_fps_string[256];
        sprintf(detect_fps_string, "draw fps: %4.2f\ndetection fps: %4.2f", draw_fps, detection_fps );
        drawGlutString( detect_fps_string, 1.0f, 0.2f, 0.2f, 0.7, 0.94 );

        // now status string
        string draw_string = status_string;

        // show detector settings
        draw_string += "\n" + getSettingsString();

        drawGlutString( draw_string.c_str(), 1.0f, 0.2f, 0.2f, 0.01f, 0.2f );
    }

    //cvReleaseImage(&image); // cleanup used image
}
void startSerialThread()
{
    pthread_attr_t thread_attr;
    pthread_attr_init(&thread_attr);
    pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_JOINABLE);
    // launch the thread
    pthread_create( &serial_thread, &thread_attr, serialThreadFunc, NULL );
    serial_thread_is_running = true;
    pthread_attr_destroy( &thread_attr );
}

void shutdownSerialThread()
{
    void* ret;
    serial_thread_should_exit = true;
    pthread_join( serial_thread, &ret );
    serial_thread_is_running = false;
}
void* serialThreadFunc( void* data )
{
    char serialport[256];
    int baudrate = B9600; // default
    char buf[256];
    int fd;

    fd = serialport_init( "/dev/ttyUSB0", 9600 );
    printf("fd said %i, errno %i\n", fd, errno );

    while ( !serial_thread_should_exit )
    {
        int read = serialport_read_until(fd, buf, '\n');
        //printf("read: %s then %s\n",buf2, buf);
        if ( (read == 0) && strlen( buf ) >= 4 /*includes final \n*/ )
        {
            bool button_red   = (buf[0]=='1');
            bool button_green = (buf[1]=='1');
            bool button_blue  = (buf[2]=='1');
            // printf("buttons: %s %s %s", button_red?"red":" ", button_green?"green":" ", button_blue?"blue":" ");
            // bitmapped, to access all 7 press combinations
            char new_button_state =
                ( button_green ? BUTTON_GREEN : 0 ) |
                ( button_blue  ? BUTTON_BLUE  : 0 ) |
                ( button_red   ? BUTTON_RED   : 0 );
            // deal with debounce
            if ( new_button_state != button_state )
            {
                printf( "serialport read %s -> 0x%x (old was 0x%x)\n",
                        buf, new_button_state, button_state );
                button_state_changed = true;
                button_state = new_button_state;
            }
        }
    }
    return NULL;
}
static void startDetectionThread( int thread_priority )
{
    pthread_attr_t thread_attr;
    pthread_attr_init(&thread_attr);
    pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_JOINABLE);
    // launch the thread
    pthread_create( &detection_thread, &thread_attr, detectionThreadFunc, NULL );
    if ( thread_priority > 0 )
    {
        printf("attempting to set detection thread priority to %i\n", thread_priority );
        struct sched_param param;
        param.sched_priority = thread_priority;

        int res = pthread_setschedparam( detection_thread, SCHED_RR, &param );
        if ( res != 0 )
        {
            fprintf(stderr, "pthread_setschedparam failed: %s\n",
                    (res == ENOSYS)  ? "ENOSYS" :
                    (res == EINVAL)  ? "EINVAL" :
                    (res == ENOTSUP) ? "ENOTSUP" :
                    (res == EPERM)   ? "EPERM" :
                    (res == ESRCH)   ? "ESRCH" :
                    "unknown" );
        }
    }
    detection_thread_running = true;
    pthread_attr_destroy( &thread_attr );
}
static void shutdownDetectionThread()
{
    void* ret;
    detection_thread_should_exit = true;
    pthread_join( detection_thread, &ret );
    detection_thread_running = false;
}
static void* detectionThreadFunc( void* _data )
{
    FTime detection_thread_timer;
    detection_thread_timer.SetNow();

    while ( !detection_thread_should_exit )
    {
        PROFILE_THIS_BLOCK("detection_thread");

        if ( new_artvert_requested )
        {
            // go with the loading
            loadOrTrain(new_artvert_requested_index);
            new_artvert_requested = false;
        }

        bool frame_retrieved = false;
        bool frame_retrieved_and_ok = multi->cams[0]->detect( frame_retrieved, frame_ok );
        if( frame_retrieved )
        {
            double elapsed = detection_thread_timer.Update();
            detection_fps = (detection_fps*0.0 + (1.0/elapsed))/1.0;
        }
        if ( !frame_retrieved_and_ok )
        {
            PROFILE_THIS_BLOCK("sleep till next");
        }

        if ( detection_thread_should_exit )
            break;

        multi->model.augm.Clear();
        if (multi->cams[0]->detector.object_is_detected)
        {
            add_detected_homography(0, multi->cams[0]->detector, multi->model.augm );
        }
        else
        {
            multi->model.augm.AddHomography();
        }
        frame_ok = multi->model.augm.Accomodate(4, 1e-4);

        if ( frame_ok )
        {
            // fetch surface normal in world coordinates
            CvMat *mat = multi->model.augm.GetObjectToWorld();
            float normal[3];
            for (int j=0; j<3; j++)
                normal[j] = cvGet2D(mat, j, 2).val[0];

            // continue to track
            if ( track_kalman )
                matrix_tracker.addPoseKalman( mat, multi->cams[0]->getFrameIndexForTime(
                        multi->cams[0]->getLastProcessedFrameTimestamp() ) );
            else
                matrix_tracker.addPose( mat, multi->cams[0]->getLastProcessedFrameTimestamp() );
        }
    }

    printf("detection thread exiting\n");
    return NULL;
}
/*! GLUT calls this during photometric calibration or augmentation phase when
 * there's nothing else to do. This function does the 2D detection and bundle
 * adjusts the 3D pose of the calibration pattern.
 */
static void idle()
{
    if ( running_on_binoculars && !no_fullscreen )
    {
        static int fullscreen_timer = 30;
        if ( fullscreen_timer > 0 )
        {
            fullscreen_timer--;
            if( fullscreen_timer <= 0 )
                glutFullScreen();
        }
    }

    // detect the calibration object in every image
    // (this loop could be parallelized)

    if(!raw_frame_texture) raw_frame_texture = new IplTexture;

    PROFILE_SECTION_PUSH("getting last frame");

    if ( delay_video )
    {
        IplImage* captured_frame;
        multi->cams[current_cam]->getLastDrawFrame( &captured_frame, &raw_frame_timestamp );

        static list< pair<IplImage*, FTime> > frameRingBuffer;
        while ( frameRingBuffer.size() < VIDEO_DELAY_FRAMES )
        {
            IplImage* first_frame = cvCreateImage( cvGetSize( captured_frame ), captured_frame->depth, captured_frame->nChannels );
            cvCopy( captured_frame, first_frame );
            frameRingBuffer.push_back( make_pair( first_frame, raw_frame_timestamp ) );
        }

        IplImage* ringbuffered = frameRingBuffer.front().first;
        cvCopy( captured_frame, ringbuffered );
        frameRingBuffer.push_back( make_pair( ringbuffered, raw_frame_timestamp ) );

        frameRingBuffer.pop_front();

        IplImage* raw_frame = frameRingBuffer.front().first;
        raw_frame_timestamp = frameRingBuffer.front().second;

        raw_frame_texture->setImage(raw_frame);
    }
    else
    {
        IplImage* raw_frame = raw_frame_texture->getImage();
        multi->cams[current_cam]->getLastDrawFrame( &raw_frame, &raw_frame_timestamp );
        raw_frame_texture->setImage(raw_frame);
    }

    PROFILE_SECTION_POP();

    if ( multi->model.isInteractiveTrainBinocularsRunning() )
    {
        bool button_red   = button_state & BUTTON_RED;
        bool button_green = button_state & BUTTON_GREEN;
        bool button_blue  = button_state & BUTTON_BLUE;
        multi->model.interactiveTrainBinocularsUpdate( raw_frame_texture->getImage(),
                button_red, button_green, button_blue );
    }

    glutPostRedisplay();

    PROFILE_SECTION_POP();

    if ( show_profile_results )
    {
        // show profiler output
        printf("showing results\n");
        FProfiler::Display( FProfiler::SORT_TIME /*SORT_EXECUTION*/ );
        show_profile_results = false;
    }
}
//! Starts photometric calibration.
    glutDisplayFunc(draw);
bool menu_show = false;
bool menu_is_showing = false;
bool menu_up = false;
bool menu_down = false;

    if ( menu_is_showing )
    {
        if ( (now - menu_timer).ToSeconds() > MENU_HIDE_TIME )
        {
            menu_is_showing = false;
        }
    }

    if ( !button_state_changed || new_artvert_switching_in_progress )
        return;

    printf("menu sees new button state: %s %s %s\n",
           button_state & BUTTON_RED   ? "red"   : " ",
           button_state & BUTTON_GREEN ? "green" : " ",
           button_state & BUTTON_BLUE  ? "blue"  : " ");

    // clear changed flag
    button_state_changed = false;

    if ( ( button_state == BUTTON_GREEN ) && !menu_is_showing )
    {
        menu_is_showing = true;
        menu_timer.SetNow();
        if ( menu_index >= artvert_list.size() )
            menu_index = artvert_list.size()-1;
    }
    else
    {
        // only process rest of buttons if menu is showing
        if ( !menu_is_showing )
            return;

        if( button_state == BUTTON_BLUE )
        {
            menu_index++;
            if ( menu_index >= artvert_list.size() )
                menu_index = 0;
            menu_timer.SetNow();
        }
        if ( button_state == BUTTON_RED )
        {
            menu_index--;
            if ( menu_index < 0 )
                menu_index = artvert_list.size()-1;
            menu_timer.SetNow();
        }
        if ( button_state == BUTTON_GREEN )
        {
            new_artvert_requested_index = menu_index;
            new_artvert_requested = true;

            menu_is_showing = false;
        }
    }

    if ( !menu_is_showing )
    {
        // draw switching text?
        if ( new_artvert_switching_in_progress )
        {
            glMatrixMode(GL_PROJECTION);
            glLoadIdentity();
            glMatrixMode(GL_MODELVIEW);
            glLoadIdentity();
            glTranslatef(-.8, 0.65, 0.0);
            glScalef(.003, .003, .003);
            ftglFont->FaceSize(24);
            glColor4f(0.0, 1.0, 0.0, 1);

            if ( multi->model.isLearnInProgress() )
            {
                // must manually tokenize
                char message[2048];
                strncpy( message, multi->model.getLearnProgressMessage(), 2048 );
                char* ptr = strtok( message, "\n");
                while ( ptr != NULL )
                {
                    ftglFont->Render(ptr);
                    glTranslatef(0, -26, 0 );
                    ptr = strtok( NULL, "\n" );
                }
            }
            else
            {
                ftglFont->Render("changing artvert...");
            }
        }
    }
    else
    {
        glMatrixMode(GL_PROJECTION);
        glLoadIdentity();
        glMatrixMode(GL_MODELVIEW);
        glLoadIdentity();

        glTranslatef(-.85, 0.65, 0.0);
        glScalef(.003, .003, .003);
        ftglFont->FaceSize(24);
        glColor4f(.25, 1.0, 0.0, 1);

        ftglFont->Render("Select artvert:");
        glTranslatef( 0, -26, 0 );

        for ( int i=0; i<artvert_list.size(); i++ )
        {
            string advert = artvert_list[i].advert;
            string name   = artvert_list[i].name;
            string artist = artvert_list[i].artist;
            string line   = string(" ") + advert + " : '" + name + "' by " + artist;

            if ( i == menu_index )
            {
                glColor4f( 1, 0.37, 0, 1 );
            }
            else
            {
                glColor4f( .25f, 1.f, 0.0f, 1 );
            }
            ftglFont->Render(line.c_str());
            glTranslatef(0, -26, 0 );
        }
    }