/*
* This file is part of robotreality
*
* Copyright(c) sschulz <AT> techfak.uni-bielefeld.de
* http://opensource.cit-ec.de/projects/robotreality
*
* This file may be licensed under the terms of the
* GNU General Public License Version 3 (the ``GPL''),
* or (at your option) any later version.
*
* Software distributed under the License is distributed
* on an ``AS IS'' basis, WITHOUT WARRANTY OF ANY KIND, either
* express or implied. See the GPL for the specific language
* governing rights and limitations.
*
* You should have received a copy of the GPL along with this
* program. If not, go to http://www.gnu.org/licenses/gpl.html
* or write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The development of this software was supported by the
* Excellence Cluster EXC 277 Cognitive Interaction Technology.
* The Excellence Cluster EXC 277 is a grant of the Deutsche
* Forschungsgemeinschaft (DFG) in the context of the German
* Excellence Initiative.
*
*/
///NOTE: settings in nvidia-settings: configure the projector output as absolute position, offset +0,+45

#include "HeadProjector.h"

#define CAM_IN_BAYERMODE 0
#define DEBUG_IMAGES 0

#define LATENCY_TEST_ENABLED 0
#define LATENCY_TEST_COMPORT "/dev/ttyS0"


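//OpenCV's C mouse callback API cannot take member functions directly, so these
//free functions forward each event to the HeadProjector instance that was
//registered as the user-data pointer: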
void mouse_wrapper(int event, int x, int y, int flags, void* ptr){
    HeadProjector* _ptr = (HeadProjector*)ptr;
    if(_ptr != NULL)
        _ptr->handle_mouse_event(event, x, y, flags);
}

void mouse_wrapper2(int event, int x, int y, int flags, void* ptr){
    HeadProjector* _ptr = (HeadProjector*)ptr;
    if(_ptr != NULL)
        _ptr->handle_mouse_event2(event, x, y, flags);
}


HeadProjector::HeadProjector(char *name){
	init();
	
	if (name != NULL){
		tconfig.load(name);
	}
	
	#if USE_GPU
	int gpus = getCudaEnabledDeviceCount();
	printf("> found %d CUDA GPUs\n", gpus);
	if (gpus == 0){
		printf("> ERROR: compiled with USE_GPU but no GPU available (GPU support missing or no CUDA device found)\n");
		exit(1);
	}
	#if USE_GL
	printf("> setting GL device (fast rendering of GpuMat to screen)...\n");
	setGlDevice();
	#else
	printf("> WARNING: not setting GL device (NO fast rendering of GpuMat to screen)...\n");
	#endif
	#endif
	
}

HeadProjector::~HeadProjector(){

}

void HeadProjector::init(){
	printf("> initialising...\n");
	
	image_projected = Mat::zeros(480,848,CV_8UC3);
	#if (USE_GPU) && (USE_GL)
	namedWindow("CAM", WINDOW_OPENGL | WINDOW_AUTOSIZE);
	namedWindow("PROJECTION", WINDOW_OPENGL | CV_WINDOW_AUTOSIZE);
	#else
	namedWindow("CAM", CV_WINDOW_AUTOSIZE);
	namedWindow("PROJECTION", CV_WINDOW_AUTOSIZE);
	#endif
	
	//config
	tconfig.add_variable("camera_id", &camera_id, 0);
	tconfig.add_variable("lens_center", &lens_center, Point2f(320, 240));
	
	
	init_mousehandler();
	init_calibration();
	
}


void HeadProjector::init_calibration(){
	clicked_points_count = 0;
	
	//size of checkerboard pattern (count inner corners h/v)
	pattern_size.height = 6;
	pattern_size.width  = 8;
	square_size = 1.0;
	
	//init image points
	tconfig.add_variable("clicked_points", &clicked_points, vector<Point2f>());
	
	//calibration points
	tconfig.add_variable("calibration_index", &calibration_index, 0);
	
	tconfig.add_variable("calibration_points:0", &calibration_points[0],  vector<Point2f>());
	tconfig.add_variable("calibration_points:1", &calibration_points[1],  vector<Point2f>());
	tconfig.add_variable("calibration_points:2", &calibration_points[2],  vector<Point2f>());
	
	tconfig.add_variable("use_single_deformation", &use_single_deformation, false);
	
	clicked_points.clear();
	calibration_points[0].clear();
	calibration_points[1].clear();
	calibration_points[2].clear();
	
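	//seed defaults: one clicked-points grid plus one calibration grid per color
	//channel; the per-channel grids start offset by 0/4/8 px so they can be
	//told apart on screen before any calibration has been done: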
	for( int i = 0; i < pattern_size.height; i++){
		for( int j = 0; j < pattern_size.width; j++){
			clicked_points.push_back(Point2f(40+600.0*j/pattern_size.width, 40+300.0*i/pattern_size.height));
			
			calibration_points[0].push_back(Point2f(0+40+800.0*j/pattern_size.width, 0+40+400.0*i/pattern_size.height));
			calibration_points[1].push_back(Point2f(4+40+800.0*j/pattern_size.width, 4+40+400.0*i/pattern_size.height));
			calibration_points[2].push_back(Point2f(8+40+800.0*j/pattern_size.width, 8+40+400.0*i/pattern_size.height));
		}
	}
	
}

Point2f HeadProjector::warp_perspective_point2f(Point2f in, Mat h){
	//minimal implementation: apply the homography h to a single point
	vector<Point2f> pts(1, in);
	perspectiveTransform(pts, pts, h);
	return pts.at(0);
}


Mat HeadProjector::init_distortion_map(const Mat& src,const Mat& cameraMatrix, const Mat& distCoeffs, const Mat& zoom_warp){
	//source: https://code.ros.org/trac/opencv/ticket/1387
	//modified to zoom as well
	vector<Point2f> pixel_locations_src;

	for (int i = 0; i < src.size().height; i++) {
		for (int j = 0; j < src.size().width; j++) {
			pixel_locations_src.push_back(Point2f(j,i));
		}
	}

	Mat fractional_locations_dst = Mat(src.size(), CV_32FC2);
	
	undistortPoints(pixel_locations_src, fractional_locations_dst, cameraMatrix, distCoeffs);
	
	Mat pixel_locations_dst = Mat(src.size(), CV_32FC2);

	//const float fx = cameraMatrix.at<double>(0,0);
	//const float fy = cameraMatrix.at<double>(1,1);
	//const float cx = cameraMatrix.at<double>(0,2);
	//const float cy = cameraMatrix.at<double>(1,2);
	
	// is there a faster way to do this? -> YES! see below
	//for (int i = 0; i < fractional_locations_dst.size().height; i++) {
	//	for (int j = 0; j < fractional_locations_dst.size().width; j++) {
	//		const float x = fractional_locations_dst.at<Point2f>(i,j).x*fx + cx;
	//		const float y = fractional_locations_dst.at<Point2f>(i,j).y*fy + cy;
	//		pixel_locations_dst.at<Point2f>(i,j) = Point2f(x,y);
	//	}
	//}
	
	//use opencv functions for that:
	perspectiveTransform(fractional_locations_dst, pixel_locations_dst, cameraMatrix);
	
	
	//this is actually doing our scale step:
	perspectiveTransform(pixel_locations_dst, pixel_locations_dst, zoom_warp);
	
	//we have to reshape the matrix (perspective transform makes a 1d vect out of our 2d vect)
	pixel_locations_dst = pixel_locations_dst.reshape(2, src.size().height);
	
	
	return pixel_locations_dst;
}
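//usage sketch for the returned map (a minimal example; 'frame' is assumed to be
//a 3-channel image the size of image_projected):
//	Mat map = init_distortion_map(frame, cameraMatrix, distCoeffs, zoomWarp);
//	Mat distorted;
//	remap(frame, distorted, map, Mat(), INTER_LINEAR);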


Point2f HeadProjector::remap_to_undistorted_image_coords(Point2f p, Mat warp_zoom_matrix, Mat camera_matrix, Mat dist_coeffs, Mat zoom_to_full_image){
	vector<Point2f> in;
	vector<Point2f> out;
	in.push_back(p);
	
	//step 1: warp into the zoomed coordinate frame
	perspectiveTransform(in, out, warp_zoom_matrix);
	
	//step 2: undistort
	undistortPoints(out, out, camera_matrix, dist_coeffs, noArray(), camera_matrix);
	
	//step 3: warp perspective (zoom in/out) to the full image
	perspectiveTransform(out, out, zoom_to_full_image);
	return out.at(0);
}
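//(used in calculate_calibration() below to compute remapped_lens_center, which
//add_lens_black_spot() then uses to mask out the lens in the camera image)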

void HeadProjector::calculate_calibration(){
	calculate_calibration(0); //red
	calculate_calibration(1); //green
	calculate_calibration(2); //blue
}

void HeadProjector::calculate_calibration(int index){
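	//calibration pipeline for one color channel:
	// 1. fit a perspective warp that zooms the bounding rect of all clicked
	//    calibration points to (almost) the full projection image
	// 2. run calibrateCamera on the warped points against an ideal grid to get
	//    a camera matrix + distortion coefficients for this channel
	// 3. find the homographies between the undistorted points and the camera image
	// 4. bake everything into a per-channel lookup table for remap()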
	
	new_camera_matrix[index] = Mat::eye(3, 3, CV_64F);
	new_dist_coeffs[index] = Mat::zeros(8, 1, CV_64F);
	int flags = CV_CALIB_RATIONAL_MODEL; //alternatives tried: CV_CALIB_ZERO_TANGENT_DIST, CV_CALIB_FIX_K4|CV_CALIB_FIX_K5, CV_CALIB_USE_INTRINSIC_GUESS
	
	
	//init target points:
	vector<vector<Point3f> > object_points(1);
	for( int i = 0; i < pattern_size.height; i++){
		for( int j = 0; j < pattern_size.width; j++){
			double x = ((float)j) * 848.0 /  (pattern_size.width-1);
			double y = ((float)i) * 480.0 /  (pattern_size.height-1);
			object_points[0].push_back(Point3f(x,y, 0));
		}
	}
	
	//REMAP clicked points bounding rect to full screen:

	vector<Point2f> all_calibs;
	for(int t=0; t<3; t++){
		all_calibs.insert(all_calibs.begin(), calibration_points[t].begin(), calibration_points[t].end());
	}

	Rect bounding_rect = boundingRect(all_calibs);
	Point2f src[4], dst[4];
	
	
	//scale to full image:
	src[0] = Point2f(bounding_rect.x,bounding_rect.y);
	src[1] = Point2f(bounding_rect.x+bounding_rect.width,bounding_rect.y);
	src[2] = Point2f(bounding_rect.x+bounding_rect.width,bounding_rect.y+bounding_rect.height);
	src[3] = Point2f(bounding_rect.x,bounding_rect.y+bounding_rect.height);
	
	/*
	printf("%5.2f %5.2f\n",src[0].x,src[0].y);
	printf("%5.2f %5.2f\n",src[1].x,src[1].y);
	printf("%5.2f %5.2f\n",src[2].x,src[2].y);
	printf("%5.2f %5.2f\n",src[3].x,src[3].y);
	*/
	double border = 50;
	dst[0] = Point2f(border,border);
	dst[1] = Point2f(image_projected.size().width-border*2,border);
	dst[2] = Point2f(image_projected.size().width-border*2,image_projected.size().height-border*2);
	dst[3] = Point2f(border,image_projected.size().height-border*2);
	
	new_warp_zoom_matrix[index] = getPerspectiveTransform(src, dst);
	new_warp_zoom_matrix_inverse[index] = getPerspectiveTransform(dst, src);
	

	vector<vector <Point2f> > clicked_warp_points_v(1);
	perspectiveTransform(calibration_points[index], clicked_warp_points_v[0], new_warp_zoom_matrix[index]);
	
	///CALIB camera
	vector<Mat> rvecs, tvecs;
	double rms = calibrateCamera(object_points, clicked_warp_points_v, image_projected.size(), new_camera_matrix[index], new_dist_coeffs[index], rvecs, tvecs, flags);
	Mat new_cam_matrix = new_camera_matrix[index];
	printf("> rms error by calib is %g\n",rms);
	
	Mat map1, map2;
	Mat out;
	
	//find undistortion mapping:
	initUndistortRectifyMap(new_camera_matrix[index], new_dist_coeffs[index], Mat(), new_cam_matrix, image_projected.size(), CV_32FC1, map1, map2);
	
	//find undistortion mapping of clicked points:
	undistortPoints(Mat(clicked_warp_points_v[0]), out, new_camera_matrix[index], new_dist_coeffs[index], noArray(), new_cam_matrix);
	
	//now rewarp to full image -> init the target points of the cam image:
	vector<Point2f> camera_image_points;
	
	double width  = cam_input.size().width;
	double height = cam_input.size().height;
	
	for( int i = 0; i < pattern_size.height; i++){
		for( int j = 0; j < pattern_size.width; j++){
			double x = ((float)j) * width /  (pattern_size.width-1);
			double y = ((float)i) * height /  (pattern_size.height-1);
			camera_image_points.push_back(Point2f(x,y));
		}
	}
	
	//how do we get from the camera image to the undistorted points? find the homographies in both directions:
	new_zoom_to_full_image[index] = findHomography(out, camera_image_points, CV_RANSAC);
	new_zoom_to_full_image_inverse[index] = findHomography(camera_image_points, out, CV_RANSAC);
	
	//calc distortion map:
	Mat distort_map = init_distortion_map(image_projected, new_camera_matrix[index], new_dist_coeffs[index], new_zoom_to_full_image[index]);
	

	//modify for additional warp:
	warpPerspective(distort_map, new_distortion_map[index], new_warp_zoom_matrix_inverse[index], distort_map.size(), INTER_LINEAR, BORDER_CONSTANT); //alternative: BORDER_TRANSPARENT

	image_projection_config = image_projected;
	
	//the virtual cam grabs at 512x512 but we want to cut out a region corresponding to 640x480 of the real cam:
	//512*480/640 = 384 is our height; center the image around 256 -> cam y = (256-384/2) ... (256+384/2-1) = 64..447
	Point2f vsrc[4], vdst[4];

	//scale to full image:
	vsrc[0] = Point2f(  0, 64);
	vsrc[1] = Point2f(511, 64);
	vsrc[2] = Point2f(511, 64+384);
	vsrc[3] = Point2f(  0, 64+384);
	vdst[0] = Point2f(  0,  0);
	vdst[1] = Point2f(639,  0);
	vdst[2] = Point2f(639,479);
	vdst[3] = Point2f(  0, 479);

	vscale = getPerspectiveTransform(vsrc, vdst);
	
	#if USE_GPU
	Mat map_x, map_y;
	map_y = Mat(new_distortion_map[index].size(), CV_32FC1);
	convertMaps(new_distortion_map[index], Mat(), map_x, map_y, CV_32FC1, false);
	distort_x[index].upload(map_x);
	distort_y[index].upload(map_y);
	#endif


	//calc lens_center
	remapped_lens_center = remap_to_undistorted_image_coords(lens_center,  new_warp_zoom_matrix[index], new_camera_matrix[index], new_dist_coeffs[index], new_zoom_to_full_image[index]);
	show_projection_config();
}

void HeadProjector::show_projection_config(){
	///print dots in image
	image_projected = Mat::zeros(image_projected.size(), image_projected.type());
	vector<Point2f>::iterator itp;
	for(int i=2; i>=calibration_index; i--){
		int num=0;
		//print the R, G, B calib points (the selected channel is drawn last, i.e. on top; channels below it are hidden)
		for(itp=calibration_points[i].begin(); itp<calibration_points[i].end(); itp++){
			Point2f to     = *itp;
			char buf[256];
			snprintf(buf, 256, "%d", num++);
			putText(image_projected, buf, to + Point2f(5,5), FONT_HERSHEY_SIMPLEX, .5, CV_RGB(i==0?255:0,i==1?255:0,i==2?255:0), 1, CV_AA);
			line(image_projected, to-Point2f(3,3), to+Point2f(3,3), CV_RGB(i==0?255:0,i==1?255:0,i==2?255:0), 1);
			line(image_projected, to-Point2f(-3,3), to+Point2f(-3,3), CV_RGB(i==0?255:0,i==1?255:0,i==2?255:0), 1);
		}
	}
	
	//show lens center (white cross)
	line(image_projected, (lens_center)-Point2f(20,20), (lens_center)+Point2f(20,20),  CV_RGB(255,255, 255), 1, CV_AA);
	line(image_projected, (lens_center)-Point2f(20,-20), (lens_center)+Point2f(20,-20),  CV_RGB(255,255, 255), 1, CV_AA);
	
	line(image_projected, mouse_pos-Point2f( 3,3), mouse_pos+Point2f( 3,3), CV_RGB((calibration_index)==0?255:0,(calibration_index)==1?255:0,(calibration_index)==2?255:0), 1);
	line(image_projected, mouse_pos-Point2f(-3,3), mouse_pos+Point2f(-3,3), CV_RGB((calibration_index)==0?255:0,(calibration_index)==1?255:0,(calibration_index)==2?255:0), 1);
	
	imshow("PROJECTION", image_projected);
}

void HeadProjector::change_selection(int key){
	switch(key){
		case('1'): //RED
			calibration_index       = 0;
			break;
		case('2'): //GREEN
			calibration_index       = 1;
			break;
		case('3'): //BLUE
			calibration_index       = 2;
			break;
		case('d'):
			use_single_deformation = !(use_single_deformation);
			break;
		default:
			break; //ignore
	}
	show_projection_config();
}

void HeadProjector::init_mousehandler(){
	//enable mouse callback
	cv::setMouseCallback("PROJECTION", &mouse_wrapper, (void*)this);
	calculate_projection_matrix();
}

void HeadProjector::add_lens_center(int x, int y){
	lens_center.x = x;
	lens_center.y = y;
}


void HeadProjector::add_projection_point(int x, int y){
	Point2f c(x>0?x:0, y>0?y:0);
	
	//find the closest calibration point and move it to the clicked position:
	int best = select_projection_point(x, y);
	if (best != -1){
		calibration_points[calibration_index].at(best) = c;
	}
	
		
	calculate_calibration();
	
}

int HeadProjector::select_projection_point(int x, int y){
	Point2f c(x>0?x:0,y>0?y:0);
	
	//find closest point:
	double dist_min = 9999999.999;
	int best = -1;
	int index = 0;
	
	vector<Point2f>::iterator it;
	for(it = calibration_points[calibration_index].begin(); it<calibration_points[calibration_index].end(); it++){
		Point2f p = *it;
		double distance = pow((p.x-c.x),2) + pow((p.y-c.y),2);
		if (distance <= dist_min){
			dist_min = distance;
			best = index;
		}
		index++;
	}
	
	return best;
}

void HeadProjector::calculate_projection_matrix(){
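	//no-op: the projection matrices are computed in calculate_calibration()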
}

void HeadProjector::handle_mouse_event(int event, int x, int y, int flags){
	//printf("> mouse event %6d [%3d:%3d] flags %d\n",event,x,y,flags);
	if (event == CV_EVENT_LBUTTONDOWN){
		selected_projection_point = select_projection_point(x-15,y-15);
	}else if (event == CV_EVENT_LBUTTONUP){
		calculate_calibration();
	}else if (event == CV_EVENT_RBUTTONDOWN){
		add_lens_center(x,y);
	}else if(event == CV_EVENT_MOUSEMOVE){
		mouse_pos.x = x>15?x-15:x;
		mouse_pos.y = y>15?y-15:y;
		if (flags & CV_EVENT_FLAG_LBUTTON){
			if (selected_projection_point != -1){
				calibration_points[calibration_index].at(selected_projection_point) = mouse_pos;
			}
			show_projection_config();
		}
	}
	//printf("EV %d\n",event);
}

void HeadProjector::handle_mouse_event2(int event, int x, int y, int flags){
	if (event == CV_EVENT_LBUTTONDOWN){
		latency_test_clicked_pos.x = x;
		latency_test_clicked_pos.y = y;
	}
}


void HeadProjector::project_image(Mat in, Mat out){
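	//no-op stub: the per-frame projection happens in calc_projection() and show_projection()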
	
}


void HeadProjector::calc_projection(){
	if (DEBUG_IMAGES){
		Mat tmp = image_projected.clone();
		imshow("in", tmp);
	}
	
	//Mat newi = Mat::zeros(cam_input.size(), cam_input.type());
	
	if (DEBUG_IMAGES){
		//print straight lines:
		for(int x=0; x<pattern_size.width-1; x++){
			double pos_x = (x * (cam_input.size().width-1)) / (double)(pattern_size.width-1);
			line(cam_input, Point2f(pos_x, 0), Point2f(pos_x, cam_input.size().height-1), CV_RGB(222,222,220), 1);
		}
		for(int y=0; y<pattern_size.height-1; y++){
			double pos_y = (y * (cam_input.size().height-1)) / (double)(pattern_size.height-1);
			line(cam_input, Point2f(0, pos_y), Point2f(cam_input.size().width-1, pos_y), CV_RGB(222,222,220), 1);
		}
		imshow("111", cam_input);
	}
	
// 	cam_input = debug_output_image;
	
	if (cam_input_projected_rgbsplit.size() < 3){
		//init:
		cam_input_projected_rgbsplit.push_back(Mat(cam_input.size(), CV_8UC1));
		cam_input_projected_rgbsplit.push_back(Mat(cam_input.size(), CV_8UC1));
		cam_input_projected_rgbsplit.push_back(Mat(cam_input.size(), CV_8UC1));
		#if USE_GPU
		gpu_cam_input_projected_rgbsplit.push_back(GpuMat(cam_input.size(), CV_8UC1));
		gpu_cam_input_projected_rgbsplit.push_back(GpuMat(cam_input.size(), CV_8UC1));
		gpu_cam_input_projected_rgbsplit.push_back(GpuMat(cam_input.size(), CV_8UC1));
		#endif
	}
	
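	//each color channel has its own distortion map, presumably so that the
	//projector's per-channel (chromatic) distortion can be compensated;
	//with use_single_deformation enabled all channels share map [2] instead: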
	#if USE_GPU
		gpu_cam_input.upload(cam_input);
		
		if (use_single_deformation){
			remap(gpu_cam_input, gpu_cam_input_projected, distort_x[2], distort_y[2], CV_INTER_LINEAR);
		}else{
			split(gpu_cam_input, gpu_cam_input_rgbsplit);
			remap(gpu_cam_input_rgbsplit.at(0), gpu_cam_input_projected_rgbsplit.at(0), distort_x[2], distort_y[2], CV_INTER_LINEAR); //B
			remap(gpu_cam_input_rgbsplit.at(1), gpu_cam_input_projected_rgbsplit.at(1), distort_x[1], distort_y[1], CV_INTER_LINEAR); //G
			remap(gpu_cam_input_rgbsplit.at(2), gpu_cam_input_projected_rgbsplit.at(2), distort_x[0], distort_y[0], CV_INTER_LINEAR); //R
			merge(gpu_cam_input_projected_rgbsplit, gpu_cam_input_projected);
		}
	#else
		if (use_single_deformation){
			remap(cam_input, cam_input_projected, new_distortion_map[2], Mat(), CV_INTER_LINEAR);
		}else{
			//split to r,g,b
			split(cam_input, cam_input_rgbsplit);
			//remap single channels
			remap(cam_input_rgbsplit.at(0), cam_input_projected_rgbsplit.at(0), new_distortion_map[2], Mat(), CV_INTER_LINEAR); //B
			remap(cam_input_rgbsplit.at(1), cam_input_projected_rgbsplit.at(1), new_distortion_map[1], Mat(), CV_INTER_LINEAR); //G
			remap(cam_input_rgbsplit.at(2), cam_input_projected_rgbsplit.at(2), new_distortion_map[0], Mat(), CV_INTER_LINEAR); //R
			//join
			merge(cam_input_projected_rgbsplit, cam_input_projected);
		}
		
	#endif
}

void HeadProjector::show_projection(){
	if (DEBUG_IMAGES){
		vector<Point2f>::iterator itp;
		for(itp=clicked_points.begin(); itp<clicked_points.end(); itp++){
			Point2f to = *itp;
			circle(cam_input_projected, to, 3, CV_RGB(0,255,255), 1);
		}
	}
	
	//print the R, G, B calib points
	vector<Point2f>::iterator itp;
	for(int i=0; i<3; i++){
		for(itp=calibration_points[i].begin(); itp<calibration_points[i].end(); itp++){
			Point2f to = *itp;
			circle(cam_input_projected, to, 2, CV_RGB(i==0?255:0,i==1?255:0,i==2?255:0), 1);
			line(cam_input_projected, to-Point2f(3,3), to+Point2f(3,3), CV_RGB(i==0?255:0,i==1?255:0,i==2?255:0), 1);
			line(cam_input_projected, to-Point2f(-3,3), to+Point2f(-3,3), CV_RGB(i==0?255:0,i==1?255:0,i==2?255:0), 1);
		}
	}

	#if USE_GPU
	#if USE_GL
	imshow("CAM", gpu_cam_input_projected);
	#else
	gpu_cam_input_projected.download(cam_input_projected);
	imshow("CAM", cam_input_projected);
	#endif
	#else
	imshow("CAM", cam_input_projected);
	#endif
}

void HeadProjector::add_lens_black_spot(){
	circle(cam_input, remapped_lens_center, 35, CV_RGB(0,0,0), CV_FILLED, CV_AA);
}

void HeadProjector::quit(){
	tconfig.save("/tmp/cfg.headprojector.on_quit");
	exit(0);
}


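//latency test helper: toggles the DTR line of the serial port. The assumption
//is that DTR drives some light source in the camera's field of view, so the
//round trip serial trigger -> camera -> detection can be timed.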
void HeadProjector::latency_test_set_trigger(int fd, int level){
	int sts;
	ioctl(fd,TIOCMGET,&sts);
	if (level) sts |= TIOCM_DTR;
	else       sts &= ~TIOCM_DTR;
	ioctl(fd,TIOCMSET,&sts);
	printf("> latency test: trigger = %d\n",level);
}



void HeadProjector::latency_test_thread(){
	//setup com port for trigger:
	latency_test_fd = open(LATENCY_TEST_COMPORT, O_RDWR); //serial port for trigger
	if (latency_test_fd < 0){
		printf("> ERROR: cannot open %s for the latency test trigger\n", LATENCY_TEST_COMPORT);
		return;
	}
	
	latency_test_set_trigger(latency_test_fd, 1);
	sleep(1);
	
	while(1){
		//set trigger = 0
		latency_test_set_trigger(latency_test_fd, 0);
		
		//sleep between 500 and 550 ms
		int sleep_ms = 500 + (rand()%50);
		boost::this_thread::sleep(boost::posix_time::milliseconds(sleep_ms));
		
		//set trigger = 1
		latency_test_trigger_detected = false;
		latency_test_set_trigger(latency_test_fd, 1);
		trigger_tickmeter.reset();
		trigger_tickmeter.start();
		
		//wait until trigger is detected:
		while(!latency_test_trigger_detected){
			//sleep 10ms
			boost::this_thread::sleep(boost::posix_time::milliseconds(10));
		}
		
	}
}
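//NOTE: latency_test_trigger_detected is shared with the grab loop in run()
//without synchronization; a plain bool is tolerable for this debug-only
//measurement but would need proper locking/atomics in production code.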


void HeadProjector::run(){
	//start latency measurement thread
	#if LATENCY_TEST_ENABLED
	boost::thread *thread = new boost::thread(boost::bind( &HeadProjector::latency_test_thread, this));
	#endif
	
	
	int device = camera_id;
	
	if (!USE_GPU){
		printf("> WARNING: COMPILED WITHOUT GPU SUPPORT! GPU IS MANDATORY FOR SHORT LATENCY!\n");
		printf(">          will start anyway (for debugging), but DO NOT use this in a\n");
		printf(">          production system!\n");
		#if !USE_TESTIMAGE
		sleep(1);
		#endif
	}
	VideoCapture *camera;
	
	
	#if !USE_TESTIMAGE
	printf("> opening cam(%d)\n", device);
	camera = new VideoCapture(device);
	
	if(!camera->isOpened()){
		printf("> ERROR: failed to open cam %d\n",device);
		exit(1);
	}
	
// printf("pal %d\n",camera->capture->imageProperties.palette);
// 	try_palette(capture->deviceHandle, &capture->imageProperties, VIDEO_PALETTE_YUV420, 16);

	if (!camera->set(CV_CAP_PROP_FPS, 30.0)){
		printf("> ERROR: set fps failed\n");
// 		exit(1);
	}
	printf("> running with %3.2f fps\n",camera->get(CV_CAP_PROP_FPS));

	printf("> grabbing first frame\n");
	while(!camera->grab()){}
	
// 	cam_input = Mat::zeros(Size(640,480), cam_input.type());
	
	if(!camera->retrieve(cam_input, 0)){
		printf("> ERROR: retrieve failed [this should NEVER fail!]\n");
		exit(1);
	}
	printf("> framesize: %3d x %3d\n",cam_input.size().width,cam_input.size().height);
	printf("> channels : %d\n",cam_input.channels());

	if (cam_input.channels() != 3){
		printf("> ERROR: expected a 3-channel input image! make sure the USB head-view camera is the one being opened!\n");
		exit(1);
	}

	#else
	string filename = __FILE__;
	int pos = filename.find_last_of('/');
	filename = filename.substr(0, pos);
	
	char buf[256];
	snprintf(buf, sizeof(buf), "%s/%s", filename.c_str(), TEST_IMAGE_FILENAME);
	printf("> using test image '%s'\n", buf);
	cam_input = imread(buf);
	
	if (cam_input.data == NULL){
		printf("> ERROR: cannot read '%s'\n", buf);
		exit(1);
	}
	#endif
	
	calculate_calibration();
	
	
	#if LATENCY_TEST_ENABLED
	namedWindow("LATENCY_TEST", WINDOW_OPENGL|CV_WINDOW_AUTOSIZE);
	imshow("LATENCY_TEST", cam_input);
	waitKey(1);
	setMouseCallback("LATENCY_TEST", &mouse_wrapper2, (void*)this);

	int cnt = 0;
	int state = 0;
	while(waitKey(1) == -1){
		#if !USE_TESTIMAGE
		camera->grab();
		camera->retrieve(cam_input, 0);
		#endif
		circle(cam_input, latency_test_clicked_pos, 10, CV_RGB(255,0,0), 1);
		Vec3b value = cam_input.at<Vec3b>(latency_test_clicked_pos.y, latency_test_clicked_pos.x);
		printf("> latency value %d\n", value[0]);
		imshow("LATENCY_TEST", cam_input);
		if (cnt++ > 30){
			cnt = 0;
			state = 1-state;
			latency_test_trigger_detected=true;
		}
	}
	Mat white(Size(640,480), CV_8UC3, cvScalar(0,0,0)); //black canvas; the white trigger circle is drawn below
	circle(white, latency_test_clicked_pos, 50, CV_RGB(255,255,255), -1);
	Mat black(Size(640,480), CV_8UC3, cvScalar(  0,  0,  0));
	#endif
	
	printf("> entering grab loop\n");
	
	TickMeter tm_grab, tm_frame, tm_show, tm_remap;
	double trigger_detection_time=0.0;
	while(1){
		//measure time
		tm_grab.reset(); tm_grab.start();
		tm_frame.reset(); tm_frame.start();
	
		#if USE_TESTIMAGE
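		//dummy conditionals: these mirror the brace structure of the
		//grab()/retrieve() path in the #else branch below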
		if (1){
			if (1){
				
		#else
		if (!camera->grab()){
			printf("> ERROR: single frame grab failed [high cpu load? run as root?]\n");
		}else{
			if (!camera->retrieve(cam_input, 0)){
				printf("> ERROR: retrieve failed?!\n");
			}else{
		#endif
				#if LATENCY_TEST_ENABLED
				Vec3b value = cam_input.at<Vec3b>(latency_test_clicked_pos.y,latency_test_clicked_pos.x);
				printf("> latency value %d\n",value[0]);
				if (value[0] > 160){
					if (!latency_test_trigger_detected){
						trigger_tickmeter.stop();
						trigger_detection_time = trigger_tickmeter.getTimeMilli();
						latency_test_trigger_detected = true;
					}
					cam_input = white;
				}else{
					cam_input = black;
				}
				#endif
				
				
				
				if (DEBUG_IMAGES){
					imshow("CAM INPUT", cam_input);
				}
				
				tm_grab.stop();
				
				//add blind spot for lens
				add_lens_black_spot();
				
				//project image to screen:
				tm_remap.reset(); tm_remap.start();
				calc_projection();
				tm_remap.stop();

				tm_show.reset(); tm_show.start();
				show_projection();
				tm_show.stop();
				
			}
		}
		
		//process keys
		#if USE_TESTIMAGE
		int key = waitKey(1000/30) & 0xFFFF;
		#else
		int key = waitKey(1) & 0xFFFF;
		#endif
		if ((key == 'x') || (key == 'X')){
			quit();
		}else{
			change_selection(key);
		}
		tm_frame.stop();

		printf("> running with %3.2ffps [grab=%3.2fms, remap=%3.2fms, show=%3.2fms]\n",1000.0/tm_frame.getTimeMilli(), tm_grab.getTimeMilli(), tm_remap.getTimeMilli(), tm_show.getTimeMilli());
		
	}
}