// NOTE: the bracketed header names were lost in the original text; the list
// below is reconstructed from what the code actually uses.
#include <cstdio>
#include <cstdlib>
#include <cmath>
#include <iostream>
#include <limits>
#include <GL/glut.h>
#include "opencv_inc.h"
#include <pcl/point_types.h>
#include <pcl/point_cloud.h>
#include <pcl/common/transforms.h>
#include <Eigen/Core>
#include "geometory.h"

using namespace std;

// Subwindow state (shared with the main viewer)
extern int xBegin2, yBegin2;
extern int mButton2;
extern float distance_gl2, twist2, elevation2, azimuth2;
extern float xOrig2, yOrig2, zOrig2;
extern double robot_z;
extern struct ROBOT myrobot;

int wait_sleep(double time);
void obstacle_view();
void move_obstacle_view();

const char *outputWindow = "Output";
const char *outputWindow2 = "Depth";

IplImage *video_buf;
IplImage *video;
IplImage *depth_buf;
IplImage *depth_buf2;
IplImage *depth_buf3;
IplImage *depth;

const long width = 320;
const long out_width = 320;
const long height = 240;
const long out_height = 240;

double depth_noise = 0.02;
double camera_angleofview = 120.0;

pcl::PointCloud<pcl::PointXYZ> pt_xyz;
pcl::PointCloud<pcl::PointXYZ> pt_xyz_out;

pcl::PointCloud<pcl::PointXYZ> rotation_z(pcl::PointCloud<pcl::PointXYZ> cloud, double theta);

// Add multiplicative Gaussian noise to every pixel of the input image.
// rand_normal(mean, stddev) is assumed to be provided elsewhere
// (most likely geometory.h).
void cv_noise(IplImage *input, IplImage *output)
{
    double p[3];
    for (int y = 0; y < input->height; y++) {
        for (int x = 0; x < input->width; x++) {
            for (int c = 0; c < input->nChannels; c++) {
                p[c] = (unsigned char)input->imageData[input->widthStep * y + x * input->nChannels + c];
                p[c] = p[c] + p[c] * rand_normal(0, 1.0) * depth_noise;
                if (p[c] < 0.0)
                    p[c] = 0.0;
                if (p[c] > 255.0)
                    p[c] = 255.0;
                output->imageData[output->widthStep * y + x * output->nChannels + c] = (int)p[c];
            }
        }
    }
}

void cv_bridge_init()
{
    video_buf = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 3);
    video = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 3);
    depth_buf = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 1);
    depth_buf2 = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 3);
    depth_buf3 = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 3);
    depth = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 1);
    cvNamedWindow(outputWindow2, CV_WINDOW_AUTOSIZE);
}

void polarview2(void)
{
    // Robot heading in degrees, offset by -90 degrees.
    // (The original also applied the rad->deg factor to the 90-degree offset.)
    azimuth2 = (myrobot.theta - M_PI / 2.0) / M_PI * 180.0;
    xOrig2 = -myrobot.y;
    yOrig2 = -myrobot.x;
    zOrig2 = -robot_z;

    // Move with the mouse
    glTranslatef(0.0, 0.0, -distance_gl2);
    glRotatef(-twist2, 0.0, 0.0, 1.0);
    glRotatef(-elevation2, 1.0, 0.0, 0.0);
    glRotatef(-azimuth2, 0.0, 1.0, 0.0);

    // Move with the keyboard
    glTranslatef(xOrig2, zOrig2, yOrig2);
    //cout << "twist2:" << twist2 << endl;
}

// Invert the depth image so that near surfaces appear bright and the far
// plane is painted black. The function header and the gain N were lost in
// the original text; N = 1.0 is an assumed placeholder.
void cv_inverse(IplImage *input, IplImage *output)
{
    const double N = 1.0; // intensity gain (assumption; original value unknown)
    for (int x = 0; x < input->width; x++) {
        for (int y = 0; y < input->height; y++) {
            if ((int)(input->imageData[y * input->widthStep + x * input->nChannels]) * N >= 255.0) {
                // Saturated depth (far plane): black
                output->imageData[y * output->widthStep + x * output->nChannels + 2] = 0;
                output->imageData[y * output->widthStep + x * output->nChannels + 1] = 0;
                output->imageData[y * output->widthStep + x * output->nChannels] = 0;
            } else {
                output->imageData[y * output->widthStep + x * output->nChannels + 2] =
                    255 - (input->imageData[y * input->widthStep + x * input->nChannels + 2]) * N;
                output->imageData[y * output->widthStep + x * output->nChannels + 1] =
                    255 - (input->imageData[y * input->widthStep + x * input->nChannels + 1]) * N;
                output->imageData[y * output->widthStep + x * output->nChannels] =
                    255 - (input->imageData[y * input->widthStep + x * input->nChannels]) * N;
            }
        }
    }
    // cvFlip(output, output, 1);
}
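// cv_noise() above depends on rand_normal(mean, stddev), which this file
// assumes is supplied elsewhere (most likely geometory.h). As a minimal
// sketch, a Box-Muller implementation of that interface could look like the
// following; the guard macro CV_BRIDGE_LOCAL_RAND_NORMAL is hypothetical and
// exists only so this reference copy is not compiled alongside the real
// definition.
#ifdef CV_BRIDGE_LOCAL_RAND_NORMAL
double rand_normal(double mu, double sigma)
{
    // Box-Muller transform: map two uniform samples onto one Gaussian sample.
    double u1 = (rand() + 1.0) / ((double)RAND_MAX + 2.0); // in (0,1), avoids log(0)
    double u2 = rand() / ((double)RAND_MAX + 1.0);
    return mu + sigma * sqrt(-2.0 * log(u1)) * cos(2.0 * M_PI * u2);
}
#endif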
// Depth read-back technique adapted from:
// http://marina.sys.wakayama-u.ac.jp/~tokoi/transparent/showdepth.c
int gl_showdepth(void)
{
    GLint view[4];
    GLubyte *buffer;

    /* Get the size of the current viewport */
    glGetIntegerv(GL_VIEWPORT, view);
    buffer = (GLubyte *)malloc(view[2] * view[3]); // allocation guard only, kept from the reference code
    if (buffer) {
        GLdouble model[16];
        GLdouble proj[16];
        GLdouble ox, oy, oz;

        /* Wait for rendering to finish */
        glFinish();

        /* Read back the depth buffer */
        glReadPixels(view[0], view[1], view[2], view[3], GL_DEPTH_COMPONENT, GL_UNSIGNED_BYTE, depth_buf->imageData);
        cvConvertImage(depth_buf, depth_buf2, CV_CVTIMG_FLIP + CV_CVTIMG_SWAP_RB);
        cv_inverse(depth_buf2, depth_buf3);

        glGetDoublev(GL_MODELVIEW_MATRIX, model);
        glGetDoublev(GL_PROJECTION_MATRIX, proj);
        gluUnProject(view[0] + 0.5, view[1] + 0.5, 0.0, model, proj, view, &ox, &oy, &oz);
        glRasterPos3d(ox, oy, oz);

        free(buffer);
        return 1;
    }
    return 0;
}

// Unproject every depth-buffer pixel back into world coordinates and store
// the result in pt_xyz (OpenGL camera axes are remapped to the robot frame).
void GET_WORLD_COORDINATE()
{
    double modelview[16];
    glGetDoublev(GL_MODELVIEW_MATRIX, modelview);
    double projection[16];
    glGetDoublev(GL_PROJECTION_MATRIX, projection);
    GLint viewport[4];
    glGetIntegerv(GL_VIEWPORT, viewport);

    float z[width * height];
    glReadPixels(viewport[0], viewport[1], viewport[2], viewport[3], GL_DEPTH_COMPONENT, GL_FLOAT, z);

    pt_xyz.points.resize(width * height);
    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++) {
            double objX, objY, objZ;
            gluUnProject(x, height - y, z[y * width + x], modelview, projection, viewport, &objX, &objY, &objZ);

            int i = y * width + x;
            // Remap OpenGL coordinates to the robot-centred world frame.
            pt_xyz.points[i].x = objX - myrobot.y;
            pt_xyz.points[i].y = -objZ + myrobot.x;
            pt_xyz.points[i].z = -objY + robot_z;

            // Points beyond the RGB-D camera range are marked invalid (NaN).
            const double rgb_camera_limit = 20.0;
            if (pt_xyz.points[i].y * pt_xyz.points[i].y + pt_xyz.points[i].x * pt_xyz.points[i].x >
                rgb_camera_limit * rgb_camera_limit) {
                pt_xyz.points[i].x = numeric_limits<float>::quiet_NaN();
                pt_xyz.points[i].y = numeric_limits<float>::quiet_NaN();
                pt_xyz.points[i].z = numeric_limits<float>::quiet_NaN();
            }
        }
    }
    pt_xyz_out = rotation_z(pt_xyz, myrobot.theta);
}

int read_buf_flg = 0;

void display2()
{
    glClearColor(1, 1, 1, 1);
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    glPushMatrix();
    polarview2();
    glEnable(GL_DEPTH_TEST);

    // Main scene drawing
    obstacle_view();
    // Draw the robot
    move_obstacle_view();

    if (read_buf_flg == 0) {
        GET_WORLD_COORDINATE();
        glReadPixels(0, 0, width, height, GL_RGB, GL_UNSIGNED_BYTE, video_buf->imageData);
        cvConvertImage(video_buf, video, CV_CVTIMG_FLIP + CV_CVTIMG_SWAP_RB);
        gl_showdepth();
        cvCvtColor(depth_buf3, depth, CV_BGR2GRAY);
    }
    cv_noise(depth, depth);

    //cvShowImage(outputWindow, video);
    cvShowImage(outputWindow2, depth);

    glPopMatrix();
    glutSwapBuffers();
    glutPostRedisplay();
    wait_sleep(1);
    cvWaitKey(10);
}

// Reset the viewpoint
void resetview2(void)
{
    distance_gl2 = 0;
    twist2 = 0.0;
    elevation2 = 0.0;
    azimuth2 = 0.0;
}

void reshape2(int w, int h)
{
    glutReshapeWindow(width, height);
    glViewport(0, 0, w, h);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    gluPerspective(camera_angleofview, (double)w / (double)h, 0.05, 100.0);
    glMatrixMode(GL_MODELVIEW);
}

// Rotate a point cloud about the z axis. Note: with this element layout
// (Eigen indexing is (row, col)) the matrix equals R_z(-theta), i.e. a
// clockwise rotation about z.
pcl::PointCloud<pcl::PointXYZ> rotation_z(pcl::PointCloud<pcl::PointXYZ> cloud, double theta)
{
    Eigen::Matrix4f rot_z;
    pcl::PointCloud<pcl::PointXYZ> cloud_out;
    double theta_z = theta; // angle conversion

    rot_z(0, 0) = cos(theta_z);  rot_z(1, 0) = -sin(theta_z); rot_z(2, 0) = 0.0; rot_z(3, 0) = 0.0;
    rot_z(0, 1) = sin(theta_z);  rot_z(1, 1) = cos(theta_z);  rot_z(2, 1) = 0.0; rot_z(3, 1) = 0.0;
    rot_z(0, 2) = 0.0;           rot_z(1, 2) = 0.0;           rot_z(2, 2) = 1.0; rot_z(3, 2) = 0.0;
    rot_z(0, 3) = 0.0;           rot_z(1, 3) = 0.0;           rot_z(2, 3) = 0.0; rot_z(3, 3) = 1.0;

    pcl::transformPointCloud(cloud, cloud_out, rot_z);
    return cloud_out;
}
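// For reference, a minimal sketch of how the cloud produced above might be
// consumed: dump pt_xyz_out to a PCD file for offline inspection (e.g. with
// pcl_viewer). The guard macro CV_BRIDGE_DUMP_CLOUD, the function name
// dump_cloud_example, and the output file name are illustrative only and are
// not part of the original program.
#ifdef CV_BRIDGE_DUMP_CLOUD
#include <pcl/io/pcd_io.h>
void dump_cloud_example()
{
    if (!pt_xyz_out.points.empty()) {
        // savePCDFileBinary requires width * height == points.size();
        // treat the cloud as unorganized here for simplicity.
        pt_xyz_out.width = static_cast<uint32_t>(pt_xyz_out.points.size());
        pt_xyz_out.height = 1;
        pcl::io::savePCDFileBinary("cloud_out.pcd", pt_xyz_out);
    }
}
#endif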