/********************************************************************
**                Image Component Library (ICL)                    **
**                                                                 **
** Copyright (C) 2006-2012 CITEC, University of Bielefeld          **
**                         Neuroinformatics Group                  **
** Website: www.iclcv.org and                                      **
**          http://opensource.cit-ec.de/projects/icl               **
**                                                                 **
** File   : ICLGeom/src/PointcloudSceneObject.cpp                  **
** Module : ICLGeom                                                **
** Authors: Andre Ueckermann                                       **
**                                                                 **
**                                                                 **
** Commercial License                                              **
** ICL can be used commercially, please refer to our website       **
** www.iclcv.org for more details.                                 **
**                                                                 **
** GNU General Public License Usage                                **
** Alternatively, this file may be used under the terms of the     **
** GNU General Public License version 3.0 as published by the      **
** Free Software Foundation and appearing in the file LICENSE.GPL  **
** included in the packaging of this file. Please review the       **
** following information to ensure the GNU General Public License  **
** version 3.0 requirements will be met:                           **
** http://www.gnu.org/copyleft/gpl.html.                           **
**                                                                 **
** The development of this software was supported by the           **
** Excellence Cluster EXC 277 Cognitive Interaction Technology.    **
** The Excellence Cluster EXC 277 is a grant of the Deutsche       **
** Forschungsgemeinschaft (DFG) in the context of the German       **
** Excellence Initiative.                                          **
**                                                                 **
*********************************************************************/

#define __CL_ENABLE_EXCEPTIONS //enables openCL error catching

#include <ICLGeom/PointcloudSceneObject.h> // assumed: the class's own header
#ifdef HAVE_OPENCL
#include <CL/cl.hpp>                       // assumed: C++ OpenCL bindings for the cl:: API used below
#endif
#include /* ... */
#include /* ... */

namespace icl{
  namespace geom{

    //OpenCL kernel code
    static char pointcloudViewerKernel[] =
      "__kernel void \n"
      "calculateUniColor(__global float const * depth, __global float4 const * vrOffset, __global float4 const * vrDirection, __global float const * norms, __global float4 * outV, __global float4 * outC, float4 const color, float const scale) \n"
      "{ \n"
      "   size_t id = get_global_id(0); \n"
      "   float d = 1.046 * (depth[id]==2047 ? 0 : 1000. / (depth[id] * -0.0030711016 + 3.3309495161)); \n"
      "   float lambda = d*norms[id]*scale; \n"
      "   float4 out = vrOffset[id]+vrDirection[id]*lambda; \n"
      "   out.w=1; \n"
      "   outV[id]=out; \n"
      "   if(depth[id]==2047) \n"
      "   { \n"
      "      outC[id]=((float4)(0,0,0,0)); \n"
      "   } \n"
      "   else \n"
      "   { \n"
      "      outC[id]=color; \n"
      "   } \n"
      "} \n"
      "__kernel void \n"
      "calculateRGBColor(__global float const * depth, __global float4 const * vrOffset, __global float4 const * vrDirection, __global float const * norms, __global float4 * outV, __global float4 * outC, __global uchar const * colorR, __global uchar const * colorG, __global uchar const * colorB, float const scale, __global float const * H, int const w, int const h) \n"
      "{ \n"
      "   size_t id = get_global_id(0); \n"
      "   float d = 1.046 * (depth[id]==2047 ? 0 : 1000. / (depth[id] * -0.0030711016 + 3.3309495161)); \n"
      "   float lambda = d*norms[id]*scale; \n"
      "   float4 out = vrOffset[id]+vrDirection[id]*lambda; \n"
      "   out.w=1; \n"
      "   outV[id]=out; \n"
      "   if(depth[id]==2047) \n"
      "   { \n"
      "      outC[id]=((float4)(0,0,0,0)); \n"
      "   } \n"
      "   else \n"
      "   { \n"
      "      int y = (int)floor((float)id/(float)w); \n"
      "      int x = id-y*w; \n"
      "      float az = H[6]*x + H[7] * y + H[8]; \n"
      "      int ax = (int)round(( H[0]*(float)x + H[1] * (float)y + H[2] ) / az); \n"
      "      int ay = (int)round(( H[3]*(float)x + H[4] * (float)y + H[5] ) / az); \n"
      "      if(ax>=0 && ax<w && ay>=0 && ay<h) \n"
      "         /* ... */ \n"
      "} \n"
      /* ... */
      "   switch(/* ... */>>8) { \n"
      "      case 0: \n"
      "         R[id] = 255; \n"
      "         G[id] = 255-lb; \n"
      "         B[id] = 255-lb; \n"
      "         break; \n"
      "      case 1: \n"
      "         R[id] = 255; \n"
      "         G[id] = lb; \n"
      "         B[id] = 0; \n"
      "         break; \n"
      "      case 2: \n"
      "         R[id] = 255-lb; \n"
      "         G[id] = 255; \n"
      "         B[id] = 0; \n"
      "         break; \n"
      "      case 3: \n"
      "         R[id] = 0; \n"
      "         G[id] = 255; \n"
      "         B[id] = lb; \n"
      "         break; \n"
      "      case 4: \n"
      "         R[id] = 0; \n"
      "         G[id] = 255-lb; \n"
      "         B[id] = 255; \n"
      "         break; \n"
      "      case 5: \n"
      "         R[id] = 0; \n"
      "         G[id] = 0; \n"
      "         B[id] = 255-lb; \n"
      "         break; \n"
      "      default: \n"
      "         R[id] = 0; \n"
      "         G[id] = 0; \n"
      "         B[id] = 0; \n"
      "         break; \n"
      "   } \n"
      "} \n";

    PointcloudSceneObject::PointcloudSceneObject(Size size, const Camera &cam){
      H.id();
      w=size.width;
      h=size.height;
      dim=w*h;
      s=size;
      setLockingEnabled(true);
      viewrayOffsets = new PointNormalEstimation::Vec4[w*h];
      viewrayDirections = new PointNormalEstimation::Vec4[w*h];
      highlightedIdx = -1;
      create_view_rays(w,h,cam);
      ViewRay v00 = cam.getViewRay(Point32f(w/2,h/2));
      int i=0;
      norms.resize(w*h);
      for(int y=0;y<h;y++){
        /* ... */
      }
      /* ... */
      std::vector<cl::Platform> platformList;//get number of available openCL platforms
      int selectedDevice=0;//initially select platform 0
      try{
        if(cl::Platform::get(&platformList)==CL_SUCCESS){
          std::cout<<"openCL platform found"<< /* ... */
          std::vector<cl::Device> deviceList; // element type assumed from the getDevices() call below
          if(platformList.at(i).getDevices(CL_DEVICE_TYPE_GPU, &deviceList)==CL_SUCCESS){
            std::cout<<"GPU-DEVICE(S) FOUND"<< /* ... */();
            std::cout<<"selected devices: "<< /* ... */

    /* ... */

          std::vector<icl8u*> data(3); // element type assumed from the Img8u constructor below
          data[0] = pseudoImageRArray;
          data[1] = pseudoImageGArray;
          data[2] = pseudoImageBArray;
          hsvImage = Img8u(Size(w,h),3,data,false);
        }catch (cl::Error err) {//catch openCL errors
          std::cout<< "ERROR: "<< err.what()<< "("<< err.err()<< ")"<< /* ... */
        }
      }else{ // assumed block structure (matches the visible closing braces below)
        for(int y=0;y<h;y++){ // assumed loop headers
          for(int x=0;x<w;x++){
            /* ... */
            switch(/* ... */>>8) {
              case 0:
                hsvImage(x,y,0) = 255;
                hsvImage(x,y,1) = 255-lb;
                hsvImage(x,y,2) = 255-lb;
                break;
              case 1:
                hsvImage(x,y,0) = 255;
                hsvImage(x,y,1) = lb;
                hsvImage(x,y,2) = 0;
                break;
              case 2:
                hsvImage(x,y,0) = 255-lb;
                hsvImage(x,y,1) = 255;
                hsvImage(x,y,2) = 0;
                break;
              case 3:
                hsvImage(x,y,0) = 0;
                hsvImage(x,y,1) = 255;
                hsvImage(x,y,2) = lb;
                break;
              case 4:
                hsvImage(x,y,0) = 0;
                hsvImage(x,y,1) = 255-lb;
                hsvImage(x,y,2) = 255;
                break;
              case 5:
                hsvImage(x,y,0) = 0;
                hsvImage(x,y,1) = 0;
                hsvImage(x,y,2) = 255-lb;
                break;
              default:
                hsvImage(x,y,0) = 0;
                hsvImage(x,y,1) = 0;
                hsvImage(x,y,2) = 0;
                break;
            }
          }
        }
      }
      return hsvImage;
    }

    void PointcloudSceneObject::calculateUniColor(const Img32f &depthImg, GeomColor color, bool vSync){
      if(normalLinesSet==true){
        clearNormalLines();
      }
      if(useCL==true && clReady==true){
  #ifdef HAVE_OPENCL
        try{
          rawImageArray=(float*)depthImg.begin(0);//image to float array
          rawImageBuffer = cl::Buffer(//write new image to buffer
                             context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR,
                             w*h * sizeof(float), (void *) &rawImageArray[0]);
          cl_float4 vcolor;
          vcolor.x=color[0]/255.;
          vcolor.y=color[1]/255.;
          vcolor.z=color[2]/255.;
          vcolor.w=1.;
          kernelUnicolor.setArg(0, rawImageBuffer);//set parameter for kernel
          kernelUnicolor.setArg(1, vrOffsetBuffer);
          kernelUnicolor.setArg(2, vrDirectionBuffer);
          kernelUnicolor.setArg(3, normsBuffer);
          kernelUnicolor.setArg(4, verticesBuffer);
          kernelUnicolor.setArg(5, vertColorsBuffer);
          kernelUnicolor.setArg(6, vcolor);
          kernelUnicolor.setArg(7, depthScaling);
          queue.enqueueNDRangeKernel(//run kernel
                                     kernelUnicolor,
                                     cl::NullRange,
                                     cl::NDRange(w*h), //input size for get global id
                                     cl::NullRange);
          cl_bool vS=CL_TRUE; //default: blocking (use after complete read)
          if(vSync==true){
            vS=CL_FALSE; //if vSync is activated: no block (read if needed, faster but incomplete update)
          }
          queue.enqueueReadBuffer(//read output from kernel
                                  verticesBuffer, vS, 0, w*h * sizeof(cl_float4),
                                  reinterpret_cast<cl_float4*>(m_vertices.data()));
          queue.enqueueReadBuffer(//read output from kernel
                                  vertColorsBuffer, vS, 0, w*h * sizeof(cl_float4),
                                  reinterpret_cast<cl_float4*>(m_vertexColors.data()));
        }catch (cl::Error err) {//catch openCL errors
          std::cout<< "ERROR: "<< err.what()<< "("<< err.err()<< ")"<< /* ... */

    /* ... */

    void PointcloudSceneObject::calculateRGBColor(/* ... */ homogeneity, bool vSync){ // method name assumed from kernelRGBcolor
      if(normalLinesSet==true){
        clearNormalLines();
      }
      if(useCL==true && clReady==true){
  #ifdef HAVE_OPENCL
        try{
          rawImageArray=(float*)depthImg.begin(0);//image to float array
          colorImageRArray=(cl_uchar*)colorImg.begin(0);//image to char array
          colorImageGArray=(cl_uchar*)colorImg.begin(1);//image to char array
          colorImageBArray=(cl_uchar*)colorImg.begin(2);//image to char array
          rawImageBuffer = cl::Buffer(//write new image to buffer
                             context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR,
                             w*h * sizeof(float), (void *) &rawImageArray[0]);
          colorRBuffer = cl::Buffer(
                             context, CL_MEM_READ_WRITE | CL_MEM_COPY_HOST_PTR,
                             w*h * sizeof(cl_uchar), (void *) &colorImageRArray[0]);
          colorGBuffer = cl::Buffer(
                             context, CL_MEM_READ_WRITE | CL_MEM_COPY_HOST_PTR,
                             w*h * sizeof(cl_uchar), (void *) &colorImageGArray[0]);
          colorBBuffer = cl::Buffer(
                             context, CL_MEM_READ_WRITE | CL_MEM_COPY_HOST_PTR,
                             w*h * sizeof(cl_uchar), (void *) &colorImageBArray[0]);
          homogeneityBuffer = cl::Buffer(
                             context, CL_MEM_READ_WRITE | CL_MEM_COPY_HOST_PTR,
                             9 * sizeof(float), (void *) &homogeneity[0]);
          kernelRGBcolor.setArg(0, rawImageBuffer);//set parameter for kernel
          kernelRGBcolor.setArg(1, vrOffsetBuffer);
          kernelRGBcolor.setArg(2, vrDirectionBuffer);
          kernelRGBcolor.setArg(3, normsBuffer);
          kernelRGBcolor.setArg(4, verticesBuffer);
          kernelRGBcolor.setArg(5, vertColorsBuffer);
          kernelRGBcolor.setArg(6, colorRBuffer);
          kernelRGBcolor.setArg(7, colorGBuffer);
          kernelRGBcolor.setArg(8, colorBBuffer);
          kernelRGBcolor.setArg(9, depthScaling);
          kernelRGBcolor.setArg(10, homogeneityBuffer);
          kernelRGBcolor.setArg(11, w);
          kernelRGBcolor.setArg(12, h);
          queue.enqueueNDRangeKernel(//run kernel
                                     kernelRGBcolor,
                                     cl::NullRange,
                                     cl::NDRange(w*h), //input size for get global id
                                     cl::NullRange);
          cl_bool vS=CL_TRUE; //default: blocking (use after complete read)
          if(vSync==true){
            vS=CL_FALSE; //if vSync is activated: no block (read if needed, faster but incomplete update)
          }
          queue.enqueueReadBuffer(//read output from kernel
                                  verticesBuffer, vS, 0, w*h * sizeof(cl_float4),
                                  reinterpret_cast<cl_float4*>(m_vertices.data()));
          queue.enqueueReadBuffer(//read output from kernel
                                  vertColorsBuffer, vS, 0, w*h * sizeof(cl_float4),
                                  reinterpret_cast<cl_float4*>(m_vertexColors.data()));
        }catch (cl::Error err) {//catch openCL errors
          std::cout<< "ERROR: "<< err.what()<< "("<< err.err()<< ")"<< /* ... */

    /* ... */

          queue.enqueueReadBuffer(//read output from kernel
                                  verticesBuffer, vS, 0, w*h * sizeof(cl_float4),
                                  reinterpret_cast<cl_float4*>(m_vertices.data()));
          queue.enqueueReadBuffer(//read output from kernel
                                  vertColorsBuffer, vS, 0, w*h * sizeof(cl_float4),
                                  reinterpret_cast<cl_float4*>(m_vertexColors.data()));
        }catch (cl::Error err) {//catch openCL errors
          std::cout<< "ERROR: "<< err.what()<< "("<< err.err()<< ")"<< /* ... */

    /* ... */

          queue.enqueueReadBuffer(//read output from kernel
                                  verticesBuffer, vS, 0, w*h * sizeof(cl_float4),
                                  reinterpret_cast<cl_float4*>(m_vertices.data()));
          queue.enqueueReadBuffer(//read output from kernel
                                  vertColorsBuffer, vS, 0, w*h * sizeof(cl_float4),
                                  reinterpret_cast<cl_float4*>(m_vertexColors.data()));
        }catch (cl::Error err) {//catch openCL errors
          std::cout<< "ERROR: "<< err.what()<< "("<< err.err()<< ")"<< /* ... */

    /* ... */

          queue.enqueueReadBuffer(//read output from kernel
                                  verticesBuffer, vS, 0, w*h * sizeof(cl_float4),
                                  reinterpret_cast<cl_float4*>(m_vertices.data()));
          queue.enqueueReadBuffer(//read output from kernel
                                  vertColorsBuffer, vS, 0, w*h * sizeof(cl_float4),
                                  reinterpret_cast<cl_float4*>(m_vertexColors.data()));
        }catch (cl::Error err) {//catch openCL errors
          std::cout<< "ERROR: "<< err.what()<< "("<< err.err()<< ")"<< /* ... */

    /* ... */

      /* ... */ R = T.part<0,0,3,3>();
      Mat T2 = R.transp().resize<4,4>(0);
      T2(3,3) =1;
      if(useCL==true && clReady==true){
  #ifdef HAVE_OPENCL
        try{
          rawImageArray=(float*)depthImg.begin(0);//image to float array
          rawImageBuffer = cl::Buffer(//write new image to buffer
                             context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR,
                             w*h * sizeof(float), (void *) &rawImageArray[0]);
          normalsBuffer = cl::Buffer(
                             context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR,
                             w*h * sizeof(cl_float4), (void *) &pNormals[0]);
          camBuffer = cl::Buffer(
                             context, CL_MEM_READ_WRITE | CL_MEM_COPY_HOST_PTR,
                             16 * sizeof(float), (void *) &T2[0]);
          kernelNormalcolorWorld.setArg(0, rawImageBuffer);//set parameter for kernel
          kernelNormalcolorWorld.setArg(1, vrOffsetBuffer);
          kernelNormalcolorWorld.setArg(2, vrDirectionBuffer);
          kernelNormalcolorWorld.setArg(3, normsBuffer);
          kernelNormalcolorWorld.setArg(4, verticesBuffer);
          kernelNormalcolorWorld.setArg(5, vertColorsBuffer);
          kernelNormalcolorWorld.setArg(6, normalsBuffer);
          kernelNormalcolorWorld.setArg(7, depthScaling);
          kernelNormalcolorWorld.setArg(8, camBuffer);
          kernelNormalcolorWorld.setArg(9, outNormalsBuffer);
          queue.enqueueNDRangeKernel(//run kernel
                                     kernelNormalcolorWorld,
                                     cl::NullRange,
                                     cl::NDRange(w*h), //input size for get global id
                                     cl::NullRange);
          cl_bool vS=CL_TRUE; //default: blocking (use after complete read)
          if(vSync==true){
            vS=CL_FALSE; //if vSync is activated: no block (read if needed, faster but incomplete update)
          }
          if(useNormalLines==true){//No vSync if normal lines are shown
            vS=CL_TRUE;
          }
          queue.enqueueReadBuffer(//read output from kernel
                                  verticesBuffer, vS, 0, w*h * sizeof(cl_float4),
                                  reinterpret_cast<cl_float4*>(m_vertices.data()));
          queue.enqueueReadBuffer(//read output from kernel
                                  vertColorsBuffer, vS, 0, w*h * sizeof(cl_float4),
                                  reinterpret_cast<cl_float4*>(m_vertexColors.data()));
          queue.enqueueReadBuffer(//read output from kernel
                                  outNormalsBuffer,
                                  CL_TRUE, // block
                                  0, w*h * sizeof(cl_float4),
                                  (cl_float4*) pWNormals);
        }catch (cl::Error err) {//catch openCL errors
          std::cout<< "ERROR: "<< err.what()<< "("<< err.err()<< ")"<< /* ... */

    /* ... */

      /* ... */ Q;
      Q.row(0) = M.row(0);
      Q.row(1) = M.row(1);
      Q.row(2) = M.row(3);
      FixedMatrix /* ... */ Qpinv = Q.pinv();
      const Vec pos = cam.getPosition();
      int i=0;
      if((pos[0]*pos[0] + pos[1]*pos[1] + pos[2]*pos[2]) < 1e-20){
        for(int y=0;y<h;y++){
          for(int x=0;x<w;x++){ // assumed inner loop header
            /* ... */(x, y, 1);
            v.direction[3] = 0;
            v.direction.normalize();
            v.direction *= -1;
            v.direction[3] = 1;
          }
        }
      }else{
        for(int y=0;y<h;y++){
          for(int x=0;x<w;x++){ // assumed inner loop header
            /* ... */(x, y, 1);
            v.direction = pos - homogenize(v.direction);
            v.direction[3] = 0;
            v.direction.normalize();
            v.direction *= -1;
            v.direction[3] = 1;
          }
        }
      }
    }

    void PointcloudSceneObject::setUseCL(bool use){
      useCL=use;
    }

    bool PointcloudSceneObject::isCLReady(){
      return clReady;
    }

    bool PointcloudSceneObject::isCLActive(){
      return useCL;
    }

    void PointcloudSceneObject::setUseDrawNormalLines(bool use){
      useNormalLines=use;
    }

    void PointcloudSceneObject::drawNormalLines(PointNormalEstimation::Vec4* pNormals){
      for(int y=0; y
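    // ------------------------------------------------------------------
    // Hedged illustration (not part of the original class): every kernel
    // above converts a Kinect raw depth reading into a metric distance via
    //   d = 1.046 * 1000 / (raw * -0.0030711016 + 3.3309495161)
    // where the raw value 2047 marks an invalid reading, and then places
    // the 3D point at  vrOffset + vrDirection * (d * norm * depthScaling).
    // The helper below merely restates that conversion on the host side;
    // its name, rawDepthToMillimeters, is hypothetical.
    static inline float rawDepthToMillimeters(float raw){
      if(raw == 2047) return 0.f; // invalid-depth marker used by the kernels
      return 1.046f * (1000.f / (raw * -0.0030711016f + 3.3309495161f));
    }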