diff --git a/Modules/ToFProcessing/mitkToFDistanceImageToSurfaceFilter.cpp b/Modules/ToFProcessing/mitkToFDistanceImageToSurfaceFilter.cpp
index 56035d806c..5d384080d1 100644
--- a/Modules/ToFProcessing/mitkToFDistanceImageToSurfaceFilter.cpp
+++ b/Modules/ToFProcessing/mitkToFDistanceImageToSurfaceFilter.cpp
@@ -1,304 +1,321 @@
 /*===================================================================
 
 The Medical Imaging Interaction Toolkit (MITK)
 
 Copyright (c) German Cancer Research Center,
 Division of Medical and Biological Informatics.
 All rights reserved.
 
 This software is distributed WITHOUT ANY WARRANTY; without
 even the implied warranty of MERCHANTABILITY or FITNESS FOR
 A PARTICULAR PURPOSE.
 
 See LICENSE.txt or http://www.mitk.org for details.
 
 ===================================================================*/
 
 #include
 #include
 #include
 #include "mitkImageReadAccessor.h"
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 
 mitk::ToFDistanceImageToSurfaceFilter::ToFDistanceImageToSurfaceFilter() :
-  m_IplScalarImage(NULL), m_CameraIntrinsics(), m_TextureImageWidth(0), m_TextureImageHeight(0), m_InterPixelDistance(), m_TextureIndex(0)
+  m_IplScalarImage(NULL), m_CameraIntrinsics(), m_TextureImageWidth(0), m_TextureImageHeight(0), m_InterPixelDistance(), m_TextureIndex(0),
+  m_GenerateTriangularMesh(true)
 {
   m_InterPixelDistance.Fill(0.045);
   m_CameraIntrinsics = mitk::CameraIntrinsics::New();
   m_CameraIntrinsics->SetFocalLength(273.138946533,273.485900879);
   m_CameraIntrinsics->SetPrincipalPoint(107.867935181,98.3807373047);
   m_CameraIntrinsics->SetDistorsionCoeffs(-0.486690014601f,0.553943634033f,0.00222016777843f,-0.00300851115026f);
   m_ReconstructionMode = WithInterPixelDistance;
 }
 
 mitk::ToFDistanceImageToSurfaceFilter::~ToFDistanceImageToSurfaceFilter()
 {
 }
 
 void mitk::ToFDistanceImageToSurfaceFilter::SetInput( Image* distanceImage, mitk::CameraIntrinsics::Pointer cameraIntrinsics )
 {
   this->SetCameraIntrinsics(cameraIntrinsics);
   this->SetInput(0,distanceImage);
 }
 
 void mitk::ToFDistanceImageToSurfaceFilter::SetInput( unsigned int idx, Image* distanceImage, mitk::CameraIntrinsics::Pointer cameraIntrinsics )
 {
   this->SetCameraIntrinsics(cameraIntrinsics);
   this->SetInput(idx,distanceImage);
 }
 
 void mitk::ToFDistanceImageToSurfaceFilter::SetInput( mitk::Image* distanceImage )
 {
   this->SetInput(0,distanceImage);
 }
 
 void mitk::ToFDistanceImageToSurfaceFilter::SetInput( unsigned int idx, mitk::Image* distanceImage )
 {
   if ((distanceImage == NULL) && (idx == this->GetNumberOfInputs() - 1)) // if the last input is set to NULL, reduce the number of inputs by one
     this->SetNumberOfInputs(this->GetNumberOfInputs() - 1);
   else
     this->ProcessObject::SetNthInput(idx, distanceImage); // Process object is not const-correct so the const_cast is required here
 
   this->CreateOutputsForAllInputs();
 }
 
 mitk::Image* mitk::ToFDistanceImageToSurfaceFilter::GetInput()
 {
   return this->GetInput(0);
 }
 
 mitk::Image* mitk::ToFDistanceImageToSurfaceFilter::GetInput( unsigned int idx )
 {
   if (this->GetNumberOfInputs() < 1)
   {
     mitkThrow() << "No input given for ToFDistanceImageToSurfaceFilter";
   }
   return static_cast< mitk::Image*>(this->ProcessObject::GetInput(idx));
 }
 
 void mitk::ToFDistanceImageToSurfaceFilter::GenerateData()
 {
   mitk::Surface::Pointer output = this->GetOutput();
   assert(output);
   mitk::Image::Pointer input = this->GetInput();
   assert(input);
 
   // mesh points
   int xDimension = input->GetDimension(0);
   int yDimension = input->GetDimension(1);
   unsigned int size = xDimension*yDimension; //size of the image-array
   std::vector<bool> isPointValid;
   isPointValid.resize(size);
 
   vtkSmartPointer<vtkPoints> points = vtkSmartPointer<vtkPoints>::New();
   points->SetDataTypeToDouble();
+  vtkSmartPointer<vtkCellArray> verts = vtkSmartPointer<vtkCellArray>::New();
   vtkSmartPointer<vtkCellArray> polys = vtkSmartPointer<vtkCellArray>::New();
   vtkSmartPointer<vtkFloatArray> scalarArray = vtkSmartPointer<vtkFloatArray>::New();
   vtkSmartPointer<vtkFloatArray> textureCoords = vtkSmartPointer<vtkFloatArray>::New();
   textureCoords->SetNumberOfComponents(2);
   textureCoords->Allocate(size);
 
   //Make a vtkIdList to save the ID's of the polyData corresponding to the image
   //pixel ID's. See below for more documentation.
   m_VertexIdList = vtkSmartPointer<vtkIdList>::New();
   //Allocate the object once, otherwise it would automatically allocate new memory
   //for every vertex and perform a copy, which is expensive.
   m_VertexIdList->Allocate(size);
   m_VertexIdList->SetNumberOfIds(size);
   for(int i = 0; i < size; ++i)
   {
     m_VertexIdList->SetId(i, 0);
   }
 
   float* scalarFloatData = NULL;
 
   if (this->m_IplScalarImage) // if scalar image is defined use it for texturing
   {
     scalarFloatData = (float*)this->m_IplScalarImage->imageData;
   }
   else if (this->GetInput(m_TextureIndex)) // otherwise use intensity image (input(2))
   {
     ImageReadAccessor inputAcc(this->GetInput(m_TextureIndex));
     scalarFloatData = (float*)inputAcc.GetData();
   }
 
   ImageReadAccessor inputAcc(input, input->GetSliceData(0,0,0));
   float* inputFloatData = (float*)inputAcc.GetData();
 
   //calculate world coordinates
   mitk::ToFProcessingCommon::ToFPoint2D focalLengthInPixelUnits;
   mitk::ToFProcessingCommon::ToFScalarType focalLengthInMm;
   if((m_ReconstructionMode == WithOutInterPixelDistance) || (m_ReconstructionMode == Kinect))
   {
     focalLengthInPixelUnits[0] = m_CameraIntrinsics->GetFocalLengthX();
     focalLengthInPixelUnits[1] = m_CameraIntrinsics->GetFocalLengthY();
   }
   else if( m_ReconstructionMode == WithInterPixelDistance)
   {
     //convert focal length from pixel units to mm
     focalLengthInMm = (m_CameraIntrinsics->GetFocalLengthX()*m_InterPixelDistance[0]+m_CameraIntrinsics->GetFocalLengthY()*m_InterPixelDistance[1])/2.0;
   }
 
   mitk::ToFProcessingCommon::ToFPoint2D principalPoint;
   principalPoint[0] = m_CameraIntrinsics->GetPrincipalPointX();
   principalPoint[1] = m_CameraIntrinsics->GetPrincipalPointY();
 
   mitk::Point3D origin = input->GetGeometry()->GetOrigin();
 
   for (int j=0; j<yDimension; j++)
   {
     for (int i=0; i<xDimension; i++)
     {
       unsigned int pixelID = i + j*xDimension;
 
       //distance value of the current pixel
       mitk::ToFProcessingCommon::ToFScalarType distance = (double)inputFloatData[pixelID];
 
       //cartesian coordinates of the current pixel, computed from (i,j,distance), the focal
       //length and the principal point according to m_ReconstructionMode; pixels without a
       //valid distance measurement are flagged as invalid in isPointValid[pixelID]
       mitk::ToFProcessingCommon::ToFPoint3D cartesianCoordinates;
       //...
 
       if (isPointValid[pixelID])
       {
         //VTK would insert empty points into the polydata if we use
         //points->InsertPoint(pixelID, cartesianCoordinates.GetDataPointer()).
         //If we use points->InsertNextPoint(...) instead, the ID's do not
         //correspond to the image pixel ID's. Thus, we have to save them
         //in the vertexIdList.
         m_VertexIdList->SetId(pixelID, points->InsertNextPoint(cartesianCoordinates.GetDataPointer()));
 
-        if((i >= 1) && (j >= 1))
+        if (m_GenerateTriangularMesh)
         {
-          //This little piece of art explains the ID's:
-          //
-          // P(x_1y_1)---P(xy_1)
-          // |    |
-          // |    |
-          // |    |
-          // P(x_1y)-----P(xy)
-          //
-          //We can only start triangulation if we are at vertex (1,1),
-          //because we need the other 3 vertices near this one.
-          //To go one pixel line back in the image array, we have to
-          //subtract 1x xDimension.
-          vtkIdType xy = pixelID;
-          vtkIdType x_1y = pixelID-1;
-          vtkIdType xy_1 = pixelID-xDimension;
-          vtkIdType x_1y_1 = xy_1-1;
-
-          //Find the corresponding vertex ID's in the saved vertexIdList:
-          vtkIdType xyV = m_VertexIdList->GetId(xy);
-          vtkIdType x_1yV = m_VertexIdList->GetId(x_1y);
-          vtkIdType xy_1V = m_VertexIdList->GetId(xy_1);
-          vtkIdType x_1y_1V = m_VertexIdList->GetId(x_1y_1);
-
-          if (isPointValid[xy]&&isPointValid[x_1y]&&isPointValid[x_1y_1]&&isPointValid[xy_1]) // check if points of cell are valid
+          if((i >= 1) && (j >= 1))
           {
-            polys->InsertNextCell(3);
-            polys->InsertCellPoint(x_1yV);
-            polys->InsertCellPoint(xyV);
-            polys->InsertCellPoint(x_1y_1V);
-
-            polys->InsertNextCell(3);
-            polys->InsertCellPoint(x_1y_1V);
-            polys->InsertCellPoint(xyV);
-            polys->InsertCellPoint(xy_1V);
+            //This little piece of art explains the ID's:
+            //
+            // P(x_1y_1)---P(xy_1)
+            // |    |
+            // |    |
+            // |    |
+            // P(x_1y)-----P(xy)
+            //
+            //We can only start triangulation if we are at vertex (1,1),
+            //because we need the other 3 vertices near this one.
+            //To go one pixel line back in the image array, we have to
+            //subtract 1x xDimension.
+            vtkIdType xy = pixelID;
+            vtkIdType x_1y = pixelID-1;
+            vtkIdType xy_1 = pixelID-xDimension;
+            vtkIdType x_1y_1 = xy_1-1;
+
+            //Find the corresponding vertex ID's in the saved vertexIdList:
+            vtkIdType xyV = m_VertexIdList->GetId(xy);
+            vtkIdType x_1yV = m_VertexIdList->GetId(x_1y);
+            vtkIdType xy_1V = m_VertexIdList->GetId(xy_1);
+            vtkIdType x_1y_1V = m_VertexIdList->GetId(x_1y_1);
+
+            if (isPointValid[xy]&&isPointValid[x_1y]&&isPointValid[x_1y_1]&&isPointValid[xy_1]) // check if points of cell are valid
+            {
+              polys->InsertNextCell(3);
+              polys->InsertCellPoint(x_1yV);
+              polys->InsertCellPoint(xyV);
+              polys->InsertCellPoint(x_1y_1V);
+
+              polys->InsertNextCell(3);
+              polys->InsertCellPoint(x_1y_1V);
+              polys->InsertCellPoint(xyV);
+              polys->InsertCellPoint(xy_1V);
+            }
           }
         }
+        else
+        {
+          verts->InsertNextCell(1);
+          verts->InsertCellPoint(m_VertexIdList->GetId(pixelID));
+        }
 
         //Scalar values are necessary for mapping colors/texture onto the surface
         if (scalarFloatData)
         {
           scalarArray->InsertTuple1(m_VertexIdList->GetId(pixelID), scalarFloatData[pixelID]);
         }
         //These Texture Coordinates will map color pixel and vertices 1:1 (e.g. for Kinect).
         float xNorm = (((float)i)/xDimension); // correct video texture scale for kinect
         float yNorm = ((float)j)/yDimension; //don't flip. we don't need to flip.
         textureCoords->InsertTuple2(m_VertexIdList->GetId(pixelID), xNorm, yNorm);
       }
     }
   }
 
   vtkSmartPointer<vtkPolyData> mesh = vtkSmartPointer<vtkPolyData>::New();
   mesh->SetPoints(points);
-  mesh->SetPolys(polys);
+  if (m_GenerateTriangularMesh)
+  {
+    mesh->SetPolys(polys);
+  }
+  else
+  {
+    mesh->SetVerts(verts);
+  }
 
   //Pass the scalars to the polydata (if they were set).
   if (scalarArray->GetNumberOfTuples()>0)
   {
     mesh->GetPointData()->SetScalars(scalarArray);
   }
 
   //Pass the TextureCoords to the polydata anyway (to save them).
   mesh->GetPointData()->SetTCoords(textureCoords);
 
   output->SetVtkPolyData(mesh);
 }
 
 void mitk::ToFDistanceImageToSurfaceFilter::CreateOutputsForAllInputs()
 {
   this->SetNumberOfOutputs(this->GetNumberOfInputs()); // create outputs for all inputs
   for (unsigned int idx = 0; idx < this->GetNumberOfOutputs(); ++idx)
     if (this->GetOutput(idx) == NULL)
     {
       DataObjectPointer newOutput = this->MakeOutput(idx);
       this->SetNthOutput(idx, newOutput);
     }
   this->Modified();
 }
 
 void mitk::ToFDistanceImageToSurfaceFilter::GenerateOutputInformation()
 {
   this->GetOutput();
   itkDebugMacro(<<"GenerateOutputInformation()");
 }
 
 void mitk::ToFDistanceImageToSurfaceFilter::SetScalarImage(IplImage* iplScalarImage)
 {
   this->m_IplScalarImage = iplScalarImage;
   this->Modified();
 }
 
 IplImage* mitk::ToFDistanceImageToSurfaceFilter::GetScalarImage()
 {
   return this->m_IplScalarImage;
 }
 
 void mitk::ToFDistanceImageToSurfaceFilter::SetTextureImageWidth(int width)
 {
   this->m_TextureImageWidth = width;
 }
 
 void mitk::ToFDistanceImageToSurfaceFilter::SetTextureImageHeight(int height)
 {
   this->m_TextureImageHeight = height;
 }
diff --git a/Modules/ToFProcessing/mitkToFDistanceImageToSurfaceFilter.h b/Modules/ToFProcessing/mitkToFDistanceImageToSurfaceFilter.h
index 9f12e4b86c..8ad3daac71 100644
--- a/Modules/ToFProcessing/mitkToFDistanceImageToSurfaceFilter.h
+++ b/Modules/ToFProcessing/mitkToFDistanceImageToSurfaceFilter.h
@@ -1,172 +1,176 @@
 /*===================================================================
 
 The Medical Imaging Interaction Toolkit (MITK)
 
 Copyright (c) German Cancer Research Center,
 Division of Medical and Biological Informatics.
 All rights reserved.
 
 This software is distributed WITHOUT ANY WARRANTY; without
 even the implied warranty of MERCHANTABILITY or FITNESS FOR
 A PARTICULAR PURPOSE.
 
 See LICENSE.txt or http://www.mitk.org for details.
 
 ===================================================================*/
 
 #ifndef __mitkToFDistanceImageToSurfaceFilter_h
 #define __mitkToFDistanceImageToSurfaceFilter_h
 
 #include
 #include
 #include
 #include
 #include
 #include "mitkCameraIntrinsics.h"
 #include
 #include
 #include
 #include
 
 namespace mitk
 {
   /**
   * @brief Converts a Time-of-Flight (ToF) distance image to a 3D surface using the pinhole camera model for coordinate computation.
   * The intrinsic parameters of the camera (FocalLength, PrincipalPoint, InterPixelDistance) are set via SetCameraIntrinsics(). The
   * measured distance for each pixel corresponds to the distance between the object point and the corresponding image point on the
   * image plane.
  *
  * The coordinate conversion follows the model of a common pinhole camera where the origin of the camera
  * coordinate system (world coordinates) is at the pinhole.
  * \image html ../Modules/ToFProcessing/Documentation/PinholeCameraModel.png
  * The definition of the image plane and its coordinate systems (pixel and mm) is depicted in the following image.
  * \image html ../Modules/ToFProcessing/Documentation/ImagePlane.png
  *
  * @ingroup SurfaceFilters
  * @ingroup ToFProcessing
  */
  class mitkToFProcessing_EXPORT ToFDistanceImageToSurfaceFilter : public SurfaceSource
  {
  public:
 
    mitkClassMacro( ToFDistanceImageToSurfaceFilter , SurfaceSource );
    itkNewMacro( Self );
 
    itkSetMacro(CameraIntrinsics, mitk::CameraIntrinsics::Pointer);
    itkGetMacro(CameraIntrinsics, mitk::CameraIntrinsics::Pointer);
    itkSetMacro(InterPixelDistance,ToFProcessingCommon::ToFPoint2D);
    itkGetMacro(InterPixelDistance,ToFProcessingCommon::ToFPoint2D);
    itkSetMacro(TextureIndex,int);
    itkSetMacro(VertexIdList, vtkSmartPointer<vtkIdList>);
    itkGetMacro(VertexIdList, vtkSmartPointer<vtkIdList>);
 
+   itkSetMacro(GenerateTriangularMesh,bool);
+   itkGetMacro(GenerateTriangularMesh,bool);
+
    /**
    * @brief The ReconstructionModeType enum: defines the reconstruction mode, i.e. whether the focal length is given
    * in pixel units without an inter-pixel distance, or in mm together with the inter-pixel distance. The Kinect
    * option is a special reconstruction mode for the Kinect.
    */
    enum ReconstructionModeType{ WithOutInterPixelDistance = 1, WithInterPixelDistance = 2, Kinect = 3};
 
    itkSetEnumMacro(ReconstructionMode,ReconstructionModeType);
    itkGetEnumMacro(ReconstructionMode,ReconstructionModeType);
 
    /*!
    \brief Set scalar image used as texture of the surface.
    \param iplScalarImage OpenCV image for texturing
    */
    void SetScalarImage(IplImage* iplScalarImage);
 
    /*!
    \brief Get scalar image used as texture of the surface.
    \return OpenCV image for texturing
    */
    IplImage* GetScalarImage();
 
    /*!
    \brief Set width of the scalar image used for texturing the surface
    \param width width (x-dimension) of the texture image
    */
    void SetTextureImageWidth(int width);
 
    /*!
    \brief Set height of the scalar image used for texturing the surface
    \param height height (y-dimension) of the texture image
    */
    void SetTextureImageHeight(int height);
 
    /*!
    \brief Sets the input of this filter
    \param distanceImage input is the distance image of e.g. a ToF camera
    */
    virtual void SetInput( Image* distanceImage);
 
    /*!
    \brief Sets the input of this filter and the intrinsic parameters
    \param distanceImage input is the distance image of e.g. a ToF camera
    \param cameraIntrinsics camera model holding the intrinsic parameters (focal length, principal point, etc.) needed for the reconstruction
    */
    virtual void SetInput( Image* distanceImage, mitk::CameraIntrinsics::Pointer cameraIntrinsics );
 
    /*!
    \brief Sets the input of this filter at idx
    \param idx number of the current input
    \param distanceImage input is the distance image of e.g. a ToF camera
    */
    virtual void SetInput(unsigned int idx, Image* distanceImage);
 
    /*!
    \brief Sets the input of this filter at idx and the intrinsic parameters
    \param idx number of the current input
    \param distanceImage input is the distance image of e.g. a ToF camera
    \param cameraIntrinsics This is the camera model which holds parameters like focal length, pixel size, etc. which are needed for the reconstruction of the surface.
    */
    virtual void SetInput( unsigned int idx, Image* distanceImage, mitk::CameraIntrinsics::Pointer cameraIntrinsics );
 
    /*!
    \brief Returns the input of this filter
    */
    Image* GetInput();
 
    /*!
    \brief Returns the input with id idx of this filter
    */
    Image* GetInput(unsigned int idx);
 
  protected:
 
    /*!
    \brief Standard constructor
    */
    ToFDistanceImageToSurfaceFilter();
    /*!
    \brief Standard destructor
    */
    ~ToFDistanceImageToSurfaceFilter();
 
    virtual void GenerateOutputInformation();
 
    /*!
    \brief Method generating the output of this filter. Called in the update process of the pipeline.
    This method generates the output of this SurfaceSource: the surface reconstructed from the 3D points.
    */
    virtual void GenerateData();
 
    /**
    * \brief Create an output for each input
    *
    * This method sets the number of outputs to the number of inputs
    * and creates missing output objects.
    * \warning any additional outputs that exist before the method is called are deleted
    */
    void CreateOutputsForAllInputs();
 
    IplImage* m_IplScalarImage; ///< Scalar image used for surface texturing
    mitk::CameraIntrinsics::Pointer m_CameraIntrinsics; ///< Specifies the intrinsic parameters
    int m_TextureImageWidth; ///< Width (x-dimension) of the texture image
    int m_TextureImageHeight; ///< Height (y-dimension) of the texture image
    ToFProcessingCommon::ToFPoint2D m_InterPixelDistance; ///< Distance in mm between two adjacent pixels on the ToF camera chip
    int m_TextureIndex; ///< Index of the input used as texture image when no scalar image was set via SetScalarImage(). 0 = Distance, 1 = Amplitude, 2 = Intensity
+   bool m_GenerateTriangularMesh; ///< If true (default), the reconstructed points are triangulated into a surface mesh; if false, the output contains the points only as single vertices (point cloud)
    ReconstructionModeType m_ReconstructionMode; ///< Defines the reconstruction mode: focal length in pixel units without inter-pixel distance (WithOutInterPixelDistance), focal length in mm with inter-pixel distance (WithInterPixelDistance), or the special Kinect mode
    vtkSmartPointer<vtkIdList> m_VertexIdList; ///< vtkIdList mapping the image pixel IDs to the corresponding polydata vertex IDs. Can be accessed after GenerateData() to obtain this mapping.
  };
 } //END mitk namespace
 
 #endif
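Usage note (not part of the patch): the sketch below shows how client code could drive the new GenerateTriangularMesh flag to obtain either a triangulated surface or a plain point cloud from a ToF distance image. The helper function name and the choice of the WithInterPixelDistance reconstruction mode are illustrative assumptions; only the filter API declared in the header above is relied upon, and the distance image and intrinsics are assumed to be provided by the surrounding application (e.g. a ToF camera device).

#include <mitkToFDistanceImageToSurfaceFilter.h>
#include <mitkSurface.h>
#include <mitkImage.h>
#include <mitkCameraIntrinsics.h>

// Hypothetical helper: reconstructs either a triangulated surface (generateMesh == true)
// or a plain point cloud (generateMesh == false) from a ToF distance image.
mitk::Surface::Pointer ReconstructFromDistanceImage(mitk::Image::Pointer distanceImage,
                                                    mitk::CameraIntrinsics::Pointer cameraIntrinsics,
                                                    bool generateMesh)
{
  mitk::ToFDistanceImageToSurfaceFilter::Pointer surfaceFilter =
      mitk::ToFDistanceImageToSurfaceFilter::New();

  // Distance image and intrinsic camera parameters drive the pinhole reconstruction.
  surfaceFilter->SetInput(distanceImage, cameraIntrinsics);
  surfaceFilter->SetReconstructionMode(
      mitk::ToFDistanceImageToSurfaceFilter::WithInterPixelDistance);

  // true (the default) triangulates neighboring pixels into polys;
  // false stores every reconstructed point as a single vertex cell instead.
  surfaceFilter->SetGenerateTriangularMesh(generateMesh);

  surfaceFilter->Update();
  return surfaceFilter->GetOutput();
}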