diff --git a/Core/Code/Rendering/mitkGeometry2DDataVtkMapper3D.cpp b/Core/Code/Rendering/mitkGeometry2DDataVtkMapper3D.cpp
index afc74023d7..04ebb884e5 100644
--- a/Core/Code/Rendering/mitkGeometry2DDataVtkMapper3D.cpp
+++ b/Core/Code/Rendering/mitkGeometry2DDataVtkMapper3D.cpp
@@ -1,588 +1,578 @@
/*===================================================================

The Medical Imaging Interaction Toolkit (MITK)

Copyright (c) German Cancer Research Center,
Division of Medical and Biological Informatics.
All rights reserved.

This software is distributed WITHOUT ANY WARRANTY; without
even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE.

See LICENSE.txt or http://www.mitk.org for details.

===================================================================*/

#include "mitkGeometry2DDataVtkMapper3D.h"
#include "mitkImageVtkMapper2D.h"
#include "mitkSmartPointerProperty.h"
#include "mitkSurface.h"
#include "mitkVtkRepresentationProperty.h"
#include "mitkWeakPointerProperty.h"
#include "mitkNodePredicateDataType.h"
#include "mitkNodePredicateOr.h"
#include "vtkNeverTranslucentTexture.h"
+#include "vtkMitkLevelWindowFilter.h"
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

namespace mitk
{

Geometry2DDataVtkMapper3D::Geometry2DDataVtkMapper3D()
  : m_NormalsActorAdded(false),
    m_DataStorage(NULL)
{
  m_EdgeTuber = vtkTubeFilter::New();
  m_EdgeMapper = vtkPolyDataMapper::New();
  m_SurfaceCreator = Geometry2DDataToSurfaceFilter::New();
  m_SurfaceCreatorBoundingBox = BoundingBox::New();
  m_SurfaceCreatorPointsContainer = BoundingBox::PointsContainer::New();
  m_Edges = vtkFeatureEdges::New();
  m_Edges->BoundaryEdgesOn();
  m_Edges->FeatureEdgesOff();
  m_Edges->NonManifoldEdgesOff();
  m_Edges->ManifoldEdgesOff();
  m_EdgeTransformer = vtkTransformPolyDataFilter::New();
  m_NormalsTransformer = vtkTransformPolyDataFilter::New();
  m_EdgeActor = vtkActor::New();
  m_BackgroundMapper = vtkPolyDataMapper::New();
  m_BackgroundActor = vtkActor::New();
  m_Prop3DAssembly = vtkAssembly::New();
  m_ImageAssembly = vtkAssembly::New();

  m_SurfaceCreatorBoundingBox->SetPoints( m_SurfaceCreatorPointsContainer );

  m_Cleaner = vtkCleanPolyData::New();
  m_Cleaner->PieceInvariantOn();
  m_Cleaner->ConvertLinesToPointsOn();
  m_Cleaner->ConvertPolysToLinesOn();
  m_Cleaner->ConvertStripsToPolysOn();
  m_Cleaner->PointMergingOn();

  // Make sure that the FeatureEdge algorithm is initialized with a "valid"
  // (though empty) input
  vtkPolyData *emptyPolyData = vtkPolyData::New();
  m_Cleaner->SetInput( emptyPolyData );
  emptyPolyData->Delete();

  m_Edges->SetInput(m_Cleaner->GetOutput());
  m_EdgeTransformer->SetInput( m_Edges->GetOutput() );
  m_EdgeTuber->SetInput( m_EdgeTransformer->GetOutput() );
  m_EdgeTuber->SetVaryRadiusToVaryRadiusOff();
  m_EdgeTuber->SetNumberOfSides( 12 );
  m_EdgeTuber->CappingOn();
  m_EdgeMapper->SetInput( m_EdgeTuber->GetOutput() );
  m_EdgeMapper->ScalarVisibilityOff();

  m_BackgroundMapper->SetInput(emptyPolyData);

  m_EdgeActor->SetMapper( m_EdgeMapper );

  m_BackgroundActor->GetProperty()->SetAmbient( 0.5 );
  m_BackgroundActor->GetProperty()->SetColor( 0.0, 0.0, 0.0 );
  m_BackgroundActor->GetProperty()->SetOpacity( 0.0 );
  m_BackgroundActor->SetMapper( m_BackgroundMapper );

  vtkProperty * backfaceProperty = m_BackgroundActor->MakeProperty();
  backfaceProperty->SetColor( 0.0, 0.0, 0.0 );
  m_BackgroundActor->SetBackfaceProperty( backfaceProperty );
  backfaceProperty->Delete();

  m_FrontHedgeHog = vtkHedgeHog::New();
  m_BackHedgeHog = vtkHedgeHog::New();

  m_FrontNormalsMapper =
vtkPolyDataMapper::New(); m_FrontNormalsMapper->SetInput( m_FrontHedgeHog->GetOutput() ); m_BackNormalsMapper = vtkPolyDataMapper::New(); m_Prop3DAssembly->AddPart( m_EdgeActor ); m_Prop3DAssembly->AddPart( m_ImageAssembly ); m_FrontNormalsActor = vtkActor::New(); m_FrontNormalsActor->SetMapper(m_FrontNormalsMapper); m_BackNormalsActor = vtkActor::New(); m_BackNormalsActor->SetMapper(m_BackNormalsMapper); m_ImageMapperDeletedCommand = MemberCommandType::New(); m_ImageMapperDeletedCommand->SetCallbackFunction( this, &Geometry2DDataVtkMapper3D::ImageMapperDeletedCallback ); } Geometry2DDataVtkMapper3D::~Geometry2DDataVtkMapper3D() { m_ImageAssembly->Delete(); m_Prop3DAssembly->Delete(); m_EdgeTuber->Delete(); m_EdgeMapper->Delete(); m_EdgeTransformer->Delete(); m_Cleaner->Delete(); m_Edges->Delete(); m_NormalsTransformer->Delete(); m_EdgeActor->Delete(); m_BackgroundMapper->Delete(); m_BackgroundActor->Delete(); m_FrontNormalsMapper->Delete(); m_FrontNormalsActor->Delete(); m_FrontHedgeHog->Delete(); m_BackNormalsMapper->Delete(); m_BackNormalsActor->Delete(); m_BackHedgeHog->Delete(); // Delete entries in m_ImageActors list one by one m_ImageActors.clear(); m_DataStorage = NULL; } vtkProp* Geometry2DDataVtkMapper3D::GetVtkProp(mitk::BaseRenderer * /*renderer*/) { if ( (this->GetDataNode() != NULL ) && (m_ImageAssembly != NULL) ) { // Do not transform the entire Prop3D assembly, but only the image part // here. The colored frame is transformed elsewhere (via m_EdgeTransformer), // since only vertices should be transformed there, not the poly data // itself, to avoid distortion for anisotropic datasets. m_ImageAssembly->SetUserTransform( this->GetDataNode()->GetVtkTransform() ); } return m_Prop3DAssembly; } void Geometry2DDataVtkMapper3D::UpdateVtkTransform(mitk::BaseRenderer * /*renderer*/) { m_ImageAssembly->SetUserTransform( this->GetDataNode()->GetVtkTransform(this->GetTimestep()) ); } const Geometry2DData* Geometry2DDataVtkMapper3D::GetInput() { return static_cast ( GetData() ); } void Geometry2DDataVtkMapper3D::SetDataStorageForTexture(mitk::DataStorage* storage) { if(storage != NULL && m_DataStorage != storage ) { m_DataStorage = storage; this->Modified(); } } void Geometry2DDataVtkMapper3D::ImageMapperDeletedCallback( itk::Object *caller, const itk::EventObject& /*event*/ ) { ImageVtkMapper2D *imageMapper = dynamic_cast< ImageVtkMapper2D * >( caller ); if ( (imageMapper != NULL) ) { if ( m_ImageActors.count( imageMapper ) > 0) { m_ImageActors[imageMapper].m_Sender = NULL; // sender is already destroying itself m_ImageActors.erase( imageMapper ); } } } void Geometry2DDataVtkMapper3D::GenerateDataForRenderer(BaseRenderer* renderer) { SetVtkMapperImmediateModeRendering(m_EdgeMapper); SetVtkMapperImmediateModeRendering(m_BackgroundMapper); // Remove all actors from the assembly, and re-initialize it with the // edge actor m_ImageAssembly->GetParts()->RemoveAllItems(); if ( !this->IsVisible(renderer) ) { // visibility has explicitly to be set in the single actors // due to problems when using cell picking: // even if the assembly is invisible, the renderer contains // references to the assemblies parts. During picking the // visibility of each part is checked, and not only for the // whole assembly. m_ImageAssembly->VisibilityOff(); m_EdgeActor->VisibilityOff(); return; } // visibility has explicitly to be set in the single actors // due to problems when using cell picking: // even if the assembly is invisible, the renderer contains // references to the assemblies parts. 
During picking the // visibility of each part is checked, and not only for the // whole assembly. m_ImageAssembly->VisibilityOn(); m_EdgeActor->VisibilityOn(); Geometry2DData::Pointer input = const_cast< Geometry2DData * >(this->GetInput()); if (input.IsNotNull() && (input->GetGeometry2D() != NULL)) { SmartPointerProperty::Pointer surfacecreatorprop; surfacecreatorprop = dynamic_cast< SmartPointerProperty * >(GetDataNode()->GetProperty("surfacegeometry", renderer)); if ( (surfacecreatorprop.IsNull()) || (surfacecreatorprop->GetSmartPointer().IsNull()) || ((m_SurfaceCreator = dynamic_cast (surfacecreatorprop->GetSmartPointer().GetPointer())).IsNull() ) ) { m_SurfaceCreator->PlaceByGeometryOn(); surfacecreatorprop = SmartPointerProperty::New( m_SurfaceCreator ); GetDataNode()->SetProperty("surfacegeometry", surfacecreatorprop); } m_SurfaceCreator->SetInput(input); int res; if (GetDataNode()->GetIntProperty("xresolution", res, renderer)) { m_SurfaceCreator->SetXResolution(res); } if (GetDataNode()->GetIntProperty("yresolution", res, renderer)) { m_SurfaceCreator->SetYResolution(res); } double tubeRadius = 1.0; // Radius of tubular edge surrounding plane // Clip the Geometry2D with the reference geometry bounds (if available) if ( input->GetGeometry2D()->HasReferenceGeometry() ) { Geometry3D *referenceGeometry = input->GetGeometry2D()->GetReferenceGeometry(); BoundingBox::PointType boundingBoxMin, boundingBoxMax; boundingBoxMin = referenceGeometry->GetBoundingBox()->GetMinimum(); boundingBoxMax = referenceGeometry->GetBoundingBox()->GetMaximum(); if ( referenceGeometry->GetImageGeometry() ) { for ( unsigned int i = 0; i < 3; ++i ) { boundingBoxMin[i] -= 0.5; boundingBoxMax[i] -= 0.5; } } m_SurfaceCreatorPointsContainer->CreateElementAt( 0 ) = boundingBoxMin; m_SurfaceCreatorPointsContainer->CreateElementAt( 1 ) = boundingBoxMax; m_SurfaceCreatorBoundingBox->ComputeBoundingBox(); m_SurfaceCreator->SetBoundingBox( m_SurfaceCreatorBoundingBox ); tubeRadius = referenceGeometry->GetDiagonalLength() / 450.0; } // If no reference geometry is available, clip with the current global // bounds else if (m_DataStorage.IsNotNull()) { m_SurfaceCreator->SetBoundingBox(m_DataStorage->ComputeVisibleBoundingBox(NULL, "includeInBoundingBox")); tubeRadius = sqrt( m_SurfaceCreator->GetBoundingBox()->GetDiagonalLength2() ) / 450.0; } // Calculate the surface of the Geometry2D m_SurfaceCreator->Update(); Surface *surface = m_SurfaceCreator->GetOutput(); // Check if there's something to display, otherwise return if ( (surface->GetVtkPolyData() == 0 ) || (surface->GetVtkPolyData()->GetNumberOfCells() == 0) ) { m_ImageAssembly->VisibilityOff(); return; } // add a graphical representation of the surface normals if requested DataNode* node = this->GetDataNode(); bool displayNormals = false; bool colorTwoSides = false; bool invertNormals = false; node->GetBoolProperty("draw normals 3D", displayNormals, renderer); node->GetBoolProperty("color two sides", colorTwoSides, renderer); node->GetBoolProperty("invert normals", invertNormals, renderer); //if we want to draw the display normals or render two sides we have to get the colors if( displayNormals || colorTwoSides ) { //get colors float frontColor[3] = { 0.0, 0.0, 1.0 }; node->GetColor( frontColor, renderer, "front color" ); float backColor[3] = { 1.0, 0.0, 0.0 }; node->GetColor( backColor, renderer, "back color" ); if ( displayNormals ) { m_NormalsTransformer->SetInput( surface->GetVtkPolyData() ); 
m_NormalsTransformer->SetTransform(node->GetVtkTransform(this->GetTimestep()) ); m_FrontHedgeHog->SetInput( m_NormalsTransformer->GetOutput() ); m_FrontHedgeHog->SetVectorModeToUseNormal(); m_FrontHedgeHog->SetScaleFactor( invertNormals ? 1.0 : -1.0 ); m_FrontNormalsActor->GetProperty()->SetColor( frontColor[0], frontColor[1], frontColor[2] ); m_BackHedgeHog->SetInput( m_NormalsTransformer->GetOutput() ); m_BackHedgeHog->SetVectorModeToUseNormal(); m_BackHedgeHog->SetScaleFactor( invertNormals ? -1.0 : 1.0 ); m_BackNormalsActor->GetProperty()->SetColor( backColor[0], backColor[1], backColor[2] ); //if there is no actor added yet, add one if ( !m_NormalsActorAdded ) { m_Prop3DAssembly->AddPart( m_FrontNormalsActor ); m_Prop3DAssembly->AddPart( m_BackNormalsActor ); m_NormalsActorAdded = true; } } //if we don't want to display normals AND there is an actor added remove the actor else if ( m_NormalsActorAdded ) { m_Prop3DAssembly->RemovePart( m_FrontNormalsActor ); m_Prop3DAssembly->RemovePart( m_BackNormalsActor ); m_NormalsActorAdded = false; } if ( colorTwoSides ) { if ( !invertNormals ) { m_BackgroundActor->GetProperty()->SetColor( backColor[0], backColor[1], backColor[2] ); m_BackgroundActor->GetBackfaceProperty()->SetColor( frontColor[0], frontColor[1], frontColor[2] ); } else { m_BackgroundActor->GetProperty()->SetColor( frontColor[0], frontColor[1], frontColor[2] ); m_BackgroundActor->GetBackfaceProperty()->SetColor( backColor[0], backColor[1], backColor[2] ); } } } // Add black background for all images (which may be transparent) m_BackgroundMapper->SetInput( surface->GetVtkPolyData() ); m_ImageAssembly->AddPart( m_BackgroundActor ); LayerSortedActorList layerSortedActors; // Traverse the data tree to find nodes resliced by ImageMapperGL2D mitk::NodePredicateOr::Pointer p = mitk::NodePredicateOr::New(); //use a predicate to get all data nodes which are "images" or inherit from mitk::Image mitk::TNodePredicateDataType< mitk::Image >::Pointer predicateAllImages = mitk::TNodePredicateDataType< mitk::Image >::New(); mitk::DataStorage::SetOfObjects::ConstPointer all = m_DataStorage->GetSubset(predicateAllImages); //process all found images for (mitk::DataStorage::SetOfObjects::ConstIterator it = all->Begin(); it != all->End(); ++it) { DataNode *node = it->Value(); if (node != NULL) this->ProcessNode(node, renderer, surface, layerSortedActors); } // Add all image actors to the assembly, sorted according to // layer property LayerSortedActorList::iterator actorIt; for ( actorIt = layerSortedActors.begin(); actorIt != layerSortedActors.end(); ++actorIt ) { m_ImageAssembly->AddPart( actorIt->second ); } // Configurate the tube-shaped frame: size according to the surface // bounds, color as specified in the plane's properties vtkPolyData *surfacePolyData = surface->GetVtkPolyData(); m_Cleaner->SetInput(surfacePolyData); m_EdgeTransformer->SetTransform(this->GetDataNode()->GetVtkTransform(this->GetTimestep()) ); // Adjust the radius according to extent m_EdgeTuber->SetRadius( tubeRadius ); // Get the plane's color and set the tube properties accordingly ColorProperty::Pointer colorProperty; colorProperty = dynamic_cast(this->GetDataNode()->GetProperty( "color" )); if ( colorProperty.IsNotNull() ) { const Color& color = colorProperty->GetColor(); m_EdgeActor->GetProperty()->SetColor(color.GetRed(), color.GetGreen(), color.GetBlue()); } else { m_EdgeActor->GetProperty()->SetColor( 1.0, 1.0, 1.0 ); } m_ImageAssembly->SetUserTransform(this->GetDataNode()->GetVtkTransform(this->GetTimestep()) ); } 
VtkRepresentationProperty* representationProperty; this->GetDataNode()->GetProperty(representationProperty, "material.representation", renderer); if ( representationProperty != NULL ) m_BackgroundActor->GetProperty()->SetRepresentation( representationProperty->GetVtkRepresentation() ); } void Geometry2DDataVtkMapper3D::ProcessNode( DataNode * node, BaseRenderer* renderer, Surface * surface, LayerSortedActorList &layerSortedActors ) { if ( node != NULL ) { //we need to get the information from the 2D mapper to render the texture on the 3D plane ImageVtkMapper2D *imageMapper = dynamic_cast< ImageVtkMapper2D * >( node->GetMapper(1) ); //GetMapper(1) provides the 2D mapper for the data node //if there is a 2D mapper, which is not the standard image mapper... if(!imageMapper && node->GetMapper(1)) { //... check if it is the composite mapper std::string cname(node->GetMapper(1)->GetNameOfClass()); if(!cname.compare("CompositeMapper")) //string.compare returns 0 if the two strings are equal. { //get the standard image mapper. //This is a special case in MITK and does only work for the CompositeMapper. imageMapper = dynamic_cast( node->GetMapper(3) ); } } if ( (node->IsVisible(renderer)) && imageMapper ) { WeakPointerProperty::Pointer rendererProp = dynamic_cast< WeakPointerProperty * >(GetDataNode()->GetPropertyList()->GetProperty("renderer")); if ( rendererProp.IsNotNull() ) { BaseRenderer::Pointer planeRenderer = dynamic_cast< BaseRenderer * >(rendererProp->GetWeakPointer().GetPointer()); // Retrieve and update image to be mapped const ImageVtkMapper2D::LocalStorage* localStorage = imageMapper->GetLocalStorage(planeRenderer); if ( planeRenderer.IsNotNull() ) { // perform update of imagemapper if needed (maybe the respective 2D renderwindow is not rendered/update before) imageMapper->Update(planeRenderer); // If it has not been initialized already in a previous pass, // generate an actor and a texture object to // render the image associated with the ImageVtkMapper2D. vtkActor *imageActor; vtkDataSetMapper *dataSetMapper = NULL; vtkTexture *texture; if ( m_ImageActors.count( imageMapper ) == 0 ) { dataSetMapper = vtkDataSetMapper::New(); //Enable rendering without copying the image. dataSetMapper->ImmediateModeRenderingOn(); texture = vtkNeverTranslucentTexture::New(); texture->RepeatOff(); imageActor = vtkActor::New(); imageActor->SetMapper( dataSetMapper ); imageActor->SetTexture( texture ); imageActor->GetProperty()->SetOpacity(0.999); // HACK! otherwise VTK wouldn't recognize this as translucent surface (if LUT values map to alpha < 255 // improvement: apply "opacity" property onle HERE and also in 2D image mapper. DO NOT change LUT to achieve translucent images (see method ChangeOpacity in image mapper 2D) // Make imageActor the sole owner of the mapper and texture // objects dataSetMapper->UnRegister( NULL ); texture->UnRegister( NULL ); // Store the actor so that it may be accessed in following // passes. m_ImageActors[imageMapper].Initialize(imageActor, imageMapper, m_ImageMapperDeletedCommand); } else { // Else, retrieve the actor and associated objects from the // previous pass. imageActor = m_ImageActors[imageMapper].m_Actor; dataSetMapper = (vtkDataSetMapper *)imageActor->GetMapper(); texture = imageActor->GetTexture(); } // Set poly data new each time its object changes (e.g. 
when // switching between planar and curved geometries)
if ( (dataSetMapper != NULL) && (dataSetMapper->GetInput() != surface->GetVtkPolyData()) )
{
  dataSetMapper->SetInput( surface->GetVtkPolyData() );
}

//Check if the m_ReslicedImage is NULL.
//This is the case when no image geometry is met by
//the reslicer. In that case, the texture has to be
//empty (black) and we don't have to do anything.
//See fixed bug #13275
if(localStorage->m_ReslicedImage != NULL)
{
-  bool binaryOutline = node->IsOn( "outline binary", renderer );
-  if( binaryOutline )
-  {
-    texture->SetInput( localStorage->m_ReslicedImage );
-  }
-  else
-  {
-    texture->SetInput( localStorage->m_Texture->GetInput() );
-  }
-  // VTK (mis-)interprets unsigned char (binary) images as color images;
-  // So, we must manually turn on their mapping through a (gray scale) lookup table;
-  texture->SetMapColorScalarsThroughLookupTable( localStorage->m_Texture->GetMapColorScalarsThroughLookupTable() );
+  texture->SetInputConnection(localStorage->m_LevelWindowFilter->GetOutputPort());
+
+  // do not use a VTK lookup table (we do that ourselves in m_LevelWindowFilter)
+  texture->MapColorScalarsThroughLookupTableOff();

  //re-use properties from the 2D image mapper
  imageActor->SetProperty( localStorage->m_Actor->GetProperty() );
  imageActor->GetProperty()->SetAmbient(0.5);

  // Set texture interpolation on/off
  bool textureInterpolation = node->IsOn( "texture interpolation", renderer );
  texture->SetInterpolate( textureInterpolation );

-  //get the lookuptable from the 2D image mapper
-  texture->SetLookupTable( localStorage->m_Texture->GetLookupTable() );
-
  // Store this actor to be added to the actor assembly, sort
  // by layer
  int layer = 1;
  node->GetIntProperty( "layer", layer );

  layerSortedActors.insert(std::pair< int, vtkActor * >( layer, imageActor ) );
}
}
}
}
}
}

void Geometry2DDataVtkMapper3D::ActorInfo::Initialize(vtkActor* actor, itk::Object* sender, itk::Command* command)
{
  m_Actor = actor;
  m_Sender = sender;

  // Get informed when ImageMapper object is deleted, so that
  // the data structures built here can be deleted as well
  m_ObserverID = sender->AddObserver( itk::DeleteEvent(), command );
}

Geometry2DDataVtkMapper3D::ActorInfo::ActorInfo()
  : m_Actor(NULL), m_Sender(NULL), m_ObserverID(0)
{
}

Geometry2DDataVtkMapper3D::ActorInfo::~ActorInfo()
{
  if(m_Sender != NULL)
  {
    m_Sender->RemoveObserver(m_ObserverID);
  }
  if(m_Actor != NULL)
  {
    m_Actor->Delete();
  }
}

} // namespace mitk
diff --git a/Core/Code/Rendering/mitkImageVtkMapper2D.cpp b/Core/Code/Rendering/mitkImageVtkMapper2D.cpp
index d0d9fa5333..53b666135d 100644
--- a/Core/Code/Rendering/mitkImageVtkMapper2D.cpp
+++ b/Core/Code/Rendering/mitkImageVtkMapper2D.cpp
@@ -1,1061 +1,1058 @@
/*===================================================================

The Medical Imaging Interaction Toolkit (MITK)

Copyright (c) German Cancer Research Center,
Division of Medical and Biological Informatics.
All rights reserved.

This software is distributed WITHOUT ANY WARRANTY; without
even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE.

See LICENSE.txt or http://www.mitk.org for details.
===================================================================*/ //MITK #include #include #include #include #include #include #include #include #include #include #include #include //#include #include #include "mitkImageStatisticsHolder.h" #include "mitkPlaneClipping.h" //MITK Rendering #include "mitkImageVtkMapper2D.h" #include "vtkMitkThickSlicesFilter.h" #include "vtkMitkLevelWindowFilter.h" #include "vtkNeverTranslucentTexture.h" //VTK #include #include #include #include #include #include #include #include #include #include #include #include #include #include //ITK #include mitk::ImageVtkMapper2D::ImageVtkMapper2D() { } mitk::ImageVtkMapper2D::~ImageVtkMapper2D() { //The 3D RW Mapper (Geometry2DDataVtkMapper3D) is listening to this event, //in order to delete the images from the 3D RW. this->InvokeEvent( itk::DeleteEvent() ); } //set the two points defining the textured plane according to the dimension and spacing void mitk::ImageVtkMapper2D::GeneratePlane(mitk::BaseRenderer* renderer, vtkFloatingPointType planeBounds[6]) { LocalStorage *localStorage = m_LSH.GetLocalStorage(renderer); float depth = this->CalculateLayerDepth(renderer); //Set the origin to (xMin; yMin; depth) of the plane. This is necessary for obtaining the correct //plane size in crosshair rotation and swivel mode. localStorage->m_Plane->SetOrigin(planeBounds[0], planeBounds[2], depth); //These two points define the axes of the plane in combination with the origin. //Point 1 is the x-axis and point 2 the y-axis. //Each plane is transformed according to the view (axial, coronal and saggital) afterwards. localStorage->m_Plane->SetPoint1(planeBounds[1] , planeBounds[2], depth); //P1: (xMax, yMin, depth) localStorage->m_Plane->SetPoint2(planeBounds[0], planeBounds[3], depth); //P2: (xMin, yMax, depth) } float mitk::ImageVtkMapper2D::CalculateLayerDepth(mitk::BaseRenderer* renderer) { //get the clipping range to check how deep into z direction we can render images double maxRange = renderer->GetVtkRenderer()->GetActiveCamera()->GetClippingRange()[1]; //Due to a VTK bug, we cannot use the whole clipping range. /100 is empirically determined float depth = -maxRange*0.01; // divide by 100 int layer = 0; GetDataNode()->GetIntProperty( "layer", layer, renderer); //add the layer property for each image to render images with a higher layer on top of the others depth += layer*10; //*10: keep some room for each image (e.g. for QBalls in between) if(depth > 0.0f) { depth = 0.0f; MITK_WARN << "Layer value exceeds clipping range. 
Set to minimum instead."; } return depth; } const mitk::Image* mitk::ImageVtkMapper2D::GetInput( void ) { return static_cast< const mitk::Image * >( this->GetData() ); } vtkProp* mitk::ImageVtkMapper2D::GetVtkProp(mitk::BaseRenderer* renderer) { //return the actor corresponding to the renderer return m_LSH.GetLocalStorage(renderer)->m_Actors; } void mitk::ImageVtkMapper2D::MitkRenderOverlay(BaseRenderer* renderer) { if ( this->IsVisible(renderer)==false ) return; if ( this->GetVtkProp(renderer)->GetVisibility() ) { this->GetVtkProp(renderer)->RenderOverlay(renderer->GetVtkRenderer()); } } void mitk::ImageVtkMapper2D::MitkRenderOpaqueGeometry(BaseRenderer* renderer) { if ( this->IsVisible( renderer )==false ) return; if ( this->GetVtkProp(renderer)->GetVisibility() ) { this->GetVtkProp(renderer)->RenderOpaqueGeometry( renderer->GetVtkRenderer() ); } } void mitk::ImageVtkMapper2D::MitkRenderTranslucentGeometry(BaseRenderer* renderer) { if ( this->IsVisible(renderer)==false ) return; if ( this->GetVtkProp(renderer)->GetVisibility() ) { this->GetVtkProp(renderer)->RenderTranslucentPolygonalGeometry(renderer->GetVtkRenderer()); } } void mitk::ImageVtkMapper2D::MitkRenderVolumetricGeometry(BaseRenderer* renderer) { if(IsVisible(renderer)==false) return; if ( GetVtkProp(renderer)->GetVisibility() ) { this->GetVtkProp(renderer)->RenderVolumetricGeometry(renderer->GetVtkRenderer()); } } void mitk::ImageVtkMapper2D::GenerateDataForRenderer( mitk::BaseRenderer *renderer ) { LocalStorage *localStorage = m_LSH.GetLocalStorage(renderer); mitk::Image *input = const_cast< mitk::Image * >( this->GetInput() ); mitk::DataNode* datanode = this->GetDataNode(); if ( input == NULL || input->IsInitialized() == false ) { return; } //check if there is a valid worldGeometry const Geometry2D *worldGeometry = renderer->GetCurrentWorldGeometry2D(); if( ( worldGeometry == NULL ) || ( !worldGeometry->IsValid() ) || ( !worldGeometry->HasReferenceGeometry() )) { return; } input->Update(); // early out if there is no intersection of the current rendering geometry // and the geometry of the image that is to be rendered. if ( !RenderingGeometryIntersectsImage( worldGeometry, input->GetSlicedGeometry() ) ) { // set image to NULL, to clear the texture in 3D, because // the latest image is used there if the plane is out of the geometry // see bug-13275 localStorage->m_ReslicedImage = NULL; localStorage->m_Mapper->SetInput( localStorage->m_EmptyPolyData ); return; } //set main input for ExtractSliceFilter localStorage->m_Reslicer->SetInput(input); localStorage->m_Reslicer->SetWorldGeometry(worldGeometry); localStorage->m_Reslicer->SetTimeStep( this->GetTimestep() ); //set the transformation of the image to adapt reslice axis localStorage->m_Reslicer->SetResliceTransformByGeometry( input->GetTimeSlicedGeometry()->GetGeometry3D( this->GetTimestep() ) ); //is the geometry of the slice based on the input image or the worldgeometry? bool inPlaneResampleExtentByGeometry = false; datanode->GetBoolProperty("in plane resample extent by geometry", inPlaneResampleExtentByGeometry, renderer); localStorage->m_Reslicer->SetInPlaneResampleExtentByGeometry(inPlaneResampleExtentByGeometry); // Initialize the interpolation mode for resampling; switch to nearest // neighbor if the input image is too small. 
if ( (input->GetDimension() >= 3) && (input->GetDimension(2) > 1) ) { VtkResliceInterpolationProperty *resliceInterpolationProperty; datanode->GetProperty( resliceInterpolationProperty, "reslice interpolation" ); int interpolationMode = VTK_RESLICE_NEAREST; if ( resliceInterpolationProperty != NULL ) { interpolationMode = resliceInterpolationProperty->GetInterpolation(); } switch ( interpolationMode ) { case VTK_RESLICE_NEAREST: localStorage->m_Reslicer->SetInterpolationMode(ExtractSliceFilter::RESLICE_NEAREST); break; case VTK_RESLICE_LINEAR: localStorage->m_Reslicer->SetInterpolationMode(ExtractSliceFilter::RESLICE_LINEAR); break; case VTK_RESLICE_CUBIC: localStorage->m_Reslicer->SetInterpolationMode(ExtractSliceFilter::RESLICE_CUBIC); break; } } else { localStorage->m_Reslicer->SetInterpolationMode(ExtractSliceFilter::RESLICE_NEAREST); } //set the vtk output property to true, makes sure that no unneeded mitk image convertion //is done. localStorage->m_Reslicer->SetVtkOutputRequest(true); //Thickslicing int thickSlicesMode = 0; int thickSlicesNum = 1; // Thick slices parameters if( input->GetPixelType().GetNumberOfComponents() == 1 ) // for now only single component are allowed { DataNode *dn=renderer->GetCurrentWorldGeometry2DNode(); if(dn) { ResliceMethodProperty *resliceMethodEnumProperty=0; if( dn->GetProperty( resliceMethodEnumProperty, "reslice.thickslices" ) && resliceMethodEnumProperty ) thickSlicesMode = resliceMethodEnumProperty->GetValueAsId(); IntProperty *intProperty=0; if( dn->GetProperty( intProperty, "reslice.thickslices.num" ) && intProperty ) { thickSlicesNum = intProperty->GetValue(); if(thickSlicesNum < 1) thickSlicesNum=1; if(thickSlicesNum > 10) thickSlicesNum=10; } } else { MITK_WARN << "no associated widget plane data tree node found"; } } const PlaneGeometry *planeGeometry = dynamic_cast< const PlaneGeometry * >( worldGeometry ); if(thickSlicesMode > 0) { double dataZSpacing = 1.0; Vector3D normInIndex, normal; if ( planeGeometry != NULL ){ normal = planeGeometry->GetNormal(); }else{ const mitk::AbstractTransformGeometry* abstractGeometry = dynamic_cast< const AbstractTransformGeometry * >(worldGeometry); if(abstractGeometry != NULL) normal = abstractGeometry->GetPlane()->GetNormal(); else return; //no fitting geometry set } normal.Normalize(); input->GetTimeSlicedGeometry()->GetGeometry3D( this->GetTimestep() )->WorldToIndex( normal, normInIndex ); dataZSpacing = 1.0 / normInIndex.GetNorm(); localStorage->m_Reslicer->SetOutputDimensionality( 3 ); localStorage->m_Reslicer->SetOutputSpacingZDirection(dataZSpacing); localStorage->m_Reslicer->SetOutputExtentZDirection( -thickSlicesNum, 0+thickSlicesNum ); // Do the reslicing. Modified() is called to make sure that the reslicer is // executed even though the input geometry information did not change; this // is necessary when the input /em data, but not the /em geometry changes. localStorage->m_TSFilter->SetThickSliceMode( thickSlicesMode-1 ); localStorage->m_TSFilter->SetInput( localStorage->m_Reslicer->GetVtkOutput() ); //vtkFilter=>mitkFilter=>vtkFilter update mechanism will fail without calling manually localStorage->m_Reslicer->Modified(); localStorage->m_Reslicer->Update(); localStorage->m_TSFilter->Modified(); localStorage->m_TSFilter->Update(); localStorage->m_ReslicedImage = localStorage->m_TSFilter->GetOutput(); } else { //this is needed when thick mode was enable bevore. 
These variable have to be reset to default values localStorage->m_Reslicer->SetOutputDimensionality( 2 ); localStorage->m_Reslicer->SetOutputSpacingZDirection(1.0); localStorage->m_Reslicer->SetOutputExtentZDirection( 0, 0 ); localStorage->m_Reslicer->Modified(); //start the pipeline with updating the largest possible, needed if the geometry of the input has changed localStorage->m_Reslicer->UpdateLargestPossibleRegion(); localStorage->m_ReslicedImage = localStorage->m_Reslicer->GetVtkOutput(); } // Bounds information for reslicing (only reuqired if reference geometry // is present) //this used for generating a vtkPLaneSource with the right size vtkFloatingPointType sliceBounds[6]; for ( int i = 0; i < 6; ++i ) { sliceBounds[i] = 0.0; } localStorage->m_Reslicer->GetClippedPlaneBounds(sliceBounds); //get the spacing of the slice localStorage->m_mmPerPixel = localStorage->m_Reslicer->GetOutputSpacing(); // calculate minimum bounding rect of IMAGE in texture vtkFloatingPointType textureClippingBounds[6]; for ( int i = 0; i < 6; ++i ) { textureClippingBounds[i] = 0.0; } // Calculate the actual bounds of the transformed plane clipped by the // dataset bounding box; this is required for drawing the texture at the // correct position during 3D mapping. mitk::PlaneClipping::CalculateClippedPlaneBounds( input->GetGeometry(), planeGeometry, textureClippingBounds ); textureClippingBounds[0] = static_cast< int >( textureClippingBounds[0] / localStorage->m_mmPerPixel[0] + 0.5 ); textureClippingBounds[1] = static_cast< int >( textureClippingBounds[1] / localStorage->m_mmPerPixel[0] + 0.5 ); textureClippingBounds[2] = static_cast< int >( textureClippingBounds[2] / localStorage->m_mmPerPixel[1] + 0.5 ); textureClippingBounds[3] = static_cast< int >( textureClippingBounds[3] / localStorage->m_mmPerPixel[1] + 0.5 ); //get the number of scalar components to distinguish between different image types int numberOfComponents = localStorage->m_ReslicedImage->GetNumberOfScalarComponents(); //get the binary property bool binary = false; bool binaryOutline = false; datanode->GetBoolProperty( "binary", binary, renderer ); if(binary) //binary image { datanode->GetBoolProperty( "outline binary", binaryOutline, renderer ); if(binaryOutline) //contour rendering { if ( input->GetPixelType().GetBpe() <= 8 ) { //generate contours/outlines localStorage->m_OutlinePolyData = CreateOutlinePolyData(renderer); float binaryOutlineWidth(1.0); if ( datanode->GetFloatProperty( "outline width", binaryOutlineWidth, renderer ) ) { if ( localStorage->m_Actors->GetNumberOfPaths() > 1 ) { float binaryOutlineShadowWidth(1.5); datanode->GetFloatProperty( "outline shadow width", binaryOutlineShadowWidth, renderer ); dynamic_cast(localStorage->m_Actors->GetParts()->GetItemAsObject(0)) ->GetProperty()->SetLineWidth( binaryOutlineWidth * binaryOutlineShadowWidth ); } localStorage->m_Actor->GetProperty()->SetLineWidth( binaryOutlineWidth ); } } else { binaryOutline = false; this->ApplyLookuptable(renderer, textureClippingBounds); MITK_WARN << "Type of all binary images should be (un)signed char. 
Outline does not work on other pixel types!";
        }
      }
      else //standard binary image
      {
        if(numberOfComponents != 1)
        {
          MITK_ERROR << "Rendering Error: Binary Images with more then 1 component are not supported!";
        }
      }
    }

    if (!(numberOfComponents == 1 || numberOfComponents == 3 || numberOfComponents == 4))
    {
      MITK_WARN << "Unknown number of components!";
    }

    this->ApplyLookuptable(renderer, textureClippingBounds);
-   // do not use a VTK lookup table (we do that ourselves in m_LevelWindowFilter)
    localStorage->m_Texture->MapColorScalarsThroughLookupTableOff();

    this->ApplyColor( renderer );
    this->ApplyOpacity( renderer );
    this->TransformActor( renderer );

    vtkActor* contourShadowActor = dynamic_cast<vtkActor*>(localStorage->m_Actors->GetParts()->GetItemAsObject(0));

    if(binary && binaryOutline) //connect the mapper with the polyData which contains the lines
    {
      //We need the contour for the binary outline property as actor
      localStorage->m_Mapper->SetInput(localStorage->m_OutlinePolyData);
      localStorage->m_Actor->SetTexture(NULL); //no texture for contours

      bool binaryOutlineShadow( false );
      datanode->GetBoolProperty( "outline binary shadow", binaryOutlineShadow, renderer );
      if ( binaryOutlineShadow )
        contourShadowActor->SetVisibility( true );
      else
        contourShadowActor->SetVisibility( false );
    }
    else
    { //Connect the mapper with the input texture. This is the standard case.
      //setup the textured plane
      this->GeneratePlane( renderer, sliceBounds );
      //set the plane as input for the mapper
      localStorage->m_Mapper->SetInputConnection(localStorage->m_Plane->GetOutputPort());
      //set the texture for the actor
      localStorage->m_Actor->SetTexture(localStorage->m_Texture);
      contourShadowActor->SetVisibility( false );
    }

    // We have been modified => save this for next Update()
    localStorage->m_LastUpdateTime.Modified();
  }

  void mitk::ImageVtkMapper2D::ApplyColor( mitk::BaseRenderer* renderer )
  {
    LocalStorage *localStorage = this->GetLocalStorage( renderer );

    float rgb[3]= { 1.0f, 1.0f, 1.0f };

    // check for color prop and use it for rendering if it exists
    // binary image hovering & binary image selection
    bool hover = false;
    bool selected = false;
    GetDataNode()->GetBoolProperty("binaryimage.ishovering", hover, renderer);
    GetDataNode()->GetBoolProperty("selected", selected, renderer);
    if(hover && !selected)
    {
      mitk::ColorProperty::Pointer colorprop =
        dynamic_cast<mitk::ColorProperty*>(GetDataNode()->GetProperty("binaryimage.hoveringcolor", renderer));
      if(colorprop.IsNotNull())
      {
        memcpy(rgb, colorprop->GetColor().GetDataPointer(), 3*sizeof(float));
      }
      else
      {
        GetColor( rgb, renderer );
      }
    }
    if(selected)
    {
      mitk::ColorProperty::Pointer colorprop =
        dynamic_cast<mitk::ColorProperty*>(GetDataNode()->GetProperty("binaryimage.selectedcolor", renderer));
      if(colorprop.IsNotNull())
      {
        memcpy(rgb, colorprop->GetColor().GetDataPointer(), 3*sizeof(float));
      }
      else
      {
        GetColor( rgb, renderer );
      }
    }
    if(!hover && !selected)
    {
      GetColor( rgb, renderer );
    }

    double rgbConv[3] = {(double)rgb[0], (double)rgb[1], (double)rgb[2]}; //conversion to double for VTK
    dynamic_cast<vtkActor*>(localStorage->m_Actors->GetParts()->GetItemAsObject(0))->GetProperty()->SetColor(rgbConv);
    localStorage->m_Actor->GetProperty()->SetColor(rgbConv);

    if ( localStorage->m_Actors->GetParts()->GetNumberOfItems() > 1 )
    {
      float rgb[3]= { 1.0f, 1.0f, 1.0f };
      mitk::ColorProperty::Pointer colorprop =
        dynamic_cast<mitk::ColorProperty*>(GetDataNode()->GetProperty("outline binary shadow color", renderer));
      if(colorprop.IsNotNull())
      {
        memcpy(rgb, colorprop->GetColor().GetDataPointer(), 3*sizeof(float));
      }
      double rgbConv[3] = {(double)rgb[0], (double)rgb[1], (double)rgb[2]}; //conversion
to double for VTK dynamic_cast( localStorage->m_Actors->GetParts()->GetItemAsObject(0) )->GetProperty()->SetColor(rgbConv); } } void mitk::ImageVtkMapper2D::ApplyOpacity( mitk::BaseRenderer* renderer ) { LocalStorage* localStorage = this->GetLocalStorage( renderer ); float opacity = 1.0f; // check for opacity prop and use it for rendering if it exists GetOpacity( opacity, renderer ); //set the opacity according to the properties localStorage->m_Actor->GetProperty()->SetOpacity(opacity); if ( localStorage->m_Actors->GetParts()->GetNumberOfItems() > 1 ) { dynamic_cast( localStorage->m_Actors->GetParts()->GetItemAsObject(0) )->GetProperty()->SetOpacity(opacity); } } void mitk::ImageVtkMapper2D::ApplyLookuptable( mitk::BaseRenderer* renderer, vtkFloatingPointType* bounds ) { //Have the following 4 different use cases how to generate the lookuptable: //1. We have a binary image -> The lut range is set to 0.0, 1.0 //2. The user sets a lut we can use //3. The user sets a transfer function we can use //4. Nothing defined: The default color lookuptable is used //@Warning: If the user sets a lut and a transfer function the lut will be used! LocalStorage* localStorage = m_LSH.GetLocalStorage(renderer); bool binary = false; this->GetDataNode()->GetBoolProperty( "binary", binary, renderer ); vtkLookupTable *usedLookupTable = localStorage->m_DefaultLookupTable; vtkScalarsToColors *usedScalarsToColors = localStorage->m_DefaultLookupTable; // If lookup table or transferfunction use is requested... mitk::LookupTableProperty::Pointer lookupTableProp = dynamic_cast(this->GetDataNode()->GetProperty("LookupTable")); mitk::TransferFunctionProperty::Pointer transferFunctionProp = dynamic_cast(this->GetDataNode()->GetProperty("Image Rendering.Transfer Function",renderer )); if(binary) // is it a binary image? { usedScalarsToColors = usedLookupTable = localStorage->m_BinaryLookupTable; } else if( lookupTableProp.IsNotNull() ) // is a lookuptable set? { if( transferFunctionProp.IsNotNull() ) { MITK_WARN << "A LookupTable and a transfer function TransferFunction property is set! Only the Image Rendering.Transfer Function will be used. If you want to use the color transfer function, remove or rename the LookupTable property."; } //If a lookup table is supplied by the user: //only update the lut, when the properties have changed... if( lookupTableProp->GetLookupTable()->GetMTime() <= this->GetDataNode()->GetPropertyList()->GetMTime() ) { lookupTableProp->GetLookupTable()->ChangeOpacityForAll( lookupTableProp->GetLookupTable()->GetVtkLookupTable()->GetAlpha()*localStorage->m_Actor->GetProperty()->GetOpacity() ); lookupTableProp->GetLookupTable()->ChangeOpacity(0, 0.0); } //If the user defines a lut, we dont want to use the color and take white instead. dynamic_cast (localStorage->m_Actors->GetParts()->GetItemAsObject(0))->GetProperty()->SetColor(1.0, 1.0, 1.0); localStorage->m_Actor->GetProperty()->SetColor(1.0, 1.0, 1.0); usedScalarsToColors = usedLookupTable = lookupTableProp->GetLookupTable()->GetVtkLookupTable(); } else if(transferFunctionProp.IsNotNull()) // is a color transfer function set? 
    {
      usedScalarsToColors = transferFunctionProp->GetValue()->GetColorTransferFunction();
      usedLookupTable = 0;
    }
    else
    {
      //default lookuptable
      LevelWindow levelWindow;
      this->GetLevelWindow( levelWindow, renderer );
      usedLookupTable->SetRange( levelWindow.GetLowerWindowBound(), levelWindow.GetUpperWindowBound() );
      this->ApplyColor( renderer );
    }

-   localStorage->m_Texture->SetInput( localStorage->m_ReslicedImage );
-
    // check for texture interpolation property
    bool textureInterpolation = false;
    GetDataNode()->GetBoolProperty( "texture interpolation", textureInterpolation, renderer );

    //set the interpolation modus according to the property
    localStorage->m_Texture->SetInterpolate(textureInterpolation);

    mitk::LevelWindow opacLevelWindow;
    if( this->GetLevelWindow( opacLevelWindow, renderer, "opaclevelwindow" ) )
    {
      //pass the opaque level window to the filter
      localStorage->m_LevelWindowFilter->SetMinOpacity(opacLevelWindow.GetLowerWindowBound());
      localStorage->m_LevelWindowFilter->SetMaxOpacity(opacLevelWindow.GetUpperWindowBound());
    }
    else
    {
      //no opaque level window
      localStorage->m_LevelWindowFilter->SetMinOpacity(0.0);
      localStorage->m_LevelWindowFilter->SetMaxOpacity(255.0);
    }

    localStorage->m_LevelWindowFilter->SetLookupTable(usedScalarsToColors);
    localStorage->m_LevelWindowFilter->SetInput(localStorage->m_ReslicedImage);
    localStorage->m_LevelWindowFilter->SetClippingBounds(bounds);

    //connect the texture with the output of the levelwindow filter
    localStorage->m_Texture->SetInputConnection(localStorage->m_LevelWindowFilter->GetOutputPort());
  }

  void mitk::ImageVtkMapper2D::Update(mitk::BaseRenderer* renderer)
  {
    if ( !this->IsVisible( renderer ) )
    {
      return;
    }

    mitk::Image* data = const_cast<mitk::Image*>( this->GetInput() );
    if ( data == NULL )
    {
      return;
    }

    // Calculate time step of the input data for the specified renderer (integer value)
    this->CalculateTimeStep( renderer );

    // Check if time step is valid
    const TimeSlicedGeometry *dataTimeGeometry = data->GetTimeSlicedGeometry();
    if ( ( dataTimeGeometry == NULL )
      || ( dataTimeGeometry->GetTimeSteps() == 0 )
      || ( !dataTimeGeometry->IsValidTime( this->GetTimestep() ) ) )
    {
      return;
    }

    const DataNode *node = this->GetDataNode();
    data->UpdateOutputInformation();
    LocalStorage *localStorage = m_LSH.GetLocalStorage(renderer);

    //check if something important has changed and we need to rerender
    if ( (localStorage->m_LastUpdateTime < node->GetMTime()) //was the node modified?
      || (localStorage->m_LastUpdateTime < data->GetPipelineMTime()) //Was the data modified?
      || (localStorage->m_LastUpdateTime < renderer->GetCurrentWorldGeometry2DUpdateTime()) //was the geometry modified?
      || (localStorage->m_LastUpdateTime < renderer->GetCurrentWorldGeometry2D()->GetMTime())
      || (localStorage->m_LastUpdateTime < node->GetPropertyList()->GetMTime()) //was a property modified?
|| (localStorage->m_LastUpdateTime < node->GetPropertyList(renderer)->GetMTime()) ) { this->GenerateDataForRenderer( renderer ); } // since we have checked that nothing important has changed, we can set // m_LastUpdateTime to the current time localStorage->m_LastUpdateTime.Modified(); } void mitk::ImageVtkMapper2D::SetDefaultProperties(mitk::DataNode* node, mitk::BaseRenderer* renderer, bool overwrite) { mitk::Image::Pointer image = dynamic_cast(node->GetData()); // Properties common for both images and segmentations node->AddProperty( "depthOffset", mitk::FloatProperty::New( 0.0 ), renderer, overwrite ); node->AddProperty( "outline binary", mitk::BoolProperty::New( false ), renderer, overwrite ); node->AddProperty( "outline width", mitk::FloatProperty::New( 1.0 ), renderer, overwrite ); node->AddProperty( "outline binary shadow", mitk::BoolProperty::New( false ), renderer, overwrite ); node->AddProperty( "outline binary shadow color", ColorProperty::New(0.0,0.0,0.0), renderer, overwrite ); node->AddProperty( "outline shadow width", mitk::FloatProperty::New( 1.5 ), renderer, overwrite ); if(image->IsRotated()) node->AddProperty( "reslice interpolation", mitk::VtkResliceInterpolationProperty::New(VTK_RESLICE_CUBIC) ); else node->AddProperty( "reslice interpolation", mitk::VtkResliceInterpolationProperty::New() ); node->AddProperty( "texture interpolation", mitk::BoolProperty::New( mitk::DataNodeFactory::m_TextureInterpolationActive ) ); // set to user configurable default value (see global options) node->AddProperty( "in plane resample extent by geometry", mitk::BoolProperty::New( false ) ); node->AddProperty( "bounding box", mitk::BoolProperty::New( false ) ); std::string photometricInterpretation; // DICOM tag telling us how pixel values should be displayed if ( node->GetStringProperty( "dicom.pixel.PhotometricInterpretation", photometricInterpretation ) ) { // modality provided by DICOM or other reader if ( photometricInterpretation.find("MONOCHROME1") != std::string::npos ) // meaning: display MINIMUM pixels as WHITE { // generate LUT (white to black) mitk::LookupTable::Pointer mitkLut = mitk::LookupTable::New(); vtkLookupTable* bwLut = mitkLut->GetVtkLookupTable(); bwLut->SetTableRange (0, 1); bwLut->SetSaturationRange (0, 0); bwLut->SetHueRange (0, 0); bwLut->SetValueRange (1, 0); bwLut->SetAlphaRange (1, 1); bwLut->SetRampToLinear(); bwLut->Build(); mitk::LookupTableProperty::Pointer mitkLutProp = mitk::LookupTableProperty::New(); mitkLutProp->SetLookupTable(mitkLut); node->SetProperty( "LookupTable", mitkLutProp ); } else if ( photometricInterpretation.find("MONOCHROME2") != std::string::npos ) // meaning: display MINIMUM pixels as BLACK { // apply default LUT (black to white) node->SetProperty( "color", mitk::ColorProperty::New( 1,1,1 ), renderer ); } // PALETTE interpretation should be handled ok by RGB loading } bool isBinaryImage(false); if ( ! 
node->GetBoolProperty("binary", isBinaryImage) ) { // ok, property is not set, use heuristic to determine if this // is a binary image mitk::Image::Pointer centralSliceImage; ScalarType minValue = 0.0; ScalarType maxValue = 0.0; ScalarType min2ndValue = 0.0; ScalarType max2ndValue = 0.0; mitk::ImageSliceSelector::Pointer sliceSelector = mitk::ImageSliceSelector::New(); sliceSelector->SetInput(image); sliceSelector->SetSliceNr(image->GetDimension(2)/2); sliceSelector->SetTimeNr(image->GetDimension(3)/2); sliceSelector->SetChannelNr(image->GetDimension(4)/2); sliceSelector->Update(); centralSliceImage = sliceSelector->GetOutput(); if ( centralSliceImage.IsNotNull() && centralSliceImage->IsInitialized() ) { minValue = centralSliceImage->GetStatistics()->GetScalarValueMin(); maxValue = centralSliceImage->GetStatistics()->GetScalarValueMax(); min2ndValue = centralSliceImage->GetStatistics()->GetScalarValue2ndMin(); max2ndValue = centralSliceImage->GetStatistics()->GetScalarValue2ndMax(); } if ((maxValue == min2ndValue && minValue == max2ndValue) || minValue == maxValue) { // centralSlice is strange, lets look at all data minValue = image->GetStatistics()->GetScalarValueMin(); maxValue = image->GetStatistics()->GetScalarValueMaxNoRecompute(); min2ndValue = image->GetStatistics()->GetScalarValue2ndMinNoRecompute(); max2ndValue = image->GetStatistics()->GetScalarValue2ndMaxNoRecompute(); } isBinaryImage = ( maxValue == min2ndValue && minValue == max2ndValue ); } // some more properties specific for a binary... if (isBinaryImage) { node->AddProperty( "opacity", mitk::FloatProperty::New(0.3f), renderer, overwrite ); node->AddProperty( "color", ColorProperty::New(1.0,0.0,0.0), renderer, overwrite ); node->AddProperty( "binaryimage.selectedcolor", ColorProperty::New(1.0,0.0,0.0), renderer, overwrite ); node->AddProperty( "binaryimage.selectedannotationcolor", ColorProperty::New(1.0,0.0,0.0), renderer, overwrite ); node->AddProperty( "binaryimage.hoveringcolor", ColorProperty::New(1.0,0.0,0.0), renderer, overwrite ); node->AddProperty( "binaryimage.hoveringannotationcolor", ColorProperty::New(1.0,0.0,0.0), renderer, overwrite ); node->AddProperty( "binary", mitk::BoolProperty::New( true ), renderer, overwrite ); node->AddProperty("layer", mitk::IntProperty::New(10), renderer, overwrite); } else //...or image type object { node->AddProperty( "opacity", mitk::FloatProperty::New(1.0f), renderer, overwrite ); node->AddProperty( "color", ColorProperty::New(1.0,1.0,1.0), renderer, overwrite ); node->AddProperty( "binary", mitk::BoolProperty::New( false ), renderer, overwrite ); node->AddProperty("layer", mitk::IntProperty::New(0), renderer, overwrite); } if(image.IsNotNull() && image->IsInitialized()) { if((overwrite) || (node->GetProperty("levelwindow", renderer)==NULL)) { /* initialize level/window from DICOM tags */ std::string sLevel; std::string sWindow; if ( image->GetPropertyList()->GetStringProperty( "dicom.voilut.WindowCenter", sLevel ) && image->GetPropertyList()->GetStringProperty( "dicom.voilut.WindowWidth", sWindow ) ) { float level = atof( sLevel.c_str() ); float window = atof( sWindow.c_str() ); mitk::LevelWindow contrast; std::string sSmallestPixelValueInSeries; std::string sLargestPixelValueInSeries; if ( image->GetPropertyList()->GetStringProperty( "dicom.series.SmallestPixelValueInSeries", sSmallestPixelValueInSeries ) && image->GetPropertyList()->GetStringProperty( "dicom.series.LargestPixelValueInSeries", sLargestPixelValueInSeries ) ) { float smallestPixelValueInSeries = atof( 
sSmallestPixelValueInSeries.c_str() ); float largestPixelValueInSeries = atof( sLargestPixelValueInSeries.c_str() ); contrast.SetRangeMinMax( smallestPixelValueInSeries-1, largestPixelValueInSeries+1 ); // why not a little buffer? // might remedy some l/w widget challenges } else { contrast.SetAuto( static_cast(node->GetData()), false, true ); // we need this as a fallback } contrast.SetLevelWindow( level, window, true ); node->SetProperty( "levelwindow", LevelWindowProperty::New( contrast ), renderer ); } } if(((overwrite) || (node->GetProperty("opaclevelwindow", renderer)==NULL)) && (image->GetPixelType().GetPixelTypeId() == itk::ImageIOBase::RGBA) && (image->GetPixelType().GetTypeId() == typeid( unsigned char)) ) { mitk::LevelWindow opaclevwin; opaclevwin.SetRangeMinMax(0,255); opaclevwin.SetWindowBounds(0,255); mitk::LevelWindowProperty::Pointer prop = mitk::LevelWindowProperty::New(opaclevwin); node->SetProperty( "opaclevelwindow", prop, renderer ); } } Superclass::SetDefaultProperties(node, renderer, overwrite); } mitk::ImageVtkMapper2D::LocalStorage* mitk::ImageVtkMapper2D::GetLocalStorage(mitk::BaseRenderer* renderer) { return m_LSH.GetLocalStorage(renderer); } vtkSmartPointer mitk::ImageVtkMapper2D::CreateOutlinePolyData(mitk::BaseRenderer* renderer ){ LocalStorage* localStorage = this->GetLocalStorage(renderer); //get the min and max index values of each direction int* extent = localStorage->m_ReslicedImage->GetExtent(); int xMin = extent[0]; int xMax = extent[1]; int yMin = extent[2]; int yMax = extent[3]; int* dims = localStorage->m_ReslicedImage->GetDimensions(); //dimensions of the image int line = dims[0]; //how many pixels per line? int x = xMin; //pixel index x int y = yMin; //pixel index y char* currentPixel; //get the depth for each contour float depth = CalculateLayerDepth(renderer); vtkSmartPointer points = vtkSmartPointer::New(); //the points to draw vtkSmartPointer lines = vtkSmartPointer::New(); //the lines to connect the points // We take the pointer to the first pixel of the image currentPixel = static_cast(localStorage->m_ReslicedImage->GetScalarPointer() ); while (y <= yMax) { //if the current pixel value is set to something if ((currentPixel) && (*currentPixel != 0)) { //check in which direction a line is necessary //a line is added if the neighbor of the current pixel has the value 0 //and if the pixel is located at the edge of the image //if vvvvv not the first line vvvvv if (y > yMin && *(currentPixel-line) == 0) { //x direction - bottom edge of the pixel //add the 2 points vtkIdType p1 = points->InsertNextPoint(x*localStorage->m_mmPerPixel[0], y*localStorage->m_mmPerPixel[1], depth); vtkIdType p2 = points->InsertNextPoint((x+1)*localStorage->m_mmPerPixel[0], y*localStorage->m_mmPerPixel[1], depth); //add the line between both points lines->InsertNextCell(2); lines->InsertCellPoint(p1); lines->InsertCellPoint(p2); } //if vvvvv not the last line vvvvv if (y < yMax && *(currentPixel+line) == 0) { //x direction - top edge of the pixel vtkIdType p1 = points->InsertNextPoint(x*localStorage->m_mmPerPixel[0], (y+1)*localStorage->m_mmPerPixel[1], depth); vtkIdType p2 = points->InsertNextPoint((x+1)*localStorage->m_mmPerPixel[0], (y+1)*localStorage->m_mmPerPixel[1], depth); lines->InsertNextCell(2); lines->InsertCellPoint(p1); lines->InsertCellPoint(p2); } //if vvvvv not the first pixel vvvvv if ( (x > xMin || y > yMin) && *(currentPixel-1) == 0) { //y direction - left edge of the pixel vtkIdType p1 = points->InsertNextPoint(x*localStorage->m_mmPerPixel[0], 
y*localStorage->m_mmPerPixel[1], depth); vtkIdType p2 = points->InsertNextPoint(x*localStorage->m_mmPerPixel[0], (y+1)*localStorage->m_mmPerPixel[1], depth); lines->InsertNextCell(2); lines->InsertCellPoint(p1); lines->InsertCellPoint(p2); } //if vvvvv not the last pixel vvvvv if ( (y < yMax || (x < xMax) ) && *(currentPixel+1) == 0) { //y direction - right edge of the pixel vtkIdType p1 = points->InsertNextPoint((x+1)*localStorage->m_mmPerPixel[0], y*localStorage->m_mmPerPixel[1], depth); vtkIdType p2 = points->InsertNextPoint((x+1)*localStorage->m_mmPerPixel[0], (y+1)*localStorage->m_mmPerPixel[1], depth); lines->InsertNextCell(2); lines->InsertCellPoint(p1); lines->InsertCellPoint(p2); } /* now consider pixels at the edge of the image */ //if vvvvv left edge of image vvvvv if (x == xMin) { //draw left edge of the pixel vtkIdType p1 = points->InsertNextPoint(x*localStorage->m_mmPerPixel[0], y*localStorage->m_mmPerPixel[1], depth); vtkIdType p2 = points->InsertNextPoint(x*localStorage->m_mmPerPixel[0], (y+1)*localStorage->m_mmPerPixel[1], depth); lines->InsertNextCell(2); lines->InsertCellPoint(p1); lines->InsertCellPoint(p2); } //if vvvvv right edge of image vvvvv if (x == xMax) { //draw right edge of the pixel vtkIdType p1 = points->InsertNextPoint((x+1)*localStorage->m_mmPerPixel[0], y*localStorage->m_mmPerPixel[1], depth); vtkIdType p2 = points->InsertNextPoint((x+1)*localStorage->m_mmPerPixel[0], (y+1)*localStorage->m_mmPerPixel[1], depth); lines->InsertNextCell(2); lines->InsertCellPoint(p1); lines->InsertCellPoint(p2); } //if vvvvv bottom edge of image vvvvv if (y == yMin) { //draw bottom edge of the pixel vtkIdType p1 = points->InsertNextPoint(x*localStorage->m_mmPerPixel[0], y*localStorage->m_mmPerPixel[1], depth); vtkIdType p2 = points->InsertNextPoint((x+1)*localStorage->m_mmPerPixel[0], y*localStorage->m_mmPerPixel[1], depth); lines->InsertNextCell(2); lines->InsertCellPoint(p1); lines->InsertCellPoint(p2); } //if vvvvv top edge of image vvvvv if (y == yMax) { //draw top edge of the pixel vtkIdType p1 = points->InsertNextPoint(x*localStorage->m_mmPerPixel[0], (y+1)*localStorage->m_mmPerPixel[1], depth); vtkIdType p2 = points->InsertNextPoint((x+1)*localStorage->m_mmPerPixel[0], (y+1)*localStorage->m_mmPerPixel[1], depth); lines->InsertNextCell(2); lines->InsertCellPoint(p1); lines->InsertCellPoint(p2); } }//end if currentpixel is set x++; if (x > xMax) { //reached end of line x = xMin; y++; } // Increase the pointer-position to the next pixel. // This is safe, as the while-loop and the x-reset logic above makes // sure we do not exceed the bounds of the image currentPixel++; }//end of while // Create a polydata to store everything in vtkSmartPointer polyData = vtkSmartPointer::New(); // Add the points to the dataset polyData->SetPoints(points); // Add the lines to the dataset polyData->SetLines(lines); return polyData; } void mitk::ImageVtkMapper2D::TransformActor(mitk::BaseRenderer* renderer) { LocalStorage *localStorage = m_LSH.GetLocalStorage(renderer); //get the transformation matrix of the reslicer in order to render the slice as axial, coronal or saggital vtkSmartPointer trans = vtkSmartPointer::New(); vtkSmartPointer matrix = localStorage->m_Reslicer->GetResliceAxes(); trans->SetMatrix(matrix); //transform the plane/contour (the actual actor) to the corresponding view (axial, coronal or saggital) localStorage->m_Actor->SetUserTransform(trans); //transform the origin to center based coordinates, because MITK is center based. 
localStorage->m_Actor->SetPosition( -0.5*localStorage->m_mmPerPixel[0], -0.5*localStorage->m_mmPerPixel[1], 0.0); if ( localStorage->m_Actors->GetNumberOfPaths() > 1 ) { vtkActor* secondaryActor = dynamic_cast( localStorage->m_Actors->GetParts()->GetItemAsObject(0) ); secondaryActor->SetUserTransform(trans); secondaryActor->SetPosition( -0.5*localStorage->m_mmPerPixel[0], -0.5*localStorage->m_mmPerPixel[1], 0.0); } } bool mitk::ImageVtkMapper2D::RenderingGeometryIntersectsImage( const Geometry2D* renderingGeometry, SlicedGeometry3D* imageGeometry ) { // if either one of the two geometries is NULL we return true // for safety reasons if ( renderingGeometry == NULL || imageGeometry == NULL ) return true; // get the distance for the first cornerpoint ScalarType initialDistance = renderingGeometry->SignedDistance( imageGeometry->GetCornerPoint( 0 ) ); for( int i=1; i<8; i++ ) { mitk::Point3D cornerPoint = imageGeometry->GetCornerPoint( i ); // get the distance to the other cornerpoints ScalarType distance = renderingGeometry->SignedDistance( cornerPoint ); // if it has not the same signing as the distance of the first point if ( initialDistance * distance < 0 ) { // we have an intersection and return true return true; } } // all distances have the same sign, no intersection and we return false return false; } mitk::ImageVtkMapper2D::LocalStorage::LocalStorage() { //Do as much actions as possible in here to avoid double executions. m_Plane = vtkSmartPointer::New(); m_Texture = vtkSmartPointer::New().GetPointer(); m_DefaultLookupTable = vtkSmartPointer::New(); m_BinaryLookupTable = vtkSmartPointer::New(); m_Mapper = vtkSmartPointer::New(); m_Actor = vtkSmartPointer::New(); m_Actors = vtkSmartPointer::New(); m_Reslicer = mitk::ExtractSliceFilter::New(); m_TSFilter = vtkSmartPointer::New(); m_OutlinePolyData = vtkSmartPointer::New(); m_ReslicedImage = vtkSmartPointer::New(); m_EmptyPolyData = vtkSmartPointer::New(); //the following actions are always the same and thus can be performed //in the constructor for each image (i.e. the image-corresponding local storage) m_TSFilter->ReleaseDataFlagOn(); //built a default lookuptable m_DefaultLookupTable->SetRampToLinear(); m_DefaultLookupTable->SetSaturationRange( 0.0, 0.0 ); m_DefaultLookupTable->SetHueRange( 0.0, 0.0 ); m_DefaultLookupTable->SetValueRange( 0.0, 1.0 ); m_DefaultLookupTable->Build(); m_BinaryLookupTable->SetRampToLinear(); m_BinaryLookupTable->SetSaturationRange( 0.0, 0.0 ); m_BinaryLookupTable->SetHueRange( 0.0, 0.0 ); m_BinaryLookupTable->SetValueRange( 0.0, 1.0 ); m_BinaryLookupTable->SetRange(0.0, 1.0); // make first value transparent { double rgba[4]; m_BinaryLookupTable->GetTableValue(0, rgba); m_BinaryLookupTable->SetTableValue(0, rgba[0], rgba[1], rgba[2], 0); // background to 0 } m_BinaryLookupTable->Build(); //do not repeat the texture (the image) m_Texture->RepeatOff(); //set the mapper for the actor m_Actor->SetMapper( m_Mapper ); vtkSmartPointer outlineShadowActor = vtkSmartPointer::New(); outlineShadowActor->SetMapper( m_Mapper ); m_Actors->AddPart( outlineShadowActor ); m_Actors->AddPart( m_Actor ); //level window filter m_LevelWindowFilter = new vtkMitkLevelWindowFilter(); }
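Taken together, the hunks in both files make the same change: instead of copying the resliced image and a lookup table into the texture, the texture is connected directly to the output of vtkMitkLevelWindowFilter, which applies level/window, color mapping and the opacity window itself. The condensed sketch below illustrates that wiring in isolation; it is not code from this patch, and CreateLevelWindowTexture, reslicedImage and lut are hypothetical placeholders for the mapper's LocalStorage members and node properties.

#include "vtkMitkLevelWindowFilter.h"
#include "vtkNeverTranslucentTexture.h"
#include <vtkImageData.h>
#include <vtkScalarsToColors.h>
#include <vtkTexture.h>

// Build a texture whose colors come from the level-window filter rather than
// from a VTK-side lookup table (mirrors the SetInputConnection calls above).
vtkTexture* CreateLevelWindowTexture(vtkImageData* reslicedImage,   // stands in for LocalStorage::m_ReslicedImage
                                     vtkScalarsToColors* lut)       // stands in for the table chosen in ApplyLookuptable()
{
  // The filter performs level/window, color and opacity mapping; in the mapper
  // it is owned by LocalStorage, so this sketch does not manage its lifetime.
  vtkMitkLevelWindowFilter* levelWindowFilter = new vtkMitkLevelWindowFilter();
  levelWindowFilter->SetInput(reslicedImage);
  levelWindowFilter->SetLookupTable(lut);
  levelWindowFilter->SetMinOpacity(0.0);   // placeholder "opaclevelwindow" bounds
  levelWindowFilter->SetMaxOpacity(255.0);

  // The texture consumes the already-colored filter output, so VTK's own
  // scalar-to-color mapping stays switched off, as in both mappers.
  vtkTexture* texture = vtkNeverTranslucentTexture::New();
  texture->RepeatOff();
  texture->SetInputConnection(levelWindowFilter->GetOutputPort());
  texture->MapColorScalarsThroughLookupTableOff();
  return texture;
}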