diff --git a/Modules/DiffusionImaging/FiberTracking/Algorithms/MLTracking/itkMLBSTrackingFilter.cpp b/Modules/DiffusionImaging/FiberTracking/Algorithms/MLTracking/itkMLBSTrackingFilter.cpp index ddede38311..afc8603edd 100644 --- a/Modules/DiffusionImaging/FiberTracking/Algorithms/MLTracking/itkMLBSTrackingFilter.cpp +++ b/Modules/DiffusionImaging/FiberTracking/Algorithms/MLTracking/itkMLBSTrackingFilter.cpp @@ -1,825 +1,591 @@ /*=================================================================== The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center, Division of Medical and Biological Informatics. All rights reserved. This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See LICENSE.txt or http://www.mitk.org for details. ===================================================================*/ #ifndef __itkMLBSTrackingFilter_txx #define __itkMLBSTrackingFilter_txx #include #include #include #include "itkMLBSTrackingFilter.h" #include #include #include #include -//#include #define _USE_MATH_DEFINES #include namespace itk { template< int NumImageFeatures > MLBSTrackingFilter< NumImageFeatures > ::MLBSTrackingFilter() : m_FiberPolyData(NULL) , m_Points(NULL) , m_Cells(NULL) , m_AngularThreshold(0.7) , m_StepSize(0) , m_MaxLength(10000) , m_MinTractLength(20.0) , m_MaxTractLength(400.0) , m_SeedsPerVoxel(1) - , m_NumberOfSamples(50) , m_SamplingDistance(-1) + , m_NumberOfSamples(50) , m_SeedImage(NULL) , m_MaskImage(NULL) - , m_DecisionForest(NULL) , m_StoppingRegions(NULL) , m_DemoMode(false) , m_PauseTracking(false) , m_AbortTracking(false) , m_RemoveWmEndFibers(false) , m_AposterioriCurvCheck(false) , m_AvoidStop(true) , m_RandomSampling(false) - , m_Verbose(false) { this->SetNumberOfRequiredInputs(1); } template< int NumImageFeatures > double MLBSTrackingFilter< NumImageFeatures > ::RoundToNearest(double num) { return (num > 0.0) ? 
floor(num + 0.5) : ceil(num - 0.5); } template< int NumImageFeatures > void MLBSTrackingFilter< NumImageFeatures >::BeforeThreadedGenerateData() { m_InputImage = const_cast(this->GetInput(0)); - PreprocessRawData(); + m_ForestHandler.InitForTracking(); m_FiberPolyData = PolyDataType::New(); m_Points = vtkSmartPointer< vtkPoints >::New(); m_Cells = vtkSmartPointer< vtkCellArray >::New(); - m_ImageSize.resize(3); - m_ImageSize[0] = m_FeatureImage->GetLargestPossibleRegion().GetSize()[0]; - m_ImageSize[1] = m_FeatureImage->GetLargestPossibleRegion().GetSize()[1]; - m_ImageSize[2] = m_FeatureImage->GetLargestPossibleRegion().GetSize()[2]; - - m_ImageSpacing.resize(3); - m_ImageSpacing[0] = m_FeatureImage->GetSpacing()[0]; - m_ImageSpacing[1] = m_FeatureImage->GetSpacing()[1]; - m_ImageSpacing[2] = m_FeatureImage->GetSpacing()[2]; + std::vector< double > m_ImageSpacing; m_ImageSpacing.resize(3); + m_ImageSpacing[0] = m_InputImage->GetSpacing()[0]; + m_ImageSpacing[1] = m_InputImage->GetSpacing()[1]; + m_ImageSpacing[2] = m_InputImage->GetSpacing()[2]; double minSpacing; if(m_ImageSpacing[0]GetNumberOfThreads(); i++) { PolyDataType poly = PolyDataType::New(); m_PolyDataContainer.push_back(poly); } - m_NotWmImage = ItkDoubleImgType::New(); - m_NotWmImage->SetSpacing( m_FeatureImage->GetSpacing() ); - m_NotWmImage->SetOrigin( m_FeatureImage->GetOrigin() ); - m_NotWmImage->SetDirection( m_FeatureImage->GetDirection() ); - m_NotWmImage->SetRegions( m_FeatureImage->GetLargestPossibleRegion() ); - m_NotWmImage->Allocate(); - m_NotWmImage->FillBuffer(0); - - m_WmImage = ItkDoubleImgType::New(); - m_WmImage->SetSpacing( m_FeatureImage->GetSpacing() ); - m_WmImage->SetOrigin( m_FeatureImage->GetOrigin() ); - m_WmImage->SetDirection( m_FeatureImage->GetDirection() ); - m_WmImage->SetRegions( m_FeatureImage->GetLargestPossibleRegion() ); - m_WmImage->Allocate(); - m_WmImage->FillBuffer(0); - - m_AvoidStopImage = ItkDoubleImgType::New(); - m_AvoidStopImage->SetSpacing( m_FeatureImage->GetSpacing() ); - m_AvoidStopImage->SetOrigin( m_FeatureImage->GetOrigin() ); - m_AvoidStopImage->SetDirection( m_FeatureImage->GetDirection() ); - m_AvoidStopImage->SetRegions( m_FeatureImage->GetLargestPossibleRegion() ); - m_AvoidStopImage->Allocate(); - m_AvoidStopImage->FillBuffer(0); - if (m_StoppingRegions.IsNull()) { m_StoppingRegions = ItkUcharImgType::New(); - m_StoppingRegions->SetSpacing( m_FeatureImage->GetSpacing() ); - m_StoppingRegions->SetOrigin( m_FeatureImage->GetOrigin() ); - m_StoppingRegions->SetDirection( m_FeatureImage->GetDirection() ); - m_StoppingRegions->SetRegions( m_FeatureImage->GetLargestPossibleRegion() ); + m_StoppingRegions->SetSpacing( m_InputImage->GetSpacing() ); + m_StoppingRegions->SetOrigin( m_InputImage->GetOrigin() ); + m_StoppingRegions->SetDirection( m_InputImage->GetDirection() ); + m_StoppingRegions->SetRegions( m_InputImage->GetLargestPossibleRegion() ); m_StoppingRegions->Allocate(); m_StoppingRegions->FillBuffer(0); } if (m_SeedImage.IsNull()) { m_SeedImage = ItkUcharImgType::New(); - m_SeedImage->SetSpacing( m_FeatureImage->GetSpacing() ); - m_SeedImage->SetOrigin( m_FeatureImage->GetOrigin() ); - m_SeedImage->SetDirection( m_FeatureImage->GetDirection() ); - m_SeedImage->SetRegions( m_FeatureImage->GetLargestPossibleRegion() ); + m_SeedImage->SetSpacing( m_InputImage->GetSpacing() ); + m_SeedImage->SetOrigin( m_InputImage->GetOrigin() ); + m_SeedImage->SetDirection( m_InputImage->GetDirection() ); + m_SeedImage->SetRegions( m_InputImage->GetLargestPossibleRegion() ); 
m_SeedImage->Allocate(); m_SeedImage->FillBuffer(1); } if (m_MaskImage.IsNull()) { // initialize mask image m_MaskImage = ItkUcharImgType::New(); - m_MaskImage->SetSpacing( m_FeatureImage->GetSpacing() ); - m_MaskImage->SetOrigin( m_FeatureImage->GetOrigin() ); - m_MaskImage->SetDirection( m_FeatureImage->GetDirection() ); - m_MaskImage->SetRegions( m_FeatureImage->GetLargestPossibleRegion() ); + m_MaskImage->SetSpacing( m_InputImage->GetSpacing() ); + m_MaskImage->SetOrigin( m_InputImage->GetOrigin() ); + m_MaskImage->SetDirection( m_InputImage->GetDirection() ); + m_MaskImage->SetRegions( m_InputImage->GetLargestPossibleRegion() ); m_MaskImage->Allocate(); m_MaskImage->FillBuffer(1); } else std::cout << "MLBSTrackingFilter: using mask image" << std::endl; if (m_AngularThreshold<0.0) m_AngularThreshold = 0.5*minSpacing; m_BuildFibersReady = 0; m_BuildFibersFinished = false; m_Threads = 0; m_Tractogram.clear(); m_SamplingPointset = mitk::PointSet::New(); m_AlternativePointset = mitk::PointSet::New(); + m_StartTime = std::chrono::system_clock::now(); std::cout << "MLBSTrackingFilter: Angular threshold: " << m_AngularThreshold << std::endl; std::cout << "MLBSTrackingFilter: Stepsize: " << m_StepSize << " mm" << std::endl; std::cout << "MLBSTrackingFilter: Seeds per voxel: " << m_SeedsPerVoxel << std::endl; std::cout << "MLBSTrackingFilter: Max. sampling distance: " << m_SamplingDistance << " mm" << std::endl; std::cout << "MLBSTrackingFilter: Number of samples: " << m_NumberOfSamples << std::endl; std::cout << "MLBSTrackingFilter: Max. tract length: " << m_MaxTractLength << " mm" << std::endl; std::cout << "MLBSTrackingFilter: Min. tract length: " << m_MinTractLength << " mm" << std::endl; std::cout << "MLBSTrackingFilter: Starting streamline tracking using " << this->GetNumberOfThreads() << " threads." << std::endl; } -template< int NumImageFeatures > -void MLBSTrackingFilter< NumImageFeatures >::PreprocessRawData() -{ - typedef itk::AnalyticalDiffusionQballReconstructionImageFilter InterpolationFilterType; - - std::cout << "MLBSTrackingFilter: Spherical signal interpolation and sampling ..." << std::endl; - typename InterpolationFilterType::Pointer filter = InterpolationFilterType::New(); - filter->SetGradientImage( m_GradientDirections, m_InputImage ); - filter->SetBValue( m_B_Value ); - filter->SetLambda(0.006); - filter->SetNormalizationMethod(InterpolationFilterType::QBAR_RAW_SIGNAL); - filter->Update(); - // FeatureImageType::Pointer itkFeatureImage = qballfilter->GetCoefficientImage(); - // featureImageVector.push_back(itkFeatureImage); - - std::cout << "MLBSTrackingFilter: Creating feature image ..." 
<< std::endl; - vnl_vector_fixed ref; ref.fill(0); ref[0]=1; - itk::OrientationDistributionFunction< double, NumImageFeatures*2 > odf; - m_DirectionIndices.clear(); - for (unsigned int f=0; f0) // only used directions on one hemisphere - m_DirectionIndices.push_back(f); - } - - m_FeatureImage = FeatureImageType::New(); - m_FeatureImage->SetSpacing(filter->GetOutput()->GetSpacing()); - m_FeatureImage->SetOrigin(filter->GetOutput()->GetOrigin()); - m_FeatureImage->SetDirection(filter->GetOutput()->GetDirection()); - m_FeatureImage->SetLargestPossibleRegion(filter->GetOutput()->GetLargestPossibleRegion()); - m_FeatureImage->SetBufferedRegion(filter->GetOutput()->GetLargestPossibleRegion()); - m_FeatureImage->SetRequestedRegion(filter->GetOutput()->GetLargestPossibleRegion()); - m_FeatureImage->Allocate(); - - itk::ImageRegionIterator< typename InterpolationFilterType::OutputImageType > it(filter->GetOutput(), filter->GetOutput()->GetLargestPossibleRegion()); - while(!it.IsAtEnd()) - { - typename FeatureImageType::PixelType pix; - for (unsigned int f=0; fSetPixel(it.GetIndex(), pix); - ++it; - } -} - template< int NumImageFeatures > void MLBSTrackingFilter< NumImageFeatures >::CalculateNewPosition(itk::Point& pos, vnl_vector_fixed& dir) { // vnl_matrix_fixed< double, 3, 3 > rot = m_FeatureImage->GetDirection().GetTranspose(); // dir = rot*dir; dir *= m_StepSize; pos[0] += dir[0]; pos[1] += dir[1]; pos[2] += dir[2]; } template< int NumImageFeatures > bool MLBSTrackingFilter< NumImageFeatures > ::IsValidPosition(itk::Point &pos) { typename FeatureImageType::IndexType idx; - m_FeatureImage->TransformPhysicalPointToIndex(pos, idx); - if (!m_FeatureImage->GetLargestPossibleRegion().IsInside(idx) || m_MaskImage->GetPixel(idx)==0) + m_InputImage->TransformPhysicalPointToIndex(pos, idx); + if (!m_InputImage->GetLargestPossibleRegion().IsInside(idx) || m_MaskImage->GetPixel(idx)==0) return false; return true; } -template< int NumImageFeatures > -typename MLBSTrackingFilter< NumImageFeatures >::FeatureImageType::PixelType MLBSTrackingFilter< NumImageFeatures >::GetImageValues(itk::Point itkP) -{ - itk::Index<3> idx; - itk::ContinuousIndex< double, 3> cIdx; - m_FeatureImage->TransformPhysicalPointToIndex(itkP, idx); - m_FeatureImage->TransformPhysicalPointToContinuousIndex(itkP, cIdx); - - typename FeatureImageType::PixelType pix; pix.Fill(0.0); - if ( m_FeatureImage->GetLargestPossibleRegion().IsInside(idx) ) - pix = m_FeatureImage->GetPixel(idx); - else - return pix; - - double frac_x = cIdx[0] - idx[0]; - double frac_y = cIdx[1] - idx[1]; - double frac_z = cIdx[2] - idx[2]; - if (frac_x<0) - { - idx[0] -= 1; - frac_x += 1; - } - if (frac_y<0) - { - idx[1] -= 1; - frac_y += 1; - } - if (frac_z<0) - { - idx[2] -= 1; - frac_z += 1; - } - frac_x = 1-frac_x; - frac_y = 1-frac_y; - frac_z = 1-frac_z; - - // int coordinates inside image? 
- if (idx[0] >= 0 && idx[0] < m_FeatureImage->GetLargestPossibleRegion().GetSize(0)-1 && - idx[1] >= 0 && idx[1] < m_FeatureImage->GetLargestPossibleRegion().GetSize(1)-1 && - idx[2] >= 0 && idx[2] < m_FeatureImage->GetLargestPossibleRegion().GetSize(2)-1) - { - vnl_vector_fixed interpWeights; - interpWeights[0] = ( frac_x)*( frac_y)*( frac_z); - interpWeights[1] = (1-frac_x)*( frac_y)*( frac_z); - interpWeights[2] = ( frac_x)*(1-frac_y)*( frac_z); - interpWeights[3] = ( frac_x)*( frac_y)*(1-frac_z); - interpWeights[4] = (1-frac_x)*(1-frac_y)*( frac_z); - interpWeights[5] = ( frac_x)*(1-frac_y)*(1-frac_z); - interpWeights[6] = (1-frac_x)*( frac_y)*(1-frac_z); - interpWeights[7] = (1-frac_x)*(1-frac_y)*(1-frac_z); - - pix = m_FeatureImage->GetPixel(idx) * interpWeights[0]; - typename FeatureImageType::IndexType tmpIdx = idx; tmpIdx[0]++; - pix += m_FeatureImage->GetPixel(tmpIdx) * interpWeights[1]; - tmpIdx = idx; tmpIdx[1]++; - pix += m_FeatureImage->GetPixel(tmpIdx) * interpWeights[2]; - tmpIdx = idx; tmpIdx[2]++; - pix += m_FeatureImage->GetPixel(tmpIdx) * interpWeights[3]; - tmpIdx = idx; tmpIdx[0]++; tmpIdx[1]++; - pix += m_FeatureImage->GetPixel(tmpIdx) * interpWeights[4]; - tmpIdx = idx; tmpIdx[1]++; tmpIdx[2]++; - pix += m_FeatureImage->GetPixel(tmpIdx) * interpWeights[5]; - tmpIdx = idx; tmpIdx[2]++; tmpIdx[0]++; - pix += m_FeatureImage->GetPixel(tmpIdx) * interpWeights[6]; - tmpIdx = idx; tmpIdx[0]++; tmpIdx[1]++; tmpIdx[2]++; - pix += m_FeatureImage->GetPixel(tmpIdx) * interpWeights[7]; - } - - return pix; -} - -template< int NumImageFeatures > -vnl_vector_fixed MLBSTrackingFilter< NumImageFeatures >::Classify(itk::Point& pos, int& candidates, vnl_vector_fixed& olddir, double angularThreshold, double& prob, bool avoidStop) -{ - vnl_vector_fixed direction; direction.fill(0); - - vigra::MultiArray<2, double> featureData = vigra::MultiArray<2, double>( vigra::Shape2(1,NumImageFeatures+3) ); - - typename FeatureImageType::PixelType featurePixel = GetImageValues(pos); - - // pixel values - for (unsigned int f=0; f ref; ref.fill(0); ref[0]=1; - for (unsigned int f=NumImageFeatures; f probs(vigra::Shape2(1, m_DecisionForest->class_count())); - m_DecisionForest->predictProbabilities(featureData, probs); - - double outProb = 0; - prob = 0; - candidates = 0; // directions with probability > 0 - for (int i=0; iclass_count(); i++) - { - if (probs(0,i)>0) - { - int classLabel = 0; - m_DecisionForest->ext_param_.to_classlabel(i, classLabel); - - if (classLabel d = m_ODF.GetDirection(m_DirectionIndices.at(classLabel)); - double dot = dot_product(d, olddir); - - if (olddir.magnitude()>0) - { - if (fabs(dot)>angularThreshold) - { - if (dot<0) - d *= -1; - dot = fabs(dot); - direction += probs(0,i)*dot*d; - prob += probs(0,i)*dot; - } - } - else - { - direction += probs(0,i)*d; - prob += probs(0,i); - } - } - else - outProb += probs(0,i); - } - } - - ItkDoubleImgType::IndexType idx; - if (m_Verbose) - { - m_NotWmImage->TransformPhysicalPointToIndex(pos, idx); - if (m_NotWmImage->GetLargestPossibleRegion().IsInside(idx)) - { - m_NotWmImage->SetPixel(idx, m_NotWmImage->GetPixel(idx)+outProb); - m_WmImage->SetPixel(idx, m_WmImage->GetPixel(idx)+prob); - } - } - if (outProb>prob && prob>0) - { - candidates = 0; - prob = 0; - direction.fill(0.0); - } - if (m_Verbose && avoidStop && m_AvoidStopImage->GetLargestPossibleRegion().IsInside(idx) && candidates>0 && direction.magnitude()>0.001) - m_AvoidStopImage->SetPixel(idx, m_AvoidStopImage->GetPixel(idx)+0.1); - - return direction; -} - - template< int 
NumImageFeatures > double MLBSTrackingFilter< NumImageFeatures >::GetRandDouble(double min, double max) { return (double)(rand()%((int)(10000*(max-min))) + 10000*min)/10000; } template< int NumImageFeatures > vnl_vector_fixed MLBSTrackingFilter< NumImageFeatures >::GetNewDirection(itk::Point &pos, vnl_vector_fixed& olddir) { if (m_DemoMode) { m_SamplingPointset->Clear(); m_AlternativePointset->Clear(); } vnl_vector_fixed direction; direction.fill(0); ItkUcharImgType::IndexType idx; m_StoppingRegions->TransformPhysicalPointToIndex(pos, idx); if (m_StoppingRegions->GetLargestPossibleRegion().IsInside(idx) && m_StoppingRegions->GetPixel(idx)>0) return direction; if (olddir.magnitude()>0) olddir.normalize(); int candidates = 0; // number of directions with probability > 0 double prob = 0; if (IsValidPosition(pos)) { - direction = Classify(pos, candidates, olddir, m_AngularThreshold, prob); // sample neighborhood + direction = m_ForestHandler.Classify(pos, candidates, olddir, m_AngularThreshold, prob); // sample neighborhood direction *= prob; } itk::OrientationDistributionFunction< double, 50 > probeVecs; itk::Point sample_pos; int alternatives = 1; for (int i=0; i d; if (m_RandomSampling) { d[0] = GetRandDouble(); d[1] = GetRandDouble(); d[2] = GetRandDouble(); d.normalize(); d *= GetRandDouble(0,m_SamplingDistance); } else { d = probeVecs.GetDirection(i)*m_SamplingDistance; } sample_pos[0] = pos[0] + d[0]; sample_pos[1] = pos[1] + d[1]; sample_pos[2] = pos[2] + d[2]; if(m_DemoMode) m_SamplingPointset->InsertPoint(i, sample_pos); candidates = 0; vnl_vector_fixed tempDir; tempDir.fill(0.0); if (IsValidPosition(sample_pos)) - tempDir = Classify(sample_pos, candidates, olddir, m_AngularThreshold, prob); // sample neighborhood + tempDir = m_ForestHandler.Classify(sample_pos, candidates, olddir, m_AngularThreshold, prob); // sample neighborhood if (candidates>0 && tempDir.magnitude()>0.001) { direction += tempDir*prob; } else if (m_AvoidStop && candidates==0 && olddir.magnitude()>0) // out of white matter { double dot = dot_product(d, olddir); if (dot >= 0.0) // in front of plane defined by pos and olddir d = -d + 2*dot*olddir; // reflect else d = -d; // invert // look a bit further into the other direction sample_pos[0] = pos[0] + d[0]; sample_pos[1] = pos[1] + d[1]; sample_pos[2] = pos[2] + d[2]; if(m_DemoMode) m_AlternativePointset->InsertPoint(alternatives, sample_pos); alternatives++; candidates = 0; vnl_vector_fixed tempDir; tempDir.fill(0.0); if (IsValidPosition(sample_pos)) - tempDir = Classify(sample_pos, candidates, olddir, m_AngularThreshold, prob, true); // sample neighborhood + tempDir = m_ForestHandler.Classify(sample_pos, candidates, olddir, m_AngularThreshold, prob); // sample neighborhood if (candidates>0 && tempDir.magnitude()>0.001) // are we back in the white matter? 
{ direction += d; // go into the direction of the white matter direction += tempDir*prob; // go into the direction of the white matter direction at this location } } } if (direction.magnitude()>0.001) { direction.normalize(); olddir[0] = direction[0]; olddir[1] = direction[1]; olddir[2] = direction[2]; } else direction.fill(0); return direction; } template< int NumImageFeatures > double MLBSTrackingFilter< NumImageFeatures >::FollowStreamline(ThreadIdType threadId, itk::Point pos, vnl_vector_fixed dir, FiberType* fib, double tractLength, bool front) { vnl_vector_fixed dirOld = dir; dirOld = dir; for (int step=0; step< m_MaxLength/2; step++) { // get new position CalculateNewPosition(pos, dir); // is new position inside of image and mask if (m_AbortTracking) // if not end streamline { return tractLength; } else // if yes, add new point to streamline { tractLength += m_StepSize; if (front) fib->push_front(pos); else fib->push_back(pos); if (m_AposterioriCurvCheck) { int curv = CheckCurvature(fib, front); // TODO: Move into classification ??? if (curv>0) { tractLength -= m_StepSize*curv; while (curv>0) { if (front) fib->pop_front(); else fib->pop_back(); curv--; } return tractLength; } } if (tractLength>m_MaxTractLength) return tractLength; } if (m_DemoMode) // CHECK: warum sind die samplingpunkte der streamline in der visualisierung immer einen schritt voras? { m_Mutex.Lock(); m_BuildFibersReady++; m_Tractogram.push_back(*fib); BuildFibers(true); m_Stop = true; m_Mutex.Unlock(); while (m_Stop){ } } dir = GetNewDirection(pos, dirOld); while (m_PauseTracking){} if (dir.magnitude()<0.0001) return tractLength; } return tractLength; } template< int NumImageFeatures > int MLBSTrackingFilter::CheckCurvature(FiberType* fib, bool front) { double m_Distance = 5; if (fib->size()<3) return 0; double dist = 0; std::vector< vnl_vector_fixed< float, 3 > > vectors; vnl_vector_fixed< float, 3 > meanV; meanV.fill(0); double dev = 0; if (front) { int c=0; while(distsize()-1) { itk::Point p1 = fib->at(c); itk::Point p2 = fib->at(c+1); vnl_vector_fixed< float, 3 > v; v[0] = p2[0]-p1[0]; v[1] = p2[1]-p1[1]; v[2] = p2[2]-p1[2]; dist += v.magnitude(); v.normalize(); vectors.push_back(v); if (c==0) meanV += v; c++; } } else { int c=fib->size()-1; while(dist0) { itk::Point p1 = fib->at(c); itk::Point p2 = fib->at(c-1); vnl_vector_fixed< float, 3 > v; v[0] = p2[0]-p1[0]; v[1] = p2[1]-p1[1]; v[2] = p2[2]-p1[2]; dist += v.magnitude(); v.normalize(); vectors.push_back(v); if (c==fib->size()-1) meanV += v; c--; } } meanV.normalize(); for (int c=0; c1.0) angle = 1.0; if (angle<-1.0) angle = -1.0; dev += acos(angle)*180/M_PI; } if (vectors.size()>0) dev /= vectors.size(); if (dev<30) return 0; else return vectors.size(); } template< int NumImageFeatures > void MLBSTrackingFilter< NumImageFeatures >::ThreadedGenerateData(const InputImageRegionType ®ionForThread, ThreadIdType threadId) { m_Mutex.Lock(); m_Threads++; m_Mutex.Unlock(); typedef ImageRegionConstIterator< ItkUcharImgType > MaskIteratorType; MaskIteratorType sit(m_SeedImage, regionForThread ); MaskIteratorType mit(m_MaskImage, regionForThread ); sit.GoToBegin(); mit.GoToBegin(); itk::Point worldPos; while( !sit.IsAtEnd() ) { if (sit.Value()==0 || mit.Value()==0) { ++sit; ++mit; continue; } for (int s=0; s start; unsigned int counter = 0; if (m_SeedsPerVoxel>1) { start[0] = index[0]+GetRandDouble(-0.5, 0.5); start[1] = index[1]+GetRandDouble(-0.5, 0.5); start[2] = index[2]+GetRandDouble(-0.5, 0.5); } else { start[0] = index[0]; start[1] = index[1]; start[2] = 
index[2]; } // get staring position m_SeedImage->TransformContinuousIndexToPhysicalPoint( start, worldPos ); // get starting direction int candidates = 0; double prob = 0; vnl_vector_fixed dirOld; dirOld.fill(0.0); vnl_vector_fixed dir; dir.fill(0.0); if (IsValidPosition(worldPos)) - dir = Classify(worldPos, candidates, dirOld, 0, prob); + dir = m_ForestHandler.Classify(worldPos, candidates, dirOld, 0, prob); if (dir.magnitude()<0.0001) continue; // forward tracking tractLength = FollowStreamline(threadId, worldPos, dir, &fib, 0, false); fib.push_front(worldPos); if (m_RemoveWmEndFibers) { itk::Point check = fib.back(); dirOld.fill(0.0); vnl_vector_fixed check2 = GetNewDirection(check, dirOld); if (check2.magnitude()>0.001) { MITK_INFO << "Detected WM ending. Discarding fiber."; continue; } } // backward tracking tractLength = FollowStreamline(threadId, worldPos, -dir, &fib, tractLength, true); counter = fib.size(); if (m_RemoveWmEndFibers) { itk::Point check = fib.front(); dirOld.fill(0.0); vnl_vector_fixed check2 = GetNewDirection(check, dirOld); if (check2.magnitude()>0.001) { MITK_INFO << "Detected WM ending. Discarding fiber."; continue; } } if (tractLength void MLBSTrackingFilter< NumImageFeatures >::BuildFibers(bool check) { if (m_BuildFibersReady::New(); vtkSmartPointer vNewLines = vtkSmartPointer::New(); vtkSmartPointer vNewPoints = vtkSmartPointer::New(); for (int i=0; i container = vtkSmartPointer::New(); FiberType fib = m_Tractogram.at(i); for (FiberType::iterator it = fib.begin(); it!=fib.end(); ++it) { vtkIdType id = vNewPoints->InsertNextPoint((*it).GetDataPointer()); container->GetPointIds()->InsertNextId(id); } vNewLines->InsertNextCell(container); } if (check) for (int i=0; iSetPoints(vNewPoints); m_FiberPolyData->SetLines(vNewLines); m_BuildFibersFinished = true; } template< int NumImageFeatures > void MLBSTrackingFilter< NumImageFeatures >::AfterThreadedGenerateData() { MITK_INFO << "Generating polydata "; BuildFibers(false); MITK_INFO << "done"; + + m_EndTime = std::chrono::system_clock::now(); + std::chrono::hours hh = std::chrono::duration_cast(m_EndTime - m_StartTime); + std::chrono::minutes mm = std::chrono::duration_cast((m_EndTime - m_StartTime) % std::chrono::hours(1)); + MITK_INFO << "Tracking took " << hh.count() << "h and " << mm.count() << "m"; } } #endif // __itkDiffusionQballPrincipleDirectionsImageFilter_txx diff --git a/Modules/DiffusionImaging/FiberTracking/Algorithms/MLTracking/itkMLBSTrackingFilter.h b/Modules/DiffusionImaging/FiberTracking/Algorithms/MLTracking/itkMLBSTrackingFilter.h index ad08ddeb0a..5005f03e8f 100644 --- a/Modules/DiffusionImaging/FiberTracking/Algorithms/MLTracking/itkMLBSTrackingFilter.h +++ b/Modules/DiffusionImaging/FiberTracking/Algorithms/MLTracking/itkMLBSTrackingFilter.h @@ -1,195 +1,180 @@ /*=================================================================== The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center, Division of Medical and Biological Informatics. All rights reserved. This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See LICENSE.txt or http://www.mitk.org for details. ===================================================================*/ /*=================================================================== This file is based heavily on a corresponding ITK filter. 
===================================================================*/ #ifndef __itkMLBSTrackingFilter_h_ #define __itkMLBSTrackingFilter_h_ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include +#include +#include // classification includes #include #include #include namespace itk{ /** * \brief Performs machine-learning-based streamline tracking on the input diffusion-weighted image. */ template< int NumImageFeatures=100 > class MLBSTrackingFilter : public ImageToImageFilter< VectorImage< short, 3 >, Image< double, 3 > > { public: typedef MLBSTrackingFilter Self; typedef SmartPointer Pointer; typedef SmartPointer ConstPointer; typedef ImageToImageFilter< VectorImage< short, 3 >, Image< double, 3 > > Superclass; - typedef vigra::RandomForest DecisionForestType; typedef typename Superclass::InputImageType InputImageType; typedef typename Superclass::InputImageRegionType InputImageRegionType; - typedef Image< Vector< float, NumImageFeatures > , 3 > FeatureImageType; + typedef Image< Vector< float, NumImageFeatures > , 3 > FeatureImageType; /** Method for creation through the object factory. */ itkFactorylessNewMacro(Self) itkCloneMacro(Self) /** Runtime information support. */ itkTypeMacro(MLBSTrackingFilter, ImageToImageFilter) typedef itk::Image ItkUcharImgType; typedef itk::Image ItkDoubleImgType; typedef itk::Image ItkFloatImgType; typedef vtkSmartPointer< vtkPolyData > PolyDataType; typedef std::deque< itk::Point > FiberType; typedef std::vector< FiberType > BundleType; volatile bool m_PauseTracking; bool m_AbortTracking; bool m_BuildFibersFinished; int m_BuildFibersReady; volatile bool m_Stop; mitk::PointSet::Pointer m_SamplingPointset; mitk::PointSet::Pointer m_AlternativePointset; -// void RequestFibers(){ m_Stop=true; m_BuildFibersReady=0; m_BuildFibersFinished=false; } itkGetMacro( FiberPolyData, PolyDataType ) ///< Output fibers itkSetMacro( SeedImage, ItkUcharImgType::Pointer) ///< Seeds are only placed inside of this mask. itkSetMacro( MaskImage, ItkUcharImgType::Pointer) ///< Tracking is only performed inside of this mask image. itkSetMacro( SeedsPerVoxel, int) ///< One seed is placed in the center of each voxel, or multiple seeds are randomly placed inside each voxel. itkSetMacro( StepSize, double) ///< Integration step size in mm itkSetMacro( MinTractLength, double ) ///< Shorter tracts are discarded. - itkSetMacro( MaxTractLength, double ) - itkSetMacro( AngularThreshold, double ) - itkSetMacro( SamplingDistance, double ) - itkSetMacro( NumberOfSamples, int ) - itkSetMacro( StoppingRegions, ItkUcharImgType::Pointer) - itkSetMacro( B_Value, float ) - itkSetMacro( GradientDirections, mitk::DiffusionPropertyHelper::GradientDirectionsContainerType::Pointer ) + itkSetMacro( MaxTractLength, double ) ///< Streamline progression stops if the tract is longer than specified.
+ itkSetMacro( AngularThreshold, double ) ///< Probabilities for directions with a larger angular deviation from the previous direction are set to 0 + itkSetMacro( SamplingDistance, double ) ///< Maximum distance of the sampling points in mm + itkSetMacro( NumberOfSamples, int ) ///< Number of sampling points + itkSetMacro( StoppingRegions, ItkUcharImgType::Pointer) ///< Streamlines entering a stopping region will stop immediately itkSetMacro( DemoMode, bool ) - itkSetMacro( RemoveWmEndFibers, bool ) - itkSetMacro( AposterioriCurvCheck, bool ) - itkSetMacro( AvoidStop, bool ) - itkSetMacro( RandomSampling, bool ) - itkSetMacro( Verbose, bool ) + itkSetMacro( RemoveWmEndFibers, bool ) ///< Checks if the fiber ending is located in the white matter. If this is the case, the streamline is discarded. + itkSetMacro( AposterioriCurvCheck, bool ) ///< Checks if the fiber curvature (angular deviation across 5 mm) is larger than 30°. If yes, streamline progression is stopped. + itkSetMacro( AvoidStop, bool ) ///< Use additional sampling points to avoid premature streamline termination + itkSetMacro( RandomSampling, bool ) ///< If true, the sampling points are distributed randomly around the current position instead of spherically at the specified sampling distance. - void SetDecisionForest( DecisionForestType* forest ) + void SetForestHandler( mitk::TrackingForestHandler<> fh ) ///< Stores the random forest classifier and performs the actual classification { - m_DecisionForest = forest; + m_ForestHandler = fh; } - itkGetMacro( WmImage, ItkDoubleImgType::Pointer ) - itkGetMacro( NotWmImage, ItkDoubleImgType::Pointer ) - itkGetMacro( AvoidStopImage, ItkDoubleImgType::Pointer ) - protected: MLBSTrackingFilter(); ~MLBSTrackingFilter() {} void CalculateNewPosition(itk::Point& pos, vnl_vector_fixed& dir); ///< Calculate next integration step. double FollowStreamline(ThreadIdType threadId, itk::Point pos, vnl_vector_fixed dir, FiberType* fib, double tractLength, bool front); ///< Start streamline in one direction. bool IsValidPosition(itk::Point& pos); ///< Are we outside of the mask image? - vnl_vector_fixed GetNewDirection(itk::Point& pos, vnl_vector_fixed& olddir); - vnl_vector_fixed Classify(itk::Point& pos, int& candidates, vnl_vector_fixed& olddir, double angularThreshold, double& prob, bool avoidStop=false); + vnl_vector_fixed GetNewDirection(itk::Point& pos, vnl_vector_fixed& olddir); ///< Determine the new direction by sample voting at the current position, taking the last progression direction into account.
- typename FeatureImageType::PixelType GetImageValues(itk::Point itkP); double GetRandDouble(double min=-1, double max=1); double RoundToNearest(double num); void BeforeThreadedGenerateData() override; - void PreprocessRawData(); void ThreadedGenerateData( const InputImageRegionType &outputRegionForThread, ThreadIdType threadId) override; void AfterThreadedGenerateData() override; PolyDataType m_FiberPolyData; vtkSmartPointer m_Points; vtkSmartPointer m_Cells; BundleType m_Tractogram; double m_AngularThreshold; double m_StepSize; int m_MaxLength; double m_MinTractLength; double m_MaxTractLength; int m_SeedsPerVoxel; bool m_RandomSampling; double m_SamplingDistance; int m_NumberOfSamples; - std::vector< int > m_ImageSize; - std::vector< double > m_ImageSpacing; SimpleFastMutexLock m_Mutex; ItkUcharImgType::Pointer m_StoppingRegions; - ItkDoubleImgType::Pointer m_WmImage; - ItkDoubleImgType::Pointer m_NotWmImage; - ItkDoubleImgType::Pointer m_AvoidStopImage; ItkUcharImgType::Pointer m_SeedImage; ItkUcharImgType::Pointer m_MaskImage; - typename FeatureImageType::Pointer m_FeatureImage; - typename InputImageType::Pointer m_InputImage; mitk::DiffusionPropertyHelper::GradientDirectionsContainerType::Pointer m_GradientDirections; float m_B_Value; bool m_AposterioriCurvCheck; bool m_RemoveWmEndFibers; bool m_AvoidStop; - bool m_Verbose; int m_Threads; bool m_DemoMode; void BuildFibers(bool check); int CheckCurvature(FiberType* fib, bool front); // decision forest - DecisionForestType* m_DecisionForest; - itk::OrientationDistributionFunction< double, NumImageFeatures*2 > m_ODF; - std::vector< int > m_DirectionIndices; + mitk::TrackingForestHandler<> m_ForestHandler; + typename InputImageType::Pointer m_InputImage; + std::vector< PolyDataType > m_PolyDataContainer; + std::chrono::time_point m_StartTime; + std::chrono::time_point m_EndTime; + private: }; } #ifndef ITK_MANUAL_INSTANTIATION #include "itkMLBSTrackingFilter.cpp" #endif #endif //__itkMLBSTrackingFilter_h_ diff --git a/Modules/DiffusionImaging/FiberTracking/Algorithms/MLTracking/mitkTrackingForestHandler.cpp b/Modules/DiffusionImaging/FiberTracking/Algorithms/MLTracking/mitkTrackingForestHandler.cpp index 171879c509..56f0f524ab 100644 --- a/Modules/DiffusionImaging/FiberTracking/Algorithms/MLTracking/mitkTrackingForestHandler.cpp +++ b/Modules/DiffusionImaging/FiberTracking/Algorithms/MLTracking/mitkTrackingForestHandler.cpp @@ -1,524 +1,697 @@ /*=================================================================== The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center, Division of Medical and Biological Informatics. All rights reserved. This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See LICENSE.txt or http://www.mitk.org for details. 
===================================================================*/ #ifndef _TrackingForestHandler_cpp #define _TrackingForestHandler_cpp #include "mitkTrackingForestHandler.h" #include #include namespace mitk { -template< int NumberOfSignalFeatures > -TrackingForestHandler< NumberOfSignalFeatures >::TrackingForestHandler() - : m_GrayMatterSamplesPerVoxel(50) - , m_StepSize(-1) +template< int ShOrder, int NumberOfSignalFeatures > +TrackingForestHandler< ShOrder, NumberOfSignalFeatures >::TrackingForestHandler() + : m_WmSampleDistance(-1) , m_NumTrees(30) , m_MaxTreeDepth(50) , m_SampleFraction(1.0) + , m_NumberOfSamples(0) + , m_GmSamplesPerVoxel(50) { - m_Forest.reset(); + } -template< int NumberOfSignalFeatures > -TrackingForestHandler< NumberOfSignalFeatures >::~TrackingForestHandler() +template< int ShOrder, int NumberOfSignalFeatures > +TrackingForestHandler< ShOrder, NumberOfSignalFeatures >::~TrackingForestHandler() { } -template< int NumberOfSignalFeatures > -typename TrackingForestHandler< NumberOfSignalFeatures >::InterpolatedRawImageType::PixelType TrackingForestHandler< NumberOfSignalFeatures >::GetImageValues(itk::Point itkP, typename InterpolatedRawImageType::Pointer image) +template< int ShOrder, int NumberOfSignalFeatures > +typename TrackingForestHandler< ShOrder, NumberOfSignalFeatures >::InterpolatedRawImageType::PixelType TrackingForestHandler< ShOrder, NumberOfSignalFeatures >::GetImageValues(itk::Point itkP, typename InterpolatedRawImageType::Pointer image) { itk::Index<3> idx; itk::ContinuousIndex< double, 3> cIdx; image->TransformPhysicalPointToIndex(itkP, idx); image->TransformPhysicalPointToContinuousIndex(itkP, cIdx); typename InterpolatedRawImageType::PixelType pix; pix.Fill(0.0); if ( image->GetLargestPossibleRegion().IsInside(idx) ) pix = image->GetPixel(idx); else return pix; double frac_x = cIdx[0] - idx[0]; double frac_y = cIdx[1] - idx[1]; double frac_z = cIdx[2] - idx[2]; if (frac_x<0) { idx[0] -= 1; frac_x += 1; } if (frac_y<0) { idx[1] -= 1; frac_y += 1; } if (frac_z<0) { idx[2] -= 1; frac_z += 1; } frac_x = 1-frac_x; frac_y = 1-frac_y; frac_z = 1-frac_z; // int coordinates inside image? 
if (idx[0] >= 0 && idx[0] < image->GetLargestPossibleRegion().GetSize(0)-1 && idx[1] >= 0 && idx[1] < image->GetLargestPossibleRegion().GetSize(1)-1 && idx[2] >= 0 && idx[2] < image->GetLargestPossibleRegion().GetSize(2)-1) { vnl_vector_fixed interpWeights; interpWeights[0] = ( frac_x)*( frac_y)*( frac_z); interpWeights[1] = (1-frac_x)*( frac_y)*( frac_z); interpWeights[2] = ( frac_x)*(1-frac_y)*( frac_z); interpWeights[3] = ( frac_x)*( frac_y)*(1-frac_z); interpWeights[4] = (1-frac_x)*(1-frac_y)*( frac_z); interpWeights[5] = ( frac_x)*(1-frac_y)*(1-frac_z); interpWeights[6] = (1-frac_x)*( frac_y)*(1-frac_z); interpWeights[7] = (1-frac_x)*(1-frac_y)*(1-frac_z); pix = image->GetPixel(idx) * interpWeights[0]; typename InterpolatedRawImageType::IndexType tmpIdx = idx; tmpIdx[0]++; pix += image->GetPixel(tmpIdx) * interpWeights[1]; tmpIdx = idx; tmpIdx[1]++; pix += image->GetPixel(tmpIdx) * interpWeights[2]; tmpIdx = idx; tmpIdx[2]++; pix += image->GetPixel(tmpIdx) * interpWeights[3]; tmpIdx = idx; tmpIdx[0]++; tmpIdx[1]++; pix += image->GetPixel(tmpIdx) * interpWeights[4]; tmpIdx = idx; tmpIdx[1]++; tmpIdx[2]++; pix += image->GetPixel(tmpIdx) * interpWeights[5]; tmpIdx = idx; tmpIdx[2]++; tmpIdx[0]++; pix += image->GetPixel(tmpIdx) * interpWeights[6]; tmpIdx = idx; tmpIdx[0]++; tmpIdx[1]++; tmpIdx[2]++; pix += image->GetPixel(tmpIdx) * interpWeights[7]; } return pix; } -template< int NumberOfSignalFeatures > -void TrackingForestHandler< NumberOfSignalFeatures >::InputDataValidForTracking() +template< int ShOrder, int NumberOfSignalFeatures > +void TrackingForestHandler< ShOrder, NumberOfSignalFeatures >::InputDataValidForTracking() { if (m_RawData.empty()) mitkThrow() << "No diffusion-weighted images set!"; + if (!IsForestValid()) + mitkThrow() << "No or invalid random forest detected!"; +} + +template< int ShOrder, int NumberOfSignalFeatures > +void TrackingForestHandler< ShOrder, NumberOfSignalFeatures >::InitForTracking() +{ + InputDataValidForTracking(); + + typedef itk::AnalyticalDiffusionQballReconstructionImageFilter InterpolationFilterType; + + MITK_INFO << "Spherically interpolating raw data and creating feature image ..."; + + typename InterpolationFilterType::Pointer filter = InterpolationFilterType::New(); + filter->SetGradientImage( mitk::DiffusionPropertyHelper::GetGradientContainer(m_RawData.at(0)), mitk::DiffusionPropertyHelper::GetItkVectorImage(m_RawData.at(0)) ); + filter->SetBValue(mitk::DiffusionPropertyHelper::GetReferenceBValue(m_RawData.at(0))); + filter->SetLambda(0.006); + filter->SetNormalizationMethod(InterpolationFilterType::QBAR_RAW_SIGNAL); + filter->Update(); + + vnl_vector_fixed ref; ref.fill(0); ref[0]=1; + itk::OrientationDistributionFunction< double, NumberOfSignalFeatures*2 > odf; + m_DirectionIndices.clear(); + for (unsigned int f=0; f0) // only used directions on one hemisphere + m_DirectionIndices.push_back(f); + } + + m_FeatureImage = FeatureImageType::New(); + m_FeatureImage->SetSpacing(filter->GetOutput()->GetSpacing()); + m_FeatureImage->SetOrigin(filter->GetOutput()->GetOrigin()); + m_FeatureImage->SetDirection(filter->GetOutput()->GetDirection()); + m_FeatureImage->SetLargestPossibleRegion(filter->GetOutput()->GetLargestPossibleRegion()); + m_FeatureImage->SetBufferedRegion(filter->GetOutput()->GetLargestPossibleRegion()); + m_FeatureImage->SetRequestedRegion(filter->GetOutput()->GetLargestPossibleRegion()); + m_FeatureImage->Allocate(); + + itk::ImageRegionIterator< typename InterpolationFilterType::OutputImageType > it(filter->GetOutput(), 
filter->GetOutput()->GetLargestPossibleRegion()); + while(!it.IsAtEnd()) + { + typename FeatureImageType::PixelType pix; + for (unsigned int f=0; fSetPixel(it.GetIndex(), pix); + ++it; + } +} + +template< int ShOrder, int NumberOfSignalFeatures > +typename TrackingForestHandler< ShOrder, NumberOfSignalFeatures >::FeatureImageType::PixelType TrackingForestHandler< ShOrder, NumberOfSignalFeatures >::GetFeatureValues(itk::Point itkP) +{ + itk::Index<3> idx; + itk::ContinuousIndex< double, 3> cIdx; + m_FeatureImage->TransformPhysicalPointToIndex(itkP, idx); + m_FeatureImage->TransformPhysicalPointToContinuousIndex(itkP, cIdx); + + typename FeatureImageType::PixelType pix; pix.Fill(0.0); + if ( m_FeatureImage->GetLargestPossibleRegion().IsInside(idx) ) + pix = m_FeatureImage->GetPixel(idx); + else + return pix; + + double frac_x = cIdx[0] - idx[0]; + double frac_y = cIdx[1] - idx[1]; + double frac_z = cIdx[2] - idx[2]; + if (frac_x<0) + { + idx[0] -= 1; + frac_x += 1; + } + if (frac_y<0) + { + idx[1] -= 1; + frac_y += 1; + } + if (frac_z<0) + { + idx[2] -= 1; + frac_z += 1; + } + frac_x = 1-frac_x; + frac_y = 1-frac_y; + frac_z = 1-frac_z; + + // int coordinates inside image? + if (idx[0] >= 0 && idx[0] < m_FeatureImage->GetLargestPossibleRegion().GetSize(0)-1 && + idx[1] >= 0 && idx[1] < m_FeatureImage->GetLargestPossibleRegion().GetSize(1)-1 && + idx[2] >= 0 && idx[2] < m_FeatureImage->GetLargestPossibleRegion().GetSize(2)-1) + { + vnl_vector_fixed interpWeights; + interpWeights[0] = ( frac_x)*( frac_y)*( frac_z); + interpWeights[1] = (1-frac_x)*( frac_y)*( frac_z); + interpWeights[2] = ( frac_x)*(1-frac_y)*( frac_z); + interpWeights[3] = ( frac_x)*( frac_y)*(1-frac_z); + interpWeights[4] = (1-frac_x)*(1-frac_y)*( frac_z); + interpWeights[5] = ( frac_x)*(1-frac_y)*(1-frac_z); + interpWeights[6] = (1-frac_x)*( frac_y)*(1-frac_z); + interpWeights[7] = (1-frac_x)*(1-frac_y)*(1-frac_z); + + pix = m_FeatureImage->GetPixel(idx) * interpWeights[0]; + typename FeatureImageType::IndexType tmpIdx = idx; tmpIdx[0]++; + pix += m_FeatureImage->GetPixel(tmpIdx) * interpWeights[1]; + tmpIdx = idx; tmpIdx[1]++; + pix += m_FeatureImage->GetPixel(tmpIdx) * interpWeights[2]; + tmpIdx = idx; tmpIdx[2]++; + pix += m_FeatureImage->GetPixel(tmpIdx) * interpWeights[3]; + tmpIdx = idx; tmpIdx[0]++; tmpIdx[1]++; + pix += m_FeatureImage->GetPixel(tmpIdx) * interpWeights[4]; + tmpIdx = idx; tmpIdx[1]++; tmpIdx[2]++; + pix += m_FeatureImage->GetPixel(tmpIdx) * interpWeights[5]; + tmpIdx = idx; tmpIdx[2]++; tmpIdx[0]++; + pix += m_FeatureImage->GetPixel(tmpIdx) * interpWeights[6]; + tmpIdx = idx; tmpIdx[0]++; tmpIdx[1]++; tmpIdx[2]++; + pix += m_FeatureImage->GetPixel(tmpIdx) * interpWeights[7]; + } + + return pix; +} + +template< int ShOrder, int NumberOfSignalFeatures > +vnl_vector_fixed TrackingForestHandler< ShOrder, NumberOfSignalFeatures >::Classify(itk::Point& pos, int& candidates, vnl_vector_fixed& olddir, double angularThreshold, double& prob) +{ + vnl_vector_fixed direction; direction.fill(0); + + vigra::MultiArray<2, double> featureData = vigra::MultiArray<2, double>( vigra::Shape2(1,NumberOfSignalFeatures+3) ); + + typename FeatureImageType::PixelType featurePixel = GetFeatureValues(pos); + + // pixel values + for (unsigned int f=0; f ref; ref.fill(0); ref[0]=1; + for (unsigned int f=NumberOfSignalFeatures; f probs(vigra::Shape2(1, m_Forest->class_count())); + m_Forest->predictProbabilities(featureData, probs); + + double outProb = 0; + prob = 0; + candidates = 0; // directions with probability > 0 + for (int 
i=0; iclass_count(); i++) + { + if (probs(0,i)>0) + { + int classLabel = 0; + m_Forest->ext_param_.to_classlabel(i, classLabel); + + if (classLabel d = m_DirContainer.GetDirection(m_DirectionIndices.at(classLabel)); + double dot = dot_product(d, olddir); + + if (olddir.magnitude()>0) + { + if (fabs(dot)>angularThreshold) + { + if (dot<0) + d *= -1; + dot = fabs(dot); + direction += probs(0,i)*dot*d; + prob += probs(0,i)*dot; + } + } + else + { + direction += probs(0,i)*d; + prob += probs(0,i); + } + } + else + outProb += probs(0,i); + } + } + + if (outProb>prob && prob>0) + { + candidates = 0; + prob = 0; + direction.fill(0.0); + } + + return direction; } -template< int NumberOfSignalFeatures > -void TrackingForestHandler< NumberOfSignalFeatures >::StartTraining() + +template< int ShOrder, int NumberOfSignalFeatures > +void TrackingForestHandler< ShOrder, NumberOfSignalFeatures >::StartTraining() { + m_StartTime = std::chrono::system_clock::now(); InputDataValidForTraining(); - PreprocessInputData(); - CalculateFeatures(); + PreprocessInputDataForTraining(); + CalculateFeaturesForTraining(); TrainForest(); + m_EndTime = std::chrono::system_clock::now(); + std::chrono::hours hh = std::chrono::duration_cast(m_EndTime - m_StartTime); + std::chrono::minutes mm = std::chrono::duration_cast((m_EndTime - m_StartTime) % std::chrono::hours(1)); + MITK_INFO << "Training took " << hh.count() << "h and " << mm.count() << "m"; } -template< int NumberOfSignalFeatures > -void TrackingForestHandler< NumberOfSignalFeatures >::InputDataValidForTraining() +template< int ShOrder, int NumberOfSignalFeatures > +void TrackingForestHandler< ShOrder, NumberOfSignalFeatures >::InputDataValidForTraining() { if (m_RawData.empty()) mitkThrow() << "No diffusion-weighted images set!"; if (m_Tractograms.empty()) mitkThrow() << "No tractograms set!"; if (m_RawData.size()!=m_Tractograms.size()) mitkThrow() << "Unequal number of diffusion-weighted images and tractograms detected!"; } -template< int NumberOfSignalFeatures > -bool TrackingForestHandler< NumberOfSignalFeatures >::IsForestValid() +template< int ShOrder, int NumberOfSignalFeatures > +bool TrackingForestHandler< ShOrder, NumberOfSignalFeatures >::IsForestValid() { - if(m_Forest.tree_count()>0 && m_Forest.feature_count()==(NumberOfSignalFeatures+3)) + if(m_Forest && m_Forest->tree_count()>0 && m_Forest->feature_count()==(NumberOfSignalFeatures+3)) return true; return false; } -template< int NumberOfSignalFeatures > -void TrackingForestHandler< NumberOfSignalFeatures >::PreprocessInputData() +template< int ShOrder, int NumberOfSignalFeatures > +void TrackingForestHandler< ShOrder, NumberOfSignalFeatures >::PreprocessInputDataForTraining() { - typedef itk::AnalyticalDiffusionQballReconstructionImageFilter InterpolationFilterType; + typedef itk::AnalyticalDiffusionQballReconstructionImageFilter InterpolationFilterType; MITK_INFO << "Spherical signal interpolation and sampling ..."; for (unsigned int i=0; iSetGradientImage( mitk::DiffusionPropertyHelper::GetGradientContainer(m_RawData.at(i)), mitk::DiffusionPropertyHelper::GetItkVectorImage(m_RawData.at(i)) ); qballfilter->SetBValue(mitk::DiffusionPropertyHelper::GetReferenceBValue(m_RawData.at(i))); qballfilter->SetLambda(0.006); qballfilter->SetNormalizationMethod(InterpolationFilterType::QBAR_RAW_SIGNAL); qballfilter->Update(); // FeatureImageType::Pointer itkFeatureImage = qballfilter->GetCoefficientImage(); - // featureImageVector.push_back(itkFeatureImage); 
m_InterpolatedRawImages.push_back(qballfilter->GetOutput()); if (i>=m_MaskImages.size()) { ItkUcharImgType::Pointer newMask = ItkUcharImgType::New(); newMask->SetSpacing( m_InterpolatedRawImages.at(i)->GetSpacing() ); newMask->SetOrigin( m_InterpolatedRawImages.at(i)->GetOrigin() ); newMask->SetDirection( m_InterpolatedRawImages.at(i)->GetDirection() ); newMask->SetLargestPossibleRegion( m_InterpolatedRawImages.at(i)->GetLargestPossibleRegion() ); newMask->SetBufferedRegion( m_InterpolatedRawImages.at(i)->GetLargestPossibleRegion() ); newMask->SetRequestedRegion( m_InterpolatedRawImages.at(i)->GetLargestPossibleRegion() ); newMask->Allocate(); newMask->FillBuffer(1); m_MaskImages.push_back(newMask); } if (m_MaskImages.at(i)==nullptr) { m_MaskImages.at(i) = ItkUcharImgType::New(); m_MaskImages.at(i)->SetSpacing( m_InterpolatedRawImages.at(i)->GetSpacing() ); m_MaskImages.at(i)->SetOrigin( m_InterpolatedRawImages.at(i)->GetOrigin() ); m_MaskImages.at(i)->SetDirection( m_InterpolatedRawImages.at(i)->GetDirection() ); m_MaskImages.at(i)->SetLargestPossibleRegion( m_InterpolatedRawImages.at(i)->GetLargestPossibleRegion() ); m_MaskImages.at(i)->SetBufferedRegion( m_InterpolatedRawImages.at(i)->GetLargestPossibleRegion() ); m_MaskImages.at(i)->SetRequestedRegion( m_InterpolatedRawImages.at(i)->GetLargestPossibleRegion() ); m_MaskImages.at(i)->Allocate(); m_MaskImages.at(i)->FillBuffer(1); } } MITK_INFO << "Resampling fibers and calculating number of samples ..."; m_NumberOfSamples = 0; for (unsigned int t=0; t::Pointer env = itk::TractDensityImageFilter< ItkUcharImgType >::New(); env->SetFiberBundle(m_Tractograms.at(t)); env->SetInputImage(mask); env->SetBinaryOutput(true); env->SetUseImageGeometry(true); env->Update(); wmmask = env->GetOutput(); if (t>=m_WhiteMatterImages.size()) m_WhiteMatterImages.push_back(wmmask); else m_WhiteMatterImages.at(t) = wmmask; } // Calculate white-matter samples - if (m_StepSize<0) + if (m_WmSampleDistance<0) { typename InterpolatedRawImageType::Pointer image = m_InterpolatedRawImages.at(t); float minSpacing = 1; if(image->GetSpacing()[0]GetSpacing()[1] && image->GetSpacing()[0]GetSpacing()[2]) minSpacing = image->GetSpacing()[0]; else if (image->GetSpacing()[1] < image->GetSpacing()[2]) minSpacing = image->GetSpacing()[1]; else minSpacing = image->GetSpacing()[2]; - m_StepSize = minSpacing*0.5; + m_WmSampleDistance = minSpacing*0.5; } - m_Tractograms.at(t)->ResampleSpline(m_StepSize); + m_Tractograms.at(t)->ResampleSpline(m_WmSampleDistance); unsigned int wmSamples = m_Tractograms.at(t)->GetNumberOfPoints()-2*m_Tractograms.at(t)->GetNumFibers(); m_NumberOfSamples += wmSamples; MITK_INFO << "Samples inside of WM: " << wmSamples; // calculate gray-matter samples itk::ImageRegionConstIterator it(wmmask, wmmask->GetLargestPossibleRegion()); int OUTOFWM = 0; while(!it.IsAtEnd()) { if (it.Get()==0 && mask->GetPixel(it.GetIndex())>0) OUTOFWM++; ++it; } MITK_INFO << "Non-white matter voxels: " << OUTOFWM; - if (m_GrayMatterSamplesPerVoxel>0) + if (m_GmSamplesPerVoxel>0) { - m_GmSamples.push_back(m_GrayMatterSamplesPerVoxel); - m_NumberOfSamples += m_GrayMatterSamplesPerVoxel*OUTOFWM; + m_GmSamples.push_back(m_GmSamplesPerVoxel); + m_NumberOfSamples += m_GmSamplesPerVoxel*OUTOFWM; } else if (OUTOFWM>0) { m_GmSamples.push_back(0.5+(double)wmSamples/(double)OUTOFWM); m_NumberOfSamples += m_GmSamples.back()*OUTOFWM; MITK_INFO << "Non-white matter samples per voxel: " << m_GmSamples.back(); } else { m_GmSamples.push_back(0); } MITK_INFO << "Samples outside of WM: " << 
m_GmSamples.back()*OUTOFWM; } MITK_INFO << "Number of samples: " << m_NumberOfSamples; } -template< int NumberOfSignalFeatures > -void TrackingForestHandler< NumberOfSignalFeatures >::CalculateFeatures() +template< int ShOrder, int NumberOfSignalFeatures > +void TrackingForestHandler< ShOrder, NumberOfSignalFeatures >::CalculateFeaturesForTraining() { vnl_vector_fixed ref; ref.fill(0); ref[0]=1; itk::OrientationDistributionFunction< double, 2*NumberOfSignalFeatures > directions; std::vector< int > directionIndices; for (unsigned int f=0; f<2*NumberOfSignalFeatures; f++) { if (dot_product(ref, directions.GetDirection(f))>0) directionIndices.push_back(f); } int numDirectionFeatures = 3; m_FeatureData.reshape( vigra::Shape2(m_NumberOfSamples, NumberOfSignalFeatures+numDirectionFeatures) ); m_LabelData.reshape( vigra::Shape2(m_NumberOfSamples,1) ); MITK_INFO << "Number of features: " << m_FeatureData.shape(1); itk::Statistics::MersenneTwisterRandomVariateGenerator::Pointer m_RandGen = itk::Statistics::MersenneTwisterRandomVariateGenerator::New(); m_RandGen->SetSeed(); MITK_INFO << "Creating training data ..."; int sampleCounter = 0; for (unsigned int t=0; t it(wmMask, wmMask->GetLargestPossibleRegion()); while(!it.IsAtEnd()) { if (it.Get()==0 && (mask.IsNull() || (mask.IsNotNull() && mask->GetPixel(it.GetIndex())>0))) { typename InterpolatedRawImageType::PixelType pix = image->GetPixel(it.GetIndex()); // null direction for (unsigned int f=0; f probe; probe[0] = m_RandGen->GetVariate()*2-1; probe[1] = m_RandGen->GetVariate()*2-1; probe[2] = m_RandGen->GetVariate()*2-1; probe.normalize(); if (dot_product(ref, probe)<0) probe *= -1; for (unsigned int f=NumberOfSignalFeatures; f polyData = fib->GetFiberPolyData(); for (int i=0; iGetNumFibers(); i++) { vtkCell* cell = polyData->GetCell(i); int numPoints = cell->GetNumberOfPoints(); vtkPoints* points = cell->GetPoints(); vnl_vector_fixed dirOld; dirOld.fill(0.0); for (int j=0; jGetPoint(j); itk::Point itkP1; itkP1[0] = p1[0]; itkP1[1] = p1[1]; itkP1[2] = p1[2]; vnl_vector_fixed dir; dir.fill(0.0); itk::Point itkP2; double* p2 = points->GetPoint(j+1); itkP2[0] = p2[0]; itkP2[1] = p2[1]; itkP2[2] = p2[2]; dir[0]=itkP2[0]-itkP1[0]; dir[1]=itkP2[1]-itkP1[1]; dir[2]=itkP2[2]-itkP1[2]; if (dir.magnitude()<0.0001) { MITK_INFO << "streamline error!"; continue; } dir.normalize(); if (dir[0]!=dir[0] || dir[1]!=dir[1] || dir[2]!=dir[2]) { MITK_INFO << "ERROR: NaN direction!"; continue; } if (j==0) { dirOld = dir; continue; } // get voxel values typename InterpolatedRawImageType::PixelType pix = GetImageValues(itkP1, image); for (unsigned int f=0; f0.0001) { - int label = 0; for (unsigned int f=0; fangle) { m_LabelData(sampleCounter,0) = f; angle = a; - label = f; } } } dirOld = dir; sampleCounter++; } } } } -template< int NumberOfSignalFeatures > -void TrackingForestHandler< NumberOfSignalFeatures >::TrainForest() +template< int ShOrder, int NumberOfSignalFeatures > +void TrackingForestHandler< ShOrder, NumberOfSignalFeatures >::TrainForest() { MITK_INFO << "Maximum tree depths: " << m_MaxTreeDepth; MITK_INFO << "Sample fraction per tree: " << m_SampleFraction; MITK_INFO << "Number of trees: " << m_NumTrees; - std::vector< vigra::RandomForest > trees; + std::vector< std::shared_ptr< vigra::RandomForest > > trees; int count = 0; #pragma omp parallel for for (int i = 0; i < m_NumTrees; ++i) { - vigra::RandomForest lrf; - vigra::rf::visitors::OOB_Error loob_v; - - lrf.set_options().use_stratification(vigra::RF_NONE); // How the data should be made equal - 
lrf.set_options().sample_with_replacement(true); // if sampled with replacement or not - lrf.set_options().samples_per_tree(m_SampleFraction); // Fraction of samples that are used to train a tree - lrf.set_options().tree_count(1); // Number of trees that are calculated; - lrf.set_options().min_split_node_size(5); // Minimum number of datapoints that must be in a node - lrf.ext_param_.max_tree_depth = m_MaxTreeDepth; - - lrf.learn(m_FeatureData, m_LabelData); + std::shared_ptr< vigra::RandomForest > lrf = std::make_shared< vigra::RandomForest >(); + lrf->set_options().use_stratification(vigra::RF_NONE); // How the data should be made equal + lrf->set_options().sample_with_replacement(true); // if sampled with replacement or not + lrf->set_options().samples_per_tree(m_SampleFraction); // Fraction of samples that are used to train a tree + lrf->set_options().tree_count(1); // Number of trees that are calculated; + lrf->set_options().min_split_node_size(5); // Minimum number of datapoints that must be in a node + lrf->ext_param_.max_tree_depth = m_MaxTreeDepth; + + lrf->learn(m_FeatureData, m_LabelData); #pragma omp critical { count++; MITK_INFO << "Tree " << count << " finished training."; trees.push_back(lrf); } } for (int i = 1; i < m_NumTrees; ++i) - trees.at(0).trees_.push_back(trees.at(i).trees_[0]); + trees.at(0)->trees_.push_back(trees.at(i)->trees_[0]); m_Forest = trees.at(0); - m_Forest.options_.tree_count_ = m_NumTrees; + m_Forest->options_.tree_count_ = m_NumTrees; MITK_INFO << "Training finished"; } -template< int NumberOfSignalFeatures > -void TrackingForestHandler< NumberOfSignalFeatures >::SaveForest(std::string forestFile) +template< int ShOrder, int NumberOfSignalFeatures > +void TrackingForestHandler< ShOrder, NumberOfSignalFeatures >::SaveForest(std::string forestFile) { MITK_INFO << "Saving forest to " << forestFile; if (IsForestValid()) - vigra::rf_export_HDF5( m_Forest, forestFile, "" ); + vigra::rf_export_HDF5( *m_Forest, forestFile, "" ); else MITK_INFO << "Forest invalid!
Could not be saved."; } -template< int NumberOfSignalFeatures > -void TrackingForestHandler< NumberOfSignalFeatures >::LoadForest(std::string forestFile) +template< int ShOrder, int NumberOfSignalFeatures > +void TrackingForestHandler< ShOrder, NumberOfSignalFeatures >::LoadForest(std::string forestFile) { MITK_INFO << "Loading forest from " << forestFile; - vigra::rf_import_HDF5(m_Forest, forestFile); + m_Forest = std::make_shared< vigra::RandomForest >(); + vigra::rf_import_HDF5( *m_Forest, forestFile); } -//// superclass implementations -//template< int NumberOfSignalFeatures > -//void TrackingForestHandler< NumberOfSignalFeatures >::UpdateOutputInformation() -//{ - -//} -//template< int NumberOfSignalFeatures > -//void TrackingForestHandler< NumberOfSignalFeatures >::SetRequestedRegionToLargestPossibleRegion() -//{ - -//} -//template< int NumberOfSignalFeatures > -//bool TrackingForestHandler< NumberOfSignalFeatures >::RequestedRegionIsOutsideOfTheBufferedRegion() -//{ -// return false; -//} -//template< int NumberOfSignalFeatures > -//bool TrackingForestHandler< NumberOfSignalFeatures >::VerifyRequestedRegion() -//{ -// return true; -//} -//template< int NumberOfSignalFeatures > -//void TrackingForestHandler< NumberOfSignalFeatures >::SetRequestedRegion(const itk::DataObject* ) -//{ - -//} - } #endif diff --git a/Modules/DiffusionImaging/FiberTracking/Algorithms/MLTracking/mitkTrackingForestHandler.h b/Modules/DiffusionImaging/FiberTracking/Algorithms/MLTracking/mitkTrackingForestHandler.h index 067cc2418a..16d9b98da9 100644 --- a/Modules/DiffusionImaging/FiberTracking/Algorithms/MLTracking/mitkTrackingForestHandler.h +++ b/Modules/DiffusionImaging/FiberTracking/Algorithms/MLTracking/mitkTrackingForestHandler.h @@ -1,115 +1,132 @@ /*=================================================================== The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center, Division of Medical and Biological Informatics. All rights reserved. This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See LICENSE.txt or http://www.mitk.org for details. ===================================================================*/ #ifndef _TrackingForestHandler #define _TrackingForestHandler #include "mitkBaseData.h" #include #include #include #include +#include +#include -// classification includes -//#include "RegressionForestClasses.hxx" #undef DIFFERENCE #define VIGRA_STATIC_LIB #include #include #include #define _USE_MATH_DEFINES #include namespace mitk { /** -* \brief */ +* \brief Manages random forests for fiber tractography. The preparation of the features from the input data and the training process are handled here. The data preprocessing and the actual prediction for the tracking process are also performed here. The tracking itself is performed in MLBSTrackingFilter.
*/ -template< int NumberOfSignalFeatures=100 > +template< int ShOrder=6, int NumberOfSignalFeatures=100 > class TrackingForestHandler { public: TrackingForestHandler(); ~TrackingForestHandler(); - typedef itk::Image ItkUcharImgType; - typedef itk::Image< itk::Vector< float, NumberOfSignalFeatures*2 > , 3 > InterpolatedRawImageType; + typedef itk::Image ItkUcharImgType; + typedef itk::Image< itk::Vector< float, NumberOfSignalFeatures*2 > , 3 > InterpolatedRawImageType; + typedef itk::Image< Vector< float, NumberOfSignalFeatures > , 3 > FeatureImageType; void SetRawData( std::vector< Image::Pointer > images ){ m_RawData = images; } + void AddRawData( Image::Pointer img ){ m_RawData.push_back(img); } void SetTractograms( std::vector< FiberBundle::Pointer > tractograms ) { m_Tractograms.clear(); - for (int i=0; iGetDeepCopy()); + m_Tractograms.push_back(fib->GetDeepCopy()); } } void SetMaskImages( std::vector< ItkUcharImgType::Pointer > images ){ m_MaskImages = images; } void SetWhiteMatterImages( std::vector< ItkUcharImgType::Pointer > images ){ m_WhiteMatterImages = images; } void StartTraining(); void SaveForest(std::string forestFile); void LoadForest(std::string forestFile); + // training parameters + void SetNumTrees(int num){ m_NumTrees = num; } void SetMaxTreeDepth(int depth){ m_MaxTreeDepth = depth; } - void SetStepSize(double step){ m_StepSize = step; } - void SetGrayMatterSamplesPerVoxel(int samples){ m_GrayMatterSamplesPerVoxel = samples; } + void SetStepSize(double step){ m_WmSampleDistance = step; } + void SetGrayMatterSamplesPerVoxel(int samples){ m_GmSamplesPerVoxel = samples; } void SetSampleFraction(double fraction){ m_SampleFraction = fraction; } - vigra::RandomForest GetForest(){ return m_Forest; } - bool IsForestValid(); + std::shared_ptr< vigra::RandomForest > GetForest(){ return m_Forest; } + + void InitForTracking(); ///< calls InputDataValidForTracking() and creates feature images from the war input DWI + vnl_vector_fixed Classify(itk::Point& pos, int& candidates, vnl_vector_fixed& olddir, double angularThreshold, double& prob); ///< predicts next progression direction at the given position + + bool IsForestValid(); ///< true is forest is not null, has more than 0 trees and the correct number of features (NumberOfSignalFeatures + 3) protected: - void InputDataValidForTracking(); - void InputDataValidForTraining(); - void PreprocessInputData(); - void CalculateFeatures(); - void TrainForest(); - - int m_GrayMatterSamplesPerVoxel; - double m_StepSize; - int m_NumTrees; - int m_MaxTreeDepth; - double m_SampleFraction; - - std::vector< Image::Pointer > m_RawData; - std::vector< FiberBundle::Pointer > m_Tractograms; - std::vector< ItkUcharImgType::Pointer > m_MaskImages; - std::vector< ItkUcharImgType::Pointer > m_WhiteMatterImages; - std::vector< ItkUcharImgType::Pointer > m_SeedImages; - std::vector< ItkUcharImgType::Pointer > m_StopImages; - - unsigned int m_NumberOfSamples; - std::vector< unsigned int > m_GmSamples; - vigra::RandomForest m_Forest; - vigra::MultiArray<2, double> m_FeatureData; - vigra::MultiArray<2, double> m_LabelData; - std::vector< typename InterpolatedRawImageType::Pointer > m_InterpolatedRawImages; - - typename InterpolatedRawImageType::PixelType GetImageValues(itk::Point itkP, typename InterpolatedRawImageType::Pointer image); + // tracking + void InputDataValidForTracking(); ///< check if raw data is set and tracking forest is valid + typename FeatureImageType::PixelType GetFeatureValues(itk::Point itkP); ///< get trilinearly interpolated feature 
values at given world position + + // training + void InputDataValidForTraining(); ///< Check if everything is there for training (raw datasets, fiber tracts) + void PreprocessInputDataForTraining(); ///< Generate masks if necessary, resample fibers, spherically interpolate raw DWIs + void CalculateFeaturesForTraining(); ///< Calculate GM and WM features using the interpolated raw data, the WM masks and the fibers + void TrainForest(); ///< start training process + typename InterpolatedRawImageType::PixelType GetImageValues(itk::Point itkP, typename InterpolatedRawImageType::Pointer image); ///< get trilinearly interpolated raw image values at given world position + + + std::vector< Image::Pointer > m_RawData; ///< original input DWI data + std::shared_ptr< vigra::RandomForest > m_Forest; ///< random forest classifier + std::chrono::time_point m_StartTime; + std::chrono::time_point m_EndTime; + + // only for training + std::vector< FiberBundle::Pointer > m_Tractograms; ///< training tractograms + std::vector< ItkUcharImgType::Pointer > m_MaskImages; ///< binary mask images to constrain training to a certain area (e.g. brain mask) + std::vector< ItkUcharImgType::Pointer > m_WhiteMatterImages; ///< defines white matter voxels. if not set, these mask images are automatically generated from the input tractograms + std::vector< typename InterpolatedRawImageType::Pointer > m_InterpolatedRawImages; ///< spherically interpolated and resampled raw datasets + double m_WmSampleDistance; ///< determines the number of white matter samples (distance of sampling points on each fiber). + int m_NumTrees; ///< number of trees in random forest + int m_MaxTreeDepth; ///< limits the tree depth + double m_SampleFraction; ///< fraction of samples used to train each tree + unsigned int m_NumberOfSamples; ///< stores overall number of samples used for training + std::vector< unsigned int > m_GmSamples; ///< number of gray matter samples + int m_GmSamplesPerVoxel; ///< number of gray matter samples per voxel. if -1, then the number is automatically chosen to obtain an overall number of GM samples close to the number of WM samples. + vigra::MultiArray<2, double> m_FeatureData; ///< vigra container for training features + + // only for tracking + typename FeatureImageType::Pointer m_FeatureImage; ///< feature image used for tracking + vigra::MultiArray<2, double> m_LabelData; ///< vigra container for training labels + std::vector< int > m_DirectionIndices; ///< maps each of the NumberOfSignalFeatures possible output directions to one of the 2*NumberOfSignalFeatures ODF directions. + itk::OrientationDistributionFunction< double, NumberOfSignalFeatures*2 > m_DirContainer; ///< direction container }; } #include "mitkTrackingForestHandler.cpp" #endif diff --git a/Modules/DiffusionImaging/FiberTracking/Fiberfox/itkTractsToDWIImageFilter.cpp b/Modules/DiffusionImaging/FiberTracking/Fiberfox/itkTractsToDWIImageFilter.cpp index bb450f9e70..a674c0290d 100755 --- a/Modules/DiffusionImaging/FiberTracking/Fiberfox/itkTractsToDWIImageFilter.cpp +++ b/Modules/DiffusionImaging/FiberTracking/Fiberfox/itkTractsToDWIImageFilter.cpp @@ -1,1703 +1,1596 @@ /*=================================================================== The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center, Division of Medical and Biological Informatics. All rights reserved. This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See LICENSE.txt or http://www.mitk.org for details. ===================================================================*/ #include "itkTractsToDWIImageFilter.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace itk { template< class PixelType > TractsToDWIImageFilter< PixelType >::TractsToDWIImageFilter() : m_FiberBundle(NULL) , m_StatusText("") , m_UseConstantRandSeed(false) , m_RandGen(itk::Statistics::MersenneTwisterRandomVariateGenerator::New()) { m_RandGen->SetSeed(); } template< class PixelType > TractsToDWIImageFilter< PixelType >::~TractsToDWIImageFilter() { } template< class PixelType > TractsToDWIImageFilter< PixelType >::DoubleDwiType::Pointer TractsToDWIImageFilter< PixelType >::SimulateKspaceAcquisition( std::vector< DoubleDwiType::Pointer >& images ) { int numFiberCompartments = m_Parameters.m_FiberModelList.size(); // create slice object ImageRegion<2> sliceRegion; sliceRegion.SetSize(0, m_WorkingImageRegion.GetSize()[0]); sliceRegion.SetSize(1, m_WorkingImageRegion.GetSize()[1]); Vector< double, 2 > sliceSpacing; sliceSpacing[0] = m_WorkingSpacing[0]; sliceSpacing[1] = m_WorkingSpacing[1]; DoubleDwiType::PixelType nullPix; nullPix.SetSize(images.at(0)->GetVectorLength()); nullPix.Fill(0.0); DoubleDwiType::Pointer magnitudeDwiImage = DoubleDwiType::New(); magnitudeDwiImage->SetSpacing( m_Parameters.m_SignalGen.m_ImageSpacing ); magnitudeDwiImage->SetOrigin( m_Parameters.m_SignalGen.m_ImageOrigin ); magnitudeDwiImage->SetDirection( m_Parameters.m_SignalGen.m_ImageDirection ); magnitudeDwiImage->SetLargestPossibleRegion( m_Parameters.m_SignalGen.m_CroppedRegion ); magnitudeDwiImage->SetBufferedRegion( m_Parameters.m_SignalGen.m_CroppedRegion ); magnitudeDwiImage->SetRequestedRegion( m_Parameters.m_SignalGen.m_CroppedRegion ); magnitudeDwiImage->SetVectorLength( images.at(0)->GetVectorLength() ); magnitudeDwiImage->Allocate(); magnitudeDwiImage->FillBuffer(nullPix); m_PhaseImage = DoubleDwiType::New(); m_PhaseImage->SetSpacing( m_Parameters.m_SignalGen.m_ImageSpacing ); m_PhaseImage->SetOrigin( m_Parameters.m_SignalGen.m_ImageOrigin ); m_PhaseImage->SetDirection( m_Parameters.m_SignalGen.m_ImageDirection ); m_PhaseImage->SetLargestPossibleRegion( m_Parameters.m_SignalGen.m_CroppedRegion ); m_PhaseImage->SetBufferedRegion( m_Parameters.m_SignalGen.m_CroppedRegion ); m_PhaseImage->SetRequestedRegion( m_Parameters.m_SignalGen.m_CroppedRegion ); m_PhaseImage->SetVectorLength( images.at(0)->GetVectorLength() ); m_PhaseImage->Allocate(); m_PhaseImage->FillBuffer(nullPix); - -// itk::ImageRegion<4> imageRegion4D; -// itk::Vector imageSpacing4D; imageSpacing4D.Fill(1); -// itk::Point imageOrigin4D; imageOrigin4D.Fill(0); -// itk::Matrix imageDirection4D; imageDirection4D.SetIdentity(); -// imageRegion4D.SetSize(0, m_Parameters.m_SignalGen.m_CroppedRegion.GetSize(0)); -// imageRegion4D.SetSize(1, m_Parameters.m_SignalGen.m_CroppedRegion.GetSize(1)); -// imageRegion4D.SetSize(2, m_Parameters.m_SignalGen.m_CroppedRegion.GetSize(2)); -// imageRegion4D.SetSize(3, m_Parameters.m_SignalGen.m_NumberOfCoils); -// for (int i=0; i<3; i++) -// { -// imageSpacing4D[i] = m_Parameters.m_SignalGen.m_ImageSpacing[i]; -// imageOrigin4D[i] = m_Parameters.m_SignalGen.m_ImageOrigin[i]; -// for (int j=0; j<3; j++) -// 
imageDirection4D[i][j]=m_Parameters.m_SignalGen.m_ImageDirection[i][j]; -// } - -//// ItkDoubleImgType4D::PixelType nullPix4D; -//// nullPix4D.SetSize(images.at(0)->GetVectorLength()); -//// nullPix4D.Fill(0.0); -// m_KspaceImage = ItkDoubleImgType4D::New(); -// m_KspaceImage->SetSpacing( imageSpacing4D ); -// m_KspaceImage->SetOrigin( imageOrigin4D ); -// m_KspaceImage->SetDirection( imageDirection4D ); -// m_KspaceImage->SetLargestPossibleRegion( imageRegion4D ); -// m_KspaceImage->SetBufferedRegion( imageRegion4D ); -// m_KspaceImage->SetRequestedRegion( imageRegion4D ); -//// m_KspaceImage->SetVectorLength( images.at(0)->GetVectorLength() ); -// m_KspaceImage->Allocate(); -// m_KspaceImage->FillBuffer(0); - m_KspaceImage = DoubleDwiType::New(); m_KspaceImage->SetSpacing( m_Parameters.m_SignalGen.m_ImageSpacing ); m_KspaceImage->SetOrigin( m_Parameters.m_SignalGen.m_ImageOrigin ); m_KspaceImage->SetDirection( m_Parameters.m_SignalGen.m_ImageDirection ); m_KspaceImage->SetLargestPossibleRegion( m_Parameters.m_SignalGen.m_CroppedRegion ); m_KspaceImage->SetBufferedRegion( m_Parameters.m_SignalGen.m_CroppedRegion ); m_KspaceImage->SetRequestedRegion( m_Parameters.m_SignalGen.m_CroppedRegion ); m_KspaceImage->SetVectorLength( m_Parameters.m_SignalGen.m_NumberOfCoils ); m_KspaceImage->Allocate(); m_KspaceImage->FillBuffer(nullPix); std::vector< unsigned int > spikeVolume; for (unsigned int i=0; iGetIntegerVariate()%(images.at(0)->GetVectorLength())); std::sort (spikeVolume.begin(), spikeVolume.end()); std::reverse (spikeVolume.begin(), spikeVolume.end()); // calculate coil positions double a = m_Parameters.m_SignalGen.m_ImageRegion.GetSize(0)*m_Parameters.m_SignalGen.m_ImageSpacing[0]; double b = m_Parameters.m_SignalGen.m_ImageRegion.GetSize(1)*m_Parameters.m_SignalGen.m_ImageSpacing[1]; double c = m_Parameters.m_SignalGen.m_ImageRegion.GetSize(2)*m_Parameters.m_SignalGen.m_ImageSpacing[2]; double diagonal = sqrt(a*a+b*b)/1000; // image diagonal in m m_CoilPointset = mitk::PointSet::New(); std::vector< itk::Vector > coilPositions; itk::Vector pos; pos.Fill(0.0); pos[1] = -diagonal/2; itk::Vector center; center[0] = a/2-m_Parameters.m_SignalGen.m_ImageSpacing[0]/2; center[1] = b/2-m_Parameters.m_SignalGen.m_ImageSpacing[2]/2; center[2] = c/2-m_Parameters.m_SignalGen.m_ImageSpacing[1]/2; for (int c=0; cInsertPoint(c, pos*1000 + m_Parameters.m_SignalGen.m_ImageOrigin.GetVectorFromOrigin() + center ); double rz = 360.0/m_Parameters.m_SignalGen.m_NumberOfCoils * M_PI/180; vnl_matrix_fixed< double, 3, 3 > rotZ; rotZ.set_identity(); rotZ[0][0] = cos(rz); rotZ[1][1] = rotZ[0][0]; rotZ[0][1] = -sin(rz); rotZ[1][0] = -rotZ[0][1]; pos.SetVnlVector(rotZ*pos.GetVnlVector()); } m_StatusText += "0% 10 20 30 40 50 60 70 80 90 100%\n"; m_StatusText += "|----|----|----|----|----|----|----|----|----|----|\n*"; unsigned long lastTick = 0; boost::progress_display disp(images.at(0)->GetVectorLength()*images.at(0)->GetLargestPossibleRegion().GetSize(2)); for (unsigned int g=0; gGetVectorLength(); g++) { std::vector< unsigned int > spikeSlice; while (!spikeVolume.empty() && spikeVolume.back()==g) { spikeSlice.push_back(m_RandGen->GetIntegerVariate()%images.at(0)->GetLargestPossibleRegion().GetSize(2)); spikeVolume.pop_back(); } std::sort (spikeSlice.begin(), spikeSlice.end()); std::reverse (spikeSlice.begin(), spikeSlice.end()); for (unsigned int z=0; zGetLargestPossibleRegion().GetSize(2); z++) { std::vector< SliceType::Pointer > compartmentSlices; std::vector< double > t2Vector; std::vector< double > t1Vector; 
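// Illustrative sketch of the coil placement computed above: the
// m_NumberOfCoils receive coils are distributed evenly on a circle by
// repeatedly rotating the first position by 360/N degrees around the z-axis.
// A minimal, self-contained version of that pattern; all names and values
// below are local to this example only:
{
    int nCoils = 4; double radius = 0.15;                  // example values (m)
    itk::Vector<double,3> p; p.Fill(0.0); p[1] = -radius;  // first coil position
    double rz = 2.0*M_PI/nCoils;                           // 360/N in radians
    vnl_matrix_fixed<double,3,3> rot; rot.set_identity();  // rotation around z
    rot[0][0] = cos(rz); rot[1][1] = rot[0][0];
    rot[0][1] = -sin(rz); rot[1][0] = -rot[0][1];
    std::vector< itk::Vector<double,3> > ring;
    for (int i=0; i<nCoils; ++i)
    {
        ring.push_back(p);                                 // keep current coil
        p.SetVnlVector(rot*p.GetVnlVector());              // rotate to the next
    }
}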
for (unsigned int i=0; i* signalModel; if (iSetLargestPossibleRegion( sliceRegion ); slice->SetBufferedRegion( sliceRegion ); slice->SetRequestedRegion( sliceRegion ); slice->SetSpacing(sliceSpacing); slice->Allocate(); slice->FillBuffer(0.0); // extract slice from channel g for (unsigned int y=0; yGetLargestPossibleRegion().GetSize(1); y++) for (unsigned int x=0; xGetLargestPossibleRegion().GetSize(0); x++) { SliceType::IndexType index2D; index2D[0]=x; index2D[1]=y; DoubleDwiType::IndexType index3D; index3D[0]=x; index3D[1]=y; index3D[2]=z; slice->SetPixel(index2D, images.at(i)->GetPixel(index3D)[g]); } compartmentSlices.push_back(slice); t2Vector.push_back(signalModel->GetT2()); t1Vector.push_back(signalModel->GetT1()); } int numSpikes = 0; while (!spikeSlice.empty() && spikeSlice.back()==z) { numSpikes++; spikeSlice.pop_back(); } int spikeCoil = m_RandGen->GetIntegerVariate()%m_Parameters.m_SignalGen.m_NumberOfCoils; if (this->GetAbortGenerateData()) return NULL; #pragma omp parallel for for (int c=0; c::Pointer idft = itk::KspaceImageFilter< SliceType::PixelType >::New(); idft->SetCompartmentImages(compartmentSlices); idft->SetT2(t2Vector); idft->SetT1(t1Vector); idft->SetUseConstantRandSeed(m_UseConstantRandSeed); idft->SetParameters(&m_Parameters); idft->SetZ((double)z-(double)(images.at(0)->GetLargestPossibleRegion().GetSize(2)-images.at(0)->GetLargestPossibleRegion().GetSize(2)%2)/2.0); idft->SetZidx(z); idft->SetCoilPosition(coilPositions.at(c)); idft->SetFiberBundle(m_FiberBundleWorkingCopy); idft->SetTranslation(m_Translations.at(g)); idft->SetRotation(m_Rotations.at(g)); idft->SetDiffusionGradientDirection(m_Parameters.m_SignalGen.GetGradientDirection(g)); if (c==spikeCoil) idft->SetSpikesPerSlice(numSpikes); // idft->SetNumberOfThreads(1); idft->Update(); ComplexSliceType::Pointer fSlice; fSlice = idft->GetOutput(); // fourier transform slice ComplexSliceType::Pointer newSlice; itk::DftImageFilter< SliceType::PixelType >::Pointer dft = itk::DftImageFilter< SliceType::PixelType >::New(); dft->SetInput(fSlice); dft->SetParameters(m_Parameters); dft->Update(); newSlice = dft->GetOutput(); // put slice back into channel g for (unsigned int y=0; yGetLargestPossibleRegion().GetSize(1); y++) for (unsigned int x=0; xGetLargestPossibleRegion().GetSize(0); x++) { DoubleDwiType::IndexType index3D; index3D[0]=x; index3D[1]=y; index3D[2]=z; ComplexSliceType::IndexType index2D; index2D[0]=x; index2D[1]=y; ComplexSliceType::PixelType cPix = newSlice->GetPixel(index2D); double magn = sqrt(cPix.real()*cPix.real()+cPix.imag()*cPix.imag()); double phase = 0; if (cPix.real()!=0) phase = atan( cPix.imag()/cPix.real() ); DoubleDwiType::PixelType dwiPix = magnitudeDwiImage->GetPixel(index3D); DoubleDwiType::PixelType phasePix = m_PhaseImage->GetPixel(index3D); if (m_Parameters.m_SignalGen.m_NumberOfCoils>1) { dwiPix[g] += magn*magn; phasePix[g] += phase*phase; } else { dwiPix[g] = magn; phasePix[g] = phase; } #pragma omp critical { magnitudeDwiImage->SetPixel(index3D, dwiPix); m_PhaseImage->SetPixel(index3D, phasePix); // k-space image -// { -// DoubleDwiType4D::IndexType idx4d; -// idx4d[0]=index3D[0]; -// idx4d[1]=index3D[1]; -// idx4d[2]=index3D[2]; -// idx4d[3]=c; -// ItkDoubleImgType4D::PixelType pix4D = m_KspaceImage->GetPixel(idx4d); -// pix4D = idft->GetKSpaceImage()->GetPixel(index2D); -// m_KspaceImage->SetPixel(idx4d, pix4D); -// } - if (g==0) { DoubleDwiType::PixelType kspacePix = m_KspaceImage->GetPixel(index3D); kspacePix[c] = idft->GetKSpaceImage()->GetPixel(index2D); 
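// Note on the coil combination in this critical section: with more than one
// coil the squared per-coil magnitude and phase are accumulated in dwiPix[g]
// and phasePix[g] above and reduced to a root mean square over the
// m_NumberOfCoils coils further below (sqrt(sum/N)); with a single coil the
// values are written directly. For the first volume (g==0) the per-coil
// k-space value is additionally stored in channel c of m_KspaceImage.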
m_KspaceImage->SetPixel(index3D, kspacePix); } } } } if (m_Parameters.m_SignalGen.m_NumberOfCoils>1) { #ifdef WIN32 #pragma omp parallel for #else #pragma omp parallel for collapse(2) #endif for (int y=0; yGetLargestPossibleRegion().GetSize(1); y++) for (int x=0; xGetLargestPossibleRegion().GetSize(0); x++) { DoubleDwiType::IndexType index3D; index3D[0]=x; index3D[1]=y; index3D[2]=z; DoubleDwiType::PixelType magPix = magnitudeDwiImage->GetPixel(index3D); magPix[g] = sqrt(magPix[g]/m_Parameters.m_SignalGen.m_NumberOfCoils); DoubleDwiType::PixelType phasePix = m_PhaseImage->GetPixel(index3D); phasePix[g] = sqrt(phasePix[g]/m_Parameters.m_SignalGen.m_NumberOfCoils); #pragma omp critical { magnitudeDwiImage->SetPixel(index3D, magPix); m_PhaseImage->SetPixel(index3D, phasePix); } } } ++disp; unsigned long newTick = 50*disp.count()/disp.expected_count(); for (unsigned long tick = 0; tick<(newTick-lastTick); tick++) m_StatusText += "*"; lastTick = newTick; } } m_StatusText += "\n\n"; return magnitudeDwiImage; } template< class PixelType > TractsToDWIImageFilter< PixelType >::ItkDoubleImgType::Pointer TractsToDWIImageFilter< PixelType >::NormalizeInsideMask(ItkDoubleImgType::Pointer image) { double max = itk::NumericTraits< double >::min(); double min = itk::NumericTraits< double >::max(); itk::ImageRegionIterator< ItkDoubleImgType > it(image, image->GetLargestPossibleRegion()); while(!it.IsAtEnd()) { if (m_Parameters.m_SignalGen.m_MaskImage.IsNotNull() && m_Parameters.m_SignalGen.m_MaskImage->GetPixel(it.GetIndex())<=0) { it.Set(0.0); ++it; continue; } // if (it.Get()>900) // it.Set(900); if (it.Get()>max) max = it.Get(); if (it.Get()::Pointer scaler = itk::ShiftScaleImageFilter< ItkDoubleImgType, ItkDoubleImgType >::New(); scaler->SetInput(image); scaler->SetShift(-min); scaler->SetScale(1.0/(max-min)); scaler->Update(); return scaler->GetOutput(); } template< class PixelType > void TractsToDWIImageFilter< PixelType >::CheckVolumeFractionImages() { m_UseRelativeNonFiberVolumeFractions = false; // check for fiber volume fraction maps int fibVolImages = 0; for (int i=0; iGetVolumeFractionImage().IsNotNull()) { m_StatusText += "Using volume fraction map for fiber compartment " + boost::lexical_cast(i+1) + "\n"; MITK_INFO << "Using volume fraction map for fiber compartment " + boost::lexical_cast(i+1); fibVolImages++; } // check for non-fiber volume fraction maps int nonfibVolImages = 0; for (int i=0; iGetVolumeFractionImage().IsNotNull()) { m_StatusText += "Using volume fraction map for non-fiber compartment " + boost::lexical_cast(i+1) + "\n"; MITK_INFO << "Using volume fraction map for non-fiber compartment " + boost::lexical_cast(i+1); nonfibVolImages++; } // not all fiber compartments are using volume fraction maps --> non-fiber volume fractions are assumed to be relative to the non-fiber volume and not absolute voxel-volume fractions. 
// this means if two non-fiber compartments are used but only one of them has an associated volume fraction map, the repesctive other volume fraction map can be determined as inverse (1-val) of the present volume fraction map- if ( fibVolImages::Pointer inverter = itk::InvertIntensityImageFilter< ItkDoubleImgType, ItkDoubleImgType >::New(); inverter->SetMaximum(1.0); if ( m_Parameters.m_NonFiberModelList[0]->GetVolumeFractionImage().IsNull() && m_Parameters.m_NonFiberModelList[1]->GetVolumeFractionImage().IsNotNull() ) { // m_Parameters.m_NonFiberModelList[1]->SetVolumeFractionImage( NormalizeInsideMask( m_Parameters.m_NonFiberModelList[1]->GetVolumeFractionImage() ) ); inverter->SetInput( m_Parameters.m_NonFiberModelList[1]->GetVolumeFractionImage() ); inverter->Update(); m_Parameters.m_NonFiberModelList[0]->SetVolumeFractionImage(inverter->GetOutput()); } else if ( m_Parameters.m_NonFiberModelList[1]->GetVolumeFractionImage().IsNull() && m_Parameters.m_NonFiberModelList[0]->GetVolumeFractionImage().IsNotNull() ) { // m_Parameters.m_NonFiberModelList[0]->SetVolumeFractionImage( NormalizeInsideMask( m_Parameters.m_NonFiberModelList[0]->GetVolumeFractionImage() ) ); inverter->SetInput( m_Parameters.m_NonFiberModelList[0]->GetVolumeFractionImage() ); inverter->Update(); m_Parameters.m_NonFiberModelList[1]->SetVolumeFractionImage(inverter->GetOutput()); } else { itkExceptionMacro("Something went wrong in automatically calculating the missing non-fiber volume fraction image! Did you use two non fiber compartments but only one volume fraction image? Then it should work and this error is really strange."); } m_UseRelativeNonFiberVolumeFractions = true; nonfibVolImages++; } // Up to two fiber compartments are allowed without volume fraction maps since the volume fractions can then be determined automatically if (m_Parameters.m_FiberModelList.size()>2 && fibVolImages!=m_Parameters.m_FiberModelList.size()) itkExceptionMacro("More than two fiber compartment selected but no corresponding volume fraction maps set!"); // One non-fiber compartment is allowed without volume fraction map since the volume fraction can then be determined automatically if (m_Parameters.m_NonFiberModelList.size()>1 && nonfibVolImages!=m_Parameters.m_NonFiberModelList.size()) itkExceptionMacro("More than one non-fiber compartment selected but no volume fraction maps set!"); if (fibVolImages0) { m_StatusText += "Not all fiber compartments are using an associated volume fraction image.\nAssuming non-fiber volume fraction images to contain values relative to the remaining non-fiber volume, not absolute values.\n"; MITK_INFO << "Not all fiber compartments are using an associated volume fraction image.\nAssuming non-fiber volume fraction images to contain values relative to the remaining non-fiber volume, not absolute values."; m_UseRelativeNonFiberVolumeFractions = true; // itk::ImageFileWriter::Pointer wr = itk::ImageFileWriter::New(); // wr->SetInput(m_Parameters.m_NonFiberModelList[1]->GetVolumeFractionImage()); // wr->SetFileName("/local/volumefraction.nrrd"); // wr->Update(); } // initialize the images that store the output volume fraction of each compartment m_VolumeFractions.clear(); for (int i=0; iSetSpacing( m_WorkingSpacing ); doubleImg->SetOrigin( m_WorkingOrigin ); doubleImg->SetDirection( m_Parameters.m_SignalGen.m_ImageDirection ); doubleImg->SetLargestPossibleRegion( m_WorkingImageRegion ); doubleImg->SetBufferedRegion( m_WorkingImageRegion ); doubleImg->SetRequestedRegion( m_WorkingImageRegion ); 
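// (Standard ITK allocation pattern used throughout this filter: spacing,
// origin, direction and regions are configured first, then Allocate()
// reserves the pixel buffer and FillBuffer(0) zero-initializes the
// per-compartment output volume fraction map.)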
doubleImg->Allocate(); doubleImg->FillBuffer(0); m_VolumeFractions.push_back(doubleImg); } } template< class PixelType > void TractsToDWIImageFilter< PixelType >::InitializeData() { // initialize output dwi image m_Parameters.m_SignalGen.m_CroppedRegion = m_Parameters.m_SignalGen.m_ImageRegion; m_Parameters.m_SignalGen.m_CroppedRegion.SetSize(1, m_Parameters.m_SignalGen.m_CroppedRegion.GetSize(1)*m_Parameters.m_SignalGen.m_CroppingFactor); itk::Point shiftedOrigin = m_Parameters.m_SignalGen.m_ImageOrigin; shiftedOrigin[1] += (m_Parameters.m_SignalGen.m_ImageRegion.GetSize(1)-m_Parameters.m_SignalGen.m_CroppedRegion.GetSize(1))*m_Parameters.m_SignalGen.m_ImageSpacing[1]/2; m_OutputImage = OutputImageType::New(); m_OutputImage->SetSpacing( m_Parameters.m_SignalGen.m_ImageSpacing ); m_OutputImage->SetOrigin( shiftedOrigin ); m_OutputImage->SetDirection( m_Parameters.m_SignalGen.m_ImageDirection ); m_OutputImage->SetLargestPossibleRegion( m_Parameters.m_SignalGen.m_CroppedRegion ); m_OutputImage->SetBufferedRegion( m_Parameters.m_SignalGen.m_CroppedRegion ); m_OutputImage->SetRequestedRegion( m_Parameters.m_SignalGen.m_CroppedRegion ); m_OutputImage->SetVectorLength( m_Parameters.m_SignalGen.GetNumVolumes() ); m_OutputImage->Allocate(); typename OutputImageType::PixelType temp; temp.SetSize(m_Parameters.m_SignalGen.GetNumVolumes()); temp.Fill(0.0); m_OutputImage->FillBuffer(temp); - //// NOT NECESSARY ANYMORE -// if ( m_Parameters.m_SignalGen.m_CroppedRegion.GetSize(0)%2 == 1 ) -// m_Parameters.m_SignalGen.m_CroppedRegion.SetSize(0, m_Parameters.m_SignalGen.m_CroppedRegion.GetSize(0)+1); -// if ( m_Parameters.m_SignalGen.m_CroppedRegion.GetSize(1)%2 == 1 ) -// m_Parameters.m_SignalGen.m_CroppedRegion.SetSize(1, m_Parameters.m_SignalGen.m_CroppedRegion.GetSize(1)+1); - -// // ADJUST GEOMETRY FOR FURTHER PROCESSING -// // is input slize size a power of two? 
-// unsigned int x=m_Parameters.m_SignalGen.m_ImageRegion.GetSize(0); unsigned int y=m_Parameters.m_SignalGen.m_ImageRegion.GetSize(1); -// ItkDoubleImgType::SizeType pad; pad[0]=x%2; pad[1]=y%2; pad[2]=0; -// m_Parameters.m_SignalGen.m_ImageRegion.SetSize(0, x+pad[0]); -// m_Parameters.m_SignalGen.m_ImageRegion.SetSize(1, y+pad[1]); -// if (m_Parameters.m_SignalGen.m_FrequencyMap.IsNotNull() && (pad[0]>0 || pad[1]>0)) -// { -// itk::ConstantPadImageFilter::Pointer zeroPadder = itk::ConstantPadImageFilter::New(); -// zeroPadder->SetInput(m_Parameters.m_SignalGen.m_FrequencyMap); -// zeroPadder->SetConstant(0); -// zeroPadder->SetPadUpperBound(pad); -// zeroPadder->Update(); -// m_Parameters.m_SignalGen.m_FrequencyMap = zeroPadder->GetOutput(); -// } -// if (m_Parameters.m_SignalGen.m_MaskImage.IsNotNull() && (pad[0]>0 || pad[1]>0)) -// { -// itk::ConstantPadImageFilter::Pointer zeroPadder = itk::ConstantPadImageFilter::New(); -// zeroPadder->SetInput(m_Parameters.m_SignalGen.m_MaskImage); -// zeroPadder->SetConstant(0); -// zeroPadder->SetPadUpperBound(pad); -// zeroPadder->Update(); -// m_Parameters.m_SignalGen.m_MaskImage = zeroPadder->GetOutput(); -// } - // Apply in-plane upsampling for Gibbs ringing artifact double upsampling = 1; if (m_Parameters.m_SignalGen.m_DoAddGibbsRinging) upsampling = 2; m_WorkingSpacing = m_Parameters.m_SignalGen.m_ImageSpacing; m_WorkingSpacing[0] /= upsampling; m_WorkingSpacing[1] /= upsampling; m_WorkingImageRegion = m_Parameters.m_SignalGen.m_ImageRegion; m_WorkingImageRegion.SetSize(0, m_Parameters.m_SignalGen.m_ImageRegion.GetSize()[0]*upsampling); m_WorkingImageRegion.SetSize(1, m_Parameters.m_SignalGen.m_ImageRegion.GetSize()[1]*upsampling); m_WorkingOrigin = m_Parameters.m_SignalGen.m_ImageOrigin; m_WorkingOrigin[0] -= m_Parameters.m_SignalGen.m_ImageSpacing[0]/2; m_WorkingOrigin[0] += m_WorkingSpacing[0]/2; m_WorkingOrigin[1] -= m_Parameters.m_SignalGen.m_ImageSpacing[1]/2; m_WorkingOrigin[1] += m_WorkingSpacing[1]/2; m_WorkingOrigin[2] -= m_Parameters.m_SignalGen.m_ImageSpacing[2]/2; m_WorkingOrigin[2] += m_WorkingSpacing[2]/2; m_VoxelVolume = m_WorkingSpacing[0]*m_WorkingSpacing[1]*m_WorkingSpacing[2]; // generate double images to store the individual compartment signals m_CompartmentImages.clear(); int numFiberCompartments = m_Parameters.m_FiberModelList.size(); int numNonFiberCompartments = m_Parameters.m_NonFiberModelList.size(); for (int i=0; iSetSpacing( m_WorkingSpacing ); doubleDwi->SetOrigin( m_WorkingOrigin ); doubleDwi->SetDirection( m_Parameters.m_SignalGen.m_ImageDirection ); doubleDwi->SetLargestPossibleRegion( m_WorkingImageRegion ); doubleDwi->SetBufferedRegion( m_WorkingImageRegion ); doubleDwi->SetRequestedRegion( m_WorkingImageRegion ); doubleDwi->SetVectorLength( m_Parameters.m_SignalGen.GetNumVolumes() ); doubleDwi->Allocate(); DoubleDwiType::PixelType pix; pix.SetSize(m_Parameters.m_SignalGen.GetNumVolumes()); pix.Fill(0.0); doubleDwi->FillBuffer(pix); m_CompartmentImages.push_back(doubleDwi); } // resample mask image and frequency map to fit upsampled geometry if (m_Parameters.m_SignalGen.m_DoAddGibbsRinging) { if (m_Parameters.m_SignalGen.m_MaskImage.IsNotNull()) { // rescale mask image (otherwise there are problems with the resampling) itk::RescaleIntensityImageFilter::Pointer rescaler = itk::RescaleIntensityImageFilter::New(); rescaler->SetInput(0,m_Parameters.m_SignalGen.m_MaskImage); rescaler->SetOutputMaximum(100); rescaler->SetOutputMinimum(0); rescaler->Update(); // resample mask image 
itk::ResampleImageFilter::Pointer resampler = itk::ResampleImageFilter::New(); resampler->SetInput(rescaler->GetOutput()); resampler->SetOutputParametersFromImage(m_Parameters.m_SignalGen.m_MaskImage); resampler->SetSize(m_WorkingImageRegion.GetSize()); resampler->SetOutputSpacing(m_WorkingSpacing); resampler->SetOutputOrigin(m_WorkingOrigin); itk::NearestNeighborInterpolateImageFunction::Pointer nn_interpolator = itk::NearestNeighborInterpolateImageFunction::New(); resampler->SetInterpolator(nn_interpolator); resampler->Update(); m_Parameters.m_SignalGen.m_MaskImage = resampler->GetOutput(); } // resample frequency map if (m_Parameters.m_SignalGen.m_FrequencyMap.IsNotNull()) { itk::ResampleImageFilter::Pointer resampler = itk::ResampleImageFilter::New(); resampler->SetInput(m_Parameters.m_SignalGen.m_FrequencyMap); resampler->SetOutputParametersFromImage(m_Parameters.m_SignalGen.m_FrequencyMap); resampler->SetSize(m_WorkingImageRegion.GetSize()); resampler->SetOutputSpacing(m_WorkingSpacing); resampler->SetOutputOrigin(m_WorkingOrigin); itk::NearestNeighborInterpolateImageFunction::Pointer nn_interpolator = itk::NearestNeighborInterpolateImageFunction::New(); resampler->SetInterpolator(nn_interpolator); resampler->Update(); m_Parameters.m_SignalGen.m_FrequencyMap = resampler->GetOutput(); } } m_MaskImageSet = true; if (m_Parameters.m_SignalGen.m_MaskImage.IsNull()) { // no input tissue mask is set -> create default m_StatusText += "No tissue mask set\n"; MITK_INFO << "No tissue mask set"; m_Parameters.m_SignalGen.m_MaskImage = ItkUcharImgType::New(); m_Parameters.m_SignalGen.m_MaskImage->SetSpacing( m_WorkingSpacing ); m_Parameters.m_SignalGen.m_MaskImage->SetOrigin( m_WorkingOrigin ); m_Parameters.m_SignalGen.m_MaskImage->SetDirection( m_Parameters.m_SignalGen.m_ImageDirection ); m_Parameters.m_SignalGen.m_MaskImage->SetLargestPossibleRegion( m_WorkingImageRegion ); m_Parameters.m_SignalGen.m_MaskImage->SetBufferedRegion( m_WorkingImageRegion ); m_Parameters.m_SignalGen.m_MaskImage->SetRequestedRegion( m_WorkingImageRegion ); m_Parameters.m_SignalGen.m_MaskImage->Allocate(); m_Parameters.m_SignalGen.m_MaskImage->FillBuffer(100); m_MaskImageSet = false; } else { if (m_Parameters.m_SignalGen.m_MaskImage->GetLargestPossibleRegion()!=m_WorkingImageRegion) itkExceptionMacro("Mask image and specified DWI geometry are not matching!"); m_StatusText += "Using tissue mask\n"; MITK_INFO << "Using tissue mask"; } if (m_Parameters.m_SignalGen.m_DoAddMotion) { std::string fileName = "fiberfox_motion_0.log"; std::string filePath = mitk::IOUtil::GetTempPath(); if (m_Parameters.m_Misc.m_OutputPath.size()>0) filePath = m_Parameters.m_Misc.m_OutputPath; int c = 1; while (itksys::SystemTools::FileExists((filePath+fileName).c_str())) { fileName = "fiberfox_motion_"; fileName += boost::lexical_cast(c); fileName += ".log"; c++; } m_MotionLogfile.open((filePath+fileName).c_str()); if (m_Parameters.m_SignalGen.m_DoRandomizeMotion) { m_StatusText += "Adding random motion artifacts:\n"; m_StatusText += "Maximum rotation: +/-" + boost::lexical_cast(m_Parameters.m_SignalGen.m_Rotation) + "°\n"; m_StatusText += "Maximum translation: +/-" + boost::lexical_cast(m_Parameters.m_SignalGen.m_Translation) + "mm\n"; } else { m_StatusText += "Adding linear motion artifacts:\n"; m_StatusText += "Maximum rotation: " + boost::lexical_cast(m_Parameters.m_SignalGen.m_Rotation) + "°\n"; m_StatusText += "Maximum translation: " + boost::lexical_cast(m_Parameters.m_SignalGen.m_Translation) + "mm\n"; } m_StatusText += "Motion logfile: 
" + (filePath+fileName) + "\n"; MITK_INFO << "Adding motion artifacts"; MITK_INFO << "Maximum rotation: " << m_Parameters.m_SignalGen.m_Rotation; MITK_INFO << "Maxmimum translation: " << m_Parameters.m_SignalGen.m_Translation; MITK_INFO << "Motion logfile: " << filePath << fileName; } if ( m_Parameters.m_SignalGen.m_MotionVolumes.empty() ) { // no motion in first volume m_Parameters.m_SignalGen.m_MotionVolumes.push_back(false); // motion in all other volumes while ( m_Parameters.m_SignalGen.m_MotionVolumes.size()::Pointer duplicator = itk::ImageDuplicator::New(); duplicator->SetInputImage(m_Parameters.m_SignalGen.m_MaskImage); duplicator->Update(); m_TransformedMaskImage = duplicator->GetOutput(); // second upsampling needed for motion artifacts ImageRegion<3> upsampledImageRegion = m_WorkingImageRegion; DoubleVectorType upsampledSpacing = m_WorkingSpacing; upsampledSpacing[0] /= 4; upsampledSpacing[1] /= 4; upsampledSpacing[2] /= 4; upsampledImageRegion.SetSize(0, m_WorkingImageRegion.GetSize()[0]*4); upsampledImageRegion.SetSize(1, m_WorkingImageRegion.GetSize()[1]*4); upsampledImageRegion.SetSize(2, m_WorkingImageRegion.GetSize()[2]*4); itk::Point upsampledOrigin = m_WorkingOrigin; upsampledOrigin[0] -= m_WorkingSpacing[0]/2; upsampledOrigin[0] += upsampledSpacing[0]/2; upsampledOrigin[1] -= m_WorkingSpacing[1]/2; upsampledOrigin[1] += upsampledSpacing[1]/2; upsampledOrigin[2] -= m_WorkingSpacing[2]/2; upsampledOrigin[2] += upsampledSpacing[2]/2; m_UpsampledMaskImage = ItkUcharImgType::New(); itk::ResampleImageFilter::Pointer upsampler = itk::ResampleImageFilter::New(); upsampler->SetInput(m_Parameters.m_SignalGen.m_MaskImage); upsampler->SetOutputParametersFromImage(m_Parameters.m_SignalGen.m_MaskImage); upsampler->SetSize(upsampledImageRegion.GetSize()); upsampler->SetOutputSpacing(upsampledSpacing); upsampler->SetOutputOrigin(upsampledOrigin); itk::NearestNeighborInterpolateImageFunction::Pointer nn_interpolator = itk::NearestNeighborInterpolateImageFunction::New(); upsampler->SetInterpolator(nn_interpolator); upsampler->Update(); m_UpsampledMaskImage = upsampler->GetOutput(); } template< class PixelType > void TractsToDWIImageFilter< PixelType >::InitializeFiberData() { // resample fiber bundle for sufficient voxel coverage m_StatusText += "\n"+this->GetTime()+" > Resampling fibers ...\n"; m_SegmentVolume = 0.0001; float minSpacing = 1; if(m_WorkingSpacing[0]GetDeepCopy(); // working copy is needed because we need to resample the fibers but do not want to change the original bundle double volumeAccuracy = 10; m_FiberBundleWorkingCopy->ResampleSpline(minSpacing/volumeAccuracy); m_mmRadius = m_Parameters.m_SignalGen.m_AxonRadius/1000; if (m_mmRadius>0) m_SegmentVolume = M_PI*m_mmRadius*m_mmRadius*minSpacing/volumeAccuracy; m_FiberBundleTransformed = m_FiberBundleWorkingCopy; // a secon fiber bundle is needed to store the transformed version of the m_FiberBundleWorkingCopy } template< class PixelType > void TractsToDWIImageFilter< PixelType >::GenerateData() { m_TimeProbe.Start(); m_StatusText = "Starting simulation\n"; // check input data if (m_FiberBundle.IsNull()) itkExceptionMacro("Input fiber bundle is NULL!"); if (m_Parameters.m_FiberModelList.empty()) itkExceptionMacro("No diffusion model for fiber compartments defined! At least one fiber compartment is necessary."); if (m_Parameters.m_NonFiberModelList.empty()) itkExceptionMacro("No diffusion model for non-fiber compartments defined! 
At least one non-fiber compartment is necessary."); int baselineIndex = m_Parameters.m_SignalGen.GetFirstBaselineIndex(); if (baselineIndex<0) itkExceptionMacro("No baseline index found!"); if (!m_Parameters.m_SignalGen.m_SimulateKspaceAcquisition) // No upsampling of input image needed if no k-space simulation is performed m_Parameters.m_SignalGen.m_DoAddGibbsRinging = false; if (m_UseConstantRandSeed) // always generate the same random numbers? m_RandGen->SetSeed(0); else m_RandGen->SetSeed(); InitializeData(); CheckVolumeFractionImages(); InitializeFiberData(); int numFiberCompartments = m_Parameters.m_FiberModelList.size(); int numNonFiberCompartments = m_Parameters.m_NonFiberModelList.size(); double maxVolume = 0; unsigned long lastTick = 0; int signalModelSeed = m_RandGen->GetIntegerVariate(); m_StatusText += "\n"+this->GetTime()+" > Generating " + boost::lexical_cast(numFiberCompartments+numNonFiberCompartments) + "-compartment diffusion-weighted signal.\n"; MITK_INFO << "Generating " << numFiberCompartments+numNonFiberCompartments << "-compartment diffusion-weighted signal."; int numFibers = m_FiberBundleWorkingCopy->GetNumFibers(); boost::progress_display disp(numFibers*m_Parameters.m_SignalGen.GetNumVolumes()); switch (m_Parameters.m_SignalGen.m_DiffusionDirectionMode) { case(SignalGenerationParameters::FIBER_TANGENT_DIRECTIONS): // use fiber tangent directions to determine diffusion direction { m_StatusText += "0% 10 20 30 40 50 60 70 80 90 100%\n"; m_StatusText += "|----|----|----|----|----|----|----|----|----|----|\n*"; for (unsigned int g=0; gSetSeed(signalModelSeed); for (int i=0; iSetSeed(signalModelSeed); // storing voxel-wise intra-axonal volume in mm³ ItkDoubleImgType::Pointer intraAxonalVolumeImage = ItkDoubleImgType::New(); intraAxonalVolumeImage->SetSpacing( m_WorkingSpacing ); intraAxonalVolumeImage->SetOrigin( m_WorkingOrigin ); intraAxonalVolumeImage->SetDirection( m_Parameters.m_SignalGen.m_ImageDirection ); intraAxonalVolumeImage->SetLargestPossibleRegion( m_WorkingImageRegion ); intraAxonalVolumeImage->SetBufferedRegion( m_WorkingImageRegion ); intraAxonalVolumeImage->SetRequestedRegion( m_WorkingImageRegion ); intraAxonalVolumeImage->Allocate(); intraAxonalVolumeImage->FillBuffer(0); maxVolume = 0; vtkPolyData* fiberPolyData = m_FiberBundleTransformed->GetFiberPolyData(); // generate fiber signal (if there are any fiber models present) if (!m_Parameters.m_FiberModelList.empty()) for( int i=0; iGetFiberWeight(i); vtkCell* cell = fiberPolyData->GetCell(i); int numPoints = cell->GetNumberOfPoints(); vtkPoints* points = cell->GetPoints(); if (numPoints<2) continue; for( int j=0; jGetAbortGenerateData()) { m_StatusText += "\n"+this->GetTime()+" > Simulation aborted\n"; return; } double* temp = points->GetPoint(j); itk::Point vertex = GetItkPoint(temp); itk::Vector v = GetItkVector(temp); itk::Vector dir(3); if (jGetPoint(j+1))-v; else dir = v-GetItkVector(points->GetPoint(j-1)); if (dir.GetSquaredNorm()<0.0001 || dir[0]!=dir[0] || dir[1]!=dir[1] || dir[2]!=dir[2]) continue; itk::Index<3> idx; itk::ContinuousIndex contIndex; m_TransformedMaskImage->TransformPhysicalPointToIndex(vertex, idx); m_TransformedMaskImage->TransformPhysicalPointToContinuousIndex(vertex, contIndex); if (!m_TransformedMaskImage->GetLargestPossibleRegion().IsInside(idx) || m_TransformedMaskImage->GetPixel(idx)<=0) continue; // generate signal for each fiber compartment for (int k=0; kSetFiberDirection(dir); DoubleDwiType::PixelType pix = m_CompartmentImages.at(k)->GetPixel(idx); pix[g] += 
fiberWeight*m_SegmentVolume*m_Parameters.m_FiberModelList[k]->SimulateMeasurement(g); m_CompartmentImages.at(k)->SetPixel(idx, pix); } // update fiber volume image double vol = intraAxonalVolumeImage->GetPixel(idx) + m_SegmentVolume*fiberWeight; intraAxonalVolumeImage->SetPixel(idx, vol); if (vol>maxVolume) // we assume that the first volume is always unweighted! maxVolume = vol; } // progress report ++disp; unsigned long newTick = 50*disp.count()/disp.expected_count(); for (unsigned int tick = 0; tick<(newTick-lastTick); tick++) m_StatusText += "*"; lastTick = newTick; } // generate non-fiber signal ImageRegionIterator it3(m_TransformedMaskImage, m_TransformedMaskImage->GetLargestPossibleRegion()); double fact = 1; // density correction factor in mm³ if (m_Parameters.m_SignalGen.m_AxonRadius<0.0001 || maxVolume>m_VoxelVolume) // the fullest voxel is always completely full fact = m_VoxelVolume/maxVolume; while(!it3.IsAtEnd()) { if (it3.Get()>0) { DoubleDwiType::IndexType index = it3.GetIndex(); itk::Point point; m_TransformedMaskImage->TransformIndexToPhysicalPoint(index, point); if (m_Parameters.m_SignalGen.m_DoAddMotion && g>=0 && m_Parameters.m_SignalGen.m_MotionVolumes[g]) { if (m_Parameters.m_SignalGen.m_DoRandomizeMotion) point = m_FiberBundleWorkingCopy->TransformPoint(point.GetVnlVector(), -m_Rotation[0],-m_Rotation[1],-m_Rotation[2],-m_Translation[0],-m_Translation[1],-m_Translation[2]); else point = m_FiberBundleWorkingCopy->TransformPoint(point.GetVnlVector(), -m_Rotation[0]*m_MotionCounter,-m_Rotation[1]*m_MotionCounter,-m_Rotation[2]*m_MotionCounter,-m_Translation[0]*m_MotionCounter,-m_Translation[1]*m_MotionCounter,-m_Translation[2]*m_MotionCounter); } double iAxVolume = intraAxonalVolumeImage->GetPixel(index); // if volume fraction image is set use it, otherwise use scaling factor to obtain one full fiber voxel double fact2 = fact; if (m_Parameters.m_FiberModelList[0]->GetVolumeFractionImage()!=nullptr && iAxVolume>0.0001) { -// DoubleDwiType::IndexType newIndex; -// m_Parameters.m_FiberModelList[0]->GetVolumeFractionImage()->TransformPhysicalPointToIndex(point, newIndex); -// if (m_Parameters.m_FiberModelList[0]->GetVolumeFractionImage()->GetLargestPossibleRegion().IsInside(newIndex)) -// fact2 = m_VoxelVolume*m_Parameters.m_FiberModelList[0]->GetVolumeFractionImage()->GetPixel(newIndex)/iAxVolume; - double val = InterpolateValue(point, m_Parameters.m_FiberModelList[0]->GetVolumeFractionImage()); if (val>=0) fact2 = m_VoxelVolume*val/iAxVolume; } // adjust intra-axonal image value for (int i=0; iGetPixel(index); pix[g] *= fact2; m_CompartmentImages.at(i)->SetPixel(index, pix); } // simulate other compartments SimulateExtraAxonalSignal(index, iAxVolume*fact2, g); } ++it3; } } break; } - case (SignalGenerationParameters::MAIN_FIBER_DIRECTIONS): // use main fiber directions to determine voxel-wise diffusion directions + case (SignalGenerationParameters::MAIN_FIBER_DIRECTIONS): // use main fiber directions to determine voxel-wise diffusion directions // NOT UP TO DATE ANYMORE! NEEDED? REMOVE? 
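// Note on the tangent-based case above: without a fiber volume fraction map
// the density correction factor fact = m_VoxelVolume / maxVolume scales the
// signal so that the fullest voxel is treated as completely fiber-filled;
// if the first fiber compartment provides a volume fraction image, fact2 =
// m_VoxelVolume * f(x) / intra-axonal volume is used instead so that the
// intra-axonal signal matches the supplied map.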
{ typedef itk::Image< itk::Vector< float, 3>, 3 > ItkDirectionImage3DType; typedef itk::VectorContainer< unsigned int, ItkDirectionImage3DType::Pointer > ItkDirectionImageContainerType; // calculate main fiber directions itk::TractsToVectorImageFilter::Pointer fOdfFilter = itk::TractsToVectorImageFilter::New(); fOdfFilter->SetFiberBundle(m_FiberBundleTransformed); fOdfFilter->SetMaskImage(m_TransformedMaskImage); fOdfFilter->SetAngularThreshold(cos(m_Parameters.m_SignalGen.m_FiberSeparationThreshold*M_PI/180.0)); fOdfFilter->SetNormalizeVectors(false); fOdfFilter->SetUseWorkingCopy(true); fOdfFilter->SetSizeThreshold(0); fOdfFilter->SetMaxNumDirections(3); fOdfFilter->Update(); ItkDirectionImageContainerType::Pointer directionImageContainer = fOdfFilter->GetDirectionImageContainer(); // allocate image storing intra-axonal volume fraction information ItkDoubleImgType::Pointer intraAxonalVolumeImage = ItkDoubleImgType::New(); intraAxonalVolumeImage->SetSpacing( m_WorkingSpacing ); intraAxonalVolumeImage->SetOrigin( m_WorkingOrigin ); intraAxonalVolumeImage->SetDirection( m_Parameters.m_SignalGen.m_ImageDirection ); intraAxonalVolumeImage->SetLargestPossibleRegion( m_WorkingImageRegion ); intraAxonalVolumeImage->SetBufferedRegion( m_WorkingImageRegion ); intraAxonalVolumeImage->SetRequestedRegion( m_WorkingImageRegion ); intraAxonalVolumeImage->Allocate(); intraAxonalVolumeImage->FillBuffer(0); // determine intra-axonal volume fraction using the tract density itk::TractDensityImageFilter< ItkDoubleImgType >::Pointer tdiFilter = itk::TractDensityImageFilter< ItkDoubleImgType >::New(); tdiFilter->SetFiberBundle(m_FiberBundleTransformed); tdiFilter->SetBinaryOutput(false); tdiFilter->SetOutputAbsoluteValues(false); tdiFilter->SetInputImage(intraAxonalVolumeImage); tdiFilter->SetUseImageGeometry(true); tdiFilter->Update(); intraAxonalVolumeImage = tdiFilter->GetOutput(); m_StatusText += "0% 10 20 30 40 50 60 70 80 90 100%\n"; m_StatusText += "|----|----|----|----|----|----|----|----|----|----|\n*"; boost::progress_display disp(m_TransformedMaskImage->GetLargestPossibleRegion().GetNumberOfPixels()*m_Parameters.m_SignalGen.GetNumVolumes()); for (unsigned int g=0; gSetSeed(signalModelSeed); for (int i=0; iSetSeed(signalModelSeed); if (m_Parameters.m_SignalGen.m_DoAddMotion && g>0) // if fibers have moved we need a new TDI and new directions { fOdfFilter->SetFiberBundle(m_FiberBundleTransformed); fOdfFilter->SetMaskImage(m_TransformedMaskImage); fOdfFilter->Update(); directionImageContainer = fOdfFilter->GetDirectionImageContainer(); tdiFilter->SetFiberBundle(m_FiberBundleTransformed); tdiFilter->Update(); intraAxonalVolumeImage = tdiFilter->GetOutput(); } ImageRegionIterator< ItkUcharImgType > it(m_TransformedMaskImage, m_TransformedMaskImage->GetLargestPossibleRegion()); while(!it.IsAtEnd()) { ++disp; unsigned long newTick = 50*disp.count()/disp.expected_count(); for (unsigned int tick = 0; tick<(newTick-lastTick); tick++) m_StatusText += "*"; lastTick = newTick; if (this->GetAbortGenerateData()) { m_StatusText += "\n"+this->GetTime()+" > Simulation aborted\n"; return; } if (it.Get()>0) { // generate fiber signal for (int c=0; cGetPixel(it.GetIndex()); for (unsigned int i=0; iSize(); i++) { itk::Vector< double, 3> dir; dir.CastFrom(directionImageContainer->GetElement(i)->GetPixel(it.GetIndex())); double norm = dir.GetNorm(); if (norm>0.0001) { m_Parameters.m_FiberModelList.at(c)->SetFiberDirection(dir); pix[g] += m_Parameters.m_FiberModelList.at(c)->SimulateMeasurement(g)*norm; count++; } } if 
(count>0) pix[g] /= count; pix[g] *= intraAxonalVolumeImage->GetPixel(it.GetIndex())*m_VoxelVolume; m_CompartmentImages.at(c)->SetPixel(it.GetIndex(), pix); } // simulate other compartments SimulateExtraAxonalSignal(it.GetIndex(), intraAxonalVolumeImage->GetPixel(it.GetIndex())*m_VoxelVolume, g); } ++it; } SimulateMotion(g); } itk::ImageFileWriter< ItkUcharImgType >::Pointer wr = itk::ImageFileWriter< ItkUcharImgType >::New(); wr->SetInput(fOdfFilter->GetNumDirectionsImage()); wr->SetFileName(mitk::IOUtil::GetTempPath()+"/NumDirections_MainFiberDirections.nrrd"); wr->Update(); break; } - case (SignalGenerationParameters::RANDOM_DIRECTIONS): + case (SignalGenerationParameters::RANDOM_DIRECTIONS): // NOT UP TO DATE ANYMORE! NEEDED? REMOVE? { ItkUcharImgType::Pointer numDirectionsImage = ItkUcharImgType::New(); numDirectionsImage->SetSpacing( m_WorkingSpacing ); numDirectionsImage->SetOrigin( m_WorkingOrigin ); numDirectionsImage->SetDirection( m_Parameters.m_SignalGen.m_ImageDirection ); numDirectionsImage->SetLargestPossibleRegion( m_WorkingImageRegion ); numDirectionsImage->SetBufferedRegion( m_WorkingImageRegion ); numDirectionsImage->SetRequestedRegion( m_WorkingImageRegion ); numDirectionsImage->Allocate(); numDirectionsImage->FillBuffer(0); double sepAngle = cos(m_Parameters.m_SignalGen.m_FiberSeparationThreshold*M_PI/180.0); m_StatusText += "0% 10 20 30 40 50 60 70 80 90 100%\n"; m_StatusText += "|----|----|----|----|----|----|----|----|----|----|\n*"; boost::progress_display disp(m_TransformedMaskImage->GetLargestPossibleRegion().GetNumberOfPixels()); ImageRegionIterator it(m_TransformedMaskImage, m_TransformedMaskImage->GetLargestPossibleRegion()); while(!it.IsAtEnd()) { ++disp; unsigned long newTick = 50*disp.count()/disp.expected_count(); for (unsigned int tick = 0; tick<(newTick-lastTick); tick++) m_StatusText += "*"; lastTick = newTick; if (this->GetAbortGenerateData()) { m_StatusText += "\n"+this->GetTime()+" > Simulation aborted\n"; return; } if (it.Get()>0) { int numFibs = m_RandGen->GetIntegerVariate(2)+1; DoubleDwiType::PixelType pix = m_CompartmentImages.at(0)->GetPixel(it.GetIndex()); double volume = m_RandGen->GetVariateWithClosedRange(0.3); // double sum = 0; std::vector< double > fractions; for (int i=0; iGetVariateWithClosedRange(0.5)); // sum += fractions.at(i); } // for (int i=0; i > directions; for (int i=0; iGetVariateWithClosedRange(2)-1.0; fib[1] = m_RandGen->GetVariateWithClosedRange(2)-1.0; fib[2] = m_RandGen->GetVariateWithClosedRange(2)-1.0; fib.Normalize(); double min = 0; for (unsigned int d=0; dmin) min = angle; } if (minSetFiberDirection(fib); pix += m_Parameters.m_FiberModelList.at(0)->SimulateMeasurement()*fractions[i]; directions.push_back(fib); } else i--; } pix *= (1-volume); m_CompartmentImages.at(0)->SetPixel(it.GetIndex(), pix); // CSF/GM { pix += volume*m_Parameters.m_NonFiberModelList.at(0)->SimulateMeasurement(); } numDirectionsImage->SetPixel(it.GetIndex(), numFibs); } ++it; } itk::ImageFileWriter< ItkUcharImgType >::Pointer wr = itk::ImageFileWriter< ItkUcharImgType >::New(); wr->SetInput(numDirectionsImage); wr->SetFileName(mitk::IOUtil::GetTempPath()+"/NumDirections_RandomDirections.nrrd"); wr->Update(); } } if (m_MotionLogfile.is_open()) { m_MotionLogfile << "DONE"; m_MotionLogfile.close(); } m_StatusText += "\n\n"; if (this->GetAbortGenerateData()) { m_StatusText += "\n"+this->GetTime()+" > Simulation aborted\n"; return; } DoubleDwiType::Pointer doubleOutImage; double signalScale = m_Parameters.m_SignalGen.m_SignalScale; if ( 
m_Parameters.m_SignalGen.m_SimulateKspaceAcquisition ) // do k-space stuff { m_StatusText += this->GetTime()+" > Simulating k-space acquisition using "+boost::lexical_cast(m_Parameters.m_SignalGen.m_NumberOfCoils)+" coil(s)\n"; MITK_INFO << "Simulating k-space acquisition using " << m_Parameters.m_SignalGen.m_NumberOfCoils << " coil(s)."; switch (m_Parameters.m_SignalGen.m_AcquisitionType) { case SignalGenerationParameters::SingleShotEpi: m_StatusText += "Acquisition type: single shot EPI\n"; break; case SignalGenerationParameters::SpinEcho: m_StatusText += "Acquisition type: classic spin echo with cartesian k-space trajectory\n"; break; default: m_StatusText += "Acquisition type: single shot EPI\n"; } if (m_Parameters.m_SignalGen.m_DoSimulateRelaxation) m_StatusText += "Simulating signal relaxation\n"; if (m_Parameters.m_SignalGen.m_FrequencyMap.IsNotNull()) m_StatusText += "Simulating distortions\n"; if (m_Parameters.m_SignalGen.m_DoAddGibbsRinging) m_StatusText += "Simulating ringing artifacts\n"; if (m_Parameters.m_SignalGen.m_EddyStrength>0) m_StatusText += "Simulating eddy currents\n"; if (m_Parameters.m_SignalGen.m_Spikes>0) m_StatusText += "Simulating spikes\n"; if (m_Parameters.m_SignalGen.m_CroppingFactor<1.0) m_StatusText += "Simulating aliasing artifacts\n"; if (m_Parameters.m_SignalGen.m_KspaceLineOffset>0) m_StatusText += "Simulating ghosts\n"; doubleOutImage = SimulateKspaceAcquisition(m_CompartmentImages); signalScale = 1; // already scaled in SimulateKspaceAcquisition() } else // don't do k-space stuff, just sum compartments { m_StatusText += this->GetTime()+" > Summing compartments\n"; MITK_INFO << "Summing compartments"; doubleOutImage = m_CompartmentImages.at(0); for (unsigned int i=1; i::Pointer adder = itk::AddImageFilter< DoubleDwiType, DoubleDwiType, DoubleDwiType>::New(); adder->SetInput1(doubleOutImage); adder->SetInput2(m_CompartmentImages.at(i)); adder->Update(); doubleOutImage = adder->GetOutput(); } } if (this->GetAbortGenerateData()) { m_StatusText += "\n"+this->GetTime()+" > Simulation aborted\n"; return; } m_StatusText += this->GetTime()+" > Finalizing image\n"; MITK_INFO << "Finalizing image"; if (signalScale>1) m_StatusText += " Scaling signal\n"; if (m_Parameters.m_NoiseModel) m_StatusText += " Adding noise\n"; unsigned int window = 0; unsigned int min = itk::NumericTraits::max(); ImageRegionIterator it4 (m_OutputImage, m_OutputImage->GetLargestPossibleRegion()); DoubleDwiType::PixelType signal; signal.SetSize(m_Parameters.m_SignalGen.GetNumVolumes()); boost::progress_display disp2(m_OutputImage->GetLargestPossibleRegion().GetNumberOfPixels()); m_StatusText += "0% 10 20 30 40 50 60 70 80 90 100%\n"; m_StatusText += "|----|----|----|----|----|----|----|----|----|----|\n*"; lastTick = 0; while(!it4.IsAtEnd()) { if (this->GetAbortGenerateData()) { m_StatusText += "\n"+this->GetTime()+" > Simulation aborted\n"; return; } ++disp2; unsigned long newTick = 50*disp2.count()/disp2.expected_count(); for (unsigned long tick = 0; tick<(newTick-lastTick); tick++) m_StatusText += "*"; lastTick = newTick; typename OutputImageType::IndexType index = it4.GetIndex(); signal = doubleOutImage->GetPixel(index)*signalScale; if (m_Parameters.m_NoiseModel) m_Parameters.m_NoiseModel->AddNoise(signal); for (unsigned int i=0; i0) signal[i] = floor(signal[i]+0.5); else signal[i] = ceil(signal[i]-0.5); if ( (!m_Parameters.m_SignalGen.IsBaselineIndex(i) || signal.Size()==1) && signal[i]>window) window = signal[i]; if ( (!m_Parameters.m_SignalGen.IsBaselineIndex(i) || 
signal.Size()==1) && signal[i]SetNthOutput(0, m_OutputImage); m_StatusText += "\n\n"; m_StatusText += "Finished simulation\n"; m_StatusText += "Simulation time: "+GetTime(); m_TimeProbe.Stop(); } template< class PixelType > void TractsToDWIImageFilter< PixelType >::SimulateMotion(int g) { // is motion artifact enabled? // is the current volume g affected by motion? if (m_Parameters.m_SignalGen.m_DoAddMotion && m_Parameters.m_SignalGen.m_MotionVolumes[g] && gGetDeepCopy(); // either undo last transform or work on fresh copy of untransformed fibers m_Rotation[0] = m_RandGen->GetVariateWithClosedRange(m_Parameters.m_SignalGen.m_Rotation[0]*2)-m_Parameters.m_SignalGen.m_Rotation[0]; m_Rotation[1] = m_RandGen->GetVariateWithClosedRange(m_Parameters.m_SignalGen.m_Rotation[1]*2)-m_Parameters.m_SignalGen.m_Rotation[1]; m_Rotation[2] = m_RandGen->GetVariateWithClosedRange(m_Parameters.m_SignalGen.m_Rotation[2]*2)-m_Parameters.m_SignalGen.m_Rotation[2]; m_Translation[0] = m_RandGen->GetVariateWithClosedRange(m_Parameters.m_SignalGen.m_Translation[0]*2)-m_Parameters.m_SignalGen.m_Translation[0]; m_Translation[1] = m_RandGen->GetVariateWithClosedRange(m_Parameters.m_SignalGen.m_Translation[1]*2)-m_Parameters.m_SignalGen.m_Translation[1]; m_Translation[2] = m_RandGen->GetVariateWithClosedRange(m_Parameters.m_SignalGen.m_Translation[2]*2)-m_Parameters.m_SignalGen.m_Translation[2]; } else { m_Rotation = m_Parameters.m_SignalGen.m_Rotation/m_NumMotionVolumes; m_Translation = m_Parameters.m_SignalGen.m_Translation/m_NumMotionVolumes; m_MotionCounter++; } // move mask image if (m_MaskImageSet) { ImageRegionIterator maskIt(m_UpsampledMaskImage, m_UpsampledMaskImage->GetLargestPossibleRegion()); m_TransformedMaskImage->FillBuffer(0); while(!maskIt.IsAtEnd()) { if (maskIt.Get()<=0) { ++maskIt; continue; } DoubleDwiType::IndexType index = maskIt.GetIndex(); itk::Point point; m_UpsampledMaskImage->TransformIndexToPhysicalPoint(index, point); if (m_Parameters.m_SignalGen.m_DoRandomizeMotion) point = m_FiberBundleWorkingCopy->TransformPoint(point.GetVnlVector(), m_Rotation[0],m_Rotation[1],m_Rotation[2],m_Translation[0],m_Translation[1],m_Translation[2]); else point = m_FiberBundleWorkingCopy->TransformPoint(point.GetVnlVector(), m_Rotation[0]*m_MotionCounter,m_Rotation[1]*m_MotionCounter,m_Rotation[2]*m_MotionCounter,m_Translation[0]*m_MotionCounter,m_Translation[1]*m_MotionCounter,m_Translation[2]*m_MotionCounter); m_TransformedMaskImage->TransformPhysicalPointToIndex(point, index); if (m_TransformedMaskImage->GetLargestPossibleRegion().IsInside(index)) m_TransformedMaskImage->SetPixel(index,100); ++maskIt; } } if (m_Parameters.m_SignalGen.m_DoRandomizeMotion) { m_Rotations.push_back(m_Rotation); m_Translations.push_back(m_Translation); if (m_MotionLogfile.is_open()) { m_MotionLogfile << g << " rotation: " << m_Rotation[0] << "," << m_Rotation[1] << "," << m_Rotation[2] << ";"; m_MotionLogfile << " translation: " << m_Translation[0] << "," << m_Translation[1] << "," << m_Translation[2] << "\n"; } } else { m_Rotations.push_back(m_Rotation*m_MotionCounter); m_Translations.push_back(m_Translation*m_MotionCounter); if (m_MotionLogfile.is_open()) { m_MotionLogfile << g << " rotation: " << m_Rotation[0]*m_MotionCounter << "," << m_Rotation[1]*m_MotionCounter << "," << m_Rotation[2]*m_MotionCounter << ";"; m_MotionLogfile << " translation: " << m_Translation[0]*m_MotionCounter << "," << m_Translation[1]*m_MotionCounter << "," << m_Translation[2]*m_MotionCounter << "\n"; } } 
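// The transform applied below uses the values determined above: with
// randomized motion each rotation/translation component is drawn uniformly
// from [-max, +max] via GetVariateWithClosedRange(2*max) - max and applied to
// a fresh copy of the untransformed fibers; with linear motion the constant
// per-volume step max/m_NumMotionVolumes is applied on top of the already
// transformed bundle, while m_MotionCounter tracks the accumulated amount for
// logging and for undoing the transform elsewhere.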
m_FiberBundleTransformed->TransformFibers(m_Rotation[0],m_Rotation[1],m_Rotation[2],m_Translation[0],m_Translation[1],m_Translation[2]); } else { m_Rotation.Fill(0.0); m_Translation.Fill(0.0); m_Rotations.push_back(m_Rotation); m_Translations.push_back(m_Translation); m_MotionLogfile << g << " rotation: " << m_Rotation[0] << "," << m_Rotation[1] << "," << m_Rotation[2] << ";"; m_MotionLogfile << " translation: " << m_Translation[0] << "," << m_Translation[1] << "," << m_Translation[2] << "\n"; } } template< class PixelType > void TractsToDWIImageFilter< PixelType >::SimulateExtraAxonalSignal(ItkUcharImgType::IndexType index, double intraAxonalVolume, int g) { int numFiberCompartments = m_Parameters.m_FiberModelList.size(); int numNonFiberCompartments = m_Parameters.m_NonFiberModelList.size(); if (intraAxonalVolume>0.0001 && m_Parameters.m_SignalGen.m_DoDisablePartialVolume) // only fiber in voxel { DoubleDwiType::PixelType pix = m_CompartmentImages.at(0)->GetPixel(index); if (g>=0) pix[g] *= m_VoxelVolume/intraAxonalVolume; else pix *= m_VoxelVolume/intraAxonalVolume; m_CompartmentImages.at(0)->SetPixel(index, pix); if (g==0) m_VolumeFractions.at(0)->SetPixel(index, 1); for (int i=1; iGetPixel(index); if (g>=0) pix[g] = 0.0; else pix.Fill(0.0); m_CompartmentImages.at(i)->SetPixel(index, pix); } } else { if (g==0) m_VolumeFractions.at(0)->SetPixel(index, intraAxonalVolume/m_VoxelVolume); // get non-transformed point (remove headmotion tranformation) // this point can then be transformed to each of the original images, regardless of their geometry itk::Point point; m_TransformedMaskImage->TransformIndexToPhysicalPoint(index, point); if (m_Parameters.m_SignalGen.m_DoAddMotion && g>=0 && m_Parameters.m_SignalGen.m_MotionVolumes[g]) { if (m_Parameters.m_SignalGen.m_DoRandomizeMotion) point = m_FiberBundleWorkingCopy->TransformPoint(point.GetVnlVector(), -m_Rotation[0],-m_Rotation[1],-m_Rotation[2],-m_Translation[0],-m_Translation[1],-m_Translation[2]); else point = m_FiberBundleWorkingCopy->TransformPoint(point.GetVnlVector(), -m_Rotation[0]*m_MotionCounter,-m_Rotation[1]*m_MotionCounter,-m_Rotation[2]*m_MotionCounter,-m_Translation[0]*m_MotionCounter,-m_Translation[1]*m_MotionCounter,-m_Translation[2]*m_MotionCounter); } if (m_Parameters.m_SignalGen.m_DoDisablePartialVolume) { int maxVolumeIndex = 0; double maxWeight = 0; for (int i=0; i1) { - // ItkUcharImgType::IndexType maskIndex; - // m_UpsampledMaskImage->TransformPhysicalPointToIndex(point, maskIndex); - // if (!m_UpsampledMaskImage->GetLargestPossibleRegion().IsInside(maskIndex) || m_UpsampledMaskImage->GetPixel(maskIndex)==0) - // continue; -// DoubleDwiType::IndexType newIndex; -// m_Parameters.m_NonFiberModelList[i]->GetVolumeFractionImage()->TransformPhysicalPointToIndex(point, newIndex); -// if (!m_Parameters.m_NonFiberModelList[i]->GetVolumeFractionImage()->GetLargestPossibleRegion().IsInside(newIndex)) -// continue; -// weight = m_Parameters.m_NonFiberModelList[i]->GetVolumeFractionImage()->GetPixel(newIndex); double val = InterpolateValue(point, m_Parameters.m_NonFiberModelList[i]->GetVolumeFractionImage()); if (val<0) continue; else weight = val; } if (weight>maxWeight) { maxWeight = weight; maxVolumeIndex = i; } } DoubleDwiType::Pointer doubleDwi = m_CompartmentImages.at(maxVolumeIndex+numFiberCompartments); DoubleDwiType::PixelType pix = doubleDwi->GetPixel(index); if (g>=0) pix[g] += m_Parameters.m_NonFiberModelList[maxVolumeIndex]->SimulateMeasurement(g)*m_VoxelVolume; else pix += 
m_Parameters.m_NonFiberModelList[maxVolumeIndex]->SimulateMeasurement()*m_VoxelVolume; doubleDwi->SetPixel(index, pix); if (g==0) m_VolumeFractions.at(maxVolumeIndex+numFiberCompartments)->SetPixel(index, 1); } else { double extraAxonalVolume = m_VoxelVolume-intraAxonalVolume; // non-fiber volume if (extraAxonalVolume<0) { MITK_ERROR << "Corrupted intra-axonal signal voxel detected. Fiber volume larger than voxel volume!"; extraAxonalVolume = 0; } double interAxonalVolume = 0; if (numFiberCompartments>1) interAxonalVolume = extraAxonalVolume * intraAxonalVolume/m_VoxelVolume; // inter-axonal fraction of non fiber compartment double other = extraAxonalVolume - interAxonalVolume; // rest of compartment if (other<0) { MITK_ERROR << "Corrupted signal voxel detected. Fiber volume larger than voxel volume!"; other = 0; } // adjust non-fiber and intra-axonal signal for (int i=1; iGetPixel(index); if (intraAxonalVolume>0) // remove scaling by intra-axonal volume from inter-axonal compartment { if (g>=0) pix[g] /= intraAxonalVolume; else pix /= intraAxonalVolume; } if (m_Parameters.m_FiberModelList[i]->GetVolumeFractionImage()!=nullptr) { - // ItkUcharImgType::IndexType maskIndex; - // m_UpsampledMaskImage->TransformPhysicalPointToIndex(point, maskIndex); - // if (!m_UpsampledMaskImage->GetLargestPossibleRegion().IsInside(maskIndex) || m_UpsampledMaskImage->GetPixel(maskIndex)==0) - // continue; -// DoubleDwiType::IndexType newIndex; -// m_Parameters.m_FiberModelList[i]->GetVolumeFractionImage()->TransformPhysicalPointToIndex(point, newIndex); -// if (!m_Parameters.m_FiberModelList[i]->GetVolumeFractionImage()->GetLargestPossibleRegion().IsInside(newIndex)) -// continue; -// weight = m_Parameters.m_FiberModelList[i]->GetVolumeFractionImage()->GetPixel(newIndex)*m_VoxelVolume; - double val = InterpolateValue(point, m_Parameters.m_FiberModelList[i]->GetVolumeFractionImage()); if (val<0) continue; else weight = val*m_VoxelVolume; } if (g>=0) pix[g] *= weight; else pix *= weight; m_CompartmentImages.at(i)->SetPixel(index, pix); if (g==0) m_VolumeFractions.at(i)->SetPixel(index, weight/m_VoxelVolume); } for (int i=0; iGetPixel(index); if (m_Parameters.m_NonFiberModelList[i]->GetVolumeFractionImage()!=nullptr) { - // ItkUcharImgType::IndexType maskIndex; - // m_UpsampledMaskImage->TransformPhysicalPointToIndex(point, maskIndex); - // if (!m_UpsampledMaskImage->GetLargestPossibleRegion().IsInside(maskIndex) || m_UpsampledMaskImage->GetPixel(maskIndex)==0) - // continue; -// DoubleDwiType::IndexType newIndex; -// m_Parameters.m_NonFiberModelList[i]->GetVolumeFractionImage()->TransformPhysicalPointToIndex(point, newIndex); -// if (!m_Parameters.m_NonFiberModelList[i]->GetVolumeFractionImage()->GetLargestPossibleRegion().IsInside(newIndex)) -// continue; -// weight = m_Parameters.m_NonFiberModelList[i]->GetVolumeFractionImage()->GetPixel(newIndex)*m_VoxelVolume; - double val = InterpolateValue(point, m_Parameters.m_NonFiberModelList[i]->GetVolumeFractionImage()); if (val<0) continue; else weight = val*m_VoxelVolume; if (m_UseRelativeNonFiberVolumeFractions) weight *= other/m_VoxelVolume; } if (g>=0) pix[g] += m_Parameters.m_NonFiberModelList[i]->SimulateMeasurement(g)*weight; else pix += m_Parameters.m_NonFiberModelList[i]->SimulateMeasurement()*weight; m_CompartmentImages.at(i+numFiberCompartments)->SetPixel(index, pix); if (g==0) m_VolumeFractions.at(i+numFiberCompartments)->SetPixel(index, weight/m_VoxelVolume); } } } } template< class PixelType > double TractsToDWIImageFilter< PixelType
>::InterpolateValue(itk::Point itkP, ItkDoubleImgType::Pointer img) { itk::Index<3> idx; itk::ContinuousIndex< double, 3> cIdx; img->TransformPhysicalPointToIndex(itkP, idx); img->TransformPhysicalPointToContinuousIndex(itkP, cIdx); double pix = -1; if ( img->GetLargestPossibleRegion().IsInside(idx) ) pix = img->GetPixel(idx); else return pix; double frac_x = cIdx[0] - idx[0]; double frac_y = cIdx[1] - idx[1]; double frac_z = cIdx[2] - idx[2]; if (frac_x<0) { idx[0] -= 1; frac_x += 1; } if (frac_y<0) { idx[1] -= 1; frac_y += 1; } if (frac_z<0) { idx[2] -= 1; frac_z += 1; } frac_x = 1-frac_x; frac_y = 1-frac_y; frac_z = 1-frac_z; // int coordinates inside image? if (idx[0] >= 0 && idx[0] < img->GetLargestPossibleRegion().GetSize(0)-1 && idx[1] >= 0 && idx[1] < img->GetLargestPossibleRegion().GetSize(1)-1 && idx[2] >= 0 && idx[2] < img->GetLargestPossibleRegion().GetSize(2)-1) { vnl_vector_fixed interpWeights; interpWeights[0] = ( frac_x)*( frac_y)*( frac_z); interpWeights[1] = (1-frac_x)*( frac_y)*( frac_z); interpWeights[2] = ( frac_x)*(1-frac_y)*( frac_z); interpWeights[3] = ( frac_x)*( frac_y)*(1-frac_z); interpWeights[4] = (1-frac_x)*(1-frac_y)*( frac_z); interpWeights[5] = ( frac_x)*(1-frac_y)*(1-frac_z); interpWeights[6] = (1-frac_x)*( frac_y)*(1-frac_z); interpWeights[7] = (1-frac_x)*(1-frac_y)*(1-frac_z); pix = img->GetPixel(idx) * interpWeights[0]; ItkDoubleImgType::IndexType tmpIdx = idx; tmpIdx[0]++; pix += img->GetPixel(tmpIdx) * interpWeights[1]; tmpIdx = idx; tmpIdx[1]++; pix += img->GetPixel(tmpIdx) * interpWeights[2]; tmpIdx = idx; tmpIdx[2]++; pix += img->GetPixel(tmpIdx) * interpWeights[3]; tmpIdx = idx; tmpIdx[0]++; tmpIdx[1]++; pix += img->GetPixel(tmpIdx) * interpWeights[4]; tmpIdx = idx; tmpIdx[1]++; tmpIdx[2]++; pix += img->GetPixel(tmpIdx) * interpWeights[5]; tmpIdx = idx; tmpIdx[2]++; tmpIdx[0]++; pix += img->GetPixel(tmpIdx) * interpWeights[6]; tmpIdx = idx; tmpIdx[0]++; tmpIdx[1]++; tmpIdx[2]++; pix += img->GetPixel(tmpIdx) * interpWeights[7]; } return pix; } template< class PixelType > itk::Point TractsToDWIImageFilter< PixelType >::GetItkPoint(double point[3]) { itk::Point itkPoint; itkPoint[0] = point[0]; itkPoint[1] = point[1]; itkPoint[2] = point[2]; return itkPoint; } template< class PixelType > itk::Vector TractsToDWIImageFilter< PixelType >::GetItkVector(double point[3]) { itk::Vector itkVector; itkVector[0] = point[0]; itkVector[1] = point[1]; itkVector[2] = point[2]; return itkVector; } template< class PixelType > vnl_vector_fixed TractsToDWIImageFilter< PixelType >::GetVnlVector(double point[3]) { vnl_vector_fixed vnlVector; vnlVector[0] = point[0]; vnlVector[1] = point[1]; vnlVector[2] = point[2]; return vnlVector; } template< class PixelType > vnl_vector_fixed TractsToDWIImageFilter< PixelType >::GetVnlVector(Vector& vector) { vnl_vector_fixed vnlVector; vnlVector[0] = vector[0]; vnlVector[1] = vector[1]; vnlVector[2] = vector[2]; return vnlVector; } template< class PixelType > double TractsToDWIImageFilter< PixelType >::RoundToNearest(double num) { return (num > 0.0) ? 
floor(num + 0.5) : ceil(num - 0.5); } template< class PixelType > std::string TractsToDWIImageFilter< PixelType >::GetTime() { m_TimeProbe.Stop(); unsigned long total = RoundToNearest(m_TimeProbe.GetTotal()); unsigned long hours = total/3600; unsigned long minutes = (total%3600)/60; unsigned long seconds = total%60; std::string out = ""; out.append(boost::lexical_cast(hours)); out.append(":"); out.append(boost::lexical_cast(minutes)); out.append(":"); out.append(boost::lexical_cast(seconds)); m_TimeProbe.Start(); return out; } } diff --git a/Modules/DiffusionImaging/FiberTracking/Fiberfox/itkTractsToDWIImageFilter.h b/Modules/DiffusionImaging/FiberTracking/Fiberfox/itkTractsToDWIImageFilter.h index ea68997253..4ba527a3f6 100755 --- a/Modules/DiffusionImaging/FiberTracking/Fiberfox/itkTractsToDWIImageFilter.h +++ b/Modules/DiffusionImaging/FiberTracking/Fiberfox/itkTractsToDWIImageFilter.h @@ -1,157 +1,158 @@ /*=================================================================== The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center, Division of Medical and Biological Informatics. All rights reserved. This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See LICENSE.txt or http://www.mitk.org for details. ===================================================================*/ #ifndef __itkTractsToDWIImageFilter_h__ #define __itkTractsToDWIImageFilter_h__ #include #include #include #include #include #include #include #include #include namespace itk { /** * \brief Generates artificial diffusion weighted image volume from the input fiberbundle using a generic multicompartment model. * See "Fiberfox: Facilitating the creation of realistic white matter software phantoms" (DOI: 10.1002/mrm.25045) for details. */ template< class PixelType > class TractsToDWIImageFilter : public ImageSource< itk::VectorImage< PixelType, 3 > > { public: typedef TractsToDWIImageFilter Self; typedef ImageSource< itk::VectorImage< PixelType, 3 > > Superclass; typedef SmartPointer< Self > Pointer; typedef SmartPointer< const Self > ConstPointer; typedef typename Superclass::OutputImageType OutputImageType; typedef itk::Image ItkDoubleImgType4D; typedef itk::Image ItkDoubleImgType; typedef itk::Image ItkFloatImgType; typedef itk::Image ItkUcharImgType; typedef mitk::FiberBundle::Pointer FiberBundleType; typedef itk::VectorImage< double, 3 > DoubleDwiType; typedef itk::VectorImage< double, 4 > DoubleDwiType4D; typedef itk::Matrix MatrixType; typedef itk::Image< double, 2 > SliceType; typedef itk::VnlForwardFFTImageFilter::OutputImageType ComplexSliceType; typedef itk::VectorImage< vcl_complex< double >, 3 > ComplexDwiType; typedef itk::Vector< double,3> DoubleVectorType; itkFactorylessNewMacro(Self) itkCloneMacro(Self) itkTypeMacro( TractsToDWIImageFilter, ImageSource ) /** Input */ itkSetMacro( FiberBundle, FiberBundleType ) ///< Input fiber bundle itkSetMacro( UseConstantRandSeed, bool ) ///< Seed for random generator. void SetParameters( FiberfoxParameters param ) ///< Simulation parameters. 
{ m_Parameters = param; } /** Output */ FiberfoxParameters GetParameters(){ return m_Parameters; } std::vector< ItkDoubleImgType::Pointer > GetVolumeFractions() ///< one double image for each compartment containing the corresponding volume fraction per voxel { return m_VolumeFractions; } - mitk::LevelWindow GetLevelWindow(){ return m_LevelWindow; } + mitk::LevelWindow GetLevelWindow() ///< Level window is determined from the output image + { return m_LevelWindow; } itkGetMacro( StatusText, std::string ) itkGetMacro( PhaseImage, DoubleDwiType::Pointer ) itkGetMacro( KspaceImage, DoubleDwiType::Pointer ) itkGetMacro( CoilPointset, mitk::PointSet::Pointer ) void GenerateData(); protected: TractsToDWIImageFilter(); virtual ~TractsToDWIImageFilter(); itk::Point GetItkPoint(double point[3]); itk::Vector GetItkVector(double point[3]); vnl_vector_fixed GetVnlVector(double point[3]); vnl_vector_fixed GetVnlVector(Vector< float, 3 >& vector); double RoundToNearest(double num); std::string GetTime(); - /** Transform generated image compartment by compartment, channel by channel and slice by slice using DFT and add k-space artifacts. */ + /** Transform generated image compartment by compartment, channel by channel and slice by slice using DFT and add k-space artifacts/effects. */ DoubleDwiType::Pointer SimulateKspaceAcquisition(std::vector< DoubleDwiType::Pointer >& images); /** Generate signal of non-fiber compartments. */ void SimulateExtraAxonalSignal(ItkUcharImgType::IndexType index, double intraAxonalVolume, int g=-1); /** Move fibers to simulate headmotion */ void SimulateMotion(int g=-1); void CheckVolumeFractionImages(); ItkDoubleImgType::Pointer NormalizeInsideMask(ItkDoubleImgType::Pointer image); void InitializeData(); void InitializeFiberData(); double InterpolateValue(itk::Point itkP, ItkDoubleImgType::Pointer img); // input mitk::FiberfoxParameters m_Parameters; FiberBundleType m_FiberBundle; // output typename OutputImageType::Pointer m_OutputImage; typename DoubleDwiType::Pointer m_PhaseImage; typename DoubleDwiType::Pointer m_KspaceImage; mitk::LevelWindow m_LevelWindow; std::vector< ItkDoubleImgType::Pointer > m_VolumeFractions; std::string m_StatusText; // MISC itk::TimeProbe m_TimeProbe; bool m_UseConstantRandSeed; bool m_MaskImageSet; ofstream m_MotionLogfile; // signal generation FiberBundleType m_FiberBundleWorkingCopy; ///< we work on an upsampled version of the input bundle FiberBundleType m_FiberBundleTransformed; ///< transformed bundle simulating headmotion itk::Vector m_WorkingSpacing; itk::Point m_WorkingOrigin; ImageRegion<3> m_WorkingImageRegion; double m_VoxelVolume; std::vector< DoubleDwiType::Pointer > m_CompartmentImages; ItkUcharImgType::Pointer m_TransformedMaskImage; ///< copy of mask image (changes for each motion step) ItkUcharImgType::Pointer m_UpsampledMaskImage; ///< helper image for motion simulation DoubleVectorType m_Rotation; DoubleVectorType m_Translation; std::vector< DoubleVectorType > m_Rotations; /// m_Translations; /// #include #include #include #include "mitkCommandLineParser.h" #include #include #include -//#include #include #include #include #include #include -#include -//#include #include -#include -#include -#include +#include #define _USE_MATH_DEFINES #include const int numOdfSamples = 200; typedef itk::Image< itk::Vector< float, numOdfSamples > , 3 > SampledShImageType; int main(int argc, char* argv[]) { mitkCommandLineParser parser; parser.setTitle("Machine Learning Based Streamline Tractography"); parser.setCategory("Fiber Tracking and 
Processing Methods"); parser.setDescription(""); parser.setContributor("MBI"); parser.setArgumentPrefix("--", "-"); parser.addArgument("image", "i", mitkCommandLineParser::String, "DWIs:", "input diffusion-weighted image", us::Any(), false); parser.addArgument("forest", "f", mitkCommandLineParser::String, "Forest:", "input forest", us::Any(), false); parser.addArgument("out", "o", mitkCommandLineParser::OutputDirectory, "Output:", "output fiberbundle", us::Any(), false); parser.addArgument("stop", "st", mitkCommandLineParser::String, "Stop image:", "stop image", us::Any()); parser.addArgument("mask", "m", mitkCommandLineParser::String, "Mask image:", "mask image", us::Any()); parser.addArgument("seed", "s", mitkCommandLineParser::String, "Seed image:", "seed image", us::Any()); parser.addArgument("athres", "a", mitkCommandLineParser::Float, "Angular threshold:", "angular threshold (in radians)", us::Any()); parser.addArgument("stepsize", "se", mitkCommandLineParser::Float, "Stepsize:", "stepsize", us::Any()); parser.addArgument("samples", "ns", mitkCommandLineParser::Int, "Samples:", "samples", us::Any()); parser.addArgument("samplingdist", "sd", mitkCommandLineParser::Float, "Sampling distance:", "sampling distance (in voxels)", us::Any()); parser.addArgument("seeds", "nse", mitkCommandLineParser::Int, "Seeds per voxel:", "seeds per voxel", us::Any()); - parser.addArgument("verbose", "v", mitkCommandLineParser::Bool, "Verbose:", "output additional images", us::Any()); - map parsedArgs = parser.parseArguments(argc, argv); if (parsedArgs.size()==0) return EXIT_FAILURE; string imageFile = us::any_cast(parsedArgs["image"]); string forestFile = us::any_cast(parsedArgs["forest"]); string outFile = us::any_cast(parsedArgs["out"]); string maskFile = ""; if (parsedArgs.count("mask")) maskFile = us::any_cast(parsedArgs["mask"]); string seedFile = ""; if (parsedArgs.count("seed")) seedFile = us::any_cast(parsedArgs["seed"]); string stopFile = ""; if (parsedArgs.count("stop")) stopFile = us::any_cast(parsedArgs["stop"]); float stepsize = -1; if (parsedArgs.count("stepsize")) stepsize = us::any_cast(parsedArgs["stepsize"]); float athres = 0.7; if (parsedArgs.count("athres")) athres = us::any_cast(parsedArgs["athres"]); float samplingdist = 0.25; if (parsedArgs.count("samplingdist")) samplingdist = us::any_cast(parsedArgs["samplingdist"]); - bool verbose = false; - if (parsedArgs.count("verbose")) - verbose = true; - int samples = 10; if (parsedArgs.count("samples")) samples = us::any_cast(parsedArgs["samples"]); int seeds = 1; if (parsedArgs.count("seeds")) seeds = us::any_cast(parsedArgs["seeds"]); typedef itk::Image ItkUcharImgType; MITK_INFO << "loading diffusion-weighted image"; mitk::Image::Pointer dwi = dynamic_cast(mitk::IOUtil::LoadImage(imageFile).GetPointer()); ItkUcharImgType::Pointer mask; if (!maskFile.empty()) { MITK_INFO << "loading mask image"; mitk::Image::Pointer img = dynamic_cast(mitk::IOUtil::LoadImage(maskFile).GetPointer()); mask = ItkUcharImgType::New(); mitk::CastToItkImage(img, mask); } ItkUcharImgType::Pointer seed; if (!seedFile.empty()) { MITK_INFO << "loading seed image"; mitk::Image::Pointer img = dynamic_cast(mitk::IOUtil::LoadImage(seedFile).GetPointer()); seed = ItkUcharImgType::New(); mitk::CastToItkImage(img, seed); } ItkUcharImgType::Pointer stop; if (!stopFile.empty()) { MITK_INFO << "loading stop image"; mitk::Image::Pointer img = dynamic_cast(mitk::IOUtil::LoadImage(stopFile).GetPointer()); stop = ItkUcharImgType::New(); mitk::CastToItkImage(img, stop); } - 
MITK_INFO << "loading forest"; - vigra::RandomForest rf; - vigra::rf_import_HDF5(rf, forestFile); + mitk::TrackingForestHandler<> tfh; + tfh.LoadForest(forestFile); + tfh.AddRawData(dwi); typedef itk::MLBSTrackingFilter<100> TrackerType; TrackerType::Pointer tracker = TrackerType::New(); tracker->SetInput(0, mitk::DiffusionPropertyHelper::GetItkVectorImage(dwi)); - tracker->SetGradientDirections( mitk::DiffusionPropertyHelper::GetGradientContainer(dwi) ); - tracker->SetB_Value( mitk::DiffusionPropertyHelper::GetReferenceBValue(dwi) ); tracker->SetMaskImage(mask); tracker->SetSeedImage(seed); tracker->SetStoppingRegions(stop); tracker->SetSeedsPerVoxel(seeds); tracker->SetStepSize(stepsize); tracker->SetAngularThreshold(athres); - tracker->SetDecisionForest(&rf); + tracker->SetForestHandler(tfh); tracker->SetSamplingDistance(samplingdist); tracker->SetNumberOfSamples(samples); //tracker->SetAvoidStop(false); - tracker->SetVerbose(verbose); tracker->SetAposterioriCurvCheck(false); tracker->SetRemoveWmEndFibers(false); tracker->Update(); vtkSmartPointer< vtkPolyData > poly = tracker->GetFiberPolyData(); mitk::FiberBundle::Pointer outFib = mitk::FiberBundle::New(poly); mitk::IOUtil::SaveBaseData(outFib, outFile); - if (verbose) - { - MITK_INFO << "Writing images..."; - string outName = itksys::SystemTools::GetFilenamePath(outFile)+"/"+itksys::SystemTools::GetFilenameWithoutLastExtension(outFile); - itk::ImageFileWriter< TrackerType::ItkDoubleImgType >::Pointer writer = itk::ImageFileWriter< TrackerType::ItkDoubleImgType >::New(); - writer->SetFileName(outName+"_WhiteMatter.nrrd"); - writer->SetInput(tracker->GetWmImage()); - writer->Update(); - - writer->SetFileName(outName+"_NotWhiteMatter.nrrd"); - writer->SetInput(tracker->GetNotWmImage()); - writer->Update(); - - writer->SetFileName(outName+"_AvoidStop.nrrd"); - writer->SetInput(tracker->GetAvoidStopImage()); - writer->Update(); - } - return EXIT_SUCCESS; } diff --git a/Modules/DiffusionImaging/MiniApps/DFTraining.cpp b/Modules/DiffusionImaging/MiniApps/DFTraining.cpp index 38e2bdaaa7..2c72f4c5ca 100755 --- a/Modules/DiffusionImaging/MiniApps/DFTraining.cpp +++ b/Modules/DiffusionImaging/MiniApps/DFTraining.cpp @@ -1,150 +1,150 @@ /*=================================================================== The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center, Division of Medical and Biological Informatics. All rights reserved. This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See LICENSE.txt or http://www.mitk.org for details. 
===================================================================*/ #include #include #include #include #include "mitkCommandLineParser.h" #include #include #include #include #include #include #include #include #include #define _USE_MATH_DEFINES #include int main(int argc, char* argv[]) { MITK_INFO << "DFTraining"; mitkCommandLineParser parser; parser.setTitle("Machine Learning Based Streamline Tractography"); parser.setCategory("Fiber Tracking and Processing Methods"); parser.setDescription(""); parser.setContributor("MBI"); parser.setArgumentPrefix("--", "-"); parser.addArgument("images", "i", mitkCommandLineParser::StringList, "DWIs:", "input diffusion-weighted images", us::Any(), false); parser.addArgument("wmmasks", "w", mitkCommandLineParser::StringList, "WM-Masks:", "white matter mask images", us::Any()); parser.addArgument("tractograms", "t", mitkCommandLineParser::StringList, "Tractograms:", "input tractograms (.fib, vtk ascii file format)", us::Any(), false); parser.addArgument("masks", "m", mitkCommandLineParser::StringList, "Masks:", "mask images", us::Any()); parser.addArgument("forest", "f", mitkCommandLineParser::OutputFile, "Forest:", "output forest", us::Any(), false); - parser.addArgument("stepsize", "s", mitkCommandLineParser::Float, "Stepsize:", "stepsize", us::Any()); + parser.addArgument("stepsize", "s", mitkCommandLineParser::Float, "Stepsize:", "stepsize in mm (determines number of white-matter samples)", us::Any()); parser.addArgument("gmsamples", "g", mitkCommandLineParser::Int, "Number of gray matter samples per voxel:", "Number of gray matter samples per voxel", us::Any()); parser.addArgument("numtrees", "n", mitkCommandLineParser::Int, "Number of trees:", "number of trees", us::Any()); parser.addArgument("max_tree_depth", "d", mitkCommandLineParser::Int, "Max. 
tree depth:", "maximum tree depth", us::Any()); parser.addArgument("sample_fraction", "sf", mitkCommandLineParser::Float, "Sample fraction:", "fraction of samples used per tree", us::Any()); map parsedArgs = parser.parseArguments(argc, argv); if (parsedArgs.size()==0) return EXIT_FAILURE; mitkCommandLineParser::StringContainerType imageFiles = us::any_cast(parsedArgs["images"]); mitkCommandLineParser::StringContainerType wmMaskFiles; if (parsedArgs.count("wmmasks")) wmMaskFiles = us::any_cast(parsedArgs["wmmasks"]); mitkCommandLineParser::StringContainerType maskFiles; if (parsedArgs.count("masks")) maskFiles = us::any_cast(parsedArgs["masks"]); string forestFile = us::any_cast(parsedArgs["forest"]); mitkCommandLineParser::StringContainerType tractogramFiles; if (parsedArgs.count("tractograms")) tractogramFiles = us::any_cast(parsedArgs["tractograms"]); int numTrees = 30; if (parsedArgs.count("numtrees")) numTrees = us::any_cast(parsedArgs["numtrees"]); int gmsamples = -1; if (parsedArgs.count("gmsamples")) gmsamples = us::any_cast(parsedArgs["gmsamples"]); float stepsize = -1; if (parsedArgs.count("stepsize")) stepsize = us::any_cast(parsedArgs["stepsize"]); int max_tree_depth = 50; if (parsedArgs.count("max_tree_depth")) max_tree_depth = us::any_cast(parsedArgs["max_tree_depth"]); double sample_fraction = 1.0; if (parsedArgs.count("sample_fraction")) sample_fraction = us::any_cast(parsedArgs["sample_fraction"]); MITK_INFO << "loading diffusion-weighted images"; std::vector< mitk::Image::Pointer > rawData; for (auto imgFile : imageFiles) { mitk::Image::Pointer dwi = dynamic_cast(mitk::IOUtil::LoadImage(imgFile).GetPointer()); rawData.push_back(dwi); } typedef itk::Image ItkUcharImgType; MITK_INFO << "loading mask images"; std::vector< ItkUcharImgType::Pointer > maskImageVector; for (auto maskFile : maskFiles) { mitk::Image::Pointer img = dynamic_cast(mitk::IOUtil::LoadImage(maskFile).GetPointer()); ItkUcharImgType::Pointer mask = ItkUcharImgType::New(); mitk::CastToItkImage(img, mask); maskImageVector.push_back(mask); } MITK_INFO << "loading white matter mask images"; std::vector< ItkUcharImgType::Pointer > wmMaskImageVector; for (auto wmFile : wmMaskFiles) { mitk::Image::Pointer img = dynamic_cast(mitk::IOUtil::LoadImage(wmFile).GetPointer()); ItkUcharImgType::Pointer wmmask = ItkUcharImgType::New(); mitk::CastToItkImage(img, wmmask); wmMaskImageVector.push_back(wmmask); } MITK_INFO << "loading tractograms"; std::vector< mitk::FiberBundle::Pointer > tractograms; for (auto tractFile : tractogramFiles) { mitk::FiberBundle::Pointer fib = dynamic_cast(mitk::IOUtil::Load(tractFile).at(0).GetPointer()); tractograms.push_back(fib); } mitk::TrackingForestHandler<> forestHandler; forestHandler.SetRawData(rawData); forestHandler.SetMaskImages(maskImageVector); forestHandler.SetWhiteMatterImages(wmMaskImageVector); forestHandler.SetTractograms(tractograms); forestHandler.SetNumTrees(numTrees); forestHandler.SetMaxTreeDepth(max_tree_depth); forestHandler.SetGrayMatterSamplesPerVoxel(gmsamples); forestHandler.SetSampleFraction(sample_fraction); forestHandler.SetStepSize(stepsize); forestHandler.StartTraining(); forestHandler.SaveForest(forestFile); return EXIT_SUCCESS; } diff --git a/Plugins/org.mitk.gui.qt.diffusionimaging/src/internal/QmitkFiberProcessingViewControls.ui b/Plugins/org.mitk.gui.qt.diffusionimaging/src/internal/QmitkFiberProcessingViewControls.ui index 8ca5d39b84..3cdd629232 100644 --- a/Plugins/org.mitk.gui.qt.diffusionimaging/src/internal/QmitkFiberProcessingViewControls.ui +++ 
b/Plugins/org.mitk.gui.qt.diffusionimaging/src/internal/QmitkFiberProcessingViewControls.ui @@ -1,1399 +1,1399 @@ QmitkFiberProcessingViewControls 0 0 386 540 Form 0 0 0 355 330 Fiber Extraction Extract a fiber subset from the selected fiber bundle using manually placed planar figures as waypoints or binary regions of interest. false 0 0 200 16777215 11 Extract fibers passing through selected ROI or composite ROI. Select ROI and fiber bundle to execute. Extract Qt::Vertical 20 40 QFrame::NoFrame QFrame::Raised 9 9 9 9 0 false 0 0 16777215 16777215 11 Generate a binary image containing all selected ROIs. Select at least one ROI (planar figure) and a reference fiber bundle or image. Generate ROI Image 0 0 200 0 16777215 60 QFrame::NoFrame QFrame::Raised 0 0 0 0 false 60 16777215 Create OR composition with selected ROIs. OR Qt::Horizontal 40 20 false 60 16777215 Create NOT composition from selected ROI. NOT false 60 16777215 Create AND composition with selected ROIs. AND 0 0 200 0 16777215 60 QFrame::NoFrame QFrame::Raised 0 0 0 0 30 30 Draw circular ROI. Select reference fiber bundle to execute. :/QmitkDiffusionImaging/circle.png:/QmitkDiffusionImaging/circle.png 32 32 false true Qt::Horizontal 40 20 30 30 Draw polygonal ROI. Select reference fiber bundle to execute. :/QmitkDiffusionImaging/polygon.png:/QmitkDiffusionImaging/polygon.png 32 32 true true 0 0 Extract using planar figures Extract using binary ROI image QFrame::NoFrame QFrame::Raised 0 0 0 0 0 0 Ending in mask Not ending in mask Passing mask Not passing mask Extract fibers: Both ends true 0 - 0 - 270 + -70 + 355 380 Fiber Removal Remove fibers that satisfy certain criteria from the selected bundle. 0 0 Remove fibers in direction Remove fibers by length Remove fibers by curvature Remove fiber parts outside mask Remove fiber parts inside mask QFrame::NoFrame QFrame::Raised 0 0 0 0 Qt::Horizontal 40 20 Minimum fiber length in mm 0 999999999 20 Max. Length: Min. Length: Maximum fiber length in mm 0 999999999 300 QFrame::NoFrame QFrame::Raised 0 0 0 0 0 X: Y: Z: Angle: Angular deviation threshold in degree 1 90.000000000000000 1.000000000000000 - 5.000000000000000 + 25.000000000000000 QFrame::NoFrame QFrame::Raised 0 0 0 0 If unchecked, the fiber exceeding the threshold will be split in two instead of removed. Remove Fiber false QFrame::NoFrame QFrame::Raised 0 0 0 0 0 Max. Angular Deviation: Qt::Horizontal 40 20 Maximum angular deviation in degree 180.000000000000000 0.100000000000000 30.000000000000000 Distance: Distance in mm 1 999.000000000000000 1.000000000000000 5.000000000000000 false 0 0 200 16777215 11 Remove Qt::Vertical 20 40 0 0 - 355 + 280 349 Bundle Modification Modify the selected bundle with operations such as fiber resampling, FA coloring, etc. QFrame::NoFrame QFrame::Raised 0 0 0 0 0 Error threshold in mm: 999999999.000000000000000 0.100000000000000 0.100000000000000 QFrame::NoFrame QFrame::Raised 0 0 0 0 0 Sagittal Coronal Axial Select direction: QFrame::NoFrame QFrame::Raised 0 0 0 0 0 Scalar map: If checked, the image values are not only used to color the fibers but are also used as opaxity values. Values as opacity true 0 0 Smooth fibers Compress fibers Color fibers by scalar map (e.g. 
FA) Mirror fibers Weight Bundle Color fibers by curvature QFrame::NoFrame QFrame::Raised 0 0 0 0 0 0.010000000000000 999999999.000000000000000 0.100000000000000 1.000000000000000 Point distance in mm: Qt::Vertical 20 40 false 0 0 200 16777215 11 Execute QFrame::NoFrame QFrame::Raised 0 0 0 0 0 Weight: 7 999999999.000000000000000 0.100000000000000 1.000000000000000 0 0 - 368 - 152 + 360 + 104 Bundle Operations Join, subtract or copy bundles. false 0 0 200 16777215 11 Returns all fibers contained in bundle X that are not contained in bundle Y (not commutative!). Select at least two fiber bundles to execute. Substract Qt::Vertical 20 40 false 0 0 200 16777215 11 Merge selected fiber bundles. Select at least two fiber bundles to execute. Join false 0 0 200 16777215 11 Merge selected fiber bundles. Select at least two fiber bundles to execute. Copy Please Select Input Data <html><head/><body><p><span style=" color:#ff0000;">mandatory</span></p></body></html> true <html><head/><body><p><span style=" color:#969696;">needed for extraction</span></p></body></html> true Input DTI Fiber Bundle: Binary seed ROI. If not specified, the whole image area is seeded. ROI: Qt::Vertical 20 40 QmitkDataStorageComboBox QComboBox
QmitkDataStorageComboBox.h
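For orientation, the training side of this change (the DFTraining miniapp above) reduces to the mitk::TrackingForestHandler<> call sequence sketched below. This is only a condensed sketch of the calls appearing in that file; the header names, the helper signature and the literal parameter values (taken from the miniapp defaults) are illustrative assumptions rather than part of the diff:

#include <mitkTrackingForestHandler.h>   // assumed header name for mitk::TrackingForestHandler<>
#include <mitkFiberBundle.h>
#include <mitkImage.h>
#include <itkImage.h>
#include <string>
#include <vector>

typedef itk::Image<unsigned char, 3> ItkUcharImgType;

// Condensed sketch of the training sequence used in DFTraining.cpp above.
void TrainForest(std::vector<mitk::Image::Pointer> rawData,
                 std::vector<ItkUcharImgType::Pointer> masks,
                 std::vector<ItkUcharImgType::Pointer> wmMasks,
                 std::vector<mitk::FiberBundle::Pointer> tractograms,
                 const std::string& forestFile)
{
  mitk::TrackingForestHandler<> forestHandler;
  forestHandler.SetRawData(rawData);               // diffusion-weighted training images
  forestHandler.SetMaskImages(masks);              // optional brain masks
  forestHandler.SetWhiteMatterImages(wmMasks);     // optional white-matter masks
  forestHandler.SetTractograms(tractograms);       // reference tractograms
  forestHandler.SetNumTrees(30);                   // miniapp default
  forestHandler.SetMaxTreeDepth(50);               // miniapp default
  forestHandler.SetGrayMatterSamplesPerVoxel(-1);  // -1: automatic estimation
  forestHandler.SetSampleFraction(1.0);            // fraction of samples per tree
  forestHandler.SetStepSize(-1);                   // -1: automatic fiber sampling
  forestHandler.StartTraining();
  forestHandler.SaveForest(forestFile);
}

The same handler type is reused at tracking time, which is what removes the direct vigra::RandomForest dependency from the tracking code in the files below.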
diff --git a/Plugins/org.mitk.gui.qt.diffusionimaging/src/internal/QmitkMLBTView.cpp b/Plugins/org.mitk.gui.qt.diffusionimaging/src/internal/QmitkMLBTView.cpp index 3d2db7cf7f..9c189cb3ec 100644 --- a/Plugins/org.mitk.gui.qt.diffusionimaging/src/internal/QmitkMLBTView.cpp +++ b/Plugins/org.mitk.gui.qt.diffusionimaging/src/internal/QmitkMLBTView.cpp @@ -1,419 +1,417 @@ /*=================================================================== The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center, Division of Medical and Biological Informatics. All rights reserved. This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See LICENSE.txt or http://www.mitk.org for details. ===================================================================*/ // Blueberry #include #include // Qmitk #include "QmitkMLBTView.h" #include "QmitkStdMultiWidget.h" // Qt #include #include #include #include #include #include #include #include #include #include #include #include #include #define _USE_MATH_DEFINES #include const std::string QmitkMLBTView::VIEW_ID = "org.mitk.views.mlbtview"; using namespace berry; QmitkMLBTView::QmitkMLBTView() : QmitkFunctionality() , m_Controls( 0 ) , m_MultiWidget( NULL ) { m_TrackingTimer = std::make_shared(this); } // Destructor QmitkMLBTView::~QmitkMLBTView() { } void QmitkMLBTView::CreateQtPartControl( QWidget *parent ) { // build up qt view, unless already done if ( !m_Controls ) { // create GUI widgets from the Qt Designer's .ui file m_Controls = new Ui::QmitkMLBTViewControls; m_Controls->setupUi( parent ); connect( m_Controls->m_StartTrainingButton, SIGNAL ( clicked() ), this, SLOT( StartTrainingThread() ) ); connect( &m_TrainingWatcher, SIGNAL ( finished() ), this, SLOT( OnTrainingThreadStop() ) ); connect( m_Controls->m_StartTrackingButton, SIGNAL ( clicked() ), this, SLOT( StartTrackingThread() ) ); connect( &m_TrackingWatcher, SIGNAL ( finished() ), this, SLOT( OnTrackingThreadStop() ) ); connect( m_Controls->m_SaveForestButton, SIGNAL ( clicked() ), this, SLOT( SaveForest() ) ); connect( m_Controls->m_LoadForestButton, SIGNAL ( clicked() ), this, SLOT( LoadForest() ) ); connect( m_TrackingTimer.get(), SIGNAL(timeout()), this, SLOT(BuildFibers()) ); connect( m_Controls->m_TimerIntervalBox, SIGNAL(valueChanged(int)), this, SLOT( ChangeTimerInterval(int) )); connect( m_Controls->m_DemoModeBox, SIGNAL(stateChanged(int)), this, SLOT( ToggleDemoMode(int) )); connect( m_Controls->m_PauseTrackingButton, SIGNAL ( clicked() ), this, SLOT( PauseTracking() ) ); connect( m_Controls->m_AbortTrackingButton, SIGNAL ( clicked() ), this, SLOT( AbortTracking() ) ); connect( m_Controls->m_AddTwButton, SIGNAL ( clicked() ), this, SLOT( AddTrainingWidget() ) ); connect( m_Controls->m_RemoveTwButton, SIGNAL ( clicked() ), this, SLOT( RemoveTrainingWidget() ) ); int numThread = itk::MultiThreader::GetGlobalDefaultNumberOfThreads(); m_Controls->m_NumberOfThreadsBox->setMaximum(numThread); m_Controls->m_NumberOfThreadsBox->setValue(numThread); m_Controls->m_TrackingMaskImageBox->SetDataStorage(this->GetDataStorage()); m_Controls->m_TrackingSeedImageBox->SetDataStorage(this->GetDataStorage()); m_Controls->m_TrackingStopImageBox->SetDataStorage(this->GetDataStorage()); m_Controls->m_TrackingRawImageBox->SetDataStorage(this->GetDataStorage()); mitk::NodePredicateIsDWI::Pointer isDiffusionImage = mitk::NodePredicateIsDWI::New(); mitk::TNodePredicateDataType::Pointer isMitkImage = 
mitk::TNodePredicateDataType::New(); mitk::NodePredicateNot::Pointer noDiffusionImage = mitk::NodePredicateNot::New(isDiffusionImage); mitk::NodePredicateAnd::Pointer finalPredicate = mitk::NodePredicateAnd::New(isMitkImage, noDiffusionImage); mitk::NodePredicateProperty::Pointer isBinaryPredicate = mitk::NodePredicateProperty::New("binary", mitk::BoolProperty::New(true)); finalPredicate = mitk::NodePredicateAnd::New(finalPredicate, isBinaryPredicate); m_Controls->m_TrackingMaskImageBox->SetPredicate(finalPredicate); m_Controls->m_TrackingSeedImageBox->SetPredicate(finalPredicate); m_Controls->m_TrackingStopImageBox->SetPredicate(finalPredicate); m_Controls->m_TrackingRawImageBox->SetPredicate(isDiffusionImage); m_Controls->m_TrackingMaskImageBox->SetZeroEntryText("--"); m_Controls->m_TrackingSeedImageBox->SetZeroEntryText("--"); m_Controls->m_TrackingStopImageBox->SetZeroEntryText("--"); AddTrainingWidget(); UpdateGui(); } } void QmitkMLBTView::AddTrainingWidget() { std::shared_ptr tw = std::make_shared(); tw->SetDataStorage(this->GetDataStorage()); m_Controls->m_TwFrame->layout()->addWidget(tw.get()); m_TrainingWidgets.push_back(tw); } void QmitkMLBTView::RemoveTrainingWidget() { if(m_TrainingWidgets.size()>1) { m_TrainingWidgets.back().reset(); m_TrainingWidgets.pop_back(); } } void QmitkMLBTView::UpdateGui() { if (m_ForestHandler.IsForestValid()) { m_Controls->statusLabel->setText("Random forest available"); m_Controls->m_SaveForestButton->setEnabled(true); m_Controls->m_StartTrackingButton->setEnabled(true); } else { m_Controls->statusLabel->setText("Please load or train random forest!"); m_Controls->m_SaveForestButton->setEnabled(false); m_Controls->m_StartTrackingButton->setEnabled(false); } } void QmitkMLBTView::AbortTracking() { if (tracker.IsNotNull()) { tracker->m_AbortTracking = true; } } void QmitkMLBTView::PauseTracking() { if (tracker.IsNotNull()) { tracker->m_PauseTracking = !tracker->m_PauseTracking; } } void QmitkMLBTView::ChangeTimerInterval(int value) { m_TrackingTimer->setInterval(value); } void QmitkMLBTView::ToggleDemoMode(int state) { if (tracker.IsNotNull()) { tracker->SetDemoMode(m_Controls->m_DemoModeBox->isChecked()); tracker->m_Stop = false; } } void QmitkMLBTView::BuildFibers() { if (m_Controls->m_DemoModeBox->isChecked() && tracker.IsNotNull() && tracker->m_BuildFibersFinished) { vtkSmartPointer< vtkPolyData > poly = tracker->GetFiberPolyData(); mitk::FiberBundle::Pointer outFib = mitk::FiberBundle::New(poly); outFib->SetFiberColors(255,255,255); m_TractogramNode->SetData(outFib); m_SamplingPointsNode->SetData(tracker->m_SamplingPointset); m_AlternativePointsNode->SetData(tracker->m_AlternativePointset); mitk::RenderingManager::GetInstance()->RequestUpdateAll(); tracker->m_BuildFibersFinished = false; tracker->m_BuildFibersReady = 0; tracker->m_Stop = false; } } void QmitkMLBTView::LoadForest() { QString filename = QFileDialog::getOpenFileName(0, tr("Load Forest"), QDir::currentPath(), tr("HDF5 random forest file (*.rf)") ); if(filename.isEmpty() || filename.isNull()) return; m_ForestHandler.LoadForest( filename.toStdString() ); UpdateGui(); } void QmitkMLBTView::StartTrackingThread() { m_TractogramNode = mitk::DataNode::New(); m_TractogramNode->SetName("MLBT Result"); m_TractogramNode->SetProperty("Fiber2DSliceThickness", mitk::FloatProperty::New(20)); m_TractogramNode->SetProperty("Fiber2DfadeEFX", mitk::BoolProperty::New(false)); m_TractogramNode->SetProperty("LineWidth", mitk::IntProperty::New(2)); 
m_TractogramNode->SetProperty("color",mitk::ColorProperty::New(0, 1, 1)); this->GetDataStorage()->Add(m_TractogramNode); m_SamplingPointsNode = mitk::DataNode::New(); m_SamplingPointsNode->SetName("SamplingPoints"); m_SamplingPointsNode->SetProperty("pointsize", mitk::FloatProperty::New(0.2)); m_SamplingPointsNode->SetProperty("color", mitk::ColorProperty::New(1,1,1)); mitk::PointSetShapeProperty::Pointer bla = mitk::PointSetShapeProperty::New(); bla->SetValue(8); m_SamplingPointsNode->SetProperty("Pointset.2D.shape", bla); m_SamplingPointsNode->SetProperty("Pointset.2D.distance to plane", mitk::FloatProperty::New(1.5)); m_SamplingPointsNode->SetProperty("point 2D size", mitk::FloatProperty::New(0.1)); m_SamplingPointsNode->SetProperty("Pointset.2D.fill shape", mitk::BoolProperty::New(true)); this->GetDataStorage()->Add(m_SamplingPointsNode); m_AlternativePointsNode = mitk::DataNode::New(); m_AlternativePointsNode->SetName("AlternativePoints"); m_AlternativePointsNode->SetProperty("pointsize", mitk::FloatProperty::New(0.2)); m_AlternativePointsNode->SetProperty("color", mitk::ColorProperty::New(1,0,0)); m_AlternativePointsNode->SetProperty("Pointset.2D.shape", bla); m_AlternativePointsNode->SetProperty("Pointset.2D.distance to plane", mitk::FloatProperty::New(1.5)); m_AlternativePointsNode->SetProperty("point 2D size", mitk::FloatProperty::New(0.1)); m_AlternativePointsNode->SetProperty("Pointset.2D.fill shape", mitk::BoolProperty::New(true)); this->GetDataStorage()->Add(m_AlternativePointsNode); QFuture future = QtConcurrent::run( this, &QmitkMLBTView::StartTracking ); m_TrackingWatcher.setFuture(future); m_TrackingThreadIsRunning = true; m_Controls->m_StartTrackingButton->setEnabled(false); m_TrackingTimer->start(m_Controls->m_TimerIntervalBox->value()); } void QmitkMLBTView::OnTrackingThreadStop() { m_TrackingThreadIsRunning = false; m_Controls->m_StartTrackingButton->setEnabled(true); vtkSmartPointer< vtkPolyData > poly = tracker->GetFiberPolyData(); mitk::FiberBundle::Pointer outFib = mitk::FiberBundle::New(poly); m_TractogramNode->SetData(outFib); m_TractogramNode->SetProperty("Fiber2DSliceThickness", mitk::FloatProperty::New(1)); if (m_Controls->m_DemoModeBox->isChecked()) { m_SamplingPointsNode->SetData(tracker->m_SamplingPointset); m_AlternativePointsNode->SetData(tracker->m_AlternativePointset); } tracker = NULL; m_TrackingTimer->stop(); mitk::RenderingManager::GetInstance()->RequestUpdateAll(); } void QmitkMLBTView::StartTracking() { if ( m_Controls->m_TrackingRawImageBox->GetSelectedNode().IsNull() || !m_ForestHandler.IsForestValid()) return; mitk::Image::Pointer dwi = dynamic_cast(m_Controls->m_TrackingRawImageBox->GetSelectedNode()->GetData()); + m_ForestHandler.AddRawData(dwi); + tracker = TrackerType::New(); tracker->SetNumberOfThreads(m_Controls->m_NumberOfThreadsBox->value()); tracker->SetInput(0, mitk::DiffusionPropertyHelper::GetItkVectorImage(dwi) ); - tracker->SetGradientDirections( mitk::DiffusionPropertyHelper::GetGradientContainer(dwi) ); - tracker->SetB_Value( mitk::DiffusionPropertyHelper::GetReferenceBValue(dwi) ); tracker->SetDemoMode(m_Controls->m_DemoModeBox->isChecked()); if (m_Controls->m_DemoModeBox->isChecked()) tracker->SetNumberOfThreads(1); if (m_Controls->m_TrackingMaskImageBox->GetSelectedNode().IsNotNull()) { mitk::Image::Pointer mask = dynamic_cast(m_Controls->m_TrackingMaskImageBox->GetSelectedNode()->GetData()); ItkUcharImgType::Pointer itkMask = ItkUcharImgType::New(); mitk::CastToItkImage(mask, itkMask); tracker->SetMaskImage(itkMask); } if 
(m_Controls->m_TrackingSeedImageBox->GetSelectedNode().IsNotNull()) { mitk::Image::Pointer img = dynamic_cast(m_Controls->m_TrackingSeedImageBox->GetSelectedNode()->GetData()); ItkUcharImgType::Pointer itkImg = ItkUcharImgType::New(); mitk::CastToItkImage(img, itkImg); tracker->SetSeedImage(itkImg); } if (m_Controls->m_TrackingStopImageBox->GetSelectedNode().IsNotNull()) { mitk::Image::Pointer img = dynamic_cast(m_Controls->m_TrackingStopImageBox->GetSelectedNode()->GetData()); ItkUcharImgType::Pointer itkImg = ItkUcharImgType::New(); mitk::CastToItkImage(img, itkImg); tracker->SetStoppingRegions(itkImg); } tracker->SetSeedsPerVoxel(m_Controls->m_NumberOfSeedsBox->value()); tracker->SetStepSize(m_Controls->m_TrackingStepSizeBox->value()); tracker->SetMinTractLength(m_Controls->m_MinLengthBox->value()); tracker->SetMaxTractLength(m_Controls->m_MaxLengthBox->value()); tracker->SetAposterioriCurvCheck(m_Controls->m_Curvcheck2->isChecked()); tracker->SetRemoveWmEndFibers(false); tracker->SetAvoidStop(m_Controls->m_AvoidStop->isChecked()); - - vigra::RandomForest forest = m_ForestHandler.GetForest(); - tracker->SetDecisionForest(&forest); + tracker->SetForestHandler(m_ForestHandler); tracker->SetSamplingDistance(m_Controls->m_SamplingDistanceBox->value()); tracker->SetNumberOfSamples(m_Controls->m_NumSamplesBox->value()); tracker->SetRandomSampling(m_Controls->m_RandomSampling->isChecked()); tracker->Update(); } void QmitkMLBTView::SaveForest() { if (!m_ForestHandler.IsForestValid()) { UpdateGui(); return; } QString filename = QFileDialog::getSaveFileName(0, tr("Save Forest"), QDir::currentPath()+"/forest.rf", tr("HDF5 random forest file (*.rf)") ); if(filename.isEmpty() || filename.isNull()) return; if(!filename.endsWith(".rf")) filename += ".rf"; m_ForestHandler.SaveForest( filename.toStdString() ); } void QmitkMLBTView::StartTrainingThread() { QFuture future = QtConcurrent::run( this, &QmitkMLBTView::StartTraining ); m_TrainingWatcher.setFuture(future); m_Controls->m_StartTrainingButton->setEnabled(false); m_Controls->m_SaveForestButton->setEnabled(false); m_Controls->m_LoadForestButton->setEnabled(false); } void QmitkMLBTView::OnTrainingThreadStop() { m_Controls->m_StartTrainingButton->setEnabled(true); m_Controls->m_SaveForestButton->setEnabled(true); m_Controls->m_LoadForestButton->setEnabled(true); UpdateGui(); } void QmitkMLBTView::StartTraining() { std::vector< mitk::Image::Pointer > m_SelectedDiffImages; std::vector< mitk::FiberBundle::Pointer > m_SelectedFB; std::vector< ItkUcharImgType::Pointer > m_MaskImages; std::vector< ItkUcharImgType::Pointer > m_WhiteMatterImages; for (auto w : m_TrainingWidgets) { if ( w->GetImage().IsNull() || w->GetFibers().IsNull() ) { QMessageBox::information(nullptr, "Warning", "Training could not be started. 
Not all necessary datasets were selected."); return; } m_SelectedDiffImages.push_back(dynamic_cast(w->GetImage()->GetData())); m_SelectedFB.push_back(dynamic_cast(w->GetFibers()->GetData())); if (w->GetMask().IsNotNull()) { mitk::Image::Pointer img = dynamic_cast(w->GetMask()->GetData()); ItkUcharImgType::Pointer itkMask = ItkUcharImgType::New(); mitk::CastToItkImage(img, itkMask); m_MaskImages.push_back(itkMask); } else m_MaskImages.push_back(nullptr); if (w->GetWhiteMatter().IsNotNull()) { mitk::Image::Pointer img = dynamic_cast(w->GetWhiteMatter()->GetData()); ItkUcharImgType::Pointer itkMask = ItkUcharImgType::New(); mitk::CastToItkImage(img, itkMask); m_WhiteMatterImages.push_back(itkMask); } else m_WhiteMatterImages.push_back(nullptr); } m_ForestHandler.SetRawData(m_SelectedDiffImages); m_ForestHandler.SetTractograms(m_SelectedFB); m_ForestHandler.SetMaskImages(m_MaskImages); m_ForestHandler.SetWhiteMatterImages(m_WhiteMatterImages); m_ForestHandler.SetNumTrees(m_Controls->m_NumTreesBox->value()); m_ForestHandler.SetMaxTreeDepth(m_Controls->m_MaxDepthBox->value()); m_ForestHandler.SetGrayMatterSamplesPerVoxel(m_Controls->m_GmSamplingBox->value()); m_ForestHandler.SetSampleFraction(m_Controls->m_SampleFractionBox->value()); m_ForestHandler.SetStepSize(m_Controls->m_TrainingStepSizeBox->value()); m_ForestHandler.StartTraining(); } void QmitkMLBTView::StdMultiWidgetAvailable (QmitkStdMultiWidget &stdMultiWidget) { m_MultiWidget = &stdMultiWidget; } void QmitkMLBTView::StdMultiWidgetNotAvailable() { m_MultiWidget = NULL; } void QmitkMLBTView::Activated() { } diff --git a/Plugins/org.mitk.gui.qt.diffusionimaging/src/internal/QmitkMLBTViewControls.ui b/Plugins/org.mitk.gui.qt.diffusionimaging/src/internal/QmitkMLBTViewControls.ui index e6491e2411..14f30cb2ed 100644 --- a/Plugins/org.mitk.gui.qt.diffusionimaging/src/internal/QmitkMLBTViewControls.ui +++ b/Plugins/org.mitk.gui.qt.diffusionimaging/src/internal/QmitkMLBTViewControls.ui @@ -1,779 +1,779 @@ QmitkMLBTViewControls 0 0 696 907 Form Please load or train random forest! Qt::AlignCenter 1 0 0 678 804 Training - - - - Load Forest - - - - - - - Start Training - - - - - - - Qt::Vertical - - - - 20 - 40 - - - - Save Forest - - + + QFrame::NoFrame QFrame::Raised - + 0 0 0 0 - - - - Maximum tree depth. - - - 1 - - - 999999999 - - - 50 - - - - - - - Non-WM Sampling Points: - - - - - - - Fiber Sampling: - - - - - + + - Number of tress in the final random forest. - - - 1 - - - 999999999 - - - 30 - - - - - - - Fiber sampling in mm. Determines the number of white-matter samples (-1 = auto). - - - 3 - - - -1.000000000000000 - - - 999.000000000000000 - - - 0.100000000000000 - - - -1.000000000000000 + Add additional training data pair - - - - - Num. Trees: - - - - - - - Number of sampling points outside of the white-matter (-1 = automatic estimation). - - - -1 - - - 999999999 + ... - - -1 + + + :/org_mitk_icons/icons/tango/scalable/actions/list-add.svg:/org_mitk_icons/icons/tango/scalable/actions/list-add.svg - - - - Max. Depth: + + + + Remove training data pair - - - - - Sample Fraction: - - - - - - - Fraction of samples used to train each tree. - - - 3 - - - 1.000000000000000 - - - 0.100000000000000 + ... 
- - 1.000000000000000 + + + :/org_mitk_icons/icons/tango/scalable/actions/list-remove.svg:/org_mitk_icons/icons/tango/scalable/actions/list-remove.svg QFrame::NoFrame QFrame::Raised 0 0 0 0 0 6 QFrame::NoFrame QFrame::Raised 0 0 0 0 0 Input DWI: Qt::AlignCenter Reference Tractogram: Qt::AlignCenter Mask: Qt::AlignCenter WM: Qt::AlignCenter - - + + QFrame::NoFrame QFrame::Raised - + 0 0 0 0 - - - - Add additional training data pair + + + + Maximum tree depth. + + + 1 + + + 999999999 + + 50 + + + + + - ... + Non-WM Sampling Points: - - - :/org_mitk_icons/icons/tango/scalable/actions/list-add.svg:/org_mitk_icons/icons/tango/scalable/actions/list-add.svg + + + + + + Fiber Sampling: - - + + - Remove training data pair + Number of tress in the final random forest. + + + 1 + + + 999999999 + + + 30 + + + + + + + Fiber sampling in mm. Determines the number of white-matter samples (-1 = auto). + + + 3 + + + -1.000000000000000 + + + 999.000000000000000 + + + 0.100000000000000 + + + -1.000000000000000 + + + + + + + Num. Trees: + + + + + + + Number of sampling points outside of the white-matter (-1 = automatic estimation). + + -1 + + + 999999999 + + + -1 + + + + + - ... + Max. Depth: - - - :/org_mitk_icons/icons/tango/scalable/actions/list-remove.svg:/org_mitk_icons/icons/tango/scalable/actions/list-remove.svg + + + + + + Sample Fraction: + + + + + + + Fraction of samples used to train each tree. + + + 3 + + + 1.000000000000000 + + + 0.100000000000000 + + + 1.000000000000000 + + + + Start Training + + + + + + + Qt::Vertical + + + + 20 + 40 + + + + 0 0 678 804 Tractography QFrame::NoFrame QFrame::Raised 0 0 0 0 Demo Mode 1 1000 10 Random sampling false QFrame::NoFrame QFrame::Raised 0 0 0 0 Mask Image: Seed Image: Stop Image: QFrame::NoFrame QFrame::Raised 0 0 0 0 Number of seeds per voxel. 1 999 999999999.000000000000000 1.000000000000000 400.000000000000000 999999999.000000000000000 1.000000000000000 20.000000000000000 999999999 50 Num. Seeds: Min. Length Sampling Distance: 0.100000000000000 0.500000000000000 Max. Length Step Size: Input DWI: 1 30 Num. Threads: 0.500000000000000 Num. Samples: Avoid premature termination true QFrame::NoFrame QFrame::Raised 0 0 0 0 Pause tractography ... :/org_mitk_icons/icons/tango/scalable/actions/media-playback-pause.svg:/org_mitk_icons/icons/tango/scalable/actions/media-playback-pause.svg Start tractography ... :/org_mitk_icons/icons/tango/scalable/actions/media-playback-start.svg:/org_mitk_icons/icons/tango/scalable/actions/media-playback-start.svg Abort tractography ... :/org_mitk_icons/icons/tango/scalable/actions/media-playback-stop.svg:/org_mitk_icons/icons/tango/scalable/actions/media-playback-stop.svg Qt::Vertical 20 40 Secondary curvature check true + + + + Load Forest + + + QmitkDataStorageComboBox QComboBox
QmitkDataStorageComboBox.h
QmitkDataStorageComboBoxWithSelectNone QComboBox
QmitkDataStorageComboBoxWithSelectNone.h
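Pulling the tracking-side changes together: instead of handing a raw vigra::RandomForest to the tracker, the forest is now wrapped in a mitk::TrackingForestHandler<> that loads the forest and preprocesses the raw DWI, and the handler is passed to itk::MLBSTrackingFilter as a whole. A minimal sketch of that wiring, assembled from the calls added in this diff (header names and the helper signature are assumptions; optional mask/seed/stop images and the remaining tracking parameters are omitted):

#include <mitkTrackingForestHandler.h>   // assumed header names
#include <itkMLBSTrackingFilter.h>
#include <mitkDiffusionPropertyHelper.h>
#include <mitkFiberBundle.h>
#include <mitkImage.h>
#include <string>

// Sketch of the new forest-handler wiring used by the miniapp and the GUI view.
mitk::FiberBundle::Pointer TrackWithForest(mitk::Image::Pointer dwi,
                                           const std::string& forestFile)
{
  mitk::TrackingForestHandler<> tfh;
  tfh.LoadForest(forestFile);   // replaces vigra::rf_import_HDF5(rf, forestFile)
  tfh.AddRawData(dwi);          // handler preprocesses the DWI; no SetGradientDirections/SetB_Value on the tracker

  typedef itk::MLBSTrackingFilter<100> TrackerType;
  TrackerType::Pointer tracker = TrackerType::New();
  tracker->SetInput(0, mitk::DiffusionPropertyHelper::GetItkVectorImage(dwi));
  tracker->SetForestHandler(tfh);   // replaces SetDecisionForest(&rf)
  tracker->Update();

  return mitk::FiberBundle::New(tracker->GetFiberPolyData());
}

In both QmitkMLBTView and the MLBSTracking miniapp, these three handler calls replace the previous SetGradientDirections / SetB_Value / SetDecisionForest combination.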