diff --git a/Modules/DiffusionCore/Algorithms/Reconstruction/itkDiffusionIntravoxelIncoherentMotionReconstructionImageFilter.cpp b/Modules/DiffusionCore/Algorithms/Reconstruction/itkDiffusionIntravoxelIncoherentMotionReconstructionImageFilter.cpp
index 658a943..0ba0cc3 100644
--- a/Modules/DiffusionCore/Algorithms/Reconstruction/itkDiffusionIntravoxelIncoherentMotionReconstructionImageFilter.cpp
+++ b/Modules/DiffusionCore/Algorithms/Reconstruction/itkDiffusionIntravoxelIncoherentMotionReconstructionImageFilter.cpp
@@ -1,712 +1,718 @@
/*===================================================================

The Medical Imaging Interaction Toolkit (MITK)

Copyright (c) German Cancer Research Center.
All rights reserved.

This software is distributed WITHOUT ANY WARRANTY; without
even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE.

See LICENSE.txt or http://www.mitk.org for details.

===================================================================*/

#ifndef __itkDiffusionIntravoxelIncoherentMotionReconstructionImageFilter_cpp
#define __itkDiffusionIntravoxelIncoherentMotionReconstructionImageFilter_cpp

#include "itkDiffusionIntravoxelIncoherentMotionReconstructionImageFilter.h"
#include "itkImageRegionConstIterator.h"
#include "itkImageRegionConstIteratorWithIndex.h"
#include "itkImageRegionIterator.h"
#include "vnl/vnl_matrix.h"
#include "vnl/algo/vnl_symmetric_eigensystem.h"
#include
#include "itkRegularizedIVIMReconstructionFilter.h"
#include

#define IVIM_FOO -100000

namespace itk {

template< class TIn, class TOut>
DiffusionIntravoxelIncoherentMotionReconstructionImageFilter<TIn, TOut>
::DiffusionIntravoxelIncoherentMotionReconstructionImageFilter() :
  m_GradientDirectionContainer(nullptr),
  m_Method(IVIM_DSTAR_FIX),
  m_FitDStar(true),
  m_Verbose(false)
{
  this->SetNumberOfRequiredInputs( 1 );
  this->SetNumberOfRequiredOutputs( 3 );

  typename OutputImageType::Pointer outputPtr1 = OutputImageType::New();
  this->SetNthOutput(0, outputPtr1.GetPointer());

  typename OutputImageType::Pointer outputPtr2 = OutputImageType::New();
  this->SetNthOutput(1, outputPtr2.GetPointer());

  typename OutputImageType::Pointer outputPtr3 = OutputImageType::New();
  this->SetNthOutput(2, outputPtr3.GetPointer());
}

template< class TIn, class TOut>
void
DiffusionIntravoxelIncoherentMotionReconstructionImageFilter<TIn, TOut>
::BeforeThreadedGenerateData()
{
  // Input must be an itk::VectorImage.
  std::string gradientImageClassName(this->ProcessObject::GetInput(0)->GetNameOfClass());
  if ( strcmp(gradientImageClassName.c_str(),"VectorImage") != 0 )
  {
    itkExceptionMacro( << "There is only one Gradient image. I expect that to be a VectorImage. "
                       << "But it's of type: " << gradientImageClassName );
  }

  // Compute the indices of the baseline images and the gradient images.
  // If no b=0 s/mm² gradients are found, the next lowest b-value is used as baseline.
  GradientDirectionContainerType::ConstIterator gdcit = this->m_GradientDirectionContainer->Begin();
  double minNorm = itk::NumericTraits<double>::max();
  while( gdcit != this->m_GradientDirectionContainer->End() )
  {
    double norm = gdcit.Value().one_norm();
    if (norm < minNorm)
      minNorm = norm;
    ++gdcit;
  }
  minNorm += 0.001;

  gdcit = this->m_GradientDirectionContainer->Begin();
  while( gdcit != this->m_GradientDirectionContainer->End() )
  {
    if(gdcit.Value().one_norm() <= minNorm)
      m_Snap.baselineind.push_back(gdcit.Index());
    else
      m_Snap.gradientind.push_back(gdcit.Index());
    ++gdcit;
  }

  if (m_Snap.gradientind.size()==0)
    itkExceptionMacro("Only one b-value supplied. At least two needed for IVIM fit.");

  // Check: are the gradient and baseline lists equally long, with all gradients at even
  // and all baselines at odd indices? Then this is an interleaved (iterated) acquisition.
  m_Snap.iterated_sequence = false;
  if(m_Snap.baselineind.size() == m_Snap.gradientind.size())
  {
    int size = m_Snap.baselineind.size();
    int sum_b = 0, sum_g = 0;
    for(int i=0; i<size; i++)
    {
      sum_b += m_Snap.baselineind[i] % 2;
      sum_g += m_Snap.gradientind[i] % 2;
    }
    if(   (sum_b == size && sum_g == 0)
       || (sum_b == 0 && sum_g == size) )
    {
      m_Snap.iterated_sequence = true;
    }
  }

  // Number of diffusion-weighted measurements and their b-values.
  m_Snap.num_weighted = m_Snap.gradientind.size();
  m_Snap.bvalues.set_size(m_Snap.num_weighted);
  for(int i=0; i<m_Snap.num_weighted; i++)
  {
    double twonorm = m_GradientDirectionContainer->GetElement(m_Snap.gradientind[i]).two_norm();
    m_Snap.bvalues[i] = m_BValue*twonorm*twonorm;
  }

  if(m_Verbose)
  {
    std::cout << "ref bval: " << m_BValue << "; b-values: ";
    for(int i=0; i<m_Snap.num_weighted; i++)
    {
      std::cout << m_Snap.bvalues[i] << "; ";
    }
    std::cout << std::endl;
  }

  // Indices of the measurements with a b-value above the threshold.
  for(int i=0; i<m_Snap.num_weighted; i++)
  {
    if(m_Snap.bvalues[i]>m_BThres)
      m_Snap.high_indices.push_back(i);
  }
  m_Snap.num_high = m_Snap.high_indices.size();
  m_Snap.high_bvalues.set_size(m_Snap.num_high);
  m_Snap.high_meas.set_size(m_Snap.num_high);
  for(int i=0; i<m_Snap.num_high; i++)
  {
    m_Snap.high_bvalues[i] = m_Snap.bvalues[m_Snap.high_indices[i]];
  }
}

template< class TIn, class TOut>
MeasAndBvals
DiffusionIntravoxelIncoherentMotionReconstructionImageFilter<TIn, TOut>
::ApplyS0Threshold(vnl_vector<double> &meas, vnl_vector<double> &bvals)
{
  std::vector<double> newmeas;
  std::vector<double> newbvals;

  int N = meas.size();
  for(int i=0; i<N; i++)
  {
    // Keep only measurements that were not marked as below the S0 threshold.
    if( meas[i] != IVIM_FOO )
    {
      newmeas.push_back(meas[i]);
      newbvals.push_back(bvals[i]);
    }
  }

  MeasAndBvals retval;
  retval.N = newmeas.size();
  retval.meas.set_size(retval.N);
  retval.bvals.set_size(retval.N);
  for(int i=0; i<retval.N; i++)
  {
    retval.meas[i] = newmeas[i];
    retval.bvals[i] = newbvals[i];
  }
  return retval;
}

template< class TIn, class TOut>
void
DiffusionIntravoxelIncoherentMotionReconstructionImageFilter<TIn, TOut>
::ThreadedGenerateData(const OutputImageRegionType& outputRegionForThread, ThreadIdType )
{
  typename OutputImageType::Pointer outputImage =
      static_cast< OutputImageType * >(this->ProcessObject::GetPrimaryOutput());
  ImageRegionIterator< OutputImageType > oit(outputImage, outputRegionForThread);
  oit.GoToBegin();

  typename OutputImageType::Pointer dImage =
      static_cast< OutputImageType * >(this->ProcessObject::GetOutput(1));
  ImageRegionIterator< OutputImageType > oit1(dImage, outputRegionForThread);
  oit1.GoToBegin();

  typename OutputImageType::Pointer dstarImage =
      static_cast< OutputImageType * >(this->ProcessObject::GetOutput(2));
  ImageRegionIterator< OutputImageType > oit2(dstarImage, outputRegionForThread);
  oit2.GoToBegin();

  typedef ImageRegionConstIterator< InputImageType > InputIteratorType;
  typedef typename InputImageType::PixelType InputVectorType;
  typename InputImageType::Pointer inputImagePointer = nullptr;

  // Would have liked a dynamic_cast here, but seems SGI doesn't like it.
  // The enum will ensure that an inappropriate cast is not done.
  inputImagePointer = static_cast< InputImageType * >( this->ProcessObject::GetInput(0) );

  InputIteratorType iit(inputImagePointer, outputRegionForThread );
  iit.GoToBegin();

  // Init internal vector image for the regularized fit.
  m_InternalVectorImage = VectorImageType::New();
  m_InternalVectorImage->SetSpacing( inputImagePointer->GetSpacing() );      // Set the image spacing
  m_InternalVectorImage->SetOrigin( inputImagePointer->GetOrigin() );        // Set the image origin
  m_InternalVectorImage->SetDirection( inputImagePointer->GetDirection() );  // Set the image direction
  m_InternalVectorImage->SetRegions( inputImagePointer->GetLargestPossibleRegion() );

  m_InitialFitImage = InitialFitImageType::New();
  m_InitialFitImage->SetSpacing( inputImagePointer->GetSpacing() );      // Set the image spacing
  m_InitialFitImage->SetOrigin( inputImagePointer->GetOrigin() );        // Set the image origin
  m_InitialFitImage->SetDirection( inputImagePointer->GetDirection() );  // Set the image direction
  m_InitialFitImage->SetRegions( inputImagePointer->GetLargestPossibleRegion() );

  if(m_Method == IVIM_REGULARIZED)
  {
    m_InternalVectorImage->SetVectorLength(m_Snap.num_high);
    m_InternalVectorImage->Allocate();
    VectorImageType::PixelType varvec(m_Snap.num_high);
    for(int i=0; i<m_Snap.num_high; i++) varvec[i] = IVIM_FOO;
    m_InternalVectorImage->FillBuffer(varvec);

    m_InitialFitImage->Allocate();
    InitialFitImageType::PixelType vec;
    vec[0] = 0.5; vec[1] = 0.01; vec[2] = 0.001;
    m_InitialFitImage->FillBuffer(vec);
  }

  typedef itk::ImageRegionIterator<VectorImageType> VectorIteratorType;
  VectorIteratorType vecit(m_InternalVectorImage, outputRegionForThread );
  vecit.GoToBegin();
  typedef itk::ImageRegionIterator<InitialFitImageType> InitIteratorType;
  InitIteratorType initit(m_InitialFitImage, outputRegionForThread );
  initit.GoToBegin();

  while( !iit.IsAtEnd() )
  {
    InputVectorType measvec = iit.Get();

    typename NumericTraits<InputPixelType>::AccumulateType b0 = NumericTraits<InputPixelType>::Zero;

    m_Snap.meas_for_threshold.set_size(m_Snap.num_weighted);
    m_Snap.allmeas.set_size(m_Snap.num_weighted);
    if(!m_Snap.iterated_sequence)
    {
      // Average the baseline image pixels.
      for(unsigned int i = 0; i < m_Snap.baselineind.size(); ++i)
        b0 += measvec[m_Snap.baselineind[i]];
      if(m_Snap.baselineind.size())
        b0 /= m_Snap.baselineind.size();

      // Measurement vector.
      for(int i = 0; i < m_Snap.num_weighted; ++i)
      {
        m_Snap.allmeas[i] = measvec[m_Snap.gradientind[i]] / (b0+.0001);

        if(measvec[m_Snap.gradientind[i]] > m_S0Thres)
          m_Snap.meas_for_threshold[i] = measvec[m_Snap.gradientind[i]] / (b0+.0001);
        else
          m_Snap.meas_for_threshold[i] = IVIM_FOO;
      }
    }
    else
    {
      // Measurement vector: each weighted measurement is normalized by its own baseline.
      for(int i = 0; i < m_Snap.num_weighted; ++i)
      {
        b0 = measvec[m_Snap.baselineind[i]];

        m_Snap.allmeas[i] = measvec[m_Snap.gradientind[i]] / (b0+.0001);

        if(measvec[m_Snap.gradientind[i]] > m_S0Thres)
          m_Snap.meas_for_threshold[i] = measvec[m_Snap.gradientind[i]] / (b0+.0001);
        else
          m_Snap.meas_for_threshold[i] = IVIM_FOO;
      }
    }

    m_Snap.currentF = 0;
    m_Snap.currentD = 0;
    m_Snap.currentDStar = 0;

    switch(m_Method)
    {

    case IVIM_D_THEN_DSTAR:
    {
      // Exclude b-values below the threshold from the first (D and f) fit.
      for(int i=0; i<m_Snap.num_weighted; i++)
      {
        if(m_Snap.bvalues[i]<m_BThres)
          m_Snap.meas_for_threshold[i] = IVIM_FOO;
      }

      MeasAndBvals input = ApplyS0Threshold(m_Snap.meas_for_threshold, m_Snap.bvalues);
      m_Snap.bvals1 = input.bvals;
      m_Snap.meas1 = input.meas;
      if (input.N < 2) break;

      IVIM_d_and_f f_donly(input.N);
      f_donly.set_bvalues(input.bvals);
      f_donly.set_measurements(input.meas);

      vnl_vector< double > x_donly(2);
      x_donly[0] = 0.001;
      x_donly[1] = 0.1;
      // f 0.1 Dstar 0.01 D 0.001
      vnl_levenberg_marquardt lm_donly(f_donly);
      lm_donly.set_f_tolerance(0.0001);
      lm_donly.minimize(x_donly);
      m_Snap.currentD = x_donly[0];
      m_Snap.currentF = x_donly[1];

      if(m_FitDStar)
      {
        MeasAndBvals input2 = ApplyS0Threshold(m_Snap.meas_for_threshold, m_Snap.bvalues);
        m_Snap.bvals2 = input2.bvals;
        m_Snap.meas2 = input2.meas;
        if (input2.N < 2) break;

        IVIM_dstar_only f_dstar_only(input2.N,m_Snap.currentD,m_Snap.currentF);
        f_dstar_only.set_bvalues(input2.bvals);
        f_dstar_only.set_measurements(input2.meas);

        vnl_vector< double > x_dstar_only(1);
        vnl_vector< double > fx_dstar_only(input2.N);

        // Grid search over D* instead of an LM fit.
        double opt = 1111111111111111.0;
        int opt_idx = -1;
        int num_its = 100;
        double min_val = .001;
        double max_val = .15;
        for(int i=0; i<num_its; i++)
        {
          x_dstar_only[0] = min_val + i * ((max_val-min_val) / num_its);
          f_dstar_only.f(x_dstar_only, fx_dstar_only);
          double err = fx_dstar_only.two_norm();
          if(err<opt)
          {
            opt = err;
            opt_idx = i;
          }
        }
        m_Snap.currentDStar = min_val + opt_idx * ((max_val-min_val) / num_its);
      }

      break;
    }

    case IVIM_DSTAR_FIX:
    {
      MeasAndBvals input = ApplyS0Threshold(m_Snap.meas_for_threshold, m_Snap.bvalues);
      m_Snap.bvals1 = input.bvals;
      m_Snap.meas1 = input.meas;
      if (input.N < 2) break;

      IVIM_fixdstar f_fixdstar(input.N,m_DStar);
      f_fixdstar.set_bvalues(input.bvals);
      f_fixdstar.set_measurements(input.meas);

      vnl_vector< double > x(2);
      x[0] = 0.1;
      x[1] = 0.001;
      // f 0.1 Dstar 0.01 D 0.001
      vnl_levenberg_marquardt lm(f_fixdstar);
      lm.set_f_tolerance(0.0001);
      lm.minimize(x);
      m_Snap.currentF = x[0];
      m_Snap.currentD = x[1];
      m_Snap.currentDStar = m_DStar;

      break;
    }

    case IVIM_FIT_ALL:
    {
      MeasAndBvals input = ApplyS0Threshold(m_Snap.meas_for_threshold, m_Snap.bvalues);
      m_Snap.bvals1 = input.bvals;
      m_Snap.meas1 = input.meas;
      if (input.N < 3) break;

      IVIM_3param f_3param(input.N);
      f_3param.set_bvalues(input.bvals);
      f_3param.set_measurements(input.meas);

      vnl_vector< double > x(3);
      x[0] = 0.1;
      x[1] = 0.001;
      x[2] = 0.01;
      // f 0.1 Dstar 0.01 D 0.001
      vnl_levenberg_marquardt lm(f_3param);
      lm.set_f_tolerance(0.0001);
      lm.minimize(x);
      m_Snap.currentF = x[0];
      m_Snap.currentD = x[1];
      m_Snap.currentDStar = x[2];

      break;
    }

    case IVIM_LINEAR_D_THEN_F:
    {
      // Exclude b-values below the threshold, then fit D and f linearly in log space.
      for(int i=0; i<m_Snap.num_weighted; i++)
      {
        if(m_Snap.bvalues[i]<m_BThres)
          m_Snap.meas_for_threshold[i] = IVIM_FOO;
      }

      MeasAndBvals input = ApplyS0Threshold(m_Snap.meas_for_threshold, m_Snap.bvalues);
      m_Snap.bvals1 = input.bvals;
      m_Snap.meas1 = input.meas;
      if (input.N < 2) break;

      for(int i=0; i<input.N; i++)
        input.meas[i] = log(input.meas[i]);

      double bval_m = input.bvals.mean();
      double meas_m = input.meas.mean();

      vnl_matrix<double> X(input.N,2);
+     bool nan_element = false;
      for(int i=0; i<input.N; i++)
      {
        X(i,0) = input.bvals[i] - bval_m;
        X(i,1) = input.meas[i] - meas_m;
+       if (X(i,1) != X(i,1)) // NaN from log() of a non-positive measurement
+       {
+         nan_element = true;
+         break;
+       }
      }
+     if (nan_element) break;

      vnl_matrix<double> XX = X.transpose() * X;
      vnl_symmetric_eigensystem<double> eigs(XX);

      vnl_vector<double> eig;
      if(eigs.get_eigenvalue(0) > eigs.get_eigenvalue(1))
        eig = eigs.get_eigenvector(0);
      else
        eig = eigs.get_eigenvector(1);

      m_Snap.currentF = 1 - exp( meas_m - bval_m*(eig(1)/eig(0)) );
      m_Snap.currentD = -eig(1)/eig(0);

      if(m_FitDStar)
      {
        MeasAndBvals input2 = ApplyS0Threshold(m_Snap.meas_for_threshold, m_Snap.bvalues);
        m_Snap.bvals2 = input2.bvals;
        m_Snap.meas2 = input2.meas;
        if (input2.N < 2) break;
        IVIM_dstar_only f_dstar_only(input2.N,m_Snap.currentD,m_Snap.currentF);
        f_dstar_only.set_bvalues(input2.bvals);
        f_dstar_only.set_measurements(input2.meas);

        vnl_vector< double > x_dstar_only(1);
        vnl_vector< double > fx_dstar_only(input2.N);

        // Grid search over D* instead of an LM fit.
        double opt = 1111111111111111.0;
        int opt_idx = -1;
        int num_its = 100;
        double min_val = .001;
        double max_val = .15;
        for(int i=0; i<num_its; i++)
        {
          x_dstar_only[0] = min_val + i * ((max_val-min_val) / num_its);
          f_dstar_only.f(x_dstar_only, fx_dstar_only);
          double err = fx_dstar_only.two_norm();
          if(err<opt)
          {
            opt = err;
            opt_idx = i;
          }
        }

        double DStar = min_val + opt_idx * ((max_val-min_val) / num_its);
        m_Snap.currentDStar = DStar;
      }
      //MITK_INFO << "choosing " << opt_idx << " -> " << DStar;

      // x_dstar_only[0] = 0.01;
      // // f 0.1 Dstar 0.01 D 0.001
      // vnl_levenberg_marquardt lm_dstar_only(f_dstar_only);
      // lm_dstar_only.set_f_tolerance(0.0001);
      // lm_dstar_only.minimize(x_dstar_only);
      // DStar = x_dstar_only[0];

      break;
    }

    case IVIM_REGULARIZED:
    {
      //m_Snap.high_meas, m_Snap.high_bvalues;
      for(int i=0; i<m_Snap.num_high; i++)
        m_Snap.high_meas[i] = m_Snap.allmeas[m_Snap.high_indices[i]];

      for(int i=0; i<m_Snap.num_weighted; i++)
      {
        if(m_Snap.bvalues[i]<m_BThres)
          m_Snap.meas_for_threshold[i] = IVIM_FOO;
      }

      // Initial voxel-wise fit of f and D; the regularized fit refines it afterwards.
      MeasAndBvals input = ApplyS0Threshold(m_Snap.meas_for_threshold, m_Snap.bvalues);

      vnl_vector< double > x_donly(2);
      x_donly[0] = 0.001;
      x_donly[1] = 0.1;

      if(input.N >= 2)
      {
        IVIM_d_and_f f_donly(input.N);
        f_donly.set_bvalues(input.bvals);
        f_donly.set_measurements(input.meas);
        //MITK_INFO << "initial fit N=" << input.N << ", min-b = " << input.bvals[0] << ", max-b = " << input.bvals[input.N-1];
        vnl_levenberg_marquardt lm_donly(f_donly);
        lm_donly.set_f_tolerance(0.0001);
        lm_donly.minimize(x_donly);
      }

      typename InitialFitImageType::PixelType initvec;
      initvec[0] = x_donly[1];
      initvec[1] = x_donly[0];
      initit.Set(initvec);
      //MITK_INFO << "Init vox " << initit.GetIndex() << " with " << initvec[0] << "; " << initvec[1];
      ++initit;

      int N = m_Snap.high_meas.size();
      typename VectorImageType::PixelType vec(N);
      for(int i=0; i<N; i++)
        vec[i] = m_Snap.high_meas[i];
      vecit.Set(vec);
      ++vecit;

      break;
    }
    }

    IVIM_CEIL( m_Snap.currentF, 0.0, 1.0 );

    oit.Set( m_Snap.currentF );
    oit1.Set( m_Snap.currentD );
    oit2.Set( m_Snap.currentDStar );

    ++oit;
    ++oit1;
    ++oit2;
    ++iit;
  }
}

template< class TIn, class TOut>
void
DiffusionIntravoxelIncoherentMotionReconstructionImageFilter<TIn, TOut>
::AfterThreadedGenerateData()
{
  if(m_Method == IVIM_REGULARIZED)
  {
    typename OutputImageType::Pointer outputImage =
        static_cast< OutputImageType * >(this->ProcessObject::GetPrimaryOutput());
    ImageRegionIterator< OutputImageType > oit0(outputImage, outputImage->GetLargestPossibleRegion());
    oit0.GoToBegin();

    typename OutputImageType::Pointer dImage =
        static_cast< OutputImageType * >(this->ProcessObject::GetOutput(1));
    ImageRegionIterator< OutputImageType > oit1(dImage, dImage->GetLargestPossibleRegion());
    oit1.GoToBegin();

    typename OutputImageType::Pointer dstarImage =
        static_cast< OutputImageType * >(this->ProcessObject::GetOutput(2));
    ImageRegionIterator< OutputImageType > oit2(dstarImage, dstarImage->GetLargestPossibleRegion());
    oit2.GoToBegin();

    typedef itk::RegularizedIVIMReconstructionFilter RegFitType;
    RegFitType::Pointer filter = RegFitType::New();
    filter->SetInput(m_InitialFitImage);
    filter->SetReferenceImage(m_InternalVectorImage);
    filter->SetBValues(m_Snap.high_bvalues);
    filter->SetNumberIterations(m_NumberIterations);
    filter->SetNumberOfThreads(1);
    filter->SetLambda(m_Lambda);
    filter->Update();
    typename RegFitType::OutputImageType::Pointer outimg = filter->GetOutput();

    ImageRegionConstIterator< RegFitType::OutputImageType > iit(outimg, outimg->GetLargestPossibleRegion());
    iit.GoToBegin();

    while( !iit.IsAtEnd() )
    {
      double f = iit.Get()[0];
      IVIM_CEIL( f, 0.0, 1.0 );

      oit0.Set( myround(f) );
      oit1.Set( myround(iit.Get()[1]) );
      oit2.Set( myround(iit.Get()[2]) );

      if(!m_Verbose)
      {
        // report the middle voxel
        if(    iit.GetIndex()[0] == static_cast<int>(outimg->GetLargestPossibleRegion().GetSize(0)-1)/2
            && iit.GetIndex()[1] == static_cast<int>(outimg->GetLargestPossibleRegion().GetSize(2)-1)/2
            && iit.GetIndex()[2] == static_cast<int>(outimg->GetLargestPossibleRegion().GetSize(1)-1)/2 )
        {
          m_Snap.currentF = f;
          m_Snap.currentD = iit.Get()[1];
          m_Snap.currentDStar = iit.Get()[2];
          m_Snap.allmeas = m_tmp_allmeas;
          MITK_INFO << "setting " << f << ";" << iit.Get()[1] << ";" << iit.Get()[2];
        }
      }

      ++oit0;
      ++oit1;
      ++oit2;
      ++iit;
    }
  }
}

template< class TIn, class TOut>
double
DiffusionIntravoxelIncoherentMotionReconstructionImageFilter<TIn, TOut>
::myround(double number)
{
  return number < 0.0 ? ceil(number - 0.5) : floor(number + 0.5);
}

template< class TIn, class TOut>
void
DiffusionIntravoxelIncoherentMotionReconstructionImageFilter<TIn, TOut>
::SetGradientDirections( GradientDirectionContainerType *gradientDirection )
{
  this->m_GradientDirectionContainer = gradientDirection;
  this->m_NumberOfGradientDirections = gradientDirection->Size();
}

template< class TIn, class TOut>
void
DiffusionIntravoxelIncoherentMotionReconstructionImageFilter<TIn, TOut>
::PrintSelf(std::ostream& os, Indent indent) const
{
  Superclass::PrintSelf(os,indent);
  if ( m_GradientDirectionContainer )
  {
    os << indent << "GradientDirectionContainer: "
       << m_GradientDirectionContainer << std::endl;
  }
  else
  {
    os << indent << "GradientDirectionContainer: (Gradient directions not set)" << std::endl;
  }
}

}

#endif // __itkDiffusionIntravoxelIncoherentMotionReconstructionImageFilter_cpp
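
All of the fit variants above estimate, per voxel, the parameters of the bi-exponential IVIM signal model S(b)/S0 = f*exp(-b*D*) + (1-f)*exp(-b*D). For orientation, the sketch below shows how the filter might be driven using only the API visible in this diff: SetGradientDirections() (defined above), the standard ITK SetInput()/Update() pipeline calls, and the three outputs created in the constructor (output 0 holds f, output 1 holds D, output 2 holds D*). The <short, float> instantiation, the accessibility of the nested typedefs, and the existence of header-side setters for the reference b-value, the thresholds, and the fit method are assumptions, since the class header is not part of this diff; treat the block as an illustrative sketch rather than MITK's documented usage.

// Illustrative sketch only. Typedef names are taken from how they are used in the
// implementation above; the template arguments <short, float> are assumed.
#include "itkDiffusionIntravoxelIncoherentMotionReconstructionImageFilter.h"
#include <itkVectorImage.h>

typedef itk::DiffusionIntravoxelIncoherentMotionReconstructionImageFilter<short, float> IVIMFilterType;

void FitIVIM( itk::VectorImage<short, 3>::Pointer dwi,                      // all gradient volumes in one VectorImage
              IVIMFilterType::GradientDirectionContainerType* directions )  // one direction per vector component
{
  IVIMFilterType::Pointer filter = IVIMFilterType::New();
  filter->SetInput( dwi );
  filter->SetGradientDirections( directions );  // defined in this file; also records their number
  // The reference b-value, the b-value/S0 thresholds and the fit method
  // (IVIM_DSTAR_FIX, IVIM_FIT_ALL, ...) are configured through setters that are
  // presumably declared in the class header, which this diff does not show.
  filter->Update();

  // Output order fixed in the constructor: 0 = perfusion fraction f, 1 = D, 2 = D*.
  IVIMFilterType::OutputImageType* fMap     = filter->GetOutput(0);
  IVIMFilterType::OutputImageType* dMap     = filter->GetOutput(1);
  IVIMFilterType::OutputImageType* dStarMap = filter->GetOutput(2);
  (void)fMap; (void)dMap; (void)dStarMap;  // suppress unused-variable warnings in this standalone sketch
}

Producing the three parameter maps as separate scalar outputs, rather than a single vector image, is what lets the filter's callers consume the f, D, and D* maps independently, which matches how the two output iterators oit1 and oit2 are written in both ThreadedGenerateData and AfterThreadedGenerateData above.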