diff --git a/Modules/PhotoacousticsAlgorithms/Algorithms/mitkPhotoacousticBeamformingFilter.cpp b/Modules/PhotoacousticsAlgorithms/Algorithms/mitkPhotoacousticBeamformingFilter.cpp index edb5ba55ce..458108434a 100644 --- a/Modules/PhotoacousticsAlgorithms/Algorithms/mitkPhotoacousticBeamformingFilter.cpp +++ b/Modules/PhotoacousticsAlgorithms/Algorithms/mitkPhotoacousticBeamformingFilter.cpp @@ -1,626 +1,626 @@ /*=================================================================== mitkPhotoacousticBeamformingFilter The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center, Division of Medical and Biological Informatics. All rights reserved. This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See LICENSE.txt or http://www.mitk.org for details. ===================================================================*/ #define _USE_MATH_DEFINES #include "mitkPhotoacousticBeamformingFilter.h" #include "mitkProperties.h" #include "mitkImageReadAccessor.h" #include #include #include #include #include #include #include "mitkImageCast.h" #include mitk::BeamformingFilter::BeamformingFilter() : m_OutputData(nullptr), m_InputData(nullptr) { this->SetNumberOfIndexedInputs(1); this->SetNumberOfRequiredInputs(1); m_ProgressHandle = [](int, std::string) {}; } void mitk::BeamformingFilter::SetProgressHandle(std::function progressHandle) { m_ProgressHandle = progressHandle; } mitk::BeamformingFilter::~BeamformingFilter() { } void mitk::BeamformingFilter::GenerateInputRequestedRegion() { Superclass::GenerateInputRequestedRegion(); mitk::Image* output = this->GetOutput(); mitk::Image* input = const_cast (this->GetInput()); if (!output->IsInitialized()) { return; } input->SetRequestedRegionToLargestPossibleRegion(); //GenerateTimeInInputRegion(output, input); } void mitk::BeamformingFilter::GenerateOutputInformation() { mitk::Image::ConstPointer input = this->GetInput(); mitk::Image::Pointer output = this->GetOutput(); if ((output->IsInitialized()) && (this->GetMTime() <= m_TimeOfHeaderInitialization.GetMTime())) return; itkDebugMacro(<< "GenerateOutputInformation()"); unsigned int dim[] = { m_Conf.ReconstructionLines, m_Conf.SamplesPerLine, input->GetDimension(2) }; output->Initialize(mitk::MakeScalarPixelType(), 3, dim); mitk::Vector3D spacing; spacing[0] = m_Conf.Pitch * m_Conf.TransducerElements * 1000 / m_Conf.ReconstructionLines; spacing[1] = m_Conf.RecordTime / 2 * m_Conf.SpeedOfSound * 1000 / m_Conf.SamplesPerLine; spacing[2] = 1; output->GetGeometry()->SetSpacing(spacing); output->GetGeometry()->Modified(); output->SetPropertyList(input->GetPropertyList()->Clone()); m_TimeOfHeaderInitialization.Modified(); } void mitk::BeamformingFilter::GenerateData() { GenerateOutputInformation(); mitk::Image::Pointer input = this->GetInput(); mitk::Image::Pointer output = this->GetOutput(); if (!output->IsInitialized()) return; - float inputDim[2] = { input->GetDimension(0), input->GetDimension(1) }; - float outputDim[2] = { output->GetDimension(0), output->GetDimension(1) }; + float inputDim[2] = { (float)input->GetDimension(0), (float)input->GetDimension(1) }; + float outputDim[2] = { (float)output->GetDimension(0), (float)output->GetDimension(1) }; unsigned short chunkSize = 5; // TODO: make this slightly less random unsigned int oclOutputDim[3] = { output->GetDimension(0), output->GetDimension(1), output->GetDimension(2) }; unsigned int oclOutputDimChunk[3] = { output->GetDimension(0), 
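// A minimal stand-alone illustration of the narrowing-conversion issue the casts
// added above in GenerateData() work around: brace-initializing a float array from
// the unsigned int values returned by GetDimension() is ill-formed in C++11.
// Variable names are invented for the example.
#include <cstdio>
int main()
{
  unsigned int width = 256, height = 2048;        // stand-ins for GetDimension(0/1)
  // float dim[2] = { width, height };            // narrowing conversion: rejected/warned
  float dim[2] = { (float)width, (float)height }; // explicit casts, as in the patch
  std::printf("%g x %g\n", dim[0], dim[1]);
  return 0;
}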
output->GetDimension(1), chunkSize}; unsigned int oclInputDimChunk[3] = { input->GetDimension(0), input->GetDimension(1), chunkSize}; unsigned int oclOutputDimLastChunk[3] = { output->GetDimension(0), output->GetDimension(1), input->GetDimension(2) % chunkSize }; unsigned int oclInputDimLastChunk[3] = { input->GetDimension(0), input->GetDimension(1), input->GetDimension(2) % chunkSize }; const int apodArraySize = m_Conf.TransducerElements * 4; // set the resolution of the apodization array float* ApodWindow; // calculate the appropiate apodization window switch (m_Conf.Apod) { case beamformingSettings::Apodization::Hann: ApodWindow = VonHannFunction(apodArraySize); break; case beamformingSettings::Apodization::Hamm: ApodWindow = HammFunction(apodArraySize); break; case beamformingSettings::Apodization::Box: ApodWindow = BoxFunction(apodArraySize); break; default: ApodWindow = BoxFunction(apodArraySize); break; } int progInterval = output->GetDimension(2) / 20 > 1 ? output->GetDimension(2) / 20 : 1; // the interval at which we update the gui progress bar auto begin = std::chrono::high_resolution_clock::now(); // debbuging the performance... if (!m_Conf.UseGPU) { for (int i = 0; i < output->GetDimension(2); ++i) // seperate Slices should get Beamforming seperately applied { mitk::ImageReadAccessor inputReadAccessor(input, input->GetSliceData(i)); m_OutputData = new float[m_Conf.ReconstructionLines*m_Conf.SamplesPerLine]; m_InputDataPuffer = new float[input->GetDimension(0)*input->GetDimension(1)]; // first, we convert any data to float, which we use by default if (input->GetPixelType().GetTypeAsString() == "scalar (float)" || input->GetPixelType().GetTypeAsString() == " (float)") { m_InputData = (float*)inputReadAccessor.GetData(); } else { MITK_INFO << "Pixel type is not float, abort"; return; } // fill the image with zeros for (int l = 0; l < outputDim[0]; ++l) { for (int s = 0; s < outputDim[1]; ++s) { m_OutputData[l*(short)outputDim[1] + s] = 0; } } std::thread *threads = new std::thread[(short)outputDim[0]]; // every line will be beamformed in a seperate thread if (m_Conf.Algorithm == beamformingSettings::BeamformingAlgorithm::DAS) { if (m_Conf.DelayCalculationMethod == beamformingSettings::DelayCalc::QuadApprox) { for (short line = 0; line < outputDim[0]; ++line) { threads[line] = std::thread(&BeamformingFilter::DASQuadraticLine, this, m_InputData, m_OutputData, inputDim, outputDim, line, ApodWindow, apodArraySize); } } else if (m_Conf.DelayCalculationMethod == beamformingSettings::DelayCalc::Spherical) { for (short line = 0; line < outputDim[0]; ++line) { threads[line] = std::thread(&BeamformingFilter::DASSphericalLine, this, m_InputData, m_OutputData, inputDim, outputDim, line, ApodWindow, apodArraySize); } } } else if (m_Conf.Algorithm == beamformingSettings::BeamformingAlgorithm::DMAS) { if (m_Conf.DelayCalculationMethod == beamformingSettings::DelayCalc::QuadApprox) { for (short line = 0; line < outputDim[0]; ++line) { threads[line] = std::thread(&BeamformingFilter::DMASQuadraticLine, this, m_InputData, m_OutputData, inputDim, outputDim, line, ApodWindow, apodArraySize); } } else if (m_Conf.DelayCalculationMethod == beamformingSettings::DelayCalc::Spherical) { for (short line = 0; line < outputDim[0]; ++line) { threads[line] = std::thread(&BeamformingFilter::DMASSphericalLine, this, m_InputData, m_OutputData, inputDim, outputDim, line, ApodWindow, apodArraySize); } } } // wait for all lines to finish for (short line = 0; line < outputDim[0]; ++line) { threads[line].join(); } 
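// Condensed, self-contained sketch of the fan-out/join pattern used in the CPU
// branch above: one std::thread per reconstructed line, all joined before the
// slice is written back. The callable stands in for the DAS/DMAS per-line
// member functions; names are illustrative.
#include <functional>
#include <thread>
#include <vector>

void beamformAllLines(short numLines, const std::function<void(short)>& beamformLine)
{
  std::vector<std::thread> threads;
  threads.reserve(numLines);
  for (short line = 0; line < numLines; ++line)
    threads.emplace_back(beamformLine, line); // e.g. DASQuadraticLine for this line
  for (auto& t : threads)
    t.join();                                 // every line finished before SetSlice()
}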
output->SetSlice(m_OutputData, i); if (i % progInterval == 0) m_ProgressHandle((int)((i + 1) / (float)output->GetDimension(2) * 100), "performing reconstruction"); delete[] m_OutputData; delete[] m_InputDataPuffer; m_OutputData = nullptr; m_InputData = nullptr; } } else { mitk::PhotoacousticOCLBeamformer::Pointer m_oclFilter = mitk::PhotoacousticOCLBeamformer::New(); try { if (m_Conf.Algorithm == beamformingSettings::BeamformingAlgorithm::DAS) { if (m_Conf.DelayCalculationMethod == beamformingSettings::DelayCalc::QuadApprox) m_oclFilter->SetAlgorithm(PhotoacousticOCLBeamformer::BeamformingAlgorithm::DASQuad, true); else if (m_Conf.DelayCalculationMethod == beamformingSettings::DelayCalc::Spherical) m_oclFilter->SetAlgorithm(PhotoacousticOCLBeamformer::BeamformingAlgorithm::DASSphe, true); } else if (m_Conf.Algorithm == beamformingSettings::BeamformingAlgorithm::DMAS) { if (m_Conf.DelayCalculationMethod == beamformingSettings::DelayCalc::QuadApprox) m_oclFilter->SetAlgorithm(PhotoacousticOCLBeamformer::BeamformingAlgorithm::DMASQuad, true); else if (m_Conf.DelayCalculationMethod == beamformingSettings::DelayCalc::Spherical) m_oclFilter->SetAlgorithm(PhotoacousticOCLBeamformer::BeamformingAlgorithm::DMASSphe, true); } m_oclFilter->SetApodisation(ApodWindow, apodArraySize); m_oclFilter->SetOutputDim(oclOutputDimChunk); m_oclFilter->SetBeamformingParameters(m_Conf.SpeedOfSound, m_Conf.TimeSpacing, m_Conf.Pitch, m_Conf.Angle, m_Conf.Photoacoustic, m_Conf.TransducerElements); if (chunkSize < oclOutputDim[2]) { bool skip = false; for (int i = 0; !skip && i < ceil((float)oclOutputDim[2] / (float)chunkSize); ++i) { m_ProgressHandle(100 * ((float)(i * chunkSize) / (float)oclOutputDim[2]), "performing reconstruction"); mitk::Image::Pointer chunk = mitk::Image::New(); if ((oclOutputDim[2]) - (i * chunkSize) == 1 + chunkSize) { // A 3d image of 3rd dimension == 1 can not be processed by openCL, make sure that this case never arises oclInputDimLastChunk[2] = input->GetDimension(2) % chunkSize + chunkSize; oclOutputDimLastChunk[2] = input->GetDimension(2) % chunkSize + chunkSize; chunk->Initialize(input->GetPixelType(), 3, oclInputDimLastChunk); m_oclFilter->SetOutputDim(oclOutputDimLastChunk); skip = true; //skip the last chunk } else if ((oclOutputDim[2]) - (i * chunkSize) >= chunkSize) chunk->Initialize(input->GetPixelType(), 3, oclInputDimChunk); else { chunk->Initialize(input->GetPixelType(), 3, oclInputDimLastChunk); m_oclFilter->SetOutputDim(oclOutputDimLastChunk); } chunk->SetSpacing(input->GetGeometry()->GetSpacing()); mitk::ImageReadAccessor ChunkMove(input); chunk->SetImportVolume((void*)&(((float*)const_cast(ChunkMove.GetData()))[i * chunkSize * input->GetDimension(0) * input->GetDimension(1)]), 0, 0, mitk::Image::ReferenceMemory); m_oclFilter->SetInput(chunk); m_oclFilter->Update(); auto out = m_oclFilter->GetOutput(); for (int s = i * chunkSize; s < oclOutputDim[2]; ++s) // TODO: make the bounds here smaller... 
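// Sketch of the chunking policy used in the OpenCL branch above: slices are
// processed chunkSize at a time, and a would-be single-slice remainder is folded
// into the preceding chunk, because the OpenCL beamformer cannot handle a 3D
// image whose third dimension is 1. The helper and its names are illustrative.
#include <algorithm>
#include <utility>
#include <vector>

std::vector<std::pair<unsigned int, unsigned int>> // (first slice, slice count)
makeChunks(unsigned int totalSlices, unsigned int chunkSize)
{
  std::vector<std::pair<unsigned int, unsigned int>> chunks;
  for (unsigned int start = 0; start < totalSlices; )
  {
    unsigned int count = std::min(chunkSize, totalSlices - start);
    if (totalSlices - (start + count) == 1) // a lone trailing slice would remain
      count += 1;                           // absorb it into this chunk instead
    chunks.emplace_back(start, count);
    start += count;
  }
  return chunks;
}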
{ mitk::ImageReadAccessor copy(out, out->GetSliceData(s - i * chunkSize)); output->SetImportSlice(const_cast(copy.GetData()), s, 0, 0, mitk::Image::ReferenceMemory); } } } else { m_ProgressHandle(50, "performing reconstruction"); m_oclFilter->SetOutputDim(oclOutputDim); m_oclFilter->SetInput(input); m_oclFilter->Update(); auto out = m_oclFilter->GetOutput(); mitk::ImageReadAccessor copy(out); output->SetImportVolume(const_cast(copy.GetData()), 0, 0, mitk::Image::ReferenceMemory); } } catch (mitk::Exception &e) { std::string errorMessage = "Caught unexpected exception "; errorMessage.append(e.what()); MITK_ERROR << errorMessage; } } m_TimeOfHeaderInitialization.Modified(); auto end = std::chrono::high_resolution_clock::now(); MITK_INFO << "Beamforming of " << output->GetDimension(2) << " Images completed in " << ((float)std::chrono::duration_cast(end - begin).count()) / 1000000 << "ms" << std::endl; } void mitk::BeamformingFilter::Configure(beamformingSettings settings) { m_Conf = settings; } float* mitk::BeamformingFilter::VonHannFunction(int samples) { float* ApodWindow = new float[samples]; for (int n = 0; n < samples; ++n) { ApodWindow[n] = (1 - cos(2 * M_PI * n / (samples - 1))) / 2; } return ApodWindow; } float* mitk::BeamformingFilter::HammFunction(int samples) { float* ApodWindow = new float[samples]; for (int n = 0; n < samples; ++n) { ApodWindow[n] = 0.54 - 0.46*cos(2 * M_PI*n / (samples - 1)); } return ApodWindow; } float* mitk::BeamformingFilter::BoxFunction(int samples) { float* ApodWindow = new float[samples]; for (int n = 0; n < samples; ++n) { ApodWindow[n] = 1; } return ApodWindow; } void mitk::BeamformingFilter::DASQuadraticLine(float* input, float* output, float inputDim[2], float outputDim[2], const short& line, float* apodisation, const short& apodArraySize) { float& inputS = inputDim[1]; float& inputL = inputDim[0]; float& outputS = outputDim[1]; float& outputL = outputDim[0]; short AddSample = 0; short maxLine = 0; short minLine = 0; float delayMultiplicator = 0; float l_i = 0; float s_i = 0; float l = 0; float x = 0; float root = 0; float part = 0.07 * inputL; float tan_phi = std::tan(m_Conf.Angle / 360 * 2 * M_PI); float part_multiplicator = tan_phi * m_Conf.TimeSpacing * m_Conf.SpeedOfSound / m_Conf.Pitch * m_Conf.ReconstructionLines / m_Conf.TransducerElements; float apod_mult = 1; float mult = 0; short usedLines = (maxLine - minLine); //quadratic delay l_i = line / outputL * inputL; for (short sample = 0; sample < outputS; ++sample) { s_i = (float)sample / outputS * inputS / 2; part = part_multiplicator*s_i; if (part < 1) part = 1; maxLine = (short)std::min((l_i + part) + 1, inputL); minLine = (short)std::max((l_i - part), 0.0f); usedLines = (maxLine - minLine); apod_mult = apodArraySize / (maxLine - minLine); delayMultiplicator = pow((1 / (m_Conf.TimeSpacing*m_Conf.SpeedOfSound) * (m_Conf.Pitch*m_Conf.TransducerElements) / inputL), 2) / s_i / 2; for (short l_s = minLine; l_s < maxLine; ++l_s) { AddSample = delayMultiplicator * pow((l_s - l_i), 2) + s_i + (1 - m_Conf.Photoacoustic)*s_i; if (AddSample < inputS && AddSample >= 0) output[sample*(short)outputL + line] += input[l_s + AddSample*(short)inputL] * apodisation[(short)((l_s - minLine)*apod_mult)]; else --usedLines; } output[sample*(short)outputL + line] = output[sample*(short)outputL + line] / usedLines; } } void mitk::BeamformingFilter::DASSphericalLine(float* input, float* output, float inputDim[2], float outputDim[2], const short& line, float* apodisation, const short& apodArraySize) { float& inputS = 
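// The von Hann window defined above follows w[n] = (1 - cos(2*pi*n/(N-1))) / 2
// with N = apodArraySize = 4 * TransducerElements. A sketch returning a
// std::vector instead of a caller-owned new[] array (illustrative only):
#include <cmath>
#include <vector>

std::vector<float> makeHannWindow(int samples)
{
  const double pi = 3.14159265358979323846;
  std::vector<float> w(samples);
  for (int n = 0; n < samples; ++n)
    w[n] = (float)((1.0 - std::cos(2.0 * pi * n / (samples - 1))) / 2.0);
  return w;
}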
inputDim[1]; float& inputL = inputDim[0]; float& outputS = outputDim[1]; float& outputL = outputDim[0]; short AddSample = 0; short maxLine = 0; short minLine = 0; float delayMultiplicator = 0; float l_i = 0; float s_i = 0; float part = 0.07 * inputL; float tan_phi = std::tan(m_Conf.Angle / 360 * 2 * M_PI); float part_multiplicator = tan_phi * m_Conf.TimeSpacing * m_Conf.SpeedOfSound / m_Conf.Pitch * m_Conf.ReconstructionLines / m_Conf.TransducerElements; float apod_mult = 1; short usedLines = (maxLine - minLine); //exact delay l_i = (float)line / outputL * inputL; for (short sample = 0; sample < outputS; ++sample) { s_i = (float)sample / outputS * inputS / 2; part = part_multiplicator*s_i; if (part < 1) part = 1; maxLine = (short)std::min((l_i + part) + 1, inputL); minLine = (short)std::max((l_i - part), 0.0f); usedLines = (maxLine - minLine); apod_mult = apodArraySize / (maxLine - minLine); for (short l_s = minLine; l_s < maxLine; ++l_s) { AddSample = (int)sqrt( pow(s_i, 2) + pow((1 / (m_Conf.TimeSpacing*m_Conf.SpeedOfSound) * ((l_s - l_i)*m_Conf.Pitch*m_Conf.TransducerElements) / inputL), 2) ) + (1 - m_Conf.Photoacoustic)*s_i; if (AddSample < inputS && AddSample >= 0) output[sample*(short)outputL + line] += input[l_s + AddSample*(short)inputL] * apodisation[(short)((l_s - minLine)*apod_mult)]; else --usedLines; } output[sample*(short)outputL + line] = output[sample*(short)outputL + line] / usedLines; } } void mitk::BeamformingFilter::DMASQuadraticLine(float* input, float* output, float inputDim[2], float outputDim[2], const short& line, float* apodisation, const short& apodArraySize) { float& inputS = inputDim[1]; float& inputL = inputDim[0]; float& outputS = outputDim[1]; float& outputL = outputDim[0]; short AddSample1 = 0; short AddSample2 = 0; short maxLine = 0; short minLine = 0; float delayMultiplicator = 0; float l_i = 0; float s_i = 0; float l = 0; float x = 0; float root = 0; float part = 0.07 * inputL; float tan_phi = std::tan(m_Conf.Angle / 360 * 2 * M_PI); float part_multiplicator = tan_phi * m_Conf.TimeSpacing * m_Conf.SpeedOfSound / m_Conf.Pitch * m_Conf.ReconstructionLines / m_Conf.TransducerElements; float apod_mult = 1; float mult = 0; short usedLines = (maxLine - minLine); //quadratic delay l_i = line / outputL * inputL; for (short sample = 0; sample < outputS; ++sample) { s_i = sample / outputS * inputS / 2; part = part_multiplicator*s_i; if (part < 1) part = 1; maxLine = (short)std::min((l_i + part) + 1, inputL); minLine = (short)std::max((l_i - part), 0.0f); usedLines = (maxLine - minLine); apod_mult = apodArraySize / (maxLine - minLine); delayMultiplicator = pow((1 / (m_Conf.TimeSpacing*m_Conf.SpeedOfSound) * (m_Conf.Pitch*m_Conf.TransducerElements) / inputL), 2) / s_i / 2; //calculate the AddSamples beforehand to save some time short* AddSample = new short[maxLine - minLine]; for (short l_s = 0; l_s < maxLine - minLine; ++l_s) { AddSample[l_s] = (short)(delayMultiplicator * pow((minLine + l_s - l_i), 2) + s_i) + (1 - m_Conf.Photoacoustic)*s_i; } for (short l_s1 = minLine; l_s1 < maxLine - 1; ++l_s1) { if (AddSample[l_s1 - minLine] < (short)inputS && AddSample[l_s1 - minLine] >= 0) { for (short l_s2 = l_s1 + 1; l_s2 < maxLine; ++l_s2) { if (AddSample[l_s2 - minLine] < inputS && AddSample[l_s2 - minLine] >= 0) { mult = input[l_s2 + AddSample[l_s2 - minLine] * (short)inputL] * apodisation[(short)((l_s2 - minLine)*apod_mult)] * input[l_s1 + AddSample[l_s1 - minLine] * (short)inputL] * apodisation[(short)((l_s1 - minLine)*apod_mult)]; output[sample*(short)outputL + line] 
+= sqrt(abs(mult)) * ((mult > 0) - (mult < 0)); } } } else --usedLines; } output[sample*(short)outputL + line] = 10 * output[sample*(short)outputL + line] / (pow(usedLines, 2) - (usedLines - 1)); delete[] AddSample; } } void mitk::BeamformingFilter::DMASSphericalLine(float* input, float* output, float inputDim[2], float outputDim[2], const short& line, float* apodisation, const short& apodArraySize) { float& inputS = inputDim[1]; float& inputL = inputDim[0]; float& outputS = outputDim[1]; float& outputL = outputDim[0]; short AddSample1 = 0; short AddSample2 = 0; short maxLine = 0; short minLine = 0; float delayMultiplicator = 0; float l_i = 0; float s_i = 0; float l = 0; float x = 0; float root = 0; float part = 0.07 * inputL; float tan_phi = std::tan(m_Conf.Angle / 360 * 2 * M_PI); float part_multiplicator = tan_phi * m_Conf.TimeSpacing * m_Conf.SpeedOfSound / m_Conf.Pitch * m_Conf.ReconstructionLines / m_Conf.TransducerElements; float apod_mult = 1; float mult = 0; short usedLines = (maxLine - minLine); //exact delay l_i = line / outputL * inputL; for (short sample = 0; sample < outputS; ++sample) { s_i = sample / outputS * inputS / 2; part = part_multiplicator*s_i; if (part < 1) part = 1; maxLine = (short)std::min((l_i + part) + 1, inputL); minLine = (short)std::max((l_i - part), 0.0f); usedLines = (maxLine - minLine); apod_mult = apodArraySize / (maxLine - minLine); //calculate the AddSamples beforehand to save some time short* AddSample = new short[maxLine - minLine]; for (short l_s = 0; l_s < maxLine - minLine; ++l_s) { AddSample[l_s] = (short)sqrt( pow(s_i, 2) + pow((1 / (m_Conf.TimeSpacing*m_Conf.SpeedOfSound) * ((minLine + l_s - l_i)*m_Conf.Pitch*m_Conf.TransducerElements) / inputL), 2) ) + (1 - m_Conf.Photoacoustic)*s_i; } for (short l_s1 = minLine; l_s1 < maxLine - 1; ++l_s1) { if (AddSample[l_s1 - minLine] < inputS && AddSample[l_s1 - minLine] >= 0) { for (short l_s2 = l_s1 + 1; l_s2 < maxLine; ++l_s2) { if (AddSample[l_s2 - minLine] < inputS && AddSample[l_s2 - minLine] >= 0) { mult = input[l_s2 + AddSample[l_s2 - minLine] * (short)inputL] * apodisation[(int)((l_s2 - minLine)*apod_mult)] * input[l_s1 + AddSample[l_s1 - minLine] * (short)inputL] * apodisation[(int)((l_s1 - minLine)*apod_mult)]; output[sample*(short)outputL + line] += sqrt(abs(mult)) * ((mult > 0) - (mult < 0)); } } } else --usedLines; } output[sample*(short)outputL + line] = 10 * output[sample*(short)outputL + line] / (pow(usedLines, 2) - (usedLines - 1)); delete[] AddSample; } } \ No newline at end of file diff --git a/Modules/PhotoacousticsAlgorithms/mitkPhotoacousticImage.cpp b/Modules/PhotoacousticsAlgorithms/mitkPhotoacousticImage.cpp index 0d1f1e2b19..1e80af4157 100644 --- a/Modules/PhotoacousticsAlgorithms/mitkPhotoacousticImage.cpp +++ b/Modules/PhotoacousticsAlgorithms/mitkPhotoacousticImage.cpp @@ -1,612 +1,613 @@ /*=================================================================== The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center, Division of Medical and Biological Informatics. All rights reserved. This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See LICENSE.txt or http://www.mitk.org for details. 
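// Signed pairwise combination used by the DMAS kernels above: every distinct
// pair of delayed, apodised channel samples contributes the square root of its
// product with the sign preserved. `samples` holds the already delayed and
// apodised values for one image point; the helper is illustrative only.
#include <cmath>

float dmasCombine(const float* samples, int count)
{
  float sum = 0.0f;
  for (int i = 0; i < count - 1; ++i)
    for (int j = i + 1; j < count; ++j)
    {
      float mult = samples[i] * samples[j];
      sum += std::sqrt(std::fabs(mult)) * ((mult > 0) - (mult < 0)); // signed square root
    }
  return sum;
}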
===================================================================*/ #define _USE_MATH_DEFINES #include #include "mitkPhotoacousticImage.h" #include "itkBModeImageFilter.h" #include "itkPhotoacousticBModeImageFilter.h" #include "mitkImageCast.h" #include "mitkITKImageImport.h" #include "mitkPhotoacousticBeamformingFilter.h" #include #include #include // itk dependencies #include "itkImage.h" #include "itkResampleImageFilter.h" #include "itkCastImageFilter.h" #include "itkCropImageFilter.h" #include "itkRescaleIntensityImageFilter.h" #include "itkIntensityWindowingImageFilter.h" #include #include "itkMultiplyImageFilter.h" #include "itkBSplineInterpolateImageFunction.h" #include // needed itk image filters #include "mitkITKImageImport.h" #include "itkFFTShiftImageFilter.h" #include "itkMultiplyImageFilter.h" #include "itkComplexToModulusImageFilter.h" #include #include "itkFFT1DComplexConjugateToRealImageFilter.h" #include "itkFFT1DRealToComplexConjugateImageFilter.h" mitk::PhotoacousticImage::PhotoacousticImage() { MITK_INFO << "[PhotoacousticImage Debug] created that image"; } mitk::PhotoacousticImage::~PhotoacousticImage() { MITK_INFO << "[PhotoacousticImage Debug] destroyed that image"; } mitk::Image::Pointer mitk::PhotoacousticImage::ApplyBmodeFilter(mitk::Image::Pointer inputImage, BModeMethod method, bool UseLogFilter, float resampleSpacing) { if (method == BModeMethod::Abs) { auto input = ApplyCropping(inputImage, 0, 0, 0, 0, 0, inputImage->GetDimension(2) - 1); PhotoacousticBModeFilter::Pointer filter = PhotoacousticBModeFilter::New(); filter->SetParameters(UseLogFilter); filter->SetInput(input); filter->Update(); auto out = filter->GetOutput(); return out; } else if (method == BModeMethod::ShapeDetection) { // the image needs to be of floating point type for the envelope filter to work; the casting is done automatically by the CastToItkImage typedef itk::Image< float, 3 > itkFloatImageType; typedef itk::BModeImageFilter < itkFloatImageType, itkFloatImageType > BModeFilterType; BModeFilterType::Pointer bModeFilter = BModeFilterType::New(); // LogFilter typedef itk::PhotoacousticBModeImageFilter < itkFloatImageType, itkFloatImageType > PhotoacousticBModeImageFilter; PhotoacousticBModeImageFilter::Pointer photoacousticBModeFilter = PhotoacousticBModeImageFilter::New(); // No LogFilter typedef itk::ResampleImageFilter < itkFloatImageType, itkFloatImageType > ResampleImageFilter; ResampleImageFilter::Pointer resampleImageFilter = ResampleImageFilter::New(); itkFloatImageType::Pointer itkImage; mitk::CastToItkImage(inputImage, itkImage); itkFloatImageType::Pointer bmode; if (UseLogFilter) { bModeFilter->SetInput(itkImage); bModeFilter->SetDirection(1); bmode = bModeFilter->GetOutput(); } else { photoacousticBModeFilter->SetInput(itkImage); photoacousticBModeFilter->SetDirection(1); bmode = photoacousticBModeFilter->GetOutput(); } // resampleSpacing == 0 means: do no resampling if (resampleSpacing == 0) { return mitk::GrabItkImageMemory(bmode); } itkFloatImageType::SpacingType outputSpacing; itkFloatImageType::SizeType inputSize = itkImage->GetLargestPossibleRegion().GetSize(); itkFloatImageType::SizeType outputSize = inputSize; outputSpacing[0] = itkImage->GetSpacing()[0] * (static_cast(inputSize[0]) / static_cast(outputSize[0])); outputSpacing[1] = resampleSpacing; outputSpacing[2] = itkImage->GetSpacing()[2]; outputSize[1] = inputSize[1] * itkImage->GetSpacing()[1] / outputSpacing[1]; typedef itk::IdentityTransform TransformType; resampleImageFilter->SetInput(bmode); 
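// The resampling set up above keeps the physical extent along the scan direction
// constant, i.e. newSize * newSpacing == oldSize * oldSpacing. A hypothetical
// helper expressing that relation:
unsigned int resampledSize(unsigned int oldSize, double oldSpacing, double newSpacing)
{
  return (unsigned int)(oldSize * oldSpacing / newSpacing);
}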
resampleImageFilter->SetSize(outputSize); resampleImageFilter->SetOutputSpacing(outputSpacing); resampleImageFilter->SetTransform(TransformType::New()); resampleImageFilter->UpdateLargestPossibleRegion(); return mitk::GrabItkImageMemory(resampleImageFilter->GetOutput()); } + return nullptr; } /*mitk::Image::Pointer mitk::PhotoacousticImage::ApplyScatteringCompensation(mitk::Image::Pointer inputImage, int scattering) { typedef itk::Image< float, 3 > itkFloatImageType; typedef itk::MultiplyImageFilter MultiplyImageFilterType; itkFloatImageType::Pointer itkImage; mitk::CastToItkImage(inputImage, itkImage); MultiplyImageFilterType::Pointer multiplyFilter = MultiplyImageFilterType::New(); multiplyFilter->SetInput1(itkImage); multiplyFilter->SetInput2(m_FluenceCompResizedItk.at(m_ScatteringCoefficient)); return mitk::GrabItkImageMemory(multiplyFilter->GetOutput()); }*/ mitk::Image::Pointer mitk::PhotoacousticImage::ApplyResampling(mitk::Image::Pointer inputImage, unsigned int outputSize[2]) { typedef itk::Image< float, 3 > itkFloatImageType; typedef itk::ResampleImageFilter < itkFloatImageType, itkFloatImageType > ResampleImageFilter; ResampleImageFilter::Pointer resampleImageFilter = ResampleImageFilter::New(); typedef itk::LinearInterpolateImageFunction T_Interpolator; itkFloatImageType::Pointer itkImage; mitk::CastToItkImage(inputImage, itkImage); itkFloatImageType::SpacingType inputSpacing = itkImage->GetSpacing(); itkFloatImageType::SpacingType outputSpacingItk; itkFloatImageType::SizeType inputSizeItk = itkImage->GetLargestPossibleRegion().GetSize(); itkFloatImageType::SizeType outputSizeItk = inputSizeItk; outputSizeItk[0] = outputSize[0]; outputSizeItk[1] = outputSize[1]; outputSizeItk[2] = inputSizeItk[2]; outputSpacingItk[0] = itkImage->GetSpacing()[0] * (static_cast(inputSizeItk[0]) / static_cast(outputSizeItk[0])); outputSpacingItk[1] = itkImage->GetSpacing()[1] * (static_cast(inputSizeItk[1]) / static_cast(outputSizeItk[1])); outputSpacingItk[2] = itkImage->GetSpacing()[2]; typedef itk::IdentityTransform TransformType; T_Interpolator::Pointer _pInterpolator = T_Interpolator::New(); resampleImageFilter->SetInput(itkImage); resampleImageFilter->SetSize(outputSizeItk); resampleImageFilter->SetOutputSpacing(outputSpacingItk); resampleImageFilter->SetTransform(TransformType::New()); resampleImageFilter->SetInterpolator(_pInterpolator); resampleImageFilter->UpdateLargestPossibleRegion(); return mitk::GrabItkImageMemory(resampleImageFilter->GetOutput()); } mitk::Image::Pointer mitk::PhotoacousticImage::ApplyCropping(mitk::Image::Pointer inputImage, int above, int below, int right, int left, int minSlice, int maxSlice) { unsigned int inputDim[3] = { inputImage->GetDimension(0), inputImage->GetDimension(1), inputImage->GetDimension(2) }; - unsigned int outputDim[3] = { inputImage->GetDimension(0) - left - right, inputImage->GetDimension(1) - above - below, maxSlice - minSlice + 1 }; + unsigned int outputDim[3] = { inputImage->GetDimension(0) - left - right, inputImage->GetDimension(1) - (unsigned int)above - (unsigned int)below, (unsigned int)maxSlice - (unsigned int)minSlice + 1 }; void* inputData; float* outputData = new float[outputDim[0] * outputDim[1] * outputDim[2]]; ImageReadAccessor acc(inputImage); inputData = const_cast(acc.GetData()); // convert the data to float by default // as of now only those float, short, float are used at all... 
though it's easy to add other ones if (inputImage->GetPixelType().GetTypeAsString() == "scalar (float)" || inputImage->GetPixelType().GetTypeAsString() == " (float)") { // copy the data into the cropped image for (unsigned short sl = 0; sl < outputDim[2]; ++sl) { for (unsigned short l = 0; l < outputDim[0]; ++l) { for (unsigned short s = 0; s < outputDim[1]; ++s) { outputData[l + s*(unsigned short)outputDim[0] + sl*outputDim[0] * outputDim[1]] = (float)((float*)inputData)[(l + left) + (s + above)*(unsigned short)inputDim[0] + (sl + minSlice)*inputDim[0] * inputDim[1]]; } } } } else if (inputImage->GetPixelType().GetTypeAsString() == "scalar (short)" || inputImage->GetPixelType().GetTypeAsString() == " (short)") { // copy the data unsigned shorto the cropped image for (unsigned short sl = 0; sl < outputDim[2]; ++sl) { for (unsigned short l = 0; l < outputDim[0]; ++l) { for (unsigned short s = 0; s < outputDim[1]; ++s) { outputData[l + s*(unsigned short)outputDim[0] + sl*outputDim[0] * outputDim[1]] = (float)((short*)inputData)[(l + left) + (s + above)*(unsigned short)inputDim[0] + (sl + minSlice)*inputDim[0] * inputDim[1]]; } } } } else if (inputImage->GetPixelType().GetTypeAsString() == "scalar (double)" || inputImage->GetPixelType().GetTypeAsString() == " (double)") { // copy the data unsigned shorto the cropped image for (unsigned short sl = 0; sl < outputDim[2]; ++sl) { for (unsigned short l = 0; l < outputDim[0]; ++l) { for (unsigned short s = 0; s < outputDim[1]; ++s) { outputData[l + s*(unsigned short)outputDim[0] + sl*outputDim[0] * outputDim[1]] = (float)((double*)inputData)[(l + left) + (s + above)*(unsigned short)inputDim[0] + (sl + minSlice)*inputDim[0] * inputDim[1]]; } } } } else { MITK_INFO << "Could not determine pixel type"; } mitk::Image::Pointer output = mitk::Image::New(); output->Initialize(mitk::MakeScalarPixelType(), 3, outputDim); output->SetSpacing(inputImage->GetGeometry()->GetSpacing()); output->SetImportVolume(outputData, 0, 0, mitk::Image::ReferenceMemory); return output; } mitk::Image::Pointer mitk::PhotoacousticImage::ApplyBeamforming(mitk::Image::Pointer inputImage, BeamformingFilter::beamformingSettings config, int cutoff, std::function progressHandle) { // crop the image // set the Maximum Size of the image to 4096 unsigned short lowerCutoff = (4096 + cutoff) < inputImage->GetDimension(1) ? 
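// Index arithmetic behind the cropping loops above: both buffers are flat,
// line-fastest (x-fastest) arrays, so the crop amounts to a constant per-axis
// offset. Hypothetical helper for one voxel (dim[0] = lines, dim[1] = samples):
#include <cstddef>

inline std::size_t flatIndex(std::size_t x, std::size_t y, std::size_t z, const unsigned int dim[3])
{
  return x + y * dim[0] + z * (std::size_t)dim[0] * dim[1];
}
// outputData[flatIndex(l, s, sl, outputDim)] =
//     inputData[flatIndex(l + left, s + above, sl + minSlice, inputDim)];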
inputImage->GetDimension(1) - 4096 : 0; config.RecordTime = config.RecordTime - (cutoff + lowerCutoff) / inputImage->GetDimension(1) * config.RecordTime; // adjust the recorded time lost by cropping progressHandle(0, "cropping image"); if (!config.partial) { config.CropBounds[0] = 0; config.CropBounds[1] = inputImage->GetDimension(2) - 1; } Image::Pointer processedImage = ApplyCropping(inputImage, cutoff, lowerCutoff, 0, 0, config.CropBounds[0], config.CropBounds[1]); // resample the image in horizontal direction if (inputImage->GetDimension(0) != config.ReconstructionLines) { progressHandle(0, "resampling image"); auto begin = std::chrono::high_resolution_clock::now(); unsigned int dim[2] = { config.ReconstructionLines, processedImage->GetDimension(1) }; processedImage = ApplyResampling(processedImage, dim); auto end = std::chrono::high_resolution_clock::now(); MITK_DEBUG << "Upsampling from " << inputImage->GetDimension(0) << " to " << config.ReconstructionLines << " lines completed in " << ((double)std::chrono::duration_cast(end - begin).count()) / 1000000 << "ms" << std::endl; } // perform the beamforming BeamformingFilter::Pointer Beamformer = BeamformingFilter::New(); Beamformer->SetInput(processedImage); Beamformer->Configure(config); Beamformer->SetProgressHandle(progressHandle); Beamformer->UpdateLargestPossibleRegion(); processedImage = Beamformer->GetOutput(); return processedImage; } mitk::Image::Pointer mitk::PhotoacousticImage::BandpassFilter(mitk::Image::Pointer data, float recordTime, float BPHighPass, float BPLowPass, float alpha) { bool powerOfTwo = false; int finalPower = 0; for (int i = 1; pow(2, i) <= data->GetDimension(1); ++i) { finalPower = i; if (pow(2, i) == data->GetDimension(1)) { powerOfTwo = true; } } if (!powerOfTwo) { - unsigned int dim[2] = { data->GetDimension(0), pow(2,finalPower+1)}; + unsigned int dim[2] = { data->GetDimension(0), (unsigned int)pow(2,finalPower+1)}; data = ApplyResampling(data, dim); } MITK_INFO << data->GetDimension(0); // do a fourier transform, multiply with an appropriate window for the filter, and transform back typedef float PixelType; typedef itk::Image< PixelType, 3 > RealImageType; RealImageType::Pointer image; mitk::CastToItkImage(data, image); typedef itk::FFT1DRealToComplexConjugateImageFilter ForwardFFTFilterType; typedef ForwardFFTFilterType::OutputImageType ComplexImageType; ForwardFFTFilterType::Pointer forwardFFTFilter = ForwardFFTFilterType::New(); forwardFFTFilter->SetInput(image); forwardFFTFilter->SetDirection(1); try { forwardFFTFilter->UpdateOutputInformation(); } catch (itk::ExceptionObject & error) { std::cerr << "Error: " << error << std::endl; MITK_WARN << "Bandpass could not be applied"; return data; } float singleVoxel = 1 / (recordTime / data->GetDimension(1)) / 2 / 1000; float cutoffPixelHighPass = std::min(BPHighPass / singleVoxel, (float)data->GetDimension(1) / 2); float cutoffPixelLowPass = std::min(BPLowPass / singleVoxel, (float)data->GetDimension(1) / 2 - cutoffPixelHighPass); RealImageType::Pointer fftMultiplicator = BPFunction(data, cutoffPixelHighPass, cutoffPixelLowPass, alpha); typedef itk::MultiplyImageFilter< ComplexImageType, RealImageType, ComplexImageType > MultiplyFilterType; MultiplyFilterType::Pointer multiplyFilter = MultiplyFilterType::New(); multiplyFilter->SetInput1(forwardFFTFilter->GetOutput()); multiplyFilter->SetInput2(fftMultiplicator); /*itk::ComplexToModulusImageFilter::Pointer toReal = itk::ComplexToModulusImageFilter::New(); toReal->SetInput(forwardFFTFilter->GetOutput()); 
return GrabItkImageMemory(toReal->GetOutput()); return GrabItkImageMemory(fftMultiplicator); *///DEBUG typedef itk::FFT1DComplexConjugateToRealImageFilter< ComplexImageType, RealImageType > InverseFilterType; InverseFilterType::Pointer inverseFFTFilter = InverseFilterType::New(); inverseFFTFilter->SetInput(multiplyFilter->GetOutput()); inverseFFTFilter->SetDirection(1); return GrabItkImageMemory(inverseFFTFilter->GetOutput()); } itk::Image::Pointer mitk::PhotoacousticImage::BPFunction(mitk::Image::Pointer reference, int cutoffFrequencyPixelHighPass, int cutoffFrequencyPixelLowPass, float alpha) { float* imageData = new float[reference->GetDimension(0)*reference->GetDimension(1)]; // tukey window float width = reference->GetDimension(1) / 2 - (float)cutoffFrequencyPixelHighPass - (float)cutoffFrequencyPixelLowPass; float center = (float)cutoffFrequencyPixelHighPass / 2 + width / 2; MITK_INFO << width << "width " << center << "center " << alpha; for (int n = 0; n < reference->GetDimension(1); ++n) { imageData[reference->GetDimension(0)*n] = 0; } for (int n = 0; n < width; ++n) { if (n <= (alpha*(width - 1)) / 2) { imageData[reference->GetDimension(0)*(int)(n + center - (width / 2))] = (1 + cos(M_PI*(2 * n / (alpha*(width - 1)) - 1))) / 2; } else if (n >= (width - 1)*(1 - alpha / 2) && n <= (width - 1)) { imageData[reference->GetDimension(0)*(int)(n + center - (width / 2))] = (1 + cos(M_PI*(2 * n / (alpha*(width - 1)) + 1 - 2 / alpha))) / 2; } else { imageData[reference->GetDimension(0)*(int)(n + center - (width / 2))] = 1; } } // Butterworth-Filter /* // first, write the HighPass if (cutoffFrequencyPixelHighPass != reference->GetDimension(1) / 2) { for (int n = 0; n < reference->GetDimension(1) / 2; ++n) { imageData[reference->GetDimension(0)*n] = 1 / (1 + pow( (float)n / (float)(reference->GetDimension(1) / 2 - cutoffFrequencyPixelHighPass) , 2 * butterworthOrder)); } } else { for (int n = 0; n < reference->GetDimension(1) / 2; ++n) { imageData[reference->GetDimension(0)*n] = 1; } } // now, the LowPass for (int n = 0; n < reference->GetDimension(1) / 2; ++n) { imageData[reference->GetDimension(0)*n] *= 1 / (1 + pow( (float)(reference->GetDimension(1) / 2 - 1 - n) / (float)(reference->GetDimension(1) / 2 - cutoffFrequencyPixelLowPass) , 2 * butterworthOrder)); } */ // mirror the first half of the image for (int n = reference->GetDimension(1) / 2; n < reference->GetDimension(1); ++n) { imageData[reference->GetDimension(0)*n] = imageData[(reference->GetDimension(1) - (n + 1)) * reference->GetDimension(0)]; } // copy and paste to all lines for (int line = 1; line < reference->GetDimension(0); ++line) { for (int sample = 0; sample < reference->GetDimension(1); ++sample) { imageData[reference->GetDimension(0)*sample + line] = imageData[reference->GetDimension(0)*sample]; } } typedef itk::Image< float, 3U > ImageType; ImageType::RegionType region; ImageType::IndexType start; start.Fill(0); region.SetIndex(start); ImageType::SizeType size; size[0] = reference->GetDimension(0); size[1] = reference->GetDimension(1); size[2] = reference->GetDimension(2); region.SetSize(size); ImageType::SpacingType SpacingItk; SpacingItk[0] = reference->GetGeometry()->GetSpacing()[0]; SpacingItk[1] = reference->GetGeometry()->GetSpacing()[1]; SpacingItk[2] = reference->GetGeometry()->GetSpacing()[2]; ImageType::Pointer image = ImageType::New(); image->SetRegions(region); image->Allocate(); image->FillBuffer(itk::NumericTraits::Zero); image->SetSpacing(SpacingItk); ImageType::IndexType pixelIndex; for 
(ImageType::IndexValueType slice = 0; slice < reference->GetDimension(2); ++slice) { for (ImageType::IndexValueType line = 0; line < reference->GetDimension(0); ++line) { for (ImageType::IndexValueType sample = 0; sample < reference->GetDimension(1); ++sample) { pixelIndex[0] = line; pixelIndex[1] = sample; pixelIndex[2] = slice; image->SetPixel(pixelIndex, imageData[line + sample*reference->GetDimension(0)]); } } } delete[] imageData; return image; } /* mitk::CropFilter::CropFilter() : m_PixelCalculation(NULL) { this->AddSourceFile("CropFilter.cl"); this->m_FilterID = "CropFilter"; } mitk::CropFilter::~CropFilter() { if (this->m_PixelCalculation) { clReleaseKernel(m_PixelCalculation); } } void mitk::CropFilter::Update() { //Check if context & program available if (!this->Initialize()) { us::ServiceReference ref = GetModuleContext()->GetServiceReference(); OclResourceService* resources = GetModuleContext()->GetService(ref); // clean-up also the resources resources->InvalidateStorage(); mitkThrow() << "Filter is not initialized. Cannot update."; } else { // Execute this->Execute(); } } void mitk::CropFilter::Execute() { cl_int clErr = 0; try { this->InitExec(this->m_PixelCalculation); } catch (const mitk::Exception& e) { MITK_ERROR << "Catched exception while initializing filter: " << e.what(); return; } us::ServiceReference ref = GetModuleContext()->GetServiceReference(); OclResourceService* resources = GetModuleContext()->GetService(ref); cl_context gpuContext = resources->GetContext(); unsigned int size = m_OutputDim[0] * m_OutputDim[1] * m_OutputDim[2] * m_Images; cl_mem cl_input = clCreateBuffer(gpuContext, CL_MEM_READ_ONLY | CL_MEM_USE_HOST_PTR, sizeof(float) * size, m_Data, &clErr); CHECK_OCL_ERR(clErr); // set kernel arguments clErr = clSetKernelArg(this->m_PixelCalculation, 2, sizeof(cl_mem), &cl_input); clErr |= clSetKernelArg(this->m_PixelCalculation, 3, sizeof(cl_ushort), &(this->m_Images)); CHECK_OCL_ERR(clErr); // execute the filter on a 3D NDRange this->ExecuteKernel(m_PixelCalculation, 3); // signalize the GPU-side data changed m_Output->Modified(GPU_DATA); } us::Module *mitk::CropFilter::GetModule() { return us::GetModuleContext()->GetModule(); } bool mitk::CropFilter::Initialize() { bool buildErr = true; cl_int clErr = 0; if (OclFilter::Initialize()) { this->m_PixelCalculation = clCreateKernel(this->m_ClProgram, "ckCrop", &clErr); buildErr |= CHECK_OCL_ERR(clErr); } return (OclFilter::IsInitialized() && buildErr); } void mitk::CropFilter::SetInput(mitk::Image::Pointer image) { if (image->GetDimension() != 3) { mitkThrowException(mitk::Exception) << "Input for " << this->GetNameOfClass() << " is not 3D. The filter only supports 3D. Please change your input."; } OclImageToImageFilter::SetInput(image); } mitk::Image::Pointer mitk::CropFilter::GetOutput() { if (m_Output->IsModified(GPU_DATA)) { void* pData = m_Output->TransferDataToCPU(m_CommandQue); const unsigned int dimension = 3; unsigned int* dimensions = m_OutputDim; const mitk::SlicedGeometry3D::Pointer p_slg = m_Input->GetMITKImage()->GetSlicedGeometry(); MITK_DEBUG << "Creating new MITK Image."; m_Output->GetMITKImage()->Initialize(this->GetOutputType(), dimension, dimensions); m_Output->GetMITKImage()->SetSpacing(p_slg->GetSpacing()); m_Output->GetMITKImage()->SetGeometry(m_Input->GetMITKImage()->GetGeometry()); m_Output->GetMITKImage()->SetImportVolume(pData, 0, 0, mitk::Image::ReferenceMemory); } MITK_DEBUG << "Image Initialized."; return m_Output->GetMITKImage(); } */ \ No newline at end of file
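// One-dimensional Tukey (tapered-cosine) window of the kind BPFunction builds
// along the frequency axis: a flat top with cosine tapers covering a fraction
// `alpha` of the support. Illustrative sketch only; BPFunction additionally
// offsets the window by the high-pass cutoff, mirrors it onto the second half
// of the spectrum, and replicates it across all scan lines.
#include <cmath>
#include <vector>

std::vector<float> tukeyWindow(int width, float alpha)
{
  const double pi = 3.14159265358979323846;
  std::vector<float> w(width, 1.0f); // flat top by default
  if (alpha <= 0.0f)
    return w;                        // alpha == 0 degenerates to a box window
  for (int n = 0; n < width; ++n)
  {
    if (n <= alpha * (width - 1) / 2)
      w[n] = (float)((1.0 + std::cos(pi * (2.0 * n / (alpha * (width - 1)) - 1.0))) / 2.0);
    else if (n >= (width - 1) * (1.0 - alpha / 2.0))
      w[n] = (float)((1.0 + std::cos(pi * (2.0 * n / (alpha * (width - 1)) + 1.0 - 2.0 / alpha))) / 2.0);
  }
  return w;
}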