diff --git a/Modules/PhotoacousticsAlgorithms/Algorithms/mitkPhotoacousticBeamformingFilter.cpp b/Modules/PhotoacousticsAlgorithms/Algorithms/mitkPhotoacousticBeamformingFilter.cpp index ac06045522..3b9627370a 100644 --- a/Modules/PhotoacousticsAlgorithms/Algorithms/mitkPhotoacousticBeamformingFilter.cpp +++ b/Modules/PhotoacousticsAlgorithms/Algorithms/mitkPhotoacousticBeamformingFilter.cpp @@ -1,568 +1,557 @@ /*=================================================================== mitkPhotoacousticBeamformingFilter The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center, Division of Medical and Biological Informatics. All rights reserved. This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See LICENSE.txt or http://www.mitk.org for details. ===================================================================*/ #define _USE_MATH_DEFINES #include "mitkPhotoacousticBeamformingFilter.h" #include "mitkProperties.h" #include "mitkImageReadAccessor.h" #include #include #include #include #include #include #include "mitkImageCast.h" #include mitk::BeamformingFilter::BeamformingFilter() : m_OutputData(nullptr), m_InputData(nullptr) { this->SetNumberOfIndexedInputs(1); this->SetNumberOfRequiredInputs(1); m_ProgressHandle = [](int, std::string) {}; } void mitk::BeamformingFilter::SetProgressHandle(std::function progressHandle) { m_ProgressHandle = progressHandle; } mitk::BeamformingFilter::~BeamformingFilter() { } void mitk::BeamformingFilter::GenerateInputRequestedRegion() { Superclass::GenerateInputRequestedRegion(); mitk::Image* output = this->GetOutput(); mitk::Image* input = const_cast (this->GetInput()); if (!output->IsInitialized()) { return; } input->SetRequestedRegionToLargestPossibleRegion(); //GenerateTimeInInputRegion(output, input); } void mitk::BeamformingFilter::GenerateOutputInformation() { mitk::Image::ConstPointer input = this->GetInput(); mitk::Image::Pointer output = this->GetOutput(); if ((output->IsInitialized()) && (this->GetMTime() <= m_TimeOfHeaderInitialization.GetMTime())) return; itkDebugMacro(<< "GenerateOutputInformation()"); unsigned int dim[] = { m_Conf.ReconstructionLines, m_Conf.SamplesPerLine, input->GetDimension(2) }; output->Initialize(mitk::MakeScalarPixelType(), 3, dim); mitk::Vector3D spacing; spacing[0] = m_Conf.Pitch * m_Conf.TransducerElements * 1000 / m_Conf.ReconstructionLines; spacing[1] = m_Conf.RecordTime / 2 * m_Conf.SpeedOfSound * 1000 / m_Conf.SamplesPerLine; spacing[2] = 1; output->GetGeometry()->SetSpacing(spacing); output->GetGeometry()->Modified(); output->SetPropertyList(input->GetPropertyList()->Clone()); m_TimeOfHeaderInitialization.Modified(); } void mitk::BeamformingFilter::GenerateData() { GenerateOutputInformation(); mitk::Image::Pointer input = this->GetInput(); mitk::Image::Pointer output = this->GetOutput(); if (!output->IsInitialized()) return; - float inputDim[2] = { (float)input->GetDimension(0), (float)input->GetDimension(1) }; - float outputDim[2] = { (float)output->GetDimension(0), (float)output->GetDimension(1) }; - const int apodArraySize = m_Conf.TransducerElements * 4; // set the resolution of the apodization array float* ApodWindow; // calculate the appropiate apodization window switch (m_Conf.Apod) { - case beamformingSettings::Apodization::Hann: - ApodWindow = VonHannFunction(apodArraySize); - break; - case beamformingSettings::Apodization::Hamm: - ApodWindow = HammFunction(apodArraySize); - break; - case 
beamformingSettings::Apodization::Box: - ApodWindow = BoxFunction(apodArraySize); - break; - default: - ApodWindow = BoxFunction(apodArraySize); - break; + case beamformingSettings::Apodization::Hann: + ApodWindow = VonHannFunction(apodArraySize); + break; + case beamformingSettings::Apodization::Hamm: + ApodWindow = HammFunction(apodArraySize); + break; + case beamformingSettings::Apodization::Box: + ApodWindow = BoxFunction(apodArraySize); + break; + default: + ApodWindow = BoxFunction(apodArraySize); + break; } - int progInterval = output->GetDimension(2) / 20 > 1 ? output->GetDimension(2) / 20 : 1; - // the interval at which we update the gui progress bar - auto begin = std::chrono::high_resolution_clock::now(); // debbuging the performance... + if (!m_Conf.UseGPU) { + int progInterval = output->GetDimension(2) / 20 > 1 ? output->GetDimension(2) / 20 : 1; + // the interval at which we update the gui progress bar + + float inputDim[2] = { (float)input->GetDimension(0), (float)input->GetDimension(1) }; + float outputDim[2] = { (float)output->GetDimension(0), (float)output->GetDimension(1) }; + for (unsigned int i = 0; i < output->GetDimension(2); ++i) // seperate Slices should get Beamforming seperately applied { mitk::ImageReadAccessor inputReadAccessor(input, input->GetSliceData(i)); // first, we check whether the dara is float, other formats are unsupported if (input->GetPixelType().GetTypeAsString() == "scalar (float)" || input->GetPixelType().GetTypeAsString() == " (float)") { m_InputData = (float*)inputReadAccessor.GetData(); } else { MITK_INFO << "Pixel type is not float, abort"; return; } m_OutputData = new float[m_Conf.ReconstructionLines*m_Conf.SamplesPerLine]; // fill the image with zeros for (int l = 0; l < outputDim[0]; ++l) { for (int s = 0; s < outputDim[1]; ++s) { m_OutputData[l*(short)outputDim[1] + s] = 0; } } std::thread *threads = new std::thread[(short)outputDim[0]]; // every line will be beamformed in a seperate thread if (m_Conf.Algorithm == beamformingSettings::BeamformingAlgorithm::DAS) { if (m_Conf.DelayCalculationMethod == beamformingSettings::DelayCalc::QuadApprox) { for (short line = 0; line < outputDim[0]; ++line) { threads[line] = std::thread(&BeamformingFilter::DASQuadraticLine, this, m_InputData, m_OutputData, inputDim, outputDim, line, ApodWindow, apodArraySize); } } else if (m_Conf.DelayCalculationMethod == beamformingSettings::DelayCalc::Spherical) { for (short line = 0; line < outputDim[0]; ++line) { threads[line] = std::thread(&BeamformingFilter::DASSphericalLine, this, m_InputData, m_OutputData, inputDim, outputDim, line, ApodWindow, apodArraySize); } } } else if (m_Conf.Algorithm == beamformingSettings::BeamformingAlgorithm::DMAS) { if (m_Conf.DelayCalculationMethod == beamformingSettings::DelayCalc::QuadApprox) { for (short line = 0; line < outputDim[0]; ++line) { threads[line] = std::thread(&BeamformingFilter::DMASQuadraticLine, this, m_InputData, m_OutputData, inputDim, outputDim, line, ApodWindow, apodArraySize); } } else if (m_Conf.DelayCalculationMethod == beamformingSettings::DelayCalc::Spherical) { for (short line = 0; line < outputDim[0]; ++line) { threads[line] = std::thread(&BeamformingFilter::DMASSphericalLine, this, m_InputData, m_OutputData, inputDim, outputDim, line, ApodWindow, apodArraySize); } } } // wait for all lines to finish for (short line = 0; line < outputDim[0]; ++line) { threads[line].join(); } output->SetSlice(m_OutputData, i); if (i % progInterval == 0) m_ProgressHandle((int)((i + 1) / (float)output->GetDimension(2) * 
100), "performing reconstruction"); delete[] m_OutputData; m_OutputData = nullptr; m_InputData = nullptr; } } #ifdef PHOTOACOUSTICS_USE_GPU - else { - //unsigned short chunkSize = 1; // 1 chunk = 1 image - - unsigned int oclOutputDim[3] = { output->GetDimension(0), output->GetDimension(1), output->GetDimension(2) }; - try { + // first, we check whether the data is float, other formats are unsupported + if (!(input->GetPixelType().GetTypeAsString() == "scalar (float)" || input->GetPixelType().GetTypeAsString() == " (float)")) + { + MITK_ERROR << "Pixel type is not float, abort"; + return; + } + + m_ProgressHandle(50, "performing reconstruction"); + mitk::PhotoacousticOCLBeamformingFilter::Pointer m_oclFilter = mitk::PhotoacousticOCLBeamformingFilter::New(); if (m_Conf.Algorithm == beamformingSettings::BeamformingAlgorithm::DAS) { if (m_Conf.DelayCalculationMethod == beamformingSettings::DelayCalc::QuadApprox) m_oclFilter->SetAlgorithm(PhotoacousticOCLBeamformingFilter::BeamformingAlgorithm::DASQuad, true); else if (m_Conf.DelayCalculationMethod == beamformingSettings::DelayCalc::Spherical) m_oclFilter->SetAlgorithm(PhotoacousticOCLBeamformingFilter::BeamformingAlgorithm::DASSphe, true); } else if (m_Conf.Algorithm == beamformingSettings::BeamformingAlgorithm::DMAS) { if (m_Conf.DelayCalculationMethod == beamformingSettings::DelayCalc::QuadApprox) m_oclFilter->SetAlgorithm(PhotoacousticOCLBeamformingFilter::BeamformingAlgorithm::DMASQuad, true); else if (m_Conf.DelayCalculationMethod == beamformingSettings::DelayCalc::Spherical) m_oclFilter->SetAlgorithm(PhotoacousticOCLBeamformingFilter::BeamformingAlgorithm::DMASSphe, true); } + m_oclFilter->SetApodisation(ApodWindow, apodArraySize); m_oclFilter->SetBeamformingParameters(m_Conf.SpeedOfSound, m_Conf.TimeSpacing, m_Conf.Pitch, m_Conf.Angle, m_Conf.Photoacoustic, m_Conf.TransducerElements); - - mitk::ImageReadAccessor inputReadAccessor(input); - - // first, we check whether the data is float, other formats are unsupported - if (input->GetPixelType().GetTypeAsString() == "scalar (float)" || input->GetPixelType().GetTypeAsString() == " (float)") - { - m_InputData = (float*)inputReadAccessor.GetData(); - } - else - { - MITK_INFO << "Pixel type is not float, abort"; - return; - } - - m_ProgressHandle(50, "performing reconstruction"); - - m_oclFilter->SetOutputDim(oclOutputDim); + m_oclFilter->SetOutputDim(output->GetDimensions()); m_oclFilter->SetInput(input); m_oclFilter->Update(); void* out = m_oclFilter->GetOutput(); output->SetImportVolume(out, 0, 0, mitk::Image::ReferenceMemory); - } catch (mitk::Exception &e) { std::string errorMessage = "Caught unexpected exception "; errorMessage.append(e.what()); MITK_ERROR << errorMessage; } } #endif m_TimeOfHeaderInitialization.Modified(); auto end = std::chrono::high_resolution_clock::now(); MITK_INFO << "Beamforming of " << output->GetDimension(2) << " Images completed in " << ((float)std::chrono::duration_cast(end - begin).count()) / 1000000 << "ms" << std::endl; } void mitk::BeamformingFilter::Configure(beamformingSettings settings) { m_Conf = settings; } float* mitk::BeamformingFilter::VonHannFunction(int samples) { float* ApodWindow = new float[samples]; for (int n = 0; n < samples; ++n) { ApodWindow[n] = (1 - cos(2 * M_PI * n / (samples - 1))) / 2; } return ApodWindow; } float* mitk::BeamformingFilter::HammFunction(int samples) { float* ApodWindow = new float[samples]; for (int n = 0; n < samples; ++n) { ApodWindow[n] = 0.54 - 0.46*cos(2 * M_PI*n / (samples - 1)); } return ApodWindow; } float* 
mitk::BeamformingFilter::BoxFunction(int samples) { float* ApodWindow = new float[samples]; for (int n = 0; n < samples; ++n) { ApodWindow[n] = 1; } return ApodWindow; } void mitk::BeamformingFilter::DASQuadraticLine(float* input, float* output, float inputDim[2], float outputDim[2], const short& line, float* apodisation, const short& apodArraySize) { float& inputS = inputDim[1]; float& inputL = inputDim[0]; float& outputS = outputDim[1]; float& outputL = outputDim[0]; short AddSample = 0; short maxLine = 0; short minLine = 0; float delayMultiplicator = 0; float l_i = 0; float s_i = 0; float part = 0.07 * inputL; float tan_phi = std::tan(m_Conf.Angle / 360 * 2 * M_PI); - float part_multiplicator = tan_phi * m_Conf.TimeSpacing * m_Conf.SpeedOfSound / m_Conf.Pitch * m_Conf.ReconstructionLines / m_Conf.TransducerElements; + float part_multiplicator = tan_phi * m_Conf.TimeSpacing * m_Conf.SpeedOfSound / m_Conf.Pitch * inputL / m_Conf.TransducerElements; float apod_mult = 1; short usedLines = (maxLine - minLine); //quadratic delay l_i = line / outputL * inputL; for (short sample = 0; sample < outputS; ++sample) { s_i = (float)sample / outputS * inputS / 2; part = part_multiplicator*s_i; if (part < 1) part = 1; maxLine = (short)std::min((l_i + part) + 1, inputL); minLine = (short)std::max((l_i - part), 0.0f); usedLines = (maxLine - minLine); apod_mult = apodArraySize / (maxLine - minLine); delayMultiplicator = pow((1 / (m_Conf.TimeSpacing*m_Conf.SpeedOfSound) * (m_Conf.Pitch*m_Conf.TransducerElements) / inputL), 2) / s_i / 2; for (short l_s = minLine; l_s < maxLine; ++l_s) { AddSample = delayMultiplicator * pow((l_s - l_i), 2) + s_i + (1 - m_Conf.Photoacoustic)*s_i; if (AddSample < inputS && AddSample >= 0) output[sample*(short)outputL + line] += input[l_s + AddSample*(short)inputL] * apodisation[(short)((l_s - minLine)*apod_mult)]; else --usedLines; } output[sample*(short)outputL + line] = output[sample*(short)outputL + line] / usedLines; } } void mitk::BeamformingFilter::DASSphericalLine(float* input, float* output, float inputDim[2], float outputDim[2], const short& line, float* apodisation, const short& apodArraySize) { float& inputS = inputDim[1]; float& inputL = inputDim[0]; float& outputS = outputDim[1]; float& outputL = outputDim[0]; short AddSample = 0; short maxLine = 0; short minLine = 0; float l_i = 0; float s_i = 0; float part = 0.07 * inputL; float tan_phi = std::tan(m_Conf.Angle / 360 * 2 * M_PI); - float part_multiplicator = tan_phi * m_Conf.TimeSpacing * m_Conf.SpeedOfSound / m_Conf.Pitch * m_Conf.ReconstructionLines / m_Conf.TransducerElements; + float part_multiplicator = tan_phi * m_Conf.TimeSpacing * m_Conf.SpeedOfSound / m_Conf.Pitch * inputL / m_Conf.TransducerElements; float apod_mult = 1; short usedLines = (maxLine - minLine); //exact delay l_i = (float)line / outputL * inputL; for (short sample = 0; sample < outputS; ++sample) { s_i = (float)sample / outputS * inputS / 2; part = part_multiplicator*s_i; if (part < 1) part = 1; maxLine = (short)std::min((l_i + part) + 1, inputL); minLine = (short)std::max((l_i - part), 0.0f); usedLines = (maxLine - minLine); apod_mult = apodArraySize / (maxLine - minLine); for (short l_s = minLine; l_s < maxLine; ++l_s) { AddSample = (int)sqrt( pow(s_i, 2) + pow((1 / (m_Conf.TimeSpacing*m_Conf.SpeedOfSound) * ((l_s - l_i)*m_Conf.Pitch*m_Conf.TransducerElements) / inputL), 2) ) + (1 - m_Conf.Photoacoustic)*s_i; if (AddSample < inputS && AddSample >= 0) output[sample*(short)outputL + line] += input[l_s + AddSample*(short)inputL] * 
apodisation[(short)((l_s - minLine)*apod_mult)]; else --usedLines; } output[sample*(short)outputL + line] = output[sample*(short)outputL + line] / usedLines; } } void mitk::BeamformingFilter::DMASQuadraticLine(float* input, float* output, float inputDim[2], float outputDim[2], const short& line, float* apodisation, const short& apodArraySize) { float& inputS = inputDim[1]; float& inputL = inputDim[0]; float& outputS = outputDim[1]; float& outputL = outputDim[0]; short maxLine = 0; short minLine = 0; float delayMultiplicator = 0; float l_i = 0; float s_i = 0; float part = 0.07 * inputL; float tan_phi = std::tan(m_Conf.Angle / 360 * 2 * M_PI); - float part_multiplicator = tan_phi * m_Conf.TimeSpacing * m_Conf.SpeedOfSound / m_Conf.Pitch * m_Conf.ReconstructionLines / m_Conf.TransducerElements; + float part_multiplicator = tan_phi * m_Conf.TimeSpacing * m_Conf.SpeedOfSound / m_Conf.Pitch * inputL / m_Conf.TransducerElements; float apod_mult = 1; float mult = 0; short usedLines = (maxLine - minLine); //quadratic delay l_i = line / outputL * inputL; for (short sample = 0; sample < outputS; ++sample) { s_i = sample / outputS * inputS / 2; part = part_multiplicator*s_i; if (part < 1) part = 1; maxLine = (short)std::min((l_i + part) + 1, inputL); minLine = (short)std::max((l_i - part), 0.0f); usedLines = (maxLine - minLine); apod_mult = apodArraySize / (maxLine - minLine); delayMultiplicator = pow((1 / (m_Conf.TimeSpacing*m_Conf.SpeedOfSound) * (m_Conf.Pitch*m_Conf.TransducerElements) / inputL), 2) / s_i / 2; //calculate the AddSamples beforehand to save some time short* AddSample = new short[maxLine - minLine]; for (short l_s = 0; l_s < maxLine - minLine; ++l_s) { AddSample[l_s] = (short)(delayMultiplicator * pow((minLine + l_s - l_i), 2) + s_i) + (1 - m_Conf.Photoacoustic)*s_i; } for (short l_s1 = minLine; l_s1 < maxLine - 1; ++l_s1) { if (AddSample[l_s1 - minLine] < (short)inputS && AddSample[l_s1 - minLine] >= 0) { for (short l_s2 = l_s1 + 1; l_s2 < maxLine; ++l_s2) { if (AddSample[l_s2 - minLine] < inputS && AddSample[l_s2 - minLine] >= 0) { mult = input[l_s2 + AddSample[l_s2 - minLine] * (short)inputL] * apodisation[(short)((l_s2 - minLine)*apod_mult)] * input[l_s1 + AddSample[l_s1 - minLine] * (short)inputL] * apodisation[(short)((l_s1 - minLine)*apod_mult)]; output[sample*(short)outputL + line] += sqrt(abs(mult)) * ((mult > 0) - (mult < 0)); } } } else --usedLines; } output[sample*(short)outputL + line] = 10 * output[sample*(short)outputL + line] / (pow(usedLines, 2) - (usedLines - 1)); delete[] AddSample; } } void mitk::BeamformingFilter::DMASSphericalLine(float* input, float* output, float inputDim[2], float outputDim[2], const short& line, float* apodisation, const short& apodArraySize) { float& inputS = inputDim[1]; float& inputL = inputDim[0]; float& outputS = outputDim[1]; float& outputL = outputDim[0]; short maxLine = 0; short minLine = 0; float l_i = 0; float s_i = 0; float part = 0.07 * inputL; float tan_phi = std::tan(m_Conf.Angle / 360 * 2 * M_PI); - float part_multiplicator = tan_phi * m_Conf.TimeSpacing * m_Conf.SpeedOfSound / m_Conf.Pitch * m_Conf.ReconstructionLines / m_Conf.TransducerElements; + float part_multiplicator = tan_phi * m_Conf.TimeSpacing * m_Conf.SpeedOfSound / m_Conf.Pitch * inputL / m_Conf.TransducerElements; float apod_mult = 1; float mult = 0; short usedLines = (maxLine - minLine); //exact delay l_i = line / outputL * inputL; for (short sample = 0; sample < outputS; ++sample) { s_i = sample / outputS * inputS / 2; part = part_multiplicator*s_i; if (part < 
1) part = 1; maxLine = (short)std::min((l_i + part) + 1, inputL); minLine = (short)std::max((l_i - part), 0.0f); usedLines = (maxLine - minLine); apod_mult = apodArraySize / (maxLine - minLine); //calculate the AddSamples beforehand to save some time short* AddSample = new short[maxLine - minLine]; for (short l_s = 0; l_s < maxLine - minLine; ++l_s) { AddSample[l_s] = (short)sqrt( pow(s_i, 2) + pow((1 / (m_Conf.TimeSpacing*m_Conf.SpeedOfSound) * ((minLine + l_s - l_i)*m_Conf.Pitch*m_Conf.TransducerElements) / inputL), 2) ) + (1 - m_Conf.Photoacoustic)*s_i; } for (short l_s1 = minLine; l_s1 < maxLine - 1; ++l_s1) { if (AddSample[l_s1 - minLine] < inputS && AddSample[l_s1 - minLine] >= 0) { for (short l_s2 = l_s1 + 1; l_s2 < maxLine; ++l_s2) { if (AddSample[l_s2 - minLine] < inputS && AddSample[l_s2 - minLine] >= 0) { mult = input[l_s2 + AddSample[l_s2 - minLine] * (short)inputL] * apodisation[(int)((l_s2 - minLine)*apod_mult)] * input[l_s1 + AddSample[l_s1 - minLine] * (short)inputL] * apodisation[(int)((l_s1 - minLine)*apod_mult)]; output[sample*(short)outputL + line] += sqrt(abs(mult)) * ((mult > 0) - (mult < 0)); } } } else --usedLines; } output[sample*(short)outputL + line] = 10 * output[sample*(short)outputL + line] / (pow(usedLines, 2) - (usedLines - 1)); delete[] AddSample; } } diff --git a/Modules/PhotoacousticsAlgorithms/Resources/DASQuadratic.cl b/Modules/PhotoacousticsAlgorithms/Resources/DASQuadratic.cl index a205f73c75..fe873c7b42 100644 --- a/Modules/PhotoacousticsAlgorithms/Resources/DASQuadratic.cl +++ b/Modules/PhotoacousticsAlgorithms/Resources/DASQuadratic.cl @@ -1,73 +1,73 @@ /*=================================================================== The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center, Division of Medical and Biological Informatics. All rights reserved. This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See LICENSE.txt or http://www.mitk.org for details. 
===================================================================*/ __kernel void ckDASQuad( __global float* dSource, // input image __global float* dDest, // output buffer __global float* apodArray, unsigned short apodArraySize, float SpeedOfSound, float TimeSpacing, float Pitch, float Angle, unsigned short PAImage, unsigned short TransducerElements, unsigned int inputL, unsigned int inputS, unsigned int Slices, unsigned int outputL, unsigned int outputS // parameters ) { // get thread identifier unsigned int globalPosX = get_global_id(0); unsigned int globalPosY = get_global_id(1); unsigned int globalPosZ = get_global_id(2); // terminate non-valid threads if ( globalPosX < outputL && globalPosY < outputS && globalPosZ < Slices ) { float l_i = (float)globalPosX / outputL * inputL; float s_i = (float)globalPosY / outputS * inputS / 2; - float part = (tan(Angle / 360 * 2 * M_PI) * TimeSpacing * SpeedOfSound / Pitch * outputL / TransducerElements) * s_i; + float part = (tan(Angle / 360 * 2 * M_PI) * ((SpeedOfSound * TimeSpacing) * s_i) / (Pitch * TransducerElements)) * inputL; if (part < 1) part = 1; short maxLine = min((l_i + part) + 1, (float)inputL); short minLine = max((l_i - part), 0.0f); short usedLines = (maxLine - minLine); float apod_mult = apodArraySize / (maxLine - minLine); short AddSample = 0; float output = 0; float delayMultiplicator = pow(1 / (TimeSpacing*SpeedOfSound) * Pitch * TransducerElements / inputL, 2) / s_i / 2; for (short l_s = minLine; l_s < maxLine; ++l_s) { AddSample = delayMultiplicator * pow((l_s - l_i), 2) + s_i + (1-PAImage)*s_i; if (AddSample < inputS && AddSample >= 0) { output += apodArray[(short)((l_s - minLine)*apod_mult)] * dSource[(unsigned int)(globalPosZ * inputL * inputS + AddSample * inputL + l_s)]; } else --usedLines; } dDest[ globalPosZ * outputL * outputS + globalPosY * outputL + globalPosX ] = output / usedLines; } } \ No newline at end of file diff --git a/Modules/PhotoacousticsAlgorithms/Resources/DASspherical.cl b/Modules/PhotoacousticsAlgorithms/Resources/DASspherical.cl index 75770a2b4b..f6974aa5a4 100644 --- a/Modules/PhotoacousticsAlgorithms/Resources/DASspherical.cl +++ b/Modules/PhotoacousticsAlgorithms/Resources/DASspherical.cl @@ -1,73 +1,73 @@ /*=================================================================== The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center, Division of Medical and Biological Informatics. All rights reserved. This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See LICENSE.txt or http://www.mitk.org for details. 
===================================================================*/ __kernel void ckDASSphe( __global float* dSource, // input image __global float* dDest, // output buffer __global float* apodArray, unsigned short apodArraySize, float SpeedOfSound, float TimeSpacing, float Pitch, float Angle, unsigned short PAImage, unsigned short TransducerElements, unsigned int inputL, unsigned int inputS, unsigned int Slices, unsigned int outputL, unsigned int outputS // parameters ) { // get thread identifier unsigned int globalPosX = get_global_id(0); unsigned int globalPosY = get_global_id(1); unsigned int globalPosZ = get_global_id(2); // terminate non-valid threads if ( globalPosX < outputL && globalPosY < outputS && globalPosZ < Slices ) { float l_i = (float)globalPosX / outputL * inputL; float s_i = (float)globalPosY / outputS * inputS / 2; - float part = (tan(Angle / 360 * 2 * M_PI) * TimeSpacing * SpeedOfSound / Pitch * outputL / TransducerElements) * s_i; + float part = (tan(Angle / 360 * 2 * M_PI) * ((SpeedOfSound * TimeSpacing) * s_i) / (Pitch * TransducerElements)) * inputL; if (part < 1) part = 1; short maxLine = min((l_i + part) + 1, (float)inputL); short minLine = max((l_i - part), 0.0f); short usedLines = (maxLine - minLine); float apod_mult = apodArraySize / (maxLine - minLine); short AddSample = 0; float output = 0; for (short l_s = minLine; l_s < maxLine; ++l_s) { AddSample = sqrt( pow(s_i, 2) + pow((1 / (TimeSpacing*SpeedOfSound) * ((l_s - l_i)*Pitch*TransducerElements) / inputL), 2) ) + (1-PAImage)*s_i; if (AddSample < inputS && AddSample >= 0) output += apodArray[(short)((l_s - minLine)*apod_mult)] * dSource[(unsigned int)(globalPosZ * inputL * inputS + AddSample * inputL + l_s)]; else --usedLines; } dDest[ globalPosZ * outputL * outputS + globalPosY * outputL + globalPosX ] = output / usedLines; } } \ No newline at end of file diff --git a/Modules/PhotoacousticsAlgorithms/Resources/DMASQuadratic.cl b/Modules/PhotoacousticsAlgorithms/Resources/DMASQuadratic.cl index 87536d6730..ec7ec8a868 100644 --- a/Modules/PhotoacousticsAlgorithms/Resources/DMASQuadratic.cl +++ b/Modules/PhotoacousticsAlgorithms/Resources/DMASQuadratic.cl @@ -1,84 +1,84 @@ /*=================================================================== The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center, Division of Medical and Biological Informatics. All rights reserved. This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See LICENSE.txt or http://www.mitk.org for details. 
===================================================================*/ __kernel void ckDMASQuad( __global float* dSource, // input image __global float* dDest, // output buffer __global float* apodArray, unsigned short apodArraySize, float SpeedOfSound, float TimeSpacing, float Pitch, float Angle, unsigned short PAImage, unsigned short TransducerElements, unsigned int inputL, unsigned int inputS, unsigned int Slices, unsigned int outputL, unsigned int outputS // parameters ) { // get thread identifier unsigned int globalPosX = get_global_id(0); unsigned int globalPosY = get_global_id(1); unsigned int globalPosZ = get_global_id(2); // terminate non-valid threads if ( globalPosX < outputL && globalPosY < outputS && globalPosZ < Slices ) { float l_i = (float)globalPosX / outputL * inputL; float s_i = (float)globalPosY / outputS * inputS / 2; - float part = (tan(Angle / 360 * 2 * M_PI) * TimeSpacing * SpeedOfSound / Pitch * outputL / TransducerElements) * s_i; + float part = (tan(Angle / 360 * 2 * M_PI) * ((SpeedOfSound * TimeSpacing) * s_i) / (Pitch * TransducerElements)) * inputL; if (part < 1) part = 1; short maxLine = min((l_i + part) + 1, (float)inputL); short minLine = max((l_i - part), 0.0f); short usedLines = (maxLine - minLine); float apod_mult = apodArraySize / (maxLine - minLine); short AddSample1 = 0; short AddSample2 = 0; float output = 0; float mult = 0; float delayMultiplicator = pow(1 / (TimeSpacing*SpeedOfSound) * Pitch * TransducerElements / inputL, 2) / s_i / 2; for (short l_s1 = minLine; l_s1 < maxLine; ++l_s1) { AddSample1 = delayMultiplicator * pow((l_s1 - l_i), 2) + s_i + (1-PAImage)*s_i; if (AddSample1 < inputS && AddSample1 >= 0) { for (short l_s2 = l_s1 + 1; l_s2 < maxLine; ++l_s2) { AddSample2 = delayMultiplicator * pow((l_s2 - l_i), 2) + s_i + (1-PAImage)*s_i; if (AddSample1 < inputS && AddSample1 >= 0) { mult = apodArray[(short)((l_s2 - minLine)*apod_mult)] * dSource[(unsigned int)(globalPosZ * inputL * inputS + AddSample2 * inputL + l_s2)] * apodArray[(short)((l_s1 - minLine)*apod_mult)] * dSource[(unsigned int)(globalPosZ * inputL * inputS + AddSample1 * inputL + l_s1)]; output += sqrt(mult * ((float)(mult>0)-(float)(mult<0))) * ((mult > 0) - (mult < 0)); } } } else --usedLines; } dDest[ globalPosZ * outputL * outputS + globalPosY * outputL + globalPosX ] = output / (pow((float)usedLines, 2.0f) - (usedLines - 1)); } } \ No newline at end of file diff --git a/Modules/PhotoacousticsAlgorithms/Resources/DMASspherical.cl b/Modules/PhotoacousticsAlgorithms/Resources/DMASspherical.cl index a9ae09fa98..a883160ead 100644 --- a/Modules/PhotoacousticsAlgorithms/Resources/DMASspherical.cl +++ b/Modules/PhotoacousticsAlgorithms/Resources/DMASspherical.cl @@ -1,94 +1,94 @@ /*=================================================================== The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center, Division of Medical and Biological Informatics. All rights reserved. This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See LICENSE.txt or http://www.mitk.org for details. 
===================================================================*/ __kernel void ckDMASSphe( __global float* dSource, // input image __global float* dDest, // output buffer __global float* apodArray, unsigned short apodArraySize, float SpeedOfSound, float TimeSpacing, float Pitch, float Angle, unsigned short PAImage, unsigned short TransducerElements, unsigned int inputL, unsigned int inputS, unsigned int Slices, unsigned int outputL, unsigned int outputS // parameters ) { // get thread identifier unsigned int globalPosX = get_global_id(0); unsigned int globalPosY = get_global_id(1); unsigned int globalPosZ = get_global_id(2); // terminate non-valid threads if ( globalPosX < outputL && globalPosY < outputS && globalPosZ < Slices ) { float l_i = (float)globalPosX / outputL * inputL; float s_i = (float)globalPosY / outputS * inputS / 2; - float part = (tan(Angle / 360 * 2 * M_PI) * TimeSpacing * SpeedOfSound / Pitch * outputL / TransducerElements) * s_i; + float part = (tan(Angle / 360 * 2 * M_PI) * ((SpeedOfSound * TimeSpacing) * s_i) / (Pitch * TransducerElements)) * inputL; if (part < 1) part = 1; short maxLine = min((l_i + part) + 1, (float)inputL); short minLine = max((l_i - part), 0.0f); short usedLines = (maxLine - minLine); float apod_mult = apodArraySize / (maxLine - minLine); short AddSample1 = 0; short AddSample2 = 0; float output = 0; float mult = 0; float delayMultiplicator = pow(1 / (TimeSpacing*SpeedOfSound) * Pitch * TransducerElements / inputL, 2) / s_i / 2; for (short l_s1 = minLine; l_s1 < maxLine; ++l_s1) { AddSample1 = sqrt( pow(s_i, 2) + pow((1 / (TimeSpacing*SpeedOfSound) * ((l_s1 - l_i)*Pitch*TransducerElements)/inputL), 2) ) + (1-PAImage)*s_i; if (AddSample1 < inputS && AddSample1 >= 0) { for (short l_s2 = l_s1 + 1; l_s2 < maxLine; ++l_s2) { AddSample2 = sqrt( pow(s_i, 2) + pow((1 / (TimeSpacing*SpeedOfSound) * ((l_s2 - l_i)*Pitch*TransducerElements)/inputL), 2) ) + (1-PAImage)*s_i; if (AddSample1 < inputS && AddSample1 >= 0) { mult = apodArray[(short)((l_s2 - minLine)*apod_mult)] * dSource[(unsigned int)(globalPosZ * inputL * inputS + AddSample2 * inputL + l_s2)] * apodArray[(short)((l_s1 - minLine)*apod_mult)] * dSource[(unsigned int)(globalPosZ * inputL * inputS + AddSample1 * inputL + l_s1)]; output += sqrt(mult * ((float)(mult>0)-(float)(mult<0))) * ((mult > 0) - (mult < 0)); } } } else --usedLines; } dDest[ globalPosZ * outputL * outputS + globalPosY * outputL + globalPosX ] = output / (pow((float)usedLines, 2.0f) - (usedLines - 1)); } } \ No newline at end of file diff --git a/Modules/PhotoacousticsAlgorithms/mitkPhotoacousticImage.cpp b/Modules/PhotoacousticsAlgorithms/mitkPhotoacousticImage.cpp index 44b8eeafe6..814659e65f 100644 --- a/Modules/PhotoacousticsAlgorithms/mitkPhotoacousticImage.cpp +++ b/Modules/PhotoacousticsAlgorithms/mitkPhotoacousticImage.cpp @@ -1,530 +1,519 @@ /*=================================================================== The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center, Division of Medical and Biological Informatics. All rights reserved. This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See LICENSE.txt or http://www.mitk.org for details. 
===================================================================*/ #define _USE_MATH_DEFINES #include #include "mitkPhotoacousticImage.h" #include "itkBModeImageFilter.h" #include "itkPhotoacousticBModeImageFilter.h" #include "mitkImageCast.h" #include "mitkITKImageImport.h" #include "mitkPhotoacousticBeamformingFilter.h" #include #include #include // itk dependencies #include "itkImage.h" #include "itkResampleImageFilter.h" #include "itkCastImageFilter.h" #include "itkCropImageFilter.h" #include "itkRescaleIntensityImageFilter.h" #include "itkIntensityWindowingImageFilter.h" #include #include "itkMultiplyImageFilter.h" #include "itkBSplineInterpolateImageFunction.h" #include // needed itk image filters #include "mitkITKImageImport.h" #include "itkFFTShiftImageFilter.h" #include "itkMultiplyImageFilter.h" #include "itkComplexToModulusImageFilter.h" #include #include "itkFFT1DComplexConjugateToRealImageFilter.h" #include "itkFFT1DRealToComplexConjugateImageFilter.h" mitk::PhotoacousticImage::PhotoacousticImage() { MITK_INFO << "[PhotoacousticImage Debug] created that image"; } mitk::PhotoacousticImage::~PhotoacousticImage() { MITK_INFO << "[PhotoacousticImage Debug] destroyed that image"; } mitk::Image::Pointer mitk::PhotoacousticImage::ApplyBmodeFilter(mitk::Image::Pointer inputImage, BModeMethod method, bool UseGPU, bool UseLogFilter, float resampleSpacing) { // the image needs to be of floating point type for the envelope filter to work; the casting is done automatically by the CastToItkImage typedef itk::Image< float, 3 > itkFloatImageType; typedef itk::IdentityTransform TransformType; if (method == BModeMethod::Abs) { mitk::Image::Pointer input; mitk::Image::Pointer out; if (inputImage->GetPixelType().GetTypeAsString() == "scalar (float)" || inputImage->GetPixelType().GetTypeAsString() == " (float)") input = inputImage; else input = ApplyCropping(inputImage, 0, 0, 0, 0, 0, inputImage->GetDimension(2) - 1); if (!UseGPU) { PhotoacousticBModeFilter::Pointer filter = PhotoacousticBModeFilter::New(); filter->SetParameters(UseLogFilter); filter->SetInput(input); filter->Update(); out = filter->GetOutput(); if (resampleSpacing == 0) return out; } #ifdef PHOTOACOUSTICS_USE_GPU else { PhotoacousticOCLBModeFilter::Pointer filter = PhotoacousticOCLBModeFilter::New(); filter->SetParameters(UseLogFilter); filter->SetInput(input); filter->Update(); out = filter->GetOutput(); if (resampleSpacing == 0) return out; } #endif typedef itk::ResampleImageFilter < itkFloatImageType, itkFloatImageType > ResampleImageFilter; ResampleImageFilter::Pointer resampleImageFilter = ResampleImageFilter::New(); itkFloatImageType::Pointer itkImage; mitk::CastToItkImage(out, itkImage); itkFloatImageType::SpacingType outputSpacing; itkFloatImageType::SizeType inputSize = itkImage->GetLargestPossibleRegion().GetSize(); itkFloatImageType::SizeType outputSize = inputSize; outputSpacing[0] = itkImage->GetSpacing()[0]; outputSpacing[1] = resampleSpacing; outputSpacing[2] = itkImage->GetSpacing()[2]; outputSize[1] = inputSize[1] * itkImage->GetSpacing()[1] / outputSpacing[1]; typedef itk::IdentityTransform TransformType; resampleImageFilter->SetInput(itkImage); resampleImageFilter->SetSize(outputSize); resampleImageFilter->SetOutputSpacing(outputSpacing); resampleImageFilter->SetTransform(TransformType::New()); resampleImageFilter->UpdateLargestPossibleRegion(); return mitk::GrabItkImageMemory(resampleImageFilter->GetOutput()); } else if (method == BModeMethod::ShapeDetection) { typedef itk::BModeImageFilter < 
itkFloatImageType, itkFloatImageType > BModeFilterType; BModeFilterType::Pointer bModeFilter = BModeFilterType::New(); // LogFilter typedef itk::PhotoacousticBModeImageFilter < itkFloatImageType, itkFloatImageType > PhotoacousticBModeImageFilter; PhotoacousticBModeImageFilter::Pointer photoacousticBModeFilter = PhotoacousticBModeImageFilter::New(); // No LogFilter typedef itk::ResampleImageFilter < itkFloatImageType, itkFloatImageType > ResampleImageFilter; ResampleImageFilter::Pointer resampleImageFilter = ResampleImageFilter::New(); itkFloatImageType::Pointer itkImage; mitk::CastToItkImage(inputImage, itkImage); itkFloatImageType::Pointer bmode; if (UseLogFilter) { bModeFilter->SetInput(itkImage); bModeFilter->SetDirection(1); bmode = bModeFilter->GetOutput(); } else { photoacousticBModeFilter->SetInput(itkImage); photoacousticBModeFilter->SetDirection(1); bmode = photoacousticBModeFilter->GetOutput(); } // resampleSpacing == 0 means: do no resampling if (resampleSpacing == 0) { return mitk::GrabItkImageMemory(bmode); } itkFloatImageType::SpacingType outputSpacing; itkFloatImageType::SizeType inputSize = itkImage->GetLargestPossibleRegion().GetSize(); itkFloatImageType::SizeType outputSize = inputSize; outputSpacing[0] = itkImage->GetSpacing()[0]; outputSpacing[1] = resampleSpacing; outputSpacing[2] = itkImage->GetSpacing()[2]; outputSize[1] = inputSize[1] * itkImage->GetSpacing()[1] / outputSpacing[1]; resampleImageFilter->SetInput(bmode); resampleImageFilter->SetSize(outputSize); resampleImageFilter->SetOutputSpacing(outputSpacing); resampleImageFilter->SetTransform(TransformType::New()); resampleImageFilter->UpdateLargestPossibleRegion(); return mitk::GrabItkImageMemory(resampleImageFilter->GetOutput()); } return nullptr; } /*mitk::Image::Pointer mitk::PhotoacousticImage::ApplyScatteringCompensation(mitk::Image::Pointer inputImage, int scattering) { typedef itk::Image< float, 3 > itkFloatImageType; typedef itk::MultiplyImageFilter MultiplyImageFilterType; itkFloatImageType::Pointer itkImage; mitk::CastToItkImage(inputImage, itkImage); MultiplyImageFilterType::Pointer multiplyFilter = MultiplyImageFilterType::New(); multiplyFilter->SetInput1(itkImage); multiplyFilter->SetInput2(m_FluenceCompResizedItk.at(m_ScatteringCoefficient)); return mitk::GrabItkImageMemory(multiplyFilter->GetOutput()); }*/ mitk::Image::Pointer mitk::PhotoacousticImage::ApplyResampling(mitk::Image::Pointer inputImage, unsigned int outputSize[2]) { typedef itk::Image< float, 3 > itkFloatImageType; typedef itk::ResampleImageFilter < itkFloatImageType, itkFloatImageType > ResampleImageFilter; ResampleImageFilter::Pointer resampleImageFilter = ResampleImageFilter::New(); typedef itk::LinearInterpolateImageFunction T_Interpolator; itkFloatImageType::Pointer itkImage; mitk::CastToItkImage(inputImage, itkImage); itkFloatImageType::SpacingType outputSpacingItk; itkFloatImageType::SizeType inputSizeItk = itkImage->GetLargestPossibleRegion().GetSize(); itkFloatImageType::SizeType outputSizeItk = inputSizeItk; outputSizeItk[0] = outputSize[0]; outputSizeItk[1] = outputSize[1]; outputSizeItk[2] = inputSizeItk[2]; outputSpacingItk[0] = itkImage->GetSpacing()[0] * (static_cast(inputSizeItk[0]) / static_cast(outputSizeItk[0])); outputSpacingItk[1] = itkImage->GetSpacing()[1] * (static_cast(inputSizeItk[1]) / static_cast(outputSizeItk[1])); outputSpacingItk[2] = itkImage->GetSpacing()[2]; typedef itk::IdentityTransform TransformType; T_Interpolator::Pointer _pInterpolator = T_Interpolator::New(); 
resampleImageFilter->SetInput(itkImage); resampleImageFilter->SetSize(outputSizeItk); resampleImageFilter->SetOutputSpacing(outputSpacingItk); resampleImageFilter->SetTransform(TransformType::New()); resampleImageFilter->SetInterpolator(_pInterpolator); resampleImageFilter->UpdateLargestPossibleRegion(); return mitk::GrabItkImageMemory(resampleImageFilter->GetOutput()); } mitk::Image::Pointer mitk::PhotoacousticImage::ApplyCropping(mitk::Image::Pointer inputImage, int above, int below, int right, int left, int minSlice, int maxSlice) { unsigned int inputDim[3] = { inputImage->GetDimension(0), inputImage->GetDimension(1), inputImage->GetDimension(2) }; unsigned int outputDim[3] = { inputImage->GetDimension(0) - left - right, inputImage->GetDimension(1) - (unsigned int)above - (unsigned int)below, (unsigned int)maxSlice - (unsigned int)minSlice + 1 }; void* inputData; float* outputData = new float[outputDim[0] * outputDim[1] * outputDim[2]]; ImageReadAccessor acc(inputImage); inputData = const_cast(acc.GetData()); // convert the data to float by default // as of now only those float, short, float are used at all... though it's easy to add other ones if (inputImage->GetPixelType().GetTypeAsString() == "scalar (float)" || inputImage->GetPixelType().GetTypeAsString() == " (float)") { // copy the data into the cropped image for (unsigned short sl = 0; sl < outputDim[2]; ++sl) { for (unsigned short l = 0; l < outputDim[0]; ++l) { for (unsigned short s = 0; s < outputDim[1]; ++s) { outputData[l + s*(unsigned short)outputDim[0] + sl*outputDim[0] * outputDim[1]] = (float)((float*)inputData)[(l + left) + (s + above)*(unsigned short)inputDim[0] + (sl + minSlice)*inputDim[0] * inputDim[1]]; } } } } else if (inputImage->GetPixelType().GetTypeAsString() == "scalar (short)" || inputImage->GetPixelType().GetTypeAsString() == " (short)") { // copy the data unsigned shorto the cropped image for (unsigned short sl = 0; sl < outputDim[2]; ++sl) { for (unsigned short l = 0; l < outputDim[0]; ++l) { for (unsigned short s = 0; s < outputDim[1]; ++s) { outputData[l + s*(unsigned short)outputDim[0] + sl*outputDim[0] * outputDim[1]] = (float)((short*)inputData)[(l + left) + (s + above)*(unsigned short)inputDim[0] + (sl + minSlice)*inputDim[0] * inputDim[1]]; } } } } else if (inputImage->GetPixelType().GetTypeAsString() == "scalar (double)" || inputImage->GetPixelType().GetTypeAsString() == " (double)") { // copy the data unsigned shorto the cropped image for (unsigned short sl = 0; sl < outputDim[2]; ++sl) { for (unsigned short l = 0; l < outputDim[0]; ++l) { for (unsigned short s = 0; s < outputDim[1]; ++s) { outputData[l + s*(unsigned short)outputDim[0] + sl*outputDim[0] * outputDim[1]] = (float)((double*)inputData)[(l + left) + (s + above)*(unsigned short)inputDim[0] + (sl + minSlice)*inputDim[0] * inputDim[1]]; } } } } else { MITK_INFO << "Could not determine pixel type"; } mitk::Image::Pointer output = mitk::Image::New(); output->Initialize(mitk::MakeScalarPixelType(), 3, outputDim); output->SetSpacing(inputImage->GetGeometry()->GetSpacing()); output->SetImportVolume(outputData, 0, 0, mitk::Image::ReferenceMemory); return output; } mitk::Image::Pointer mitk::PhotoacousticImage::ApplyBeamforming(mitk::Image::Pointer inputImage, BeamformingFilter::beamformingSettings config, int cutoff, std::function progressHandle) { config.RecordTime = config.RecordTime - (cutoff) / inputImage->GetDimension(1) * config.RecordTime; // adjust the recorded time lost by cropping progressHandle(0, "cropping image"); if 
(!config.partial) { config.CropBounds[0] = 0; config.CropBounds[1] = inputImage->GetDimension(2) - 1; } Image::Pointer processedImage = ApplyCropping(inputImage, cutoff, 0, 0, 0, config.CropBounds[0], config.CropBounds[1]); - // resample the image in horizontal direction - if (inputImage->GetDimension(0) != config.ReconstructionLines) - { - progressHandle(0, "resampling image"); - auto begin = std::chrono::high_resolution_clock::now(); - unsigned int dim[2] = { config.ReconstructionLines, processedImage->GetDimension(1) }; - processedImage = ApplyResampling(processedImage, dim); - auto end = std::chrono::high_resolution_clock::now(); - MITK_DEBUG << "Upsampling from " << inputImage->GetDimension(0) << " to " << config.ReconstructionLines << " lines completed in " << ((double)std::chrono::duration_cast(end - begin).count()) / 1000000 << "ms" << std::endl; - } - // perform the beamforming BeamformingFilter::Pointer Beamformer = BeamformingFilter::New(); Beamformer->SetInput(processedImage); Beamformer->Configure(config); Beamformer->SetProgressHandle(progressHandle); Beamformer->UpdateLargestPossibleRegion(); processedImage = Beamformer->GetOutput(); return processedImage; } mitk::Image::Pointer mitk::PhotoacousticImage::BandpassFilter(mitk::Image::Pointer data, float recordTime, float BPHighPass, float BPLowPass, float alpha) { bool powerOfTwo = false; int finalPower = 0; for (int i = 1; pow(2, i) <= data->GetDimension(1); ++i) { finalPower = i; if (pow(2, i) == data->GetDimension(1)) { powerOfTwo = true; } } if (!powerOfTwo) { unsigned int dim[2] = { data->GetDimension(0), (unsigned int)pow(2,finalPower+1)}; data = ApplyResampling(data, dim); } MITK_INFO << data->GetDimension(0); // do a fourier transform, multiply with an appropriate window for the filter, and transform back typedef float PixelType; typedef itk::Image< PixelType, 3 > RealImageType; RealImageType::Pointer image; mitk::CastToItkImage(data, image); typedef itk::FFT1DRealToComplexConjugateImageFilter ForwardFFTFilterType; typedef ForwardFFTFilterType::OutputImageType ComplexImageType; ForwardFFTFilterType::Pointer forwardFFTFilter = ForwardFFTFilterType::New(); forwardFFTFilter->SetInput(image); forwardFFTFilter->SetDirection(1); try { forwardFFTFilter->UpdateOutputInformation(); } catch (itk::ExceptionObject & error) { std::cerr << "Error: " << error << std::endl; MITK_WARN << "Bandpass could not be applied"; return data; } float singleVoxel = 1 / (recordTime / data->GetDimension(1)) / 2 / 1000; float cutoffPixelHighPass = std::min(BPHighPass / singleVoxel, (float)data->GetDimension(1) / 2); float cutoffPixelLowPass = std::min(BPLowPass / singleVoxel, (float)data->GetDimension(1) / 2 - cutoffPixelHighPass); RealImageType::Pointer fftMultiplicator = BPFunction(data, cutoffPixelHighPass, cutoffPixelLowPass, alpha); typedef itk::MultiplyImageFilter< ComplexImageType, RealImageType, ComplexImageType > MultiplyFilterType; MultiplyFilterType::Pointer multiplyFilter = MultiplyFilterType::New(); multiplyFilter->SetInput1(forwardFFTFilter->GetOutput()); multiplyFilter->SetInput2(fftMultiplicator); /*itk::ComplexToModulusImageFilter::Pointer toReal = itk::ComplexToModulusImageFilter::New(); toReal->SetInput(forwardFFTFilter->GetOutput()); return GrabItkImageMemory(toReal->GetOutput()); return GrabItkImageMemory(fftMultiplicator); *///DEBUG typedef itk::FFT1DComplexConjugateToRealImageFilter< ComplexImageType, RealImageType > InverseFilterType; InverseFilterType::Pointer inverseFFTFilter = InverseFilterType::New(); 
inverseFFTFilter->SetInput(multiplyFilter->GetOutput()); inverseFFTFilter->SetDirection(1); return GrabItkImageMemory(inverseFFTFilter->GetOutput()); } itk::Image::Pointer mitk::PhotoacousticImage::BPFunction(mitk::Image::Pointer reference, int cutoffFrequencyPixelHighPass, int cutoffFrequencyPixelLowPass, float alpha) { float* imageData = new float[reference->GetDimension(0)*reference->GetDimension(1)]; // tukey window float width = reference->GetDimension(1) / 2 - (float)cutoffFrequencyPixelHighPass - (float)cutoffFrequencyPixelLowPass; float center = (float)cutoffFrequencyPixelHighPass / 2 + width / 2; MITK_INFO << width << "width " << center << "center " << alpha; for (unsigned int n = 0; n < reference->GetDimension(1); ++n) { imageData[reference->GetDimension(0)*n] = 0; } for (int n = 0; n < width; ++n) { if (n <= (alpha*(width - 1)) / 2) { imageData[reference->GetDimension(0)*(int)(n + center - (width / 2))] = (1 + cos(M_PI*(2 * n / (alpha*(width - 1)) - 1))) / 2; } else if (n >= (width - 1)*(1 - alpha / 2) && n <= (width - 1)) { imageData[reference->GetDimension(0)*(int)(n + center - (width / 2))] = (1 + cos(M_PI*(2 * n / (alpha*(width - 1)) + 1 - 2 / alpha))) / 2; } else { imageData[reference->GetDimension(0)*(int)(n + center - (width / 2))] = 1; } } // Butterworth-Filter /* // first, write the HighPass if (cutoffFrequencyPixelHighPass != reference->GetDimension(1) / 2) { for (int n = 0; n < reference->GetDimension(1) / 2; ++n) { imageData[reference->GetDimension(0)*n] = 1 / (1 + pow( (float)n / (float)(reference->GetDimension(1) / 2 - cutoffFrequencyPixelHighPass) , 2 * butterworthOrder)); } } else { for (int n = 0; n < reference->GetDimension(1) / 2; ++n) { imageData[reference->GetDimension(0)*n] = 1; } } // now, the LowPass for (int n = 0; n < reference->GetDimension(1) / 2; ++n) { imageData[reference->GetDimension(0)*n] *= 1 / (1 + pow( (float)(reference->GetDimension(1) / 2 - 1 - n) / (float)(reference->GetDimension(1) / 2 - cutoffFrequencyPixelLowPass) , 2 * butterworthOrder)); } */ // mirror the first half of the image for (unsigned int n = reference->GetDimension(1) / 2; n < reference->GetDimension(1); ++n) { imageData[reference->GetDimension(0)*n] = imageData[(reference->GetDimension(1) - (n + 1)) * reference->GetDimension(0)]; } // copy and paste to all lines for (unsigned int line = 1; line < reference->GetDimension(0); ++line) { for (unsigned int sample = 0; sample < reference->GetDimension(1); ++sample) { imageData[reference->GetDimension(0)*sample + line] = imageData[reference->GetDimension(0)*sample]; } } typedef itk::Image< float, 3U > ImageType; ImageType::RegionType region; ImageType::IndexType start; start.Fill(0); region.SetIndex(start); ImageType::SizeType size; size[0] = reference->GetDimension(0); size[1] = reference->GetDimension(1); size[2] = reference->GetDimension(2); region.SetSize(size); ImageType::SpacingType SpacingItk; SpacingItk[0] = reference->GetGeometry()->GetSpacing()[0]; SpacingItk[1] = reference->GetGeometry()->GetSpacing()[1]; SpacingItk[2] = reference->GetGeometry()->GetSpacing()[2]; ImageType::Pointer image = ImageType::New(); image->SetRegions(region); image->Allocate(); image->FillBuffer(itk::NumericTraits::Zero); image->SetSpacing(SpacingItk); ImageType::IndexType pixelIndex; for (ImageType::IndexValueType slice = 0; slice < reference->GetDimension(2); ++slice) { for (ImageType::IndexValueType line = 0; line < reference->GetDimension(0); ++line) { for (ImageType::IndexValueType sample = 0; sample < reference->GetDimension(1); ++sample) { 
pixelIndex[0] = line; pixelIndex[1] = sample; pixelIndex[2] = slice; image->SetPixel(pixelIndex, imageData[line + sample*reference->GetDimension(0)]); } } } delete[] imageData; return image; } \ No newline at end of file
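
A few notes on the reconstruction math touched by this patch, with small standalone C++ sketches. Every helper name, struct and example value in these sketches is illustrative and not part of the MITK or filter API unless it also appears in the diff above.

GenerateOutputInformation() derives the output spacing from the probe geometry and the acquisition window: the lateral spacing is the physical aperture width (Pitch * TransducerElements) spread over the reconstruction lines, and the axial spacing is half the record time times the speed of sound spread over the samples per line, both converted to millimetres. A minimal worked example with assumed probe values:

#include <cstdio>

// Worked example of the spacing computed in GenerateOutputInformation().
// All numeric values below are illustrative assumptions, not taken from the patch.
int main()
{
  const float pitch = 0.0003f;          // [m] element pitch (assumed)
  const int   transducerElements = 128; // number of elements (assumed)
  const int   reconstructionLines = 128;
  const float recordTime = 0.00006f;    // [s] acquisition window (assumed)
  const float speedOfSound = 1540.0f;   // [m/s]
  const int   samplesPerLine = 2048;

  // lateral spacing: physical aperture width spread over the reconstructed lines, in mm
  float spacingX = pitch * transducerElements * 1000.0f / reconstructionLines;
  // axial spacing: half the record time times the speed of sound, spread over the samples, in mm
  float spacingY = recordTime / 2.0f * speedOfSound * 1000.0f / samplesPerLine;

  std::printf("spacing: %.4f mm x %.4f mm\n", spacingX, spacingY);
  return 0;
}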
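
The apodization window is sampled once at apodArraySize = 4 * TransducerElements points and later stretched over the active aperture of each pixel (apod_mult = apodArraySize / usedLines). The three windows selected in GenerateData() are the standard Hann, Hamming and boxcar functions; a sketch of the same formulas, using std::vector instead of the raw new float[] arrays returned by VonHannFunction / HammFunction / BoxFunction:

#define _USE_MATH_DEFINES
#include <cmath>
#include <vector>

// Standard apodization windows sampled at `samples` points (samples > 1 assumed).
std::vector<float> MakeHann(int samples)
{
  std::vector<float> w(samples);
  for (int n = 0; n < samples; ++n)
    w[n] = (1.0f - std::cos(2.0f * static_cast<float>(M_PI) * n / (samples - 1))) / 2.0f;
  return w;
}

std::vector<float> MakeHamming(int samples)
{
  std::vector<float> w(samples);
  for (int n = 0; n < samples; ++n)
    w[n] = 0.54f - 0.46f * std::cos(2.0f * static_cast<float>(M_PI) * n / (samples - 1));
  return w;
}

std::vector<float> MakeBox(int samples)
{
  return std::vector<float>(samples, 1.0f); // boxcar: no weighting
}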
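
On the CPU path each reconstruction line of a slice is beamformed in its own std::thread and all threads are joined before the slice is written back with output->SetSlice(). A compact sketch of that pattern; beamformLine stands in for DASQuadraticLine / DASSphericalLine / DMASQuadraticLine / DMASSphericalLine:

#include <functional>
#include <thread>
#include <vector>

// One thread per reconstructed line, joined before the slice is stored,
// using std::vector<std::thread> instead of the raw new std::thread[] array.
void BeamformSlice(int numLines, const std::function<void(int)>& beamformLine)
{
  std::vector<std::thread> threads;
  threads.reserve(numLines);
  for (int line = 0; line < numLines; ++line)
    threads.emplace_back(beamformLine, line); // launch one line
  for (auto& t : threads)
    t.join(); // wait for all lines of this slice
}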
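
The recurring change in this patch is the aperture term `part`: the active aperture of a pixel grows linearly with depth and is limited by the acceptance angle, and because the aperture bounds minLine / maxLine and the element index l_s are expressed in input-line units, the metre-to-line conversion now uses inputL instead of the number of reconstruction lines (outputL / m_Conf.ReconstructionLines). A sketch of that computation; parameter names are illustrative:

#define _USE_MATH_DEFINES
#include <cmath>

// Half-width of the angle-limited aperture, in input-line units:
// depth * tan(angle), converted from metres to RF lines.
float ApertureHalfWidthLines(float sampleIndex, float timeSpacing, float speedOfSound,
                             float angleDeg, float pitch, int transducerElements,
                             int inputLines)
{
  float depthMetres = sampleIndex * timeSpacing * speedOfSound;   // axial position of the pixel
  float halfWidthMetres = depthMetres * std::tan(angleDeg / 360.0f * 2.0f * static_cast<float>(M_PI));
  float metresPerLine = pitch * transducerElements / inputLines;  // spacing of the RF lines on the probe
  float halfWidthLines = halfWidthMetres / metresPerLine;
  return halfWidthLines < 1.0f ? 1.0f : halfWidthLines;           // clamped to 1 as in the kernels
}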
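
Delay-and-sum itself: for every output pixel the filter walks the elements of the active aperture, computes the sample index at which that element recorded the wavefront from the pixel, and accumulates the apodized samples. DASSphericalLine and the ckDASSphe kernel use the exact (spherical) delay sqrt(s_i^2 + lateral^2); the quadratic variants replace the square root by the usual paraxial approximation s_i + lateral^2 / (2 * s_i), which is what delayMultiplicator * (l_s - l_i)^2 + s_i evaluates, saving a sqrt per element. A sketch of one pixel with the exact delay; rf holds one slice in the same layout as the filter, rf[line + sample * numLines], and the Params struct is a stand-in, not the filter's configuration type:

#include <cmath>
#include <vector>

struct Params
{
  float timeSpacing;    // [s] between consecutive samples
  float speedOfSound;   // [m/s]
  float pitch;          // [m] element pitch
  int   transducerElements;
  bool  photoacoustic;  // one-way travel (true) vs pulse-echo (false)
};

// Delay-and-sum for a single output pixel at line position l_i and sample position s_i
// (both already scaled to input units, maxLine > minLine assumed).
float DasPixel(const std::vector<float>& rf, int numLines, int numSamples,
               float l_i, float s_i, int minLine, int maxLine,
               const std::vector<float>& apod, const Params& p)
{
  float sum = 0.0f;
  int used = maxLine - minLine;
  float apodStretch = static_cast<float>(apod.size()) / (maxLine - minLine);

  for (int l_s = minLine; l_s < maxLine; ++l_s)
  {
    // lateral offset of element l_s from the pixel's line, converted to sample units
    float lateral = (l_s - l_i) * p.pitch * p.transducerElements / numLines
                    / (p.timeSpacing * p.speedOfSound);
    // exact (spherical) delay; pulse-echo data adds the transmit path s_i once more
    int delay = static_cast<int>(std::sqrt(s_i * s_i + lateral * lateral)
                                 + (p.photoacoustic ? 0.0f : s_i));
    if (delay >= 0 && delay < numSamples)
      sum += rf[l_s + delay * numLines]
             * apod[static_cast<std::size_t>((l_s - minLine) * apodStretch)];
    else
      --used; // out-of-range samples do not contribute to the average
  }
  return used > 0 ? sum / used : 0.0f;
}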
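
Delay-multiply-and-sum differs only in how the delayed samples are combined: every distinct pair of delayed, apodized samples is multiplied and the signed square root of each product is accumulated, which keeps the result on the amplitude scale of the input; the filter then divides by pow(usedLines, 2) - (usedLines - 1) (and additionally scales by 10 on the CPU path). A sketch of the pair combination, assuming the delayed, apodized samples for one pixel have already been gathered:

#include <cmath>
#include <cstddef>
#include <vector>

// Pairwise combination used by the DMAS variants.
float DmasCombine(const std::vector<float>& delayed)
{
  float sum = 0.0f;
  for (std::size_t i = 0; i + 1 < delayed.size(); ++i)
  {
    for (std::size_t j = i + 1; j < delayed.size(); ++j)
    {
      float mult = delayed[i] * delayed[j];
      // signed square root keeps the output on the same scale as the input amplitudes
      sum += std::sqrt(std::fabs(mult)) * ((mult > 0.0f) - (mult < 0.0f));
    }
  }
  return sum;
}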
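
BPFunction() builds the frequency-domain multiplicator for the bandpass as a Tukey (tapered cosine) window placed between the high-pass and low-pass cutoff pixels, mirrors it onto the second half of the spectrum and copies it to all lines. The window shape on its own, with the same branch conditions as in the filter and a guard added for alpha = 0:

#define _USE_MATH_DEFINES
#include <cmath>
#include <vector>

// 1-D Tukey window of length `width` with taper fraction `alpha` in (0, 1].
std::vector<float> TukeyWindow(int width, float alpha)
{
  std::vector<float> w(width, 1.0f);
  if (alpha <= 0.0f)
    return w; // alpha = 0 degenerates to a boxcar
  for (int n = 0; n < width; ++n)
  {
    if (n <= alpha * (width - 1) / 2)
      w[n] = (1 + std::cos(M_PI * (2.0f * n / (alpha * (width - 1)) - 1))) / 2;
    else if (n >= (width - 1) * (1 - alpha / 2))
      w[n] = (1 + std::cos(M_PI * (2.0f * n / (alpha * (width - 1)) + 1 - 2 / alpha))) / 2;
    // otherwise the flat top stays 1
  }
  return w;
}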