diff --git a/Modules/PhotoacousticsAlgorithms/Algorithms/mitkPhotoacousticBeamformingFilter.cpp b/Modules/PhotoacousticsAlgorithms/Algorithms/mitkPhotoacousticBeamformingFilter.cpp index dc086c4972..8ff20181b3 100644 --- a/Modules/PhotoacousticsAlgorithms/Algorithms/mitkPhotoacousticBeamformingFilter.cpp +++ b/Modules/PhotoacousticsAlgorithms/Algorithms/mitkPhotoacousticBeamformingFilter.cpp @@ -1,616 +1,627 @@ /*=================================================================== mitkPhotoacousticBeamformingFilter The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center, Division of Medical and Biological Informatics. All rights reserved. This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See LICENSE.txt or http://www.mitk.org for details. ===================================================================*/ #define _USE_MATH_DEFINES #include "mitkPhotoacousticBeamformingFilter.h" #include "mitkProperties.h" #include "mitkImageReadAccessor.h" #include #include #include #include #include #include #include "mitkImageCast.h" #include mitk::BeamformingFilter::BeamformingFilter() : m_OutputData(nullptr), m_InputData(nullptr) { this->SetNumberOfIndexedInputs(1); this->SetNumberOfRequiredInputs(1); m_ProgressHandle = [](int, std::string) {}; } void mitk::BeamformingFilter::SetProgressHandle(std::function progressHandle) { m_ProgressHandle = progressHandle; } mitk::BeamformingFilter::~BeamformingFilter() { } void mitk::BeamformingFilter::GenerateInputRequestedRegion() { Superclass::GenerateInputRequestedRegion(); mitk::Image* output = this->GetOutput(); mitk::Image* input = const_cast (this->GetInput()); if (!output->IsInitialized()) { return; } input->SetRequestedRegionToLargestPossibleRegion(); //GenerateTimeInInputRegion(output, input); } void mitk::BeamformingFilter::GenerateOutputInformation() { mitk::Image::ConstPointer input = this->GetInput(); mitk::Image::Pointer output = this->GetOutput(); if ((output->IsInitialized()) && (this->GetMTime() <= m_TimeOfHeaderInitialization.GetMTime())) return; itkDebugMacro(<< "GenerateOutputInformation()"); unsigned int dim[] = { m_Conf.ReconstructionLines, m_Conf.SamplesPerLine, input->GetDimension(2) }; output->Initialize(mitk::MakeScalarPixelType(), 3, dim); mitk::Vector3D spacing; spacing[0] = m_Conf.Pitch * m_Conf.TransducerElements * 1000 / m_Conf.ReconstructionLines; spacing[1] = m_Conf.RecordTime * m_Conf.SpeedOfSound * 1000 / m_Conf.SamplesPerLine; spacing[2] = 1; output->GetGeometry()->SetSpacing(spacing); output->GetGeometry()->Modified(); output->SetPropertyList(input->GetPropertyList()->Clone()); m_TimeOfHeaderInitialization.Modified(); } void mitk::BeamformingFilter::GenerateData() { GenerateOutputInformation(); mitk::Image::Pointer input = this->GetInput(); mitk::Image::Pointer output = this->GetOutput(); if (!output->IsInitialized()) return; float inputDim[2] = { input->GetDimension(0), input->GetDimension(1) / ((int)m_Conf.Photoacoustic + 1) }; // if the photoacoustic option is used, we halve the image, as only the upper part of it contains any information float outputDim[2] = { output->GetDimension(0), output->GetDimension(1) }; - unsigned short chunkSize = 40; // TODO: make this slightly less random + unsigned short chunkSize = 5; // TODO: make this slightly less random unsigned int oclOutputDim[3] = { output->GetDimension(0), output->GetDimension(1), output->GetDimension(2) }; unsigned int oclOutputDimChunk[3] = { 
output->GetDimension(0), output->GetDimension(1), chunkSize}; unsigned int oclInputDimChunk[3] = { input->GetDimension(0), input->GetDimension(1), chunkSize}; unsigned int oclOutputDimLastChunk[3] = { output->GetDimension(0), output->GetDimension(1), input->GetDimension(2) % chunkSize }; unsigned int oclInputDimLastChunk[3] = { input->GetDimension(0), input->GetDimension(1), input->GetDimension(2) % chunkSize }; const int apodArraySize = m_Conf.TransducerElements * 4; // set the resolution of the apodization array float* ApodWindow; // calculate the appropiate apodization window switch (m_Conf.Apod) { case beamformingSettings::Apodization::Hann: ApodWindow = VonHannFunction(apodArraySize); break; case beamformingSettings::Apodization::Hamm: ApodWindow = HammFunction(apodArraySize); break; case beamformingSettings::Apodization::Box: ApodWindow = BoxFunction(apodArraySize); break; default: ApodWindow = BoxFunction(apodArraySize); break; } int progInterval = output->GetDimension(2) / 20 > 1 ? output->GetDimension(2) / 20 : 1; // the interval at which we update the gui progress bar auto begin = std::chrono::high_resolution_clock::now(); // debbuging the performance... if (!m_Conf.UseGPU) { for (int i = 0; i < output->GetDimension(2); ++i) // seperate Slices should get Beamforming seperately applied { mitk::ImageReadAccessor inputReadAccessor(input, input->GetSliceData(i)); m_OutputData = new float[m_Conf.ReconstructionLines*m_Conf.SamplesPerLine]; m_InputDataPuffer = new float[input->GetDimension(0)*input->GetDimension(1)]; // first, we convert any data to float, which we use by default if (input->GetPixelType().GetTypeAsString() == "scalar (float)" || input->GetPixelType().GetTypeAsString() == " (float)") { m_InputData = (float*)inputReadAccessor.GetData(); } else { MITK_INFO << "Pixel type is not float, abort"; return; } // fill the image with zeros for (int l = 0; l < outputDim[0]; ++l) { for (int s = 0; s < outputDim[1]; ++s) { m_OutputData[l*(short)outputDim[1] + s] = 0; } } std::thread *threads = new std::thread[(short)outputDim[0]]; // every line will be beamformed in a seperate thread if (m_Conf.Algorithm == beamformingSettings::BeamformingAlgorithm::DAS) { if (m_Conf.DelayCalculationMethod == beamformingSettings::DelayCalc::QuadApprox) { for (short line = 0; line < outputDim[0]; ++line) { threads[line] = std::thread(&BeamformingFilter::DASQuadraticLine, this, m_InputData, m_OutputData, inputDim, outputDim, line, ApodWindow, apodArraySize); } } else if (m_Conf.DelayCalculationMethod == beamformingSettings::DelayCalc::Spherical) { for (short line = 0; line < outputDim[0]; ++line) { threads[line] = std::thread(&BeamformingFilter::DASSphericalLine, this, m_InputData, m_OutputData, inputDim, outputDim, line, ApodWindow, apodArraySize); } } } else if (m_Conf.Algorithm == beamformingSettings::BeamformingAlgorithm::DMAS) { if (m_Conf.DelayCalculationMethod == beamformingSettings::DelayCalc::QuadApprox) { for (short line = 0; line < outputDim[0]; ++line) { threads[line] = std::thread(&BeamformingFilter::DMASQuadraticLine, this, m_InputData, m_OutputData, inputDim, outputDim, line, ApodWindow, apodArraySize); } } else if (m_Conf.DelayCalculationMethod == beamformingSettings::DelayCalc::Spherical) { for (short line = 0; line < outputDim[0]; ++line) { threads[line] = std::thread(&BeamformingFilter::DMASSphericalLine, this, m_InputData, m_OutputData, inputDim, outputDim, line, ApodWindow, apodArraySize); } } } // wait for all lines to finish for (short line = 0; line < outputDim[0]; ++line) { 
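// every reconstruction line of this slice was dispatched to its own std::thread above; this loop
// joins them all, so SetSlice() below only receives fully beamformed data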
threads[line].join(); } output->SetSlice(m_OutputData, i); if (i % progInterval == 0) m_ProgressHandle((int)((i + 1) / (float)output->GetDimension(2) * 100), "performing reconstruction"); delete[] m_OutputData; delete[] m_InputDataPuffer; m_OutputData = nullptr; m_InputData = nullptr; } } else { mitk::PhotoacousticOCLBeamformer::Pointer m_oclFilter = mitk::PhotoacousticOCLBeamformer::New(); try { if (m_Conf.Algorithm == beamformingSettings::BeamformingAlgorithm::DAS) { if (m_Conf.DelayCalculationMethod == beamformingSettings::DelayCalc::QuadApprox) m_oclFilter->SetAlgorithm(PhotoacousticOCLBeamformer::BeamformingAlgorithm::DASQuad, true); else if (m_Conf.DelayCalculationMethod == beamformingSettings::DelayCalc::Spherical) m_oclFilter->SetAlgorithm(PhotoacousticOCLBeamformer::BeamformingAlgorithm::DASSphe, true); } else if (m_Conf.Algorithm == beamformingSettings::BeamformingAlgorithm::DMAS) { if (m_Conf.DelayCalculationMethod == beamformingSettings::DelayCalc::QuadApprox) m_oclFilter->SetAlgorithm(PhotoacousticOCLBeamformer::BeamformingAlgorithm::DMASQuad, true); else if (m_Conf.DelayCalculationMethod == beamformingSettings::DelayCalc::Spherical) m_oclFilter->SetAlgorithm(PhotoacousticOCLBeamformer::BeamformingAlgorithm::DMASSphe, true); } m_oclFilter->SetApodisation(ApodWindow, apodArraySize); m_oclFilter->SetOutputDim(oclOutputDimChunk); m_oclFilter->SetBeamformingParameters(m_Conf.SpeedOfSound, m_Conf.RecordTime, m_Conf.Pitch, m_Conf.Angle, m_Conf.Photoacoustic, m_Conf.TransducerElements); if (chunkSize < oclOutputDim[2]) { - for (int i = 0; i < ceil((float)oclOutputDim[2] / (float)chunkSize); ++i) + bool skip = false; + for (int i = 0; !skip && i < ceil((float)oclOutputDim[2] / (float)chunkSize); ++i) { - m_ProgressHandle(100 * ((i * chunkSize) / oclOutputDim[2]), "beamforming"); + m_ProgressHandle(100 * ((float)(i * chunkSize) / (float)oclOutputDim[2]), "beamforming"); mitk::Image::Pointer chunk = mitk::Image::New(); - if ((oclOutputDim[2]) - (i * chunkSize) >= chunkSize) + if ((oclOutputDim[2]) - (i * chunkSize) == 1 + chunkSize) + { + // A 3d image of 3rd dimension == 1 can not be processed by openCL, make sure that this case never arises + oclInputDimLastChunk[2] = input->GetDimension(2) % chunkSize + chunkSize; + oclOutputDimLastChunk[2] = input->GetDimension(2) % chunkSize + chunkSize; + + chunk->Initialize(input->GetPixelType(), 3, oclInputDimLastChunk); + m_oclFilter->SetOutputDim(oclOutputDimLastChunk); + skip = true; //skip the last chunk + } + else if ((oclOutputDim[2]) - (i * chunkSize) >= chunkSize) chunk->Initialize(input->GetPixelType(), 3, oclInputDimChunk); else { chunk->Initialize(input->GetPixelType(), 3, oclInputDimLastChunk); m_oclFilter->SetOutputDim(oclOutputDimLastChunk); } chunk->SetSpacing(input->GetGeometry()->GetSpacing()); mitk::ImageReadAccessor ChunkMove(input); chunk->SetImportVolume((void*)&(((float*)const_cast(ChunkMove.GetData()))[i * chunkSize * input->GetDimension(0) * input->GetDimension(1)]), 0, 0, mitk::Image::ReferenceMemory); m_oclFilter->SetInput(chunk); m_oclFilter->Update(); - auto out = m_oclFilter->GetOutput(); - for (int s = i * chunkSize; s < oclOutputDim[2]; ++s) + + for (int s = i * chunkSize; s < oclOutputDim[2]; ++s) // TODO: make the bounds here smaller... 
{ mitk::ImageReadAccessor copy(out, out->GetSliceData(s - i * chunkSize)); output->SetImportSlice(const_cast(copy.GetData()), s, 0, 0, mitk::Image::ReferenceMemory); } } } else { m_ProgressHandle(50, "beamforming"); m_oclFilter->SetOutputDim(oclOutputDim); m_oclFilter->SetInput(input); m_oclFilter->Update(); auto out = m_oclFilter->GetOutput(); mitk::ImageReadAccessor copy(out); output->SetImportVolume(const_cast(copy.GetData()), 0, 0, mitk::Image::ReferenceMemory); } } catch (mitk::Exception &e) { std::string errorMessage = "Caught unexpected exception "; errorMessage.append(e.what()); MITK_ERROR << errorMessage; } } m_TimeOfHeaderInitialization.Modified(); auto end = std::chrono::high_resolution_clock::now(); MITK_INFO << "Beamforming of " << output->GetDimension(2) << " Images completed in " << ((float)std::chrono::duration_cast(end - begin).count()) / 1000000 << "ms" << std::endl; } void mitk::BeamformingFilter::Configure(beamformingSettings settings) { m_Conf = settings; } float* mitk::BeamformingFilter::VonHannFunction(int samples) { float* ApodWindow = new float[samples]; for (int n = 0; n < samples; ++n) { ApodWindow[n] = (1 - cos(2 * M_PI * n / (samples - 1))) / 2; } return ApodWindow; } float* mitk::BeamformingFilter::HammFunction(int samples) { float* ApodWindow = new float[samples]; for (int n = 0; n < samples; ++n) { ApodWindow[n] = 0.54 - 0.46*cos(2 * M_PI*n / (samples - 1)); } return ApodWindow; } float* mitk::BeamformingFilter::BoxFunction(int samples) { float* ApodWindow = new float[samples]; for (int n = 0; n < samples; ++n) { ApodWindow[n] = 1; } return ApodWindow; } void mitk::BeamformingFilter::DASQuadraticLine(float* input, float* output, float inputDim[2], float outputDim[2], const short& line, float* apodisation, const short& apodArraySize) { float& inputS = inputDim[1]; float& inputL = inputDim[0]; float& outputS = outputDim[1]; float& outputL = outputDim[0]; short AddSample = 0; short maxLine = 0; short minLine = 0; float delayMultiplicator = 0; float l_i = 0; float s_i = 0; float l = 0; float x = 0; float root = 0; float part = 0.07 * inputL; float tan_phi = std::tan(m_Conf.Angle / 360 * 2 * M_PI); float part_multiplicator = tan_phi * m_Conf.RecordTime / inputS * m_Conf.SpeedOfSound / m_Conf.Pitch * m_Conf.ReconstructionLines / m_Conf.TransducerElements; float apod_mult = 1; float mult = 0; short usedLines = (maxLine - minLine); //quadratic delay l_i = line / outputL * inputL; for (short sample = 0; sample < outputS; ++sample) { s_i = sample / outputS * inputS; part = part_multiplicator*s_i; if (part < 1) part = 1; maxLine = (short)std::min((l_i + part) + 1, inputL); minLine = (short)std::max((l_i - part), 0.0f); usedLines = (maxLine - minLine); apod_mult = apodArraySize / (maxLine - minLine); delayMultiplicator = pow((inputS / (m_Conf.RecordTime*m_Conf.SpeedOfSound) * (m_Conf.Pitch*m_Conf.TransducerElements) / inputL), 2) / s_i / 2; for (short l_s = minLine; l_s < maxLine; ++l_s) { AddSample = delayMultiplicator * pow((l_s - l_i), 2) + s_i; if (AddSample < inputS && AddSample >= 0) output[sample*(short)outputL + line] += input[l_s + AddSample*(short)inputL] * apodisation[(short)((l_s - minLine)*apod_mult)]; else --usedLines; } output[sample*(short)outputL + line] = output[sample*(short)outputL + line] / usedLines; } } void mitk::BeamformingFilter::DASSphericalLine(float* input, float* output, float inputDim[2], float outputDim[2], const short& line, float* apodisation, const short& apodArraySize) { float& inputS = inputDim[1]; float& inputL = inputDim[0]; float& 
outputS = outputDim[1]; float& outputL = outputDim[0]; short AddSample = 0; short maxLine = 0; short minLine = 0; float delayMultiplicator = 0; float l_i = 0; float s_i = 0; float part = 0.07 * inputL; float tan_phi = std::tan(m_Conf.Angle / 360 * 2 * M_PI); float part_multiplicator = tan_phi * m_Conf.RecordTime / inputS * m_Conf.SpeedOfSound / m_Conf.Pitch * m_Conf.ReconstructionLines / m_Conf.TransducerElements; float apod_mult = 1; short usedLines = (maxLine - minLine); //exact delay l_i = (float)line / outputL * inputL; for (short sample = 0; sample < outputS; ++sample) { s_i = (float)sample / outputS * inputS; part = part_multiplicator*s_i; if (part < 1) part = 1; maxLine = (short)std::min((l_i + part) + 1, inputL); minLine = (short)std::max((l_i - part), 0.0f); usedLines = (maxLine - minLine); apod_mult = apodArraySize / (maxLine - minLine); for (short l_s = minLine; l_s < maxLine; ++l_s) { AddSample = (int)sqrt( pow(s_i, 2) + pow((inputS / (m_Conf.RecordTime*m_Conf.SpeedOfSound) * ((l_s - l_i)*m_Conf.Pitch*m_Conf.TransducerElements) / inputL), 2) ); if (AddSample < inputS && AddSample >= 0) output[sample*(short)outputL + line] += input[l_s + AddSample*(short)inputL] * apodisation[(short)((l_s - minLine)*apod_mult)]; else --usedLines; } output[sample*(short)outputL + line] = output[sample*(short)outputL + line] / usedLines; } } void mitk::BeamformingFilter::DMASQuadraticLine(float* input, float* output, float inputDim[2], float outputDim[2], const short& line, float* apodisation, const short& apodArraySize) { float& inputS = inputDim[1]; float& inputL = inputDim[0]; float& outputS = outputDim[1]; float& outputL = outputDim[0]; short AddSample1 = 0; short AddSample2 = 0; short maxLine = 0; short minLine = 0; float delayMultiplicator = 0; float l_i = 0; float s_i = 0; float l = 0; float x = 0; float root = 0; float part = 0.07 * inputL; float tan_phi = std::tan(m_Conf.Angle / 360 * 2 * M_PI); float part_multiplicator = tan_phi * m_Conf.RecordTime / inputS * m_Conf.SpeedOfSound / m_Conf.Pitch * m_Conf.ReconstructionLines / m_Conf.TransducerElements; float apod_mult = 1; float mult = 0; short usedLines = (maxLine - minLine); //quadratic delay l_i = line / outputL * inputL; for (short sample = 0; sample < outputS; ++sample) { s_i = sample / outputS * inputS; part = part_multiplicator*s_i; if (part < 1) part = 1; maxLine = (short)std::min((l_i + part) + 1, inputL); minLine = (short)std::max((l_i - part), 0.0f); usedLines = (maxLine - minLine); apod_mult = apodArraySize / (maxLine - minLine); delayMultiplicator = pow((inputS / (m_Conf.RecordTime*m_Conf.SpeedOfSound) * (m_Conf.Pitch*m_Conf.TransducerElements) / inputL), 2) / s_i / 2; //calculate the AddSamples beforehand to save some time short* AddSample = new short[maxLine - minLine]; for (short l_s = 0; l_s < maxLine - minLine; ++l_s) { AddSample[l_s] = (short)(delayMultiplicator * pow((minLine + l_s - l_i), 2) + s_i); } for (short l_s1 = minLine; l_s1 < maxLine - 1; ++l_s1) { if (AddSample[l_s1 - minLine] < (short)inputS && AddSample[l_s1 - minLine] >= 0) { for (short l_s2 = l_s1 + 1; l_s2 < maxLine; ++l_s2) { if (AddSample[l_s2 - minLine] < inputS && AddSample[l_s2 - minLine] >= 0) { mult = input[l_s2 + AddSample[l_s2 - minLine] * (short)inputL] * apodisation[(short)((l_s2 - minLine)*apod_mult)] * input[l_s1 + AddSample[l_s1 - minLine] * (short)inputL] * apodisation[(short)((l_s1 - minLine)*apod_mult)]; output[sample*(short)outputL + line] += sqrt(abs(mult)) * ((mult > 0) - (mult < 0)); } } } else --usedLines; } 
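// DMAS: the loops above multiply every in-bounds pair of delayed channel samples, apply a
// sign-preserving square root and accumulate the result; the next line rescales the accumulated
// sum by a usedLines-dependent factor so that outputs from apertures of different widths stay
// roughly comparable in amplitude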
output[sample*(short)outputL + line] = 10 * output[sample*(short)outputL + line] / (pow(usedLines, 2) - (usedLines - 1)); delete[] AddSample; } } void mitk::BeamformingFilter::DMASSphericalLine(float* input, float* output, float inputDim[2], float outputDim[2], const short& line, float* apodisation, const short& apodArraySize) { float& inputS = inputDim[1]; float& inputL = inputDim[0]; float& outputS = outputDim[1]; float& outputL = outputDim[0]; short AddSample1 = 0; short AddSample2 = 0; short maxLine = 0; short minLine = 0; float delayMultiplicator = 0; float l_i = 0; float s_i = 0; float l = 0; float x = 0; float root = 0; float part = 0.07 * inputL; float tan_phi = std::tan(m_Conf.Angle / 360 * 2 * M_PI); float part_multiplicator = tan_phi * m_Conf.RecordTime / inputS * m_Conf.SpeedOfSound / m_Conf.Pitch * m_Conf.ReconstructionLines / m_Conf.TransducerElements; float apod_mult = 1; float mult = 0; short usedLines = (maxLine - minLine); //exact delay l_i = line / outputL * inputL; for (short sample = 0; sample < outputS; ++sample) { s_i = sample / outputS * inputS; part = part_multiplicator*s_i; if (part < 1) part = 1; maxLine = (short)std::min((l_i + part) + 1, inputL); minLine = (short)std::max((l_i - part), 0.0f); usedLines = (maxLine - minLine); apod_mult = apodArraySize / (maxLine - minLine); //calculate the AddSamples beforehand to save some time short* AddSample = new short[maxLine - minLine]; for (short l_s = 0; l_s < maxLine - minLine; ++l_s) { AddSample[l_s] = (short)sqrt( pow(s_i, 2) + pow((inputS / (m_Conf.RecordTime*m_Conf.SpeedOfSound) * ((minLine + l_s - l_i)*m_Conf.Pitch*m_Conf.TransducerElements) / inputL), 2) ); } for (short l_s1 = minLine; l_s1 < maxLine - 1; ++l_s1) { if (AddSample[l_s1 - minLine] < inputS && AddSample[l_s1 - minLine] >= 0) { for (short l_s2 = l_s1 + 1; l_s2 < maxLine; ++l_s2) { if (AddSample[l_s2 - minLine] < inputS && AddSample[l_s2 - minLine] >= 0) { mult = input[l_s2 + AddSample[l_s2 - minLine] * (short)inputL] * apodisation[(int)((l_s2 - minLine)*apod_mult)] * input[l_s1 + AddSample[l_s1 - minLine] * (short)inputL] * apodisation[(int)((l_s1 - minLine)*apod_mult)]; output[sample*(short)outputL + line] += sqrt(abs(mult)) * ((mult > 0) - (mult < 0)); } } } else --usedLines; } output[sample*(short)outputL + line] = 10 * output[sample*(short)outputL + line] / (pow(usedLines, 2) - (usedLines - 1)); delete[] AddSample; } } \ No newline at end of file diff --git a/Modules/PhotoacousticsAlgorithms/Resources/DASQuadratic.cl b/Modules/PhotoacousticsAlgorithms/Resources/DASQuadratic.cl index 973ffe03fb..2a2bfaf5c7 100644 --- a/Modules/PhotoacousticsAlgorithms/Resources/DASQuadratic.cl +++ b/Modules/PhotoacousticsAlgorithms/Resources/DASQuadratic.cl @@ -1,76 +1,76 @@ /*=================================================================== The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center, Division of Medical and Biological Informatics. All rights reserved. This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See LICENSE.txt or http://www.mitk.org for details. 
===================================================================*/ __kernel void ckDASQuad( __read_only image3d_t dSource, // input image __global float* dDest, // output buffer __global float* apodArray, unsigned short apodArraySize, float SpeedOfSound, float RecordTime, float Pitch, float Angle, unsigned short PAImage, unsigned short TransducerElements // parameters ) { // get thread identifier unsigned int globalPosX = get_global_id(0); unsigned int globalPosY = get_global_id(1); unsigned int globalPosZ = get_global_id(2); unsigned short outputS = get_global_size(1); unsigned short outputL = get_global_size(0); // get image width and weight const unsigned int inputL = get_image_width( dSource ); const unsigned int inputS = get_image_height( dSource ) / (PAImage + 1); const unsigned int Slices = get_image_depth( dSource ); // create an image sampler const sampler_t defaultSampler = CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP_TO_EDGE | CLK_FILTER_NEAREST ; // terminate non-valid threads if ( globalPosX < outputL && globalPosY < outputS && globalPosZ < Slices ) { float l_i = (float)globalPosX / outputL * inputL; - float s_i = (float)globalPosY / outputS * inputS; + float s_i = (float)globalPosY / outputS * inputS / (2 - PAImage); float part = (tan(Angle / 360 * 2 * M_PI) * RecordTime / inputS * SpeedOfSound / Pitch * outputL / TransducerElements) * s_i; if (part < 1) part = 1; short maxLine = min((l_i + part) + 1, (float)inputL); short minLine = max((l_i - part), 0.0f); short usedLines = (maxLine - minLine); float apod_mult = apodArraySize / (maxLine - minLine); short AddSample = 0; float output = 0; float delayMultiplicator = pow(inputS / (RecordTime*SpeedOfSound) * Pitch * TransducerElements / inputL, 2) / s_i / 2; for (short l_s = minLine; l_s < maxLine; ++l_s) { - AddSample = delayMultiplicator * pow((l_s - l_i), 2) + s_i; + AddSample = delayMultiplicator * pow((l_s - l_i), 2) + s_i + (1-PAImage)*s_i; if (AddSample < inputS && AddSample >= 0) output += apodArray[(short)((l_s - minLine)*apod_mult)] * read_imagef( dSource, defaultSampler, (int4)(l_s, AddSample, globalPosZ, 0 )).x; else --usedLines; } dDest[ globalPosZ * outputL * outputS + globalPosY * outputL + globalPosX ] = output / usedLines; } } \ No newline at end of file diff --git a/Modules/PhotoacousticsAlgorithms/Resources/DASspherical.cl b/Modules/PhotoacousticsAlgorithms/Resources/DASspherical.cl index 87ac198a0d..07906c6460 100644 --- a/Modules/PhotoacousticsAlgorithms/Resources/DASspherical.cl +++ b/Modules/PhotoacousticsAlgorithms/Resources/DASspherical.cl @@ -1,79 +1,79 @@ /*=================================================================== The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center, Division of Medical and Biological Informatics. All rights reserved. This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See LICENSE.txt or http://www.mitk.org for details. 
===================================================================*/ __kernel void ckDASSphe( __read_only image3d_t dSource, // input image __global float* dDest, // output buffer __global float* apodArray, unsigned short apodArraySize, float SpeedOfSound, float RecordTime, float Pitch, float Angle, unsigned short PAImage, unsigned short TransducerElements // parameters ) { // get thread identifier unsigned int globalPosX = get_global_id(0); unsigned int globalPosY = get_global_id(1); unsigned int globalPosZ = get_global_id(2); unsigned short outputS = get_global_size(1); unsigned short outputL = get_global_size(0); // get image width and weight const unsigned int inputL = get_image_width( dSource ); const unsigned int inputS = get_image_height( dSource ) / (PAImage + 1); const unsigned int Slices = get_image_depth( dSource ); // create an image sampler const sampler_t defaultSampler = CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP_TO_EDGE | CLK_FILTER_NEAREST ; // terminate non-valid threads if ( globalPosX < outputL && globalPosY < outputS && globalPosZ < Slices ) { float l_i = (float)globalPosX / outputL * inputL; - float s_i = (float)globalPosY / outputS * inputS; + float s_i = (float)globalPosY / outputS * inputS / (2 - PAImage); float part = (tan(Angle / 360 * 2 * M_PI) * RecordTime / inputS * SpeedOfSound / Pitch * outputL / TransducerElements) * s_i; if (part < 1) part = 1; short maxLine = min((l_i + part) + 1, (float)inputL); short minLine = max((l_i - part), 0.0f); short usedLines = (maxLine - minLine); float apod_mult = apodArraySize / (maxLine - minLine); short AddSample = 0; float output = 0; for (short l_s = minLine; l_s < maxLine; ++l_s) { AddSample = sqrt( pow(s_i, 2) + pow((inputS / (RecordTime*SpeedOfSound) * ((l_s - l_i)*Pitch*TransducerElements) / inputL), 2) - ); + ) + (1-PAImage)*s_i; if (AddSample < inputS && AddSample >= 0) output += apodArray[(short)((l_s - minLine)*apod_mult)] * read_imagef( dSource, defaultSampler, (int4)(l_s, AddSample, globalPosZ, 0 )).x; else --usedLines; } dDest[ globalPosZ * outputL * outputS + globalPosY * outputL + globalPosX ] = output / usedLines; } } \ No newline at end of file diff --git a/Modules/PhotoacousticsAlgorithms/Resources/DMASQuadratic.cl b/Modules/PhotoacousticsAlgorithms/Resources/DMASQuadratic.cl index d7fc466068..3423fa7226 100644 --- a/Modules/PhotoacousticsAlgorithms/Resources/DMASQuadratic.cl +++ b/Modules/PhotoacousticsAlgorithms/Resources/DMASQuadratic.cl @@ -1,92 +1,92 @@ /*=================================================================== The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center, Division of Medical and Biological Informatics. All rights reserved. This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See LICENSE.txt or http://www.mitk.org for details. 
===================================================================*/ __kernel void ckDMASQuad( __read_only image3d_t dSource, // input image __global float* dDest, // output buffer __global float* apodArray, unsigned short apodArraySize, float SpeedOfSound, float RecordTime, float Pitch, float Angle, unsigned short PAImage, unsigned short TransducerElements // parameters ) { // get thread identifier unsigned int globalPosX = get_global_id(0); unsigned int globalPosY = get_global_id(1); unsigned int globalPosZ = get_global_id(2); unsigned short outputS = get_global_size(1); unsigned short outputL = get_global_size(0); // get image width and weight const unsigned int inputL = get_image_width( dSource ); const unsigned int inputS = get_image_height( dSource ) / (PAImage + 1); const unsigned int Slices = get_image_depth( dSource ); // create an image sampler const sampler_t defaultSampler = CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP_TO_EDGE | CLK_FILTER_NEAREST ; // terminate non-valid threads if ( globalPosX < outputL && globalPosY < outputS && globalPosZ < Slices ) { float l_i = (float)globalPosX / outputL * inputL; - float s_i = (float)globalPosY / outputS * inputS; + float s_i = (float)globalPosY / outputS * inputS / (2 - PAImage); float part = (tan(Angle / 360 * 2 * M_PI) * RecordTime / inputS * SpeedOfSound / Pitch * outputL / TransducerElements) * s_i; if (part < 1) part = 1; short maxLine = min((l_i + part) + 1, (float)inputL); short minLine = max((l_i - part), 0.0f); short usedLines = (maxLine - minLine); float apod_mult = apodArraySize / (maxLine - minLine); float delayMultiplicator = pow((inputS / (RecordTime*SpeedOfSound) * Pitch * TransducerElements / inputL), 2) / s_i / 2; float mult = 0; float output = 0; float AddSample1 = 0; float AddSample2 = 0; for (short l_s1 = minLine; l_s1 < maxLine - 1; ++l_s1) { - AddSample1 = delayMultiplicator * pow((l_s1 - l_i), 2) + s_i; + AddSample1 = delayMultiplicator * pow((l_s1 - l_i), 2) + s_i + (1-PAImage)*s_i; if (AddSample1 < inputS && AddSample1 >= 0) { for (short l_s2 = l_s1 + 1; l_s2 < maxLine; ++l_s2) { - AddSample2 = delayMultiplicator * pow((l_s2 - l_i), 2) + s_i; + AddSample2 = delayMultiplicator * pow((l_s2 - l_i), 2) + s_i + (1-PAImage)*s_i; if (AddSample2 < inputS && AddSample2 >= 0) { mult = read_imagef( dSource, defaultSampler, (int4)(l_s2, AddSample2, globalPosZ, 0 )).x * apodArray[(short)((l_s2 - minLine)*apod_mult)] * read_imagef( dSource, defaultSampler, (int4)(l_s1, AddSample1, globalPosZ, 0 )).x * apodArray[(short)((l_s1 - minLine)*apod_mult)]; output += sqrt(mult * ((float)(mult>0)-(float)(mult<0))) * ((mult > 0) - (mult < 0)); } } } else --usedLines; } dDest[ globalPosZ * outputL * outputS + globalPosY * outputL + globalPosX ] = 10 * output / (pow((float)usedLines, 2.0f) - (usedLines - 1)); } } \ No newline at end of file diff --git a/Modules/PhotoacousticsAlgorithms/Resources/DMASspherical.cl b/Modules/PhotoacousticsAlgorithms/Resources/DMASspherical.cl index acc963dd04..10b540203f 100644 --- a/Modules/PhotoacousticsAlgorithms/Resources/DMASspherical.cl +++ b/Modules/PhotoacousticsAlgorithms/Resources/DMASspherical.cl @@ -1,96 +1,98 @@ /*=================================================================== The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center, Division of Medical and Biological Informatics. All rights reserved. This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See LICENSE.txt or http://www.mitk.org for details. ===================================================================*/ __kernel void ckDMASSphe( __read_only image3d_t dSource, // input image __global float* dDest, // output buffer __global float* apodArray, unsigned short apodArraySize, float SpeedOfSound, float RecordTime, float Pitch, float Angle, unsigned short PAImage, unsigned short TransducerElements // parameters ) { // get thread identifier unsigned int globalPosX = get_global_id(0); unsigned int globalPosY = get_global_id(1); unsigned int globalPosZ = get_global_id(2); unsigned short outputS = get_global_size(1); unsigned short outputL = get_global_size(0); // get image width and weight const unsigned int inputL = get_image_width( dSource ); const unsigned int inputS = get_image_height( dSource ) / (PAImage + 1); const unsigned int Slices = get_image_depth( dSource ); // create an image sampler const sampler_t defaultSampler = CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP_TO_EDGE | CLK_FILTER_NEAREST ; // terminate non-valid threads if ( globalPosX < outputL && globalPosY < outputS && globalPosZ < Slices ) { float l_i = (float)globalPosX / outputL * inputL; - float s_i = (float)globalPosY / outputS * inputS; + float s_i = (float)globalPosY / outputS * inputS / (2 - PAImage); float part = (tan(Angle / 360 * 2 * M_PI) * RecordTime / inputS * SpeedOfSound / Pitch * outputL / TransducerElements) * s_i; if (part < 1) part = 1; short maxLine = min((l_i + part) + 1, (float)inputL); short minLine = max((l_i - part), 0.0f); short usedLines = (maxLine - minLine); float apod_mult = apodArraySize / (maxLine - minLine); float mult = 0; float output = 0; float AddSample1 = 0; float AddSample2 = 0; for (short l_s1 = minLine; l_s1 < maxLine - 1; ++l_s1) { AddSample1 = sqrt( pow(s_i, 2) + - pow((inputS / (RecordTime*SpeedOfSound) * ((l_s1 - l_i)*Pitch*TransducerElements)/inputL), 2)); + pow((inputS / (RecordTime*SpeedOfSound) * ((l_s1 - l_i)*Pitch*TransducerElements)/inputL), 2) + ) + (1-PAImage)*s_i; if (AddSample1 < inputS && AddSample1 >= 0) { for (short l_s2 = l_s1 + 1; l_s2 < maxLine; ++l_s2) { AddSample2 = sqrt( pow(s_i, 2) + - pow((inputS / (RecordTime*SpeedOfSound) * ((l_s2 - l_i)*Pitch*TransducerElements)/inputL), 2)); + pow((inputS / (RecordTime*SpeedOfSound) * ((l_s2 - l_i)*Pitch*TransducerElements)/inputL), 2) + ) + (1-PAImage)*s_i; if (AddSample2 < inputS && AddSample2 >= 0) { mult = read_imagef( dSource, defaultSampler, (int4)(l_s2, AddSample2, globalPosZ, 0 )).x * apodArray[(short)((l_s2 - minLine)*apod_mult)] * read_imagef( dSource, defaultSampler, (int4)(l_s1, AddSample1, globalPosZ, 0 )).x * apodArray[(short)((l_s1 - minLine)*apod_mult)]; output += sqrt(mult * ((float)(mult>0)-(float)(mult<0))) * ((mult > 0) - (mult < 0)); } } } else --usedLines; } dDest[ globalPosZ * outputL * outputS + globalPosY * outputL + globalPosX ] = 10 * output / (pow((float)usedLines, 2.0f) - (usedLines - 1)); } } \ No newline at end of file diff --git a/Modules/PhotoacousticsAlgorithms/mitkPhotoacousticImage.cpp b/Modules/PhotoacousticsAlgorithms/mitkPhotoacousticImage.cpp index d97da9e0e6..8745b81f28 100644 --- a/Modules/PhotoacousticsAlgorithms/mitkPhotoacousticImage.cpp +++ b/Modules/PhotoacousticsAlgorithms/mitkPhotoacousticImage.cpp @@ -1,590 +1,587 @@ /*=================================================================== The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center, Division of Medical and Biological Informatics. All rights reserved. 
This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See LICENSE.txt or http://www.mitk.org for details. ===================================================================*/ #define _USE_MATH_DEFINES #include #include "mitkPhotoacousticImage.h" #include "itkBModeImageFilter.h" #include "itkPhotoacousticBModeImageFilter.h" #include "mitkImageCast.h" #include "mitkITKImageImport.h" #include "mitkPhotoacousticBeamformingFilter.h" #include #include // itk dependencies #include "itkImage.h" #include "itkResampleImageFilter.h" #include "itkCastImageFilter.h" #include "itkCropImageFilter.h" #include "itkRescaleIntensityImageFilter.h" #include "itkIntensityWindowingImageFilter.h" #include #include "itkMultiplyImageFilter.h" #include "itkBSplineInterpolateImageFunction.h" #include // needed itk image filters #include "mitkITKImageImport.h" #include "itkFFTShiftImageFilter.h" #include "itkMultiplyImageFilter.h" #include "itkComplexToModulusImageFilter.h" #include #include "itkFFT1DComplexConjugateToRealImageFilter.h" #include "itkFFT1DRealToComplexConjugateImageFilter.h" mitk::PhotoacousticImage::PhotoacousticImage() { MITK_INFO << "[PhotoacousticImage Debug] created that image"; } mitk::PhotoacousticImage::~PhotoacousticImage() { MITK_INFO << "[PhotoacousticImage Debug] destroyed that image"; } mitk::Image::Pointer mitk::PhotoacousticImage::ApplyBmodeFilter(mitk::Image::Pointer inputImage, bool UseLogFilter, float resampleSpacing) { // we use this seperate ApplyBmodeFilter Method for processing of two-dimensional images // the image needs to be of floating point type for the envelope filter to work; the casting is done automatically by the CastToItkImage typedef itk::Image< float, 3 > itkFloatImageType; typedef itk::BModeImageFilter < itkFloatImageType, itkFloatImageType > BModeFilterType; BModeFilterType::Pointer bModeFilter = BModeFilterType::New(); // LogFilter typedef itk::PhotoacousticBModeImageFilter < itkFloatImageType, itkFloatImageType > PhotoacousticBModeImageFilter; PhotoacousticBModeImageFilter::Pointer photoacousticBModeFilter = PhotoacousticBModeImageFilter::New(); // No LogFilter typedef itk::ResampleImageFilter < itkFloatImageType, itkFloatImageType > ResampleImageFilter; ResampleImageFilter::Pointer resampleImageFilter = ResampleImageFilter::New(); itkFloatImageType::Pointer itkImage; mitk::CastToItkImage(inputImage, itkImage); itkFloatImageType::Pointer bmode; if (UseLogFilter) { bModeFilter->SetInput(itkImage); bModeFilter->SetDirection(1); bmode = bModeFilter->GetOutput(); } else { photoacousticBModeFilter->SetInput(itkImage); photoacousticBModeFilter->SetDirection(1); bmode = photoacousticBModeFilter->GetOutput(); } // resampleSpacing == 0 means: do no resampling if (resampleSpacing == 0) { return mitk::GrabItkImageMemory(bmode); } itkFloatImageType::SpacingType outputSpacing; itkFloatImageType::SizeType inputSize = itkImage->GetLargestPossibleRegion().GetSize(); itkFloatImageType::SizeType outputSize = inputSize; outputSpacing[0] = itkImage->GetSpacing()[0] * (static_cast(inputSize[0]) / static_cast(outputSize[0])); outputSpacing[1] = resampleSpacing; outputSpacing[2] = itkImage->GetSpacing()[2]; outputSize[1] = inputSize[1] * itkImage->GetSpacing()[1] / outputSpacing[1]; typedef itk::IdentityTransform TransformType; resampleImageFilter->SetInput(bmode); resampleImageFilter->SetSize(outputSize); resampleImageFilter->SetOutputSpacing(outputSpacing); 
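// the output size along the scan direction was computed above as
// inputSize[1] * inputSpacing[1] / resampleSpacing, so the physical extent of the B-mode image is
// preserved; e.g. 2048 samples at 0.025 mm spacing resampled to 0.1 mm spacing become 512 samples
// covering the same 51.2 mm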
resampleImageFilter->SetTransform(TransformType::New()); resampleImageFilter->UpdateLargestPossibleRegion(); return mitk::GrabItkImageMemory(resampleImageFilter->GetOutput()); } /*mitk::Image::Pointer mitk::PhotoacousticImage::ApplyScatteringCompensation(mitk::Image::Pointer inputImage, int scattering) { typedef itk::Image< float, 3 > itkFloatImageType; typedef itk::MultiplyImageFilter MultiplyImageFilterType; itkFloatImageType::Pointer itkImage; mitk::CastToItkImage(inputImage, itkImage); MultiplyImageFilterType::Pointer multiplyFilter = MultiplyImageFilterType::New(); multiplyFilter->SetInput1(itkImage); multiplyFilter->SetInput2(m_FluenceCompResizedItk.at(m_ScatteringCoefficient)); return mitk::GrabItkImageMemory(multiplyFilter->GetOutput()); }*/ mitk::Image::Pointer mitk::PhotoacousticImage::ApplyResampling(mitk::Image::Pointer inputImage, unsigned int outputSize[2]) { typedef itk::Image< float, 3 > itkFloatImageType; typedef itk::ResampleImageFilter < itkFloatImageType, itkFloatImageType > ResampleImageFilter; ResampleImageFilter::Pointer resampleImageFilter = ResampleImageFilter::New(); typedef itk::LinearInterpolateImageFunction T_Interpolator; itkFloatImageType::Pointer itkImage; mitk::CastToItkImage(inputImage, itkImage); itkFloatImageType::SpacingType inputSpacing = itkImage->GetSpacing(); itkFloatImageType::SpacingType outputSpacingItk; itkFloatImageType::SizeType inputSizeItk = itkImage->GetLargestPossibleRegion().GetSize(); itkFloatImageType::SizeType outputSizeItk = inputSizeItk; outputSizeItk[0] = outputSize[0]; outputSizeItk[1] = outputSize[1]; outputSizeItk[2] = inputSizeItk[2]; outputSpacingItk[0] = itkImage->GetSpacing()[0] * (static_cast(inputSizeItk[0]) / static_cast(outputSizeItk[0])); outputSpacingItk[1] = itkImage->GetSpacing()[1] * (static_cast(inputSizeItk[1]) / static_cast(outputSizeItk[1])); outputSpacingItk[2] = itkImage->GetSpacing()[2]; typedef itk::IdentityTransform TransformType; T_Interpolator::Pointer _pInterpolator = T_Interpolator::New(); resampleImageFilter->SetInput(itkImage); resampleImageFilter->SetSize(outputSizeItk); resampleImageFilter->SetOutputSpacing(outputSpacingItk); resampleImageFilter->SetTransform(TransformType::New()); resampleImageFilter->SetInterpolator(_pInterpolator); resampleImageFilter->UpdateLargestPossibleRegion(); return mitk::GrabItkImageMemory(resampleImageFilter->GetOutput()); } mitk::Image::Pointer mitk::PhotoacousticImage::ApplyCropping(mitk::Image::Pointer inputImage, int above, int below, int right, int left, int minSlice, int maxSlice) { unsigned int inputDim[3] = { inputImage->GetDimension(0), inputImage->GetDimension(1), inputImage->GetDimension(2) }; unsigned int outputDim[3] = { inputImage->GetDimension(0) - left - right, inputImage->GetDimension(1) - above - below, maxSlice - minSlice + 1 }; void* inputData; float* outputData = new float[outputDim[0] * outputDim[1] * outputDim[2]]; ImageReadAccessor acc(inputImage); inputData = const_cast(acc.GetData()); // convert the data to float by default // as of now only those float, short, float are used at all... 
though it's easy to add other ones if (inputImage->GetPixelType().GetTypeAsString() == "scalar (float)" || inputImage->GetPixelType().GetTypeAsString() == " (float)") { // copy the data into the cropped image for (unsigned short sl = 0; sl < outputDim[2]; ++sl) { for (unsigned short l = 0; l < outputDim[0]; ++l) { for (unsigned short s = 0; s < outputDim[1]; ++s) { outputData[l + s*(unsigned short)outputDim[0] + sl*outputDim[0] * outputDim[1]] = (float)((float*)inputData)[(l + left) + (s + above)*(unsigned short)inputDim[0] + (sl + minSlice)*inputDim[0] * inputDim[1]]; } } } } else if (inputImage->GetPixelType().GetTypeAsString() == "scalar (short)" || inputImage->GetPixelType().GetTypeAsString() == " (short)") { // copy the data unsigned shorto the cropped image for (unsigned short sl = 0; sl < outputDim[2]; ++sl) { for (unsigned short l = 0; l < outputDim[0]; ++l) { for (unsigned short s = 0; s < outputDim[1]; ++s) { outputData[l + s*(unsigned short)outputDim[0] + sl*outputDim[0] * outputDim[1]] = (float)((short*)inputData)[(l + left) + (s + above)*(unsigned short)inputDim[0] + (sl + minSlice)*inputDim[0] * inputDim[1]]; } } } } else if (inputImage->GetPixelType().GetTypeAsString() == "scalar (double)" || inputImage->GetPixelType().GetTypeAsString() == " (double)") { // copy the data unsigned shorto the cropped image for (unsigned short sl = 0; sl < outputDim[2]; ++sl) { for (unsigned short l = 0; l < outputDim[0]; ++l) { for (unsigned short s = 0; s < outputDim[1]; ++s) { outputData[l + s*(unsigned short)outputDim[0] + sl*outputDim[0] * outputDim[1]] = (float)((double*)inputData)[(l + left) + (s + above)*(unsigned short)inputDim[0] + (sl + minSlice)*inputDim[0] * inputDim[1]]; } } } } else { MITK_INFO << "Could not determine pixel type"; } mitk::Image::Pointer output = mitk::Image::New(); output->Initialize(mitk::MakeScalarPixelType(), 3, outputDim); output->SetSpacing(inputImage->GetGeometry()->GetSpacing()); - output->SetImportVolume(outputData, 0, 0, mitk::Image::ManageMemory); + output->SetImportVolume(outputData, 0, 0, mitk::Image::ReferenceMemory); return output; } mitk::Image::Pointer mitk::PhotoacousticImage::ApplyBeamforming(mitk::Image::Pointer inputImage, BeamformingFilter::beamformingSettings config, int cutoff, std::function progressHandle) { // crop the image - config.RecordTime = config.RecordTime - cutoff / inputImage->GetDimension(1) * config.RecordTime; // adjust the recorded time lost by cropping + // set the Maximum Size of the image to 4096 + unsigned short lowerCutoff = (4096 + cutoff) < inputImage->GetDimension(1) ? 
inputImage->GetDimension(1) - 4096 : 0; + + config.RecordTime = config.RecordTime - (cutoff + lowerCutoff) / inputImage->GetDimension(1) * config.RecordTime; // adjust the recorded time lost by cropping progressHandle(0, "cropping image"); if (!config.partial) { config.CropBounds[0] = 0; config.CropBounds[1] = inputImage->GetDimension(2) - 1; } - Image::Pointer processedImage = ApplyCropping(inputImage, cutoff, 0, 0, 0, config.CropBounds[0], config.CropBounds[1]); + Image::Pointer processedImage = ApplyCropping(inputImage, cutoff, lowerCutoff, 0, 0, config.CropBounds[0], config.CropBounds[1]); // resample the image in horizontal direction if (inputImage->GetDimension(0) != config.ReconstructionLines) { progressHandle(0, "resampling image"); auto begin = std::chrono::high_resolution_clock::now(); unsigned int dim[2] = { config.ReconstructionLines, processedImage->GetDimension(1) }; processedImage = ApplyResampling(processedImage, dim); auto end = std::chrono::high_resolution_clock::now(); MITK_DEBUG << "Upsampling from " << inputImage->GetDimension(0) << " to " << config.ReconstructionLines << " lines completed in " << ((double)std::chrono::duration_cast(end - begin).count()) / 1000000 << "ms" << std::endl; } // perform the beamforming BeamformingFilter::Pointer Beamformer = BeamformingFilter::New(); Beamformer->SetInput(processedImage); Beamformer->Configure(config); Beamformer->SetProgressHandle(progressHandle); Beamformer->UpdateLargestPossibleRegion(); processedImage = Beamformer->GetOutput(); - // apply a bandpass filter, if requested - if (config.UseBP) - { - progressHandle(100, "applying bandpass"); - mitk::Image::Pointer BP = BandpassFilter(processedImage, config.RecordTime, config.BPHighPass, config.BPLowPass, config.ButterworthOrder); - - mitk::ImageReadAccessor copy(BP); - processedImage->SetImportVolume(const_cast(copy.GetData()), 0, 0, mitk::Image::ReferenceMemory); - } - return processedImage; } mitk::Image::Pointer mitk::PhotoacousticImage::BandpassFilter(mitk::Image::Pointer data, float recordTime, float BPHighPass, float BPLowPass, float alpha) { + if (data->GetDimension(1) % 2 != 0) + { + int CropBounds[2] = { 0, data->GetDimension(2) - 1 }; + data = ApplyCropping(data, 1, 0, 0, 0, CropBounds[0], CropBounds[1]); + //for the itk filter to work we make sure the 2nd dimension is a multiple of 2 + } // do a fourier transform, multiply with an appropriate window for the filter, and transform back typedef float PixelType; typedef itk::Image< PixelType, 3 > RealImageType; RealImageType::Pointer image; mitk::CastToItkImage(data, image); typedef itk::FFT1DRealToComplexConjugateImageFilter ForwardFFTFilterType; typedef ForwardFFTFilterType::OutputImageType ComplexImageType; ForwardFFTFilterType::Pointer forwardFFTFilter = ForwardFFTFilterType::New(); forwardFFTFilter->SetInput(image); forwardFFTFilter->SetDirection(1); try { forwardFFTFilter->UpdateOutputInformation(); } catch (itk::ExceptionObject & error) { std::cerr << "Error: " << error << std::endl; MITK_WARN << "Bandpass could not be applied"; return data; } float singleVoxel = 1 / (recordTime / data->GetDimension(1)) / 2 / 1000; float cutoffPixelHighPass = std::min(BPHighPass / singleVoxel, (float)data->GetDimension(1) / 2); float cutoffPixelLowPass = std::min(BPLowPass / singleVoxel, (float)data->GetDimension(1) / 2 - cutoffPixelHighPass); - MITK_INFO << "LowPass " << cutoffPixelLowPass << "HighPass " << cutoffPixelHighPass; - RealImageType::Pointer fftMultiplicator = BPFunction(data, cutoffPixelHighPass, cutoffPixelLowPass, 
alpha); typedef itk::MultiplyImageFilter< ComplexImageType, RealImageType, ComplexImageType > MultiplyFilterType; MultiplyFilterType::Pointer multiplyFilter = MultiplyFilterType::New(); multiplyFilter->SetInput1(forwardFFTFilter->GetOutput()); multiplyFilter->SetInput2(fftMultiplicator); /*itk::ComplexToModulusImageFilter::Pointer toReal = itk::ComplexToModulusImageFilter::New(); toReal->SetInput(forwardFFTFilter->GetOutput()); - //return GrabItkImageMemory(toReal->GetOutput()); - return GrabItkImageMemory(fftMultiplicator); */ //DEBUG + return GrabItkImageMemory(toReal->GetOutput()); + return GrabItkImageMemory(fftMultiplicator); *///DEBUG typedef itk::FFT1DComplexConjugateToRealImageFilter< ComplexImageType, RealImageType > InverseFilterType; InverseFilterType::Pointer inverseFFTFilter = InverseFilterType::New(); inverseFFTFilter->SetInput(multiplyFilter->GetOutput()); inverseFFTFilter->SetDirection(1); return GrabItkImageMemory(inverseFFTFilter->GetOutput()); } itk::Image::Pointer mitk::PhotoacousticImage::BPFunction(mitk::Image::Pointer reference, int cutoffFrequencyPixelHighPass, int cutoffFrequencyPixelLowPass, float alpha) { float* imageData = new float[reference->GetDimension(0)*reference->GetDimension(1)]; // tukey window float width = reference->GetDimension(1) / 2 - (float)cutoffFrequencyPixelHighPass - (float)cutoffFrequencyPixelLowPass; float center = (float)cutoffFrequencyPixelHighPass / 2 + width / 2; MITK_INFO << width << "width " << center << "center " << alpha; for (int n = 0; n < reference->GetDimension(1); ++n) { imageData[reference->GetDimension(0)*n] = 0; } for (int n = 0; n < width; ++n) { if (n <= (alpha*(width - 1)) / 2) { imageData[reference->GetDimension(0)*(int)(n + center - (width / 2))] = (1 + cos(M_PI*(2 * n / (alpha*(width - 1)) - 1))) / 2; } else if (n >= (width - 1)*(1 - alpha / 2) && n <= (width - 1)) { imageData[reference->GetDimension(0)*(int)(n + center - (width / 2))] = (1 + cos(M_PI*(2 * n / (alpha*(width - 1)) + 1 - 2 / alpha))) / 2; } else { imageData[reference->GetDimension(0)*(int)(n + center - (width / 2))] = 1; } } // Butterworth-Filter /* // first, write the HighPass if (cutoffFrequencyPixelHighPass != reference->GetDimension(1) / 2) { for (int n = 0; n < reference->GetDimension(1) / 2; ++n) { imageData[reference->GetDimension(0)*n] = 1 / (1 + pow( (float)n / (float)(reference->GetDimension(1) / 2 - cutoffFrequencyPixelHighPass) , 2 * butterworthOrder)); } } else { for (int n = 0; n < reference->GetDimension(1) / 2; ++n) { imageData[reference->GetDimension(0)*n] = 1; } } // now, the LowPass for (int n = 0; n < reference->GetDimension(1) / 2; ++n) { imageData[reference->GetDimension(0)*n] *= 1 / (1 + pow( (float)(reference->GetDimension(1) / 2 - 1 - n) / (float)(reference->GetDimension(1) / 2 - cutoffFrequencyPixelLowPass) , 2 * butterworthOrder)); } */ // mirror the first half of the image for (int n = reference->GetDimension(1) / 2; n < reference->GetDimension(1); ++n) { imageData[reference->GetDimension(0)*n] = imageData[(reference->GetDimension(1) - (n + 1)) * reference->GetDimension(0)]; } // copy and paste to all lines for (int line = 1; line < reference->GetDimension(0); ++line) { for (int sample = 0; sample < reference->GetDimension(1); ++sample) { imageData[reference->GetDimension(0)*sample + line] = imageData[reference->GetDimension(0)*sample]; } } typedef itk::Image< float, 3U > ImageType; ImageType::RegionType region; ImageType::IndexType start; start.Fill(0); region.SetIndex(start); ImageType::SizeType size; size[0] = 
reference->GetDimension(0); size[1] = reference->GetDimension(1); size[2] = reference->GetDimension(2); region.SetSize(size); ImageType::SpacingType SpacingItk; SpacingItk[0] = reference->GetGeometry()->GetSpacing()[0]; SpacingItk[1] = reference->GetGeometry()->GetSpacing()[1]; SpacingItk[2] = reference->GetGeometry()->GetSpacing()[2]; ImageType::Pointer image = ImageType::New(); image->SetRegions(region); image->Allocate(); image->FillBuffer(itk::NumericTraits::Zero); image->SetSpacing(SpacingItk); ImageType::IndexType pixelIndex; for (ImageType::IndexValueType slice = 0; slice < reference->GetDimension(2); ++slice) { for (ImageType::IndexValueType line = 0; line < reference->GetDimension(0); ++line) { for (ImageType::IndexValueType sample = 0; sample < reference->GetDimension(1); ++sample) { pixelIndex[0] = line; pixelIndex[1] = sample; pixelIndex[2] = slice; image->SetPixel(pixelIndex, imageData[line + sample*reference->GetDimension(0)]); } } } delete[] imageData; return image; } /* mitk::CropFilter::CropFilter() : m_PixelCalculation(NULL) { this->AddSourceFile("CropFilter.cl"); this->m_FilterID = "CropFilter"; } mitk::CropFilter::~CropFilter() { if (this->m_PixelCalculation) { clReleaseKernel(m_PixelCalculation); } } void mitk::CropFilter::Update() { //Check if context & program available if (!this->Initialize()) { us::ServiceReference ref = GetModuleContext()->GetServiceReference(); OclResourceService* resources = GetModuleContext()->GetService(ref); // clean-up also the resources resources->InvalidateStorage(); mitkThrow() << "Filter is not initialized. Cannot update."; } else { // Execute this->Execute(); } } void mitk::CropFilter::Execute() { cl_int clErr = 0; try { this->InitExec(this->m_PixelCalculation); } catch (const mitk::Exception& e) { MITK_ERROR << "Catched exception while initializing filter: " << e.what(); return; } us::ServiceReference ref = GetModuleContext()->GetServiceReference(); OclResourceService* resources = GetModuleContext()->GetService(ref); cl_context gpuContext = resources->GetContext(); unsigned int size = m_OutputDim[0] * m_OutputDim[1] * m_OutputDim[2] * m_Images; cl_mem cl_input = clCreateBuffer(gpuContext, CL_MEM_READ_ONLY | CL_MEM_USE_HOST_PTR, sizeof(float) * size, m_Data, &clErr); CHECK_OCL_ERR(clErr); // set kernel arguments clErr = clSetKernelArg(this->m_PixelCalculation, 2, sizeof(cl_mem), &cl_input); clErr |= clSetKernelArg(this->m_PixelCalculation, 3, sizeof(cl_ushort), &(this->m_Images)); CHECK_OCL_ERR(clErr); // execute the filter on a 3D NDRange this->ExecuteKernel(m_PixelCalculation, 3); // signalize the GPU-side data changed m_Output->Modified(GPU_DATA); } us::Module *mitk::CropFilter::GetModule() { return us::GetModuleContext()->GetModule(); } bool mitk::CropFilter::Initialize() { bool buildErr = true; cl_int clErr = 0; if (OclFilter::Initialize()) { this->m_PixelCalculation = clCreateKernel(this->m_ClProgram, "ckCrop", &clErr); buildErr |= CHECK_OCL_ERR(clErr); } return (OclFilter::IsInitialized() && buildErr); } void mitk::CropFilter::SetInput(mitk::Image::Pointer image) { if (image->GetDimension() != 3) { mitkThrowException(mitk::Exception) << "Input for " << this->GetNameOfClass() << " is not 3D. The filter only supports 3D. 
Please change your input."; } OclImageToImageFilter::SetInput(image); } mitk::Image::Pointer mitk::CropFilter::GetOutput() { if (m_Output->IsModified(GPU_DATA)) { void* pData = m_Output->TransferDataToCPU(m_CommandQue); const unsigned int dimension = 3; unsigned int* dimensions = m_OutputDim; const mitk::SlicedGeometry3D::Pointer p_slg = m_Input->GetMITKImage()->GetSlicedGeometry(); MITK_DEBUG << "Creating new MITK Image."; m_Output->GetMITKImage()->Initialize(this->GetOutputType(), dimension, dimensions); m_Output->GetMITKImage()->SetSpacing(p_slg->GetSpacing()); m_Output->GetMITKImage()->SetGeometry(m_Input->GetMITKImage()->GetGeometry()); m_Output->GetMITKImage()->SetImportVolume(pData, 0, 0, mitk::Image::ReferenceMemory); } MITK_DEBUG << "Image Initialized."; return m_Output->GetMITKImage(); } */ \ No newline at end of file diff --git a/Plugins/org.mitk.gui.qt.photoacoustics.imageprocessing/src/internal/PAImageProcessing.cpp b/Plugins/org.mitk.gui.qt.photoacoustics.imageprocessing/src/internal/PAImageProcessing.cpp index 8eb88a5336..701be5ccde 100644 --- a/Plugins/org.mitk.gui.qt.photoacoustics.imageprocessing/src/internal/PAImageProcessing.cpp +++ b/Plugins/org.mitk.gui.qt.photoacoustics.imageprocessing/src/internal/PAImageProcessing.cpp @@ -1,871 +1,866 @@ /*=================================================================== The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center, Division of Medical and Biological Informatics. All rights reserved. This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See LICENSE.txt or http://www.mitk.org for details. ===================================================================*/ // Blueberry #include #include // Qmitk #include "PAImageProcessing.h" // Qt #include #include #include //mitk image #include #include "mitkPhotoacousticImage.h" #include "mitkPhotoacousticBeamformingFilter.h" //other #include #include const std::string PAImageProcessing::VIEW_ID = "org.mitk.views.paimageprocessing"; PAImageProcessing::PAImageProcessing() : m_ResampleSpacing(0), m_UseLogfilter(false) { qRegisterMetaType(); qRegisterMetaType(); } void PAImageProcessing::SetFocus() { m_Controls.buttonApplyBModeFilter->setFocus(); } void PAImageProcessing::CreateQtPartControl(QWidget *parent) { // create GUI widgets from the Qt Designer's .ui file m_Controls.setupUi(parent); connect(m_Controls.buttonApplyBModeFilter, SIGNAL(clicked()), this, SLOT(StartBmodeThread())); connect(m_Controls.DoResampling, SIGNAL(clicked()), this, SLOT(UseResampling())); connect(m_Controls.Logfilter, SIGNAL(clicked()), this, SLOT(UseLogfilter())); connect(m_Controls.ResamplingValue, SIGNAL(valueChanged(double)), this, SLOT(SetResampling())); connect(m_Controls.buttonApplyBeamforming, SIGNAL(clicked()), this, SLOT(StartBeamformingThread())); connect(m_Controls.buttonApplyCropFilter, SIGNAL(clicked()), this, SLOT(StartCropThread())); connect(m_Controls.buttonApplyBandpass, SIGNAL(clicked()), this, SLOT(StartBandpassThread())); connect(m_Controls.UseImageSpacing, SIGNAL(clicked()), this, SLOT(UseImageSpacing())); connect(m_Controls.ScanDepth, SIGNAL(valueChanged(double)), this, SLOT(UpdateRecordTime())); connect(m_Controls.SpeedOfSound, SIGNAL(valueChanged(double)), this, SLOT(UpdateRecordTime())); connect(m_Controls.Samples, SIGNAL(valueChanged(int)), this, SLOT(UpdateRecordTime())); connect(m_Controls.UseImageSpacing, SIGNAL(clicked()), this, SLOT(UpdateRecordTime())); 
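// scan depth, speed of sound, sample count and the image-spacing checkbox are all wired to
// UpdateRecordTime(), which presumably recomputes the acquisition window used for beamforming
// whenever one of these inputs changes (Qt allows connecting several signals to the same slot)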
connect(m_Controls.UseBP, SIGNAL(clicked()), this, SLOT(UseBandpass())); connect(m_Controls.boundLow, SIGNAL(valueChanged(int)), this, SLOT(UpdateBounds())); connect(m_Controls.boundHigh, SIGNAL(valueChanged(int)), this, SLOT(UpdateBounds())); connect(m_Controls.Partial, SIGNAL(clicked()), this, SLOT(UpdateBounds())); m_Controls.DoResampling->setChecked(false); m_Controls.ResamplingValue->setEnabled(false); m_Controls.progressBar->setMinimum(0); m_Controls.progressBar->setMaximum(100); m_Controls.progressBar->setVisible(false); m_Controls.UseImageSpacing->setToolTip("Image spacing of y-Axis must be in us, x-Axis in mm."); m_Controls.UseImageSpacing->setToolTipDuration(5000); m_Controls.ProgressInfo->setVisible(false); m_Controls.UseBP->hide(); UseImageSpacing(); } void PAImageProcessing::StartBeamformingThread() { QList nodes = this->GetDataManagerSelection(); if (nodes.empty()) return; mitk::DataStorage::Pointer storage = this->GetDataStorage(); mitk::DataNode::Pointer node = nodes.front(); if (!node) { // Nothing selected. Inform the user and return QMessageBox::information(NULL, "Template", "Please load and select an image before starting image processing."); return; } mitk::BaseData* data = node->GetData(); if (data) { // test if this data item is an image or not (could also be a surface or something totally different) mitk::Image* image = dynamic_cast(data); if (image) { UpdateBFSettings(image); std::stringstream message; std::string name; message << "Performing beamforming for image "; if (node->GetName(name)) { // a property called "name" was found for this DataNode message << "'" << name << "'"; m_OldNodeName = name; } else m_OldNodeName = " "; message << "."; MITK_INFO << message.str(); m_Controls.progressBar->setValue(0); m_Controls.progressBar->setVisible(true); m_Controls.ProgressInfo->setVisible(true); m_Controls.ProgressInfo->setText("started"); m_Controls.buttonApplyBeamforming->setText("working..."); DisableControls(); BeamformingThread *thread = new BeamformingThread(); connect(thread, &BeamformingThread::result, this, &PAImageProcessing::HandleBeamformingResults); connect(thread, &BeamformingThread::updateProgress, this, &PAImageProcessing::UpdateProgress); connect(thread, &BeamformingThread::finished, thread, &QObject::deleteLater); thread->setConfig(BFconfig); thread->setInputImage(image); MITK_INFO << "Started new thread for Beamforming"; thread->start(); } } } void PAImageProcessing::HandleBeamformingResults(mitk::Image::Pointer image) { auto newNode = mitk::DataNode::New(); newNode->SetData(image); // name the new Data node std::stringstream newNodeName; newNodeName << m_OldNodeName << " "; if (BFconfig.Algorithm == mitk::BeamformingFilter::beamformingSettings::BeamformingAlgorithm::DAS) newNodeName << "DAS bf, "; else if (BFconfig.Algorithm == mitk::BeamformingFilter::beamformingSettings::BeamformingAlgorithm::DMAS) newNodeName << "DMAS bf, "; if (BFconfig.DelayCalculationMethod == mitk::BeamformingFilter::beamformingSettings::DelayCalc::QuadApprox) newNodeName << "q. delay"; if (BFconfig.DelayCalculationMethod == mitk::BeamformingFilter::beamformingSettings::DelayCalc::Spherical) newNodeName << "s. 
delay"; newNode->SetName(newNodeName.str()); // update level window for the current dynamic range mitk::LevelWindow levelWindow; newNode->GetLevelWindow(levelWindow); auto data = newNode->GetData(); levelWindow.SetAuto(dynamic_cast(data), true, true); newNode->SetLevelWindow(levelWindow); // add new node to data storage this->GetDataStorage()->Add(newNode); // disable progress bar m_Controls.progressBar->setVisible(false); m_Controls.ProgressInfo->setVisible(false); m_Controls.buttonApplyBeamforming->setText("Apply Beamforming"); EnableControls(); // update rendering mitk::RenderingManager::GetInstance()->InitializeViews( dynamic_cast(data)->GetGeometry(), mitk::RenderingManager::REQUEST_UPDATE_ALL, true); mitk::RenderingManager::GetInstance()->RequestUpdateAll(); } void PAImageProcessing::StartBmodeThread() { QList nodes = this->GetDataManagerSelection(); if (nodes.empty()) return; mitk::DataStorage::Pointer storage = this->GetDataStorage(); mitk::DataNode::Pointer node = nodes.front(); if (!node) { // Nothing selected. Inform the user and return QMessageBox::information(NULL, "Template", "Please load and select an image before starting image processing."); return; } mitk::BaseData* data = node->GetData(); if (data) { // test if this data item is an image or not (could also be a surface or something totally different) mitk::Image* image = dynamic_cast(data); if (image) { UpdateBFSettings(image); std::stringstream message; std::string name; message << "Performing image processing for image "; if (node->GetName(name)) { // a property called "name" was found for this DataNode message << "'" << name << "'"; m_OldNodeName = name; } else m_OldNodeName = " "; message << "."; MITK_INFO << message.str(); m_Controls.buttonApplyBModeFilter->setText("working..."); DisableControls(); BmodeThread *thread = new BmodeThread(); connect(thread, &BmodeThread::result, this, &PAImageProcessing::HandleBmodeResults); connect(thread, &BmodeThread::finished, thread, &QObject::deleteLater); thread->setConfig(m_UseLogfilter, m_ResampleSpacing); thread->setInputImage(image); MITK_INFO << "Started new thread for Image Processing"; thread->start(); } } } void PAImageProcessing::HandleBmodeResults(mitk::Image::Pointer image) { auto newNode = mitk::DataNode::New(); newNode->SetData(image); // name the new Data node std::stringstream newNodeName; newNodeName << m_OldNodeName << " "; newNodeName << "B-Mode"; newNode->SetName(newNodeName.str()); // update level window for the current dynamic range mitk::LevelWindow levelWindow; newNode->GetLevelWindow(levelWindow); auto data = newNode->GetData(); levelWindow.SetAuto(dynamic_cast(data), true, true); newNode->SetLevelWindow(levelWindow); // add new node to data storage this->GetDataStorage()->Add(newNode); // disable progress bar m_Controls.progressBar->setVisible(false); m_Controls.buttonApplyBModeFilter->setText("Apply B-mode Filter"); EnableControls(); // update rendering mitk::RenderingManager::GetInstance()->InitializeViews( dynamic_cast(data)->GetGeometry(), mitk::RenderingManager::REQUEST_UPDATE_ALL, true); mitk::RenderingManager::GetInstance()->RequestUpdateAll(); } void PAImageProcessing::StartCropThread() { QList nodes = this->GetDataManagerSelection(); if (nodes.empty()) return; mitk::DataStorage::Pointer storage = this->GetDataStorage(); mitk::DataNode::Pointer node = nodes.front(); if (!node) { // Nothing selected. 
Inform the user and return QMessageBox::information(NULL, "Template", "Please load and select an image before starting image cropping."); return; } mitk::BaseData* data = node->GetData(); if (data) { // test if this data item is an image or not (could also be a surface or something totally different) mitk::Image* image = dynamic_cast(data); if (image) { UpdateBFSettings(image); std::stringstream message; std::string name; message << "Performing image cropping for image "; if (node->GetName(name)) { // a property called "name" was found for this DataNode message << "'" << name << "'"; m_OldNodeName = name; } else m_OldNodeName = " "; message << "."; MITK_INFO << message.str(); m_Controls.buttonApplyCropFilter->setText("working..."); DisableControls(); CropThread *thread = new CropThread(); connect(thread, &CropThread::result, this, &PAImageProcessing::HandleCropResults); connect(thread, &CropThread::finished, thread, &QObject::deleteLater); thread->setConfig(m_Controls.CutoffAbove->value(), m_Controls.CutoffBelow->value()); thread->setInputImage(image); MITK_INFO << "Started new thread for Image Cropping"; thread->start(); } } } void PAImageProcessing::HandleCropResults(mitk::Image::Pointer image) { auto newNode = mitk::DataNode::New(); newNode->SetData(image); // name the new Data node std::stringstream newNodeName; newNodeName << m_OldNodeName << " "; newNodeName << "Cropped"; newNode->SetName(newNodeName.str()); // update level window for the current dynamic range mitk::LevelWindow levelWindow; newNode->GetLevelWindow(levelWindow); auto data = newNode->GetData(); levelWindow.SetAuto(dynamic_cast(data), true, true); newNode->SetLevelWindow(levelWindow); // add new node to data storage this->GetDataStorage()->Add(newNode); m_Controls.buttonApplyCropFilter->setText("Apply Crop Filter"); EnableControls(); // update rendering mitk::RenderingManager::GetInstance()->InitializeViews( dynamic_cast(data)->GetGeometry(), mitk::RenderingManager::REQUEST_UPDATE_ALL, true); mitk::RenderingManager::GetInstance()->RequestUpdateAll(); } void PAImageProcessing::StartBandpassThread() { QList nodes = this->GetDataManagerSelection(); if (nodes.empty()) return; mitk::DataStorage::Pointer storage = this->GetDataStorage(); mitk::DataNode::Pointer node = nodes.front(); if (!node) { // Nothing selected. 
Inform the user and return QMessageBox::information(NULL, "Template", "Please load and select an image before starting image cropping."); return; } mitk::BaseData* data = node->GetData(); if (data) { // test if this data item is an image or not (could also be a surface or something totally different) mitk::Image* image = dynamic_cast(data); if (image) { UpdateBFSettings(image); std::stringstream message; std::string name; message << "Performing Bandpass filter on image "; if (node->GetName(name)) { // a property called "name" was found for this DataNode message << "'" << name << "'"; m_OldNodeName = name; } else m_OldNodeName = " "; message << "."; MITK_INFO << message.str(); m_Controls.buttonApplyBandpass->setText("working..."); DisableControls(); BandpassThread *thread = new BandpassThread(); connect(thread, &BandpassThread::result, this, &PAImageProcessing::HandleBandpassResults); connect(thread, &BandpassThread::finished, thread, &QObject::deleteLater); float recordTime = image->GetDimension(1)*image->GetGeometry()->GetSpacing()[1] / 1000 / m_Controls.BPSpeedOfSound->value(); // add a safeguard so the program does not chrash when applying a Bandpass that reaches out of the bounds of the image float maxFrequency = 1 / (recordTime / image->GetDimension(1)) * image->GetDimension(1) / 2 / 2 / 1000; float BPHighPass = 1000000 * m_Controls.BPhigh->value(); // [Hz] float BPLowPass = maxFrequency - 1000000 * m_Controls.BPlow->value(); // [Hz] if (BPLowPass > maxFrequency && m_Controls.UseBP->isChecked()) { QMessageBox Msgbox; Msgbox.setText("LowPass too low, disabled it."); Msgbox.exec(); BPLowPass = 0; } if (BPLowPass < 0 && m_Controls.UseBP->isChecked()) { QMessageBox Msgbox; Msgbox.setText("LowPass too high, disabled it."); Msgbox.exec(); BPLowPass = 0; } if (BPHighPass > maxFrequency && m_Controls.UseBP->isChecked()) { QMessageBox Msgbox; Msgbox.setText("HighPass too high, disabled it."); Msgbox.exec(); BPHighPass = 0; } if (BPHighPass > maxFrequency - BFconfig.BPLowPass) { QMessageBox Msgbox; Msgbox.setText("HighPass higher than LowPass, disabled both."); Msgbox.exec(); BPHighPass = 0; BPLowPass = 0; } thread->setConfig(BPHighPass, BPLowPass, m_Controls.BPFalloff->value(), recordTime); thread->setInputImage(image); MITK_INFO << "Started new thread for Bandpass filter"; thread->start(); } } } void PAImageProcessing::HandleBandpassResults(mitk::Image::Pointer image) { auto newNode = mitk::DataNode::New(); newNode->SetData(image); // name the new Data node std::stringstream newNodeName; newNodeName << m_OldNodeName << " "; newNodeName << "Bandpassed"; newNode->SetName(newNodeName.str()); // update level window for the current dynamic range mitk::LevelWindow levelWindow; newNode->GetLevelWindow(levelWindow); auto data = newNode->GetData(); levelWindow.SetAuto(dynamic_cast(data), true, true); newNode->SetLevelWindow(levelWindow); // add new node to data storage this->GetDataStorage()->Add(newNode); m_Controls.buttonApplyBandpass->setText("Apply Bandpass"); EnableControls(); // update rendering mitk::RenderingManager::GetInstance()->InitializeViews( dynamic_cast(data)->GetGeometry(), mitk::RenderingManager::REQUEST_UPDATE_ALL, true); mitk::RenderingManager::GetInstance()->RequestUpdateAll(); } void PAImageProcessing::UpdateBounds() { if (!m_Controls.Partial->isChecked()) { m_Controls.boundLow->setEnabled(false); m_Controls.boundHigh->setEnabled(false); BFconfig.partial = false; return; } else { m_Controls.boundLow->setEnabled(true); m_Controls.boundHigh->setEnabled(true); BFconfig.partial = 
true; } if(m_Controls.boundLow->value()>m_Controls.boundHigh->value()) { MITK_INFO << "high bound < low bound -> setting both to beamform only first slice"; m_Controls.boundLow->setValue(0); m_Controls.boundHigh->setValue(0); BFconfig.CropBounds[0] = 0; BFconfig.CropBounds[1] = 0; } else { BFconfig.CropBounds[0] = m_Controls.boundLow->value(); BFconfig.CropBounds[1] = m_Controls.boundHigh->value(); } } void PAImageProcessing::UpdateProgress(int progress, std::string progressInfo) { if (progress < 100) m_Controls.progressBar->setValue(progress); else m_Controls.progressBar->setValue(100); m_Controls.ProgressInfo->setText(progressInfo.c_str()); qApp->processEvents(); } void PAImageProcessing::UpdateImageInfo() { QList nodes = this->GetDataManagerSelection(); if (nodes.empty()) return; mitk::DataNode::Pointer node = nodes.front(); if (!node) { // Nothing selected return; } mitk::BaseData* data = node->GetData(); if (data) { // test if this data item is an image or not (could also be a surface or something totally different) mitk::Image* image = dynamic_cast(data); if (image) { // beamforming configs if (m_Controls.UseImageSpacing->isChecked()) { m_Controls.ElementCount->setValue(image->GetDimension(0)); m_Controls.Pitch->setValue(image->GetGeometry()->GetSpacing()[0]); m_Controls.boundLow->setMaximum(image->GetDimension(2) - 1); } UpdateRecordTime(image); // bandpass configs float speedOfSound = m_Controls.BPSpeedOfSound->value(); // [m/s] float recordTime = image->GetDimension(1)*image->GetGeometry()->GetSpacing()[1] / 1000 / speedOfSound; std::stringstream frequency; float maxFrequency = 1 / (recordTime / image->GetDimension(1)) * image->GetDimension(1) / 2 / 2 / 1000; frequency << maxFrequency / 1000000; //[MHz] frequency << "MHz"; frequency << " is the maximal allowed frequency for the selected image."; m_Controls.BPhigh->setToolTip(frequency.str().c_str()); m_Controls.BPlow->setToolTip(frequency.str().c_str()); m_Controls.BPhigh->setToolTipDuration(5000); m_Controls.BPlow->setToolTipDuration(5000); } } } void PAImageProcessing::OnSelectionChanged( berry::IWorkbenchPart::Pointer /*source*/, const QList& nodes ) { // iterate all selected objects, adjust warning visibility foreach( mitk::DataNode::Pointer node, nodes ) { if( node.IsNotNull() && dynamic_cast(node->GetData()) ) { m_Controls.labelWarning->setVisible( false ); m_Controls.buttonApplyBModeFilter->setEnabled( true ); m_Controls.labelWarning2->setVisible(false); m_Controls.buttonApplyCropFilter->setEnabled(true); m_Controls.labelWarning3->setVisible(false); m_Controls.buttonApplyBandpass->setEnabled(true); UpdateImageInfo(); return; } } m_Controls.labelWarning->setVisible( true ); m_Controls.buttonApplyBModeFilter->setEnabled( false ); m_Controls.labelWarning2->setVisible(true); m_Controls.buttonApplyCropFilter->setEnabled(false); m_Controls.labelWarning3->setVisible(true); m_Controls.buttonApplyBandpass->setEnabled(false); } void PAImageProcessing::UseResampling() { if (m_Controls.DoResampling->isChecked()) { m_Controls.ResamplingValue->setEnabled(true); m_ResampleSpacing = m_Controls.ResamplingValue->value(); } else { m_Controls.ResamplingValue->setEnabled(false); m_ResampleSpacing = 0; } } void PAImageProcessing::UseLogfilter() { m_UseLogfilter = m_Controls.Logfilter->isChecked(); } void PAImageProcessing::SetResampling() { m_ResampleSpacing = m_Controls.ResamplingValue->value(); } void PAImageProcessing::UpdateBFSettings(mitk::Image::Pointer image) { if ("DAS" == m_Controls.BFAlgorithm->currentText()) BFconfig.Algorithm = 
mitk::BeamformingFilter::beamformingSettings::BeamformingAlgorithm::DAS; else if ("DMAS" == m_Controls.BFAlgorithm->currentText()) BFconfig.Algorithm = mitk::BeamformingFilter::beamformingSettings::BeamformingAlgorithm::DMAS; if ("Quad. Approx." == m_Controls.DelayCalculation->currentText()) { BFconfig.DelayCalculationMethod = mitk::BeamformingFilter::beamformingSettings::DelayCalc::QuadApprox; } else if ("Spherical Wave" == m_Controls.DelayCalculation->currentText()) { BFconfig.DelayCalculationMethod = mitk::BeamformingFilter::beamformingSettings::DelayCalc::Spherical; } if ("Von Hann" == m_Controls.Apodization->currentText()) { BFconfig.Apod = mitk::BeamformingFilter::beamformingSettings::Apodization::Hann; } else if ("Hamming" == m_Controls.Apodization->currentText()) { BFconfig.Apod = mitk::BeamformingFilter::beamformingSettings::Apodization::Hamm; } else if ("Box" == m_Controls.Apodization->currentText()) { BFconfig.Apod = mitk::BeamformingFilter::beamformingSettings::Apodization::Box; } BFconfig.Pitch = m_Controls.Pitch->value() / 1000; // [m] BFconfig.SpeedOfSound = m_Controls.SpeedOfSound->value(); // [m/s] BFconfig.SamplesPerLine = m_Controls.Samples->value(); BFconfig.ReconstructionLines = m_Controls.Lines->value(); BFconfig.TransducerElements = m_Controls.ElementCount->value(); BFconfig.Angle = m_Controls.Angle->value(); // [deg] BFconfig.ButterworthOrder = m_Controls.BPFalloff->value(); BFconfig.UseBP = m_Controls.UseBP->isChecked(); BFconfig.UseGPU = m_Controls.UseGPU->isChecked(); UpdateRecordTime(image); UpdateBounds(); } void PAImageProcessing::UpdateRecordTime(mitk::Image::Pointer image) { if (m_Controls.UseImageSpacing->isChecked()) { BFconfig.RecordTime = image->GetDimension(1)*image->GetGeometry()->GetSpacing()[1] / 1000000; // [s] MITK_INFO << "Calculated Scan Depth of " << BFconfig.RecordTime * BFconfig.SpeedOfSound * 100 << "cm"; } else { BFconfig.RecordTime = m_Controls.ScanDepth->value() / 1000 / BFconfig.SpeedOfSound; // [s] } if ("US Image" == m_Controls.ImageType->currentText()) { - if (m_Controls.UseImageSpacing->isChecked()) - { - BFconfig.RecordTime = BFconfig.RecordTime / 2; // [s] - } - BFconfig.Photoacoustic = false; } else if ("PA Image" == m_Controls.ImageType->currentText()) { BFconfig.Photoacoustic = true; } } void PAImageProcessing::EnableControls() { m_Controls.DoResampling->setEnabled(true); UseResampling(); m_Controls.Logfilter->setEnabled(true); m_Controls.buttonApplyBModeFilter->setEnabled(true); m_Controls.CutoffAbove->setEnabled(true); m_Controls.CutoffBelow->setEnabled(true); m_Controls.Cutoff->setEnabled(true); m_Controls.buttonApplyCropFilter->setEnabled(true); m_Controls.buttonApplyBandpass->setEnabled(true); m_Controls.BFAlgorithm->setEnabled(true); m_Controls.DelayCalculation->setEnabled(true); m_Controls.ImageType->setEnabled(true); m_Controls.Apodization->setEnabled(true); m_Controls.UseBP->setEnabled(true); m_Controls.UseGPU->setEnabled(true); m_Controls.BPhigh->setEnabled(true); m_Controls.BPlow->setEnabled(true); m_Controls.BPFalloff->setEnabled(true); m_Controls.UseImageSpacing->setEnabled(true); UseImageSpacing(); m_Controls.Pitch->setEnabled(true); m_Controls.ElementCount->setEnabled(true); m_Controls.SpeedOfSound->setEnabled(true); m_Controls.Samples->setEnabled(true); m_Controls.Lines->setEnabled(true); m_Controls.Angle->setEnabled(true); m_Controls.buttonApplyBeamforming->setEnabled(true); } void PAImageProcessing::DisableControls() { m_Controls.DoResampling->setEnabled(false); m_Controls.ResamplingValue->setEnabled(false); 
m_Controls.Logfilter->setEnabled(false); m_Controls.buttonApplyBModeFilter->setEnabled(false); m_Controls.CutoffAbove->setEnabled(false); m_Controls.CutoffBelow->setEnabled(false); m_Controls.Cutoff->setEnabled(false); m_Controls.buttonApplyCropFilter->setEnabled(false); m_Controls.buttonApplyBandpass->setEnabled(false); m_Controls.BFAlgorithm->setEnabled(false); m_Controls.DelayCalculation->setEnabled(false); m_Controls.ImageType->setEnabled(false); m_Controls.Apodization->setEnabled(false); m_Controls.UseBP->setEnabled(false); m_Controls.UseGPU->setEnabled(false); m_Controls.BPhigh->setEnabled(false); m_Controls.BPlow->setEnabled(false); m_Controls.BPFalloff->setEnabled(false); m_Controls.UseImageSpacing->setEnabled(false); m_Controls.ScanDepth->setEnabled(false); m_Controls.Pitch->setEnabled(false); m_Controls.ElementCount->setEnabled(false); m_Controls.SpeedOfSound->setEnabled(false); m_Controls.Samples->setEnabled(false); m_Controls.Lines->setEnabled(false); m_Controls.Angle->setEnabled(false); m_Controls.buttonApplyBeamforming->setEnabled(false); } void PAImageProcessing::UseImageSpacing() { if (m_Controls.UseImageSpacing->isChecked()) { m_Controls.ScanDepth->setDisabled(true); } else { m_Controls.ScanDepth->setEnabled(true); } } void BeamformingThread::run() { mitk::Image::Pointer resultImage; mitk::PhotoacousticImage::Pointer filterbank = mitk::PhotoacousticImage::New(); std::function progressHandle = [this](int progress, std::string progressInfo) { emit updateProgress(progress, progressInfo); }; resultImage = filterbank->ApplyBeamforming(m_InputImage, m_BFconfig, m_Cutoff, progressHandle); emit result(resultImage); } void BeamformingThread::setConfig(mitk::BeamformingFilter::beamformingSettings BFconfig) { m_BFconfig = BFconfig; } void BeamformingThread::setInputImage(mitk::Image::Pointer image) { m_InputImage = image; } void BeamformingThread::setCutoff(int cutoff) { m_Cutoff = cutoff; } void BmodeThread::run() { mitk::Image::Pointer resultImage; mitk::PhotoacousticImage::Pointer filterbank = mitk::PhotoacousticImage::New(); resultImage = filterbank->ApplyBmodeFilter(m_InputImage, m_UseLogfilter, m_ResampleSpacing); emit result(resultImage); } void BmodeThread::setConfig(bool useLogfilter, double resampleSpacing) { m_UseLogfilter = useLogfilter; m_ResampleSpacing = resampleSpacing; } void BmodeThread::setInputImage(mitk::Image::Pointer image) { m_InputImage = image; } void CropThread::run() { mitk::Image::Pointer resultImage; mitk::PhotoacousticImage::Pointer filterbank = mitk::PhotoacousticImage::New(); resultImage = filterbank->ApplyCropping(m_InputImage, m_CutAbove, m_CutBelow, 0, 0, 0, m_InputImage->GetDimension(2) - 1); emit result(resultImage); } void CropThread::setConfig(unsigned int CutAbove, unsigned int CutBelow) { m_CutAbove = CutAbove; m_CutBelow = CutBelow; } void CropThread::setInputImage(mitk::Image::Pointer image) { m_InputImage = image; } void BandpassThread::run() { mitk::Image::Pointer resultImage; mitk::PhotoacousticImage::Pointer filterbank = mitk::PhotoacousticImage::New(); resultImage = filterbank->BandpassFilter(m_InputImage, m_RecordTime, m_BPHighPass, m_BPLowPass, m_TukeyAlpha); - + MITK_INFO << "Bandpass filter applied"; emit result(resultImage); } void BandpassThread::setConfig(float BPHighPass, float BPLowPass, float TukeyAlpha, float recordTime) { m_BPHighPass = BPHighPass; m_BPLowPass = BPLowPass; m_TukeyAlpha = TukeyAlpha; m_RecordTime = recordTime; } void BandpassThread::setInputImage(mitk::Image::Pointer image) { m_InputImage = image; } \ No newline at end of file
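// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the patch above): every worker thread in
// this view -- BeamformingThread, BmodeThread, CropThread, BandpassThread --
// follows the same Qt pattern: a QThread subclass receives its parameters
// through setters, runs the filter inside run(), and hands the result back to
// the GUI thread via a queued signal, while deleteLater() connected to
// finished() cleans the thread up. The names ExampleFilterThread and
// HandleExampleResults below are hypothetical stand-ins, not symbols from
// this module.
#include <QThread>
#include <mitkImage.h>

class ExampleFilterThread : public QThread
{
  Q_OBJECT

signals:
  // Emitting mitk::Image::Pointer across threads via a queued connection
  // requires the type to be registered with
  // qRegisterMetaType<mitk::Image::Pointer>() beforehand, which is what the
  // view's constructor takes care of.
  void result(mitk::Image::Pointer image);

public:
  void setInputImage(mitk::Image::Pointer image) { m_InputImage = image; }

protected:
  void run() override
  {
    // A real worker would call into mitk::PhotoacousticImage here; the
    // pass-through below only keeps the sketch self-contained.
    mitk::Image::Pointer resultImage = m_InputImage;
    emit result(resultImage);
  }

private:
  mitk::Image::Pointer m_InputImage;
};

// Typical wiring from a GUI slot, mirroring StartBandpassThread():
//   ExampleFilterThread *thread = new ExampleFilterThread();
//   connect(thread, &ExampleFilterThread::result, this, &PAImageProcessing::HandleExampleResults);
//   connect(thread, &ExampleFilterThread::finished, thread, &QObject::deleteLater);
//   thread->setInputImage(image);
//   thread->start(); // run() executes in the worker thread; the result arrives queued on the GUI thread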