diff --git a/Modules/PhotoacousticsAlgorithms/Algorithms/OCL/mitkPhotoacousticOCLBeamformer.h b/Modules/PhotoacousticsAlgorithms/Algorithms/OCL/mitkPhotoacousticOCLBeamformer.h index da7fc7455d..8b77730c68 100644 --- a/Modules/PhotoacousticsAlgorithms/Algorithms/OCL/mitkPhotoacousticOCLBeamformer.h +++ b/Modules/PhotoacousticsAlgorithms/Algorithms/OCL/mitkPhotoacousticOCLBeamformer.h @@ -1,141 +1,144 @@ /*=================================================================== The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center, Division of Medical and Biological Informatics. All rights reserved. This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See LICENSE.txt or http://www.mitk.org for details. ===================================================================*/ #ifndef _MITKPHOTOACOUSTICSOCLBEAMFORMER_H_ #define _MITKPHOTOACOUSTICSOCLBEAMFORMER_H_ +#ifdef PHOTOACOUSTICS_USE_GPU #include "mitkOclDataSetToDataSetFilter.h" #include + namespace mitk { class OclImageToImageFilter; /** Documentation * * \brief The OclBinaryThresholdImageFilter computes a binary segmentation based on given threshold values. * * The filter requires two threshold values ( the upper and the lower threshold ) and two image values ( inside and outside ). The resulting voxel of the segmentation image is assigned the inside value 1 if the image value is between the given thresholds and the outside value otherwise. */ class PhotoacousticOCLBeamformer : public OclDataSetToDataSetFilter, public itk::Object { public: mitkClassMacroItkParent(PhotoacousticOCLBeamformer, itk::Object); itkNewMacro(Self); /** - * @brief SetInput Set the input image. Only 3D images are supported for now. - * @param image a 3D image. - * @throw mitk::Exception if the dimesion is not 3. + * @brief SetInput Set the input data through an image. Arbitrary images are supported */ void SetInput(Image::Pointer image); + /** + * @brief SetInput Manually set the input data while providing dimensions and memory size of the input data. 
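+ * A minimal usage sketch (hypothetical names and dimensions; BpE is the memory size of one element, here sizeof(float)):
+ *   unsigned int dims[3] = { 128, 2048, 32 }; // {lines, samples, slices}
+ *   oclBeamformer->SetInput(static_cast<void*>(rawData), dims, sizeof(float));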
+ */ void SetInput(void* data, unsigned int* dimensions, unsigned int BpE); + /** + * @brief GetOutputAsImage Returns an mitk::Image constructed from the processed data + */ mitk::Image::Pointer GetOutputAsImage(); /** Update the filter */ void Update(); + /** Set the Output dimensions, which are also used for the openCL global worksize */ void SetOutputDim( unsigned int outputDim[3]) { m_OutputDim[0] = outputDim[0]; m_OutputDim[1] = outputDim[1]; m_OutputDim[2] = outputDim[2]; } + /** Set the Apodisation function to apply when beamforming */ void SetApodisation(float* apodisation, unsigned short apodArraySize) { m_ApodArraySize = apodArraySize; m_Apodisation = apodisation; } enum BeamformingAlgorithm { DASQuad, DMASQuad, DASSphe, DMASSphe }; + /** Set which Algorithm should be used for beamforming */ void SetAlgorithm(BeamformingAlgorithm algorithm, bool PA) { m_Algorithm = algorithm; m_PAImage = PA; } + /** Set various beamforming parameters */ void SetBeamformingParameters(float SpeedOfSound, float timeSpacing, float Pitch, float Angle, bool PAImage, unsigned short transducerElements) { m_SpeedOfSound = SpeedOfSound; m_TimeSpacing = timeSpacing; m_Pitch = Pitch; m_Angle = Angle; m_PAImage = PAImage; m_TransducerElements = transducerElements; } protected: /** Constructor */ PhotoacousticOCLBeamformer(); /** Destructor */ virtual ~PhotoacousticOCLBeamformer(); /** Initialize the filter */ bool Initialize(); void Execute(); mitk::PixelType GetOutputType() { return mitk::MakeScalarPixelType(); } int GetBytesPerElem() { return sizeof(float); } virtual us::Module* GetModule(); private: /** The OpenCL kernel for the filter */ cl_kernel m_PixelCalculation; unsigned int m_OutputDim[3]; unsigned int m_InputDim[3]; float* m_Apodisation; unsigned short m_ApodArraySize; BeamformingAlgorithm m_Algorithm; unsigned short m_PAImage; float m_SpeedOfSound; float m_TimeSpacing; float m_Pitch; float m_Angle; unsigned short m_TransducerElements; mitk::Image::Pointer m_InputImage; }; } #endif - -/* -float* data = new float[m_InputDim[0] * m_InputDim[1] * m_InputDim[2]]; -for (unsigned int i = 0; i < m_InputDim[0] * m_InputDim[1] * m_InputDim[2]; ++i) -{ - data[i] = i; -} -mitk::OclDataSetToDataSetFilter::SetInput(data, m_InputDim[0] * m_InputDim[1] * m_InputDim[2], sizeof(float));*/ \ No newline at end of file +#endif \ No newline at end of file diff --git a/Modules/PhotoacousticsAlgorithms/Algorithms/mitkPhotoacousticBeamformingFilter.cpp b/Modules/PhotoacousticsAlgorithms/Algorithms/mitkPhotoacousticBeamformingFilter.cpp index c8acae5b99..eab1b3e812 100644 --- a/Modules/PhotoacousticsAlgorithms/Algorithms/mitkPhotoacousticBeamformingFilter.cpp +++ b/Modules/PhotoacousticsAlgorithms/Algorithms/mitkPhotoacousticBeamformingFilter.cpp @@ -1,603 +1,603 @@ /*=================================================================== mitkPhotoacousticBeamformingFilter The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center, Division of Medical and Biological Informatics. All rights reserved. This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See LICENSE.txt or http://www.mitk.org for details. 
===================================================================*/ #define _USE_MATH_DEFINES #include "mitkPhotoacousticBeamformingFilter.h" #include "mitkProperties.h" #include "mitkImageReadAccessor.h" #include #include #include #include #include #include #include "mitkImageCast.h" #include - mitk::BeamformingFilter::BeamformingFilter() : m_OutputData(nullptr), m_InputData(nullptr) { this->SetNumberOfIndexedInputs(1); this->SetNumberOfRequiredInputs(1); m_ProgressHandle = [](int, std::string) {}; } void mitk::BeamformingFilter::SetProgressHandle(std::function progressHandle) { m_ProgressHandle = progressHandle; } mitk::BeamformingFilter::~BeamformingFilter() { } void mitk::BeamformingFilter::GenerateInputRequestedRegion() { Superclass::GenerateInputRequestedRegion(); mitk::Image* output = this->GetOutput(); mitk::Image* input = const_cast (this->GetInput()); if (!output->IsInitialized()) { return; } input->SetRequestedRegionToLargestPossibleRegion(); //GenerateTimeInInputRegion(output, input); } void mitk::BeamformingFilter::GenerateOutputInformation() { mitk::Image::ConstPointer input = this->GetInput(); mitk::Image::Pointer output = this->GetOutput(); if ((output->IsInitialized()) && (this->GetMTime() <= m_TimeOfHeaderInitialization.GetMTime())) return; itkDebugMacro(<< "GenerateOutputInformation()"); unsigned int dim[] = { m_Conf.ReconstructionLines, m_Conf.SamplesPerLine, input->GetDimension(2) }; output->Initialize(mitk::MakeScalarPixelType(), 3, dim); mitk::Vector3D spacing; spacing[0] = m_Conf.Pitch * m_Conf.TransducerElements * 1000 / m_Conf.ReconstructionLines; spacing[1] = m_Conf.RecordTime / 2 * m_Conf.SpeedOfSound * 1000 / m_Conf.SamplesPerLine; spacing[2] = 1; output->GetGeometry()->SetSpacing(spacing); output->GetGeometry()->Modified(); output->SetPropertyList(input->GetPropertyList()->Clone()); m_TimeOfHeaderInitialization.Modified(); } void mitk::BeamformingFilter::GenerateData() { GenerateOutputInformation(); mitk::Image::Pointer input = this->GetInput(); mitk::Image::Pointer output = this->GetOutput(); if (!output->IsInitialized()) return; float inputDim[2] = { (float)input->GetDimension(0), (float)input->GetDimension(1) }; float outputDim[2] = { (float)output->GetDimension(0), (float)output->GetDimension(1) }; unsigned short chunkSize = 2; // TODO: make this slightly less arbitrary unsigned int oclOutputDim[3] = { output->GetDimension(0), output->GetDimension(1), output->GetDimension(2) }; unsigned int oclOutputDimChunk[3] = { output->GetDimension(0), output->GetDimension(1), chunkSize}; unsigned int oclInputDimChunk[3] = { input->GetDimension(0), input->GetDimension(1), chunkSize}; unsigned int oclOutputDimLastChunk[3] = { output->GetDimension(0), output->GetDimension(1), input->GetDimension(2) % chunkSize }; unsigned int oclInputDimLastChunk[3] = { input->GetDimension(0), input->GetDimension(1), input->GetDimension(2) % chunkSize }; const int apodArraySize = m_Conf.TransducerElements * 4; // set the resolution of the apodization array float* ApodWindow; // calculate the appropiate apodization window switch (m_Conf.Apod) { case beamformingSettings::Apodization::Hann: ApodWindow = VonHannFunction(apodArraySize); break; case beamformingSettings::Apodization::Hamm: ApodWindow = HammFunction(apodArraySize); break; case beamformingSettings::Apodization::Box: ApodWindow = BoxFunction(apodArraySize); break; default: ApodWindow = BoxFunction(apodArraySize); break; } int progInterval = output->GetDimension(2) / 20 > 1 ? 
output->GetDimension(2) / 20 : 1; // the interval at which we update the gui progress bar auto begin = std::chrono::high_resolution_clock::now(); // debbuging the performance... if (!m_Conf.UseGPU) { for (unsigned int i = 0; i < output->GetDimension(2); ++i) // seperate Slices should get Beamforming seperately applied { mitk::ImageReadAccessor inputReadAccessor(input, input->GetSliceData(i)); // first, we check whether the dara is float, other formats are unsupported if (input->GetPixelType().GetTypeAsString() == "scalar (float)" || input->GetPixelType().GetTypeAsString() == " (float)") { m_InputData = (float*)inputReadAccessor.GetData(); } else { MITK_INFO << "Pixel type is not float, abort"; return; } m_OutputData = new float[m_Conf.ReconstructionLines*m_Conf.SamplesPerLine]; // fill the image with zeros for (int l = 0; l < outputDim[0]; ++l) { for (int s = 0; s < outputDim[1]; ++s) { m_OutputData[l*(short)outputDim[1] + s] = 0; } } std::thread *threads = new std::thread[(short)outputDim[0]]; // every line will be beamformed in a seperate thread if (m_Conf.Algorithm == beamformingSettings::BeamformingAlgorithm::DAS) { if (m_Conf.DelayCalculationMethod == beamformingSettings::DelayCalc::QuadApprox) { for (short line = 0; line < outputDim[0]; ++line) { threads[line] = std::thread(&BeamformingFilter::DASQuadraticLine, this, m_InputData, m_OutputData, inputDim, outputDim, line, ApodWindow, apodArraySize); } } else if (m_Conf.DelayCalculationMethod == beamformingSettings::DelayCalc::Spherical) { for (short line = 0; line < outputDim[0]; ++line) { threads[line] = std::thread(&BeamformingFilter::DASSphericalLine, this, m_InputData, m_OutputData, inputDim, outputDim, line, ApodWindow, apodArraySize); } } } else if (m_Conf.Algorithm == beamformingSettings::BeamformingAlgorithm::DMAS) { if (m_Conf.DelayCalculationMethod == beamformingSettings::DelayCalc::QuadApprox) { for (short line = 0; line < outputDim[0]; ++line) { threads[line] = std::thread(&BeamformingFilter::DMASQuadraticLine, this, m_InputData, m_OutputData, inputDim, outputDim, line, ApodWindow, apodArraySize); } } else if (m_Conf.DelayCalculationMethod == beamformingSettings::DelayCalc::Spherical) { for (short line = 0; line < outputDim[0]; ++line) { threads[line] = std::thread(&BeamformingFilter::DMASSphericalLine, this, m_InputData, m_OutputData, inputDim, outputDim, line, ApodWindow, apodArraySize); } } } // wait for all lines to finish for (short line = 0; line < outputDim[0]; ++line) { threads[line].join(); } output->SetSlice(m_OutputData, i); if (i % progInterval == 0) m_ProgressHandle((int)((i + 1) / (float)output->GetDimension(2) * 100), "performing reconstruction"); delete[] m_OutputData; m_OutputData = nullptr; m_InputData = nullptr; } } + #ifdef PHOTOACOUSTICS_USE_GPU else { mitk::PhotoacousticOCLBeamformer::Pointer m_oclFilter = mitk::PhotoacousticOCLBeamformer::New(); try { if (m_Conf.Algorithm == beamformingSettings::BeamformingAlgorithm::DAS) { if (m_Conf.DelayCalculationMethod == beamformingSettings::DelayCalc::QuadApprox) m_oclFilter->SetAlgorithm(PhotoacousticOCLBeamformer::BeamformingAlgorithm::DASQuad, true); else if (m_Conf.DelayCalculationMethod == beamformingSettings::DelayCalc::Spherical) m_oclFilter->SetAlgorithm(PhotoacousticOCLBeamformer::BeamformingAlgorithm::DASSphe, true); } else if (m_Conf.Algorithm == beamformingSettings::BeamformingAlgorithm::DMAS) { if (m_Conf.DelayCalculationMethod == beamformingSettings::DelayCalc::QuadApprox) 
m_oclFilter->SetAlgorithm(PhotoacousticOCLBeamformer::BeamformingAlgorithm::DMASQuad, true); else if (m_Conf.DelayCalculationMethod == beamformingSettings::DelayCalc::Spherical) m_oclFilter->SetAlgorithm(PhotoacousticOCLBeamformer::BeamformingAlgorithm::DMASSphe, true); } m_oclFilter->SetApodisation(ApodWindow, apodArraySize); m_oclFilter->SetOutputDim(oclOutputDimChunk); m_oclFilter->SetBeamformingParameters(m_Conf.SpeedOfSound, m_Conf.TimeSpacing, m_Conf.Pitch, m_Conf.Angle, m_Conf.Photoacoustic, m_Conf.TransducerElements); mitk::ImageReadAccessor inputReadAccessor(input); // first, we check whether the data is float, other formats are unsupported if (input->GetPixelType().GetTypeAsString() == "scalar (float)" || input->GetPixelType().GetTypeAsString() == " (float)") { m_InputData = (float*)inputReadAccessor.GetData(); } else { MITK_INFO << "Pixel type is not float, abort"; return; } if (chunkSize < oclOutputDim[2]) { unsigned short curChunkSize = chunkSize; for (unsigned int i = 0; i < ceil((float)oclOutputDim[2] / (float)chunkSize); ++i) { m_ProgressHandle(100 * ((float)(i * chunkSize) / (float)oclOutputDim[2]), "performing reconstruction"); if ((oclOutputDim[2]) - (i * chunkSize) >= chunkSize) { m_oclFilter->SetInput((void*)&m_InputData[i * chunkSize * input->GetDimension(0) * input->GetDimension(1)], oclInputDimChunk, sizeof(float)); } else { m_oclFilter->SetInput((void*)&m_InputData[i * chunkSize * input->GetDimension(0) * input->GetDimension(1)], oclInputDimLastChunk, sizeof(float)); curChunkSize = (unsigned short)oclOutputDimLastChunk[2]; // the last chunk might have a different size! } m_oclFilter->Update(); float* out = (float*)m_oclFilter->GetOutput(); for (unsigned short s = 0; s < curChunkSize; ++s) { output->SetImportSlice( (void*)&(out[s * oclOutputDim[0] * oclOutputDim[1]]), i * chunkSize + s, 0, 0, mitk::Image::ReferenceMemory); } } } else { m_ProgressHandle(50, "performing reconstruction"); m_oclFilter->SetOutputDim(oclOutputDim); m_oclFilter->SetInput(input); m_oclFilter->Update(); float* out = (float*)m_oclFilter->GetOutput(); output->SetImportVolume((void*)out, 0, 0, mitk::Image::ReferenceMemory); } } catch (mitk::Exception &e) { std::string errorMessage = "Caught unexpected exception "; errorMessage.append(e.what()); MITK_ERROR << errorMessage; } } - + #endif m_TimeOfHeaderInitialization.Modified(); auto end = std::chrono::high_resolution_clock::now(); MITK_INFO << "Beamforming of " << output->GetDimension(2) << " Images completed in " << ((float)std::chrono::duration_cast(end - begin).count()) / 1000000 << "ms" << std::endl; } void mitk::BeamformingFilter::Configure(beamformingSettings settings) { m_Conf = settings; } float* mitk::BeamformingFilter::VonHannFunction(int samples) { float* ApodWindow = new float[samples]; for (int n = 0; n < samples; ++n) { ApodWindow[n] = (1 - cos(2 * M_PI * n / (samples - 1))) / 2; } return ApodWindow; } float* mitk::BeamformingFilter::HammFunction(int samples) { float* ApodWindow = new float[samples]; for (int n = 0; n < samples; ++n) { ApodWindow[n] = 0.54 - 0.46*cos(2 * M_PI*n / (samples - 1)); } return ApodWindow; } float* mitk::BeamformingFilter::BoxFunction(int samples) { float* ApodWindow = new float[samples]; for (int n = 0; n < samples; ++n) { ApodWindow[n] = 1; } return ApodWindow; } void mitk::BeamformingFilter::DASQuadraticLine(float* input, float* output, float inputDim[2], float outputDim[2], const short& line, float* apodisation, const short& apodArraySize) { float& inputS = inputDim[1]; float& inputL = inputDim[0]; 
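// inputDim/outputDim hold {lines, samples}; the references here alias the sample (S) and line (L) counts of the raw input and of the reconstructed output ({ReconstructionLines, SamplesPerLine})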
float& outputS = outputDim[1]; float& outputL = outputDim[0]; short AddSample = 0; short maxLine = 0; short minLine = 0; float delayMultiplicator = 0; float l_i = 0; float s_i = 0; float part = 0.07 * inputL; float tan_phi = std::tan(m_Conf.Angle / 360 * 2 * M_PI); float part_multiplicator = tan_phi * m_Conf.TimeSpacing * m_Conf.SpeedOfSound / m_Conf.Pitch * m_Conf.ReconstructionLines / m_Conf.TransducerElements; float apod_mult = 1; short usedLines = (maxLine - minLine); //quadratic delay l_i = line / outputL * inputL; for (short sample = 0; sample < outputS; ++sample) { s_i = (float)sample / outputS * inputS / 2; part = part_multiplicator*s_i; if (part < 1) part = 1; maxLine = (short)std::min((l_i + part) + 1, inputL); minLine = (short)std::max((l_i - part), 0.0f); usedLines = (maxLine - minLine); apod_mult = apodArraySize / (maxLine - minLine); delayMultiplicator = pow((1 / (m_Conf.TimeSpacing*m_Conf.SpeedOfSound) * (m_Conf.Pitch*m_Conf.TransducerElements) / inputL), 2) / s_i / 2; for (short l_s = minLine; l_s < maxLine; ++l_s) { AddSample = delayMultiplicator * pow((l_s - l_i), 2) + s_i + (1 - m_Conf.Photoacoustic)*s_i; if (AddSample < inputS && AddSample >= 0) output[sample*(short)outputL + line] += input[l_s + AddSample*(short)inputL] * apodisation[(short)((l_s - minLine)*apod_mult)]; else --usedLines; } output[sample*(short)outputL + line] = output[sample*(short)outputL + line] / usedLines; } } void mitk::BeamformingFilter::DASSphericalLine(float* input, float* output, float inputDim[2], float outputDim[2], const short& line, float* apodisation, const short& apodArraySize) { float& inputS = inputDim[1]; float& inputL = inputDim[0]; float& outputS = outputDim[1]; float& outputL = outputDim[0]; short AddSample = 0; short maxLine = 0; short minLine = 0; float l_i = 0; float s_i = 0; float part = 0.07 * inputL; float tan_phi = std::tan(m_Conf.Angle / 360 * 2 * M_PI); float part_multiplicator = tan_phi * m_Conf.TimeSpacing * m_Conf.SpeedOfSound / m_Conf.Pitch * m_Conf.ReconstructionLines / m_Conf.TransducerElements; float apod_mult = 1; short usedLines = (maxLine - minLine); //exact delay l_i = (float)line / outputL * inputL; for (short sample = 0; sample < outputS; ++sample) { s_i = (float)sample / outputS * inputS / 2; part = part_multiplicator*s_i; if (part < 1) part = 1; maxLine = (short)std::min((l_i + part) + 1, inputL); minLine = (short)std::max((l_i - part), 0.0f); usedLines = (maxLine - minLine); apod_mult = apodArraySize / (maxLine - minLine); for (short l_s = minLine; l_s < maxLine; ++l_s) { AddSample = (int)sqrt( pow(s_i, 2) + pow((1 / (m_Conf.TimeSpacing*m_Conf.SpeedOfSound) * ((l_s - l_i)*m_Conf.Pitch*m_Conf.TransducerElements) / inputL), 2) ) + (1 - m_Conf.Photoacoustic)*s_i; if (AddSample < inputS && AddSample >= 0) output[sample*(short)outputL + line] += input[l_s + AddSample*(short)inputL] * apodisation[(short)((l_s - minLine)*apod_mult)]; else --usedLines; } output[sample*(short)outputL + line] = output[sample*(short)outputL + line] / usedLines; } } void mitk::BeamformingFilter::DMASQuadraticLine(float* input, float* output, float inputDim[2], float outputDim[2], const short& line, float* apodisation, const short& apodArraySize) { float& inputS = inputDim[1]; float& inputL = inputDim[0]; float& outputS = outputDim[1]; float& outputL = outputDim[0]; short maxLine = 0; short minLine = 0; float delayMultiplicator = 0; float l_i = 0; float s_i = 0; float part = 0.07 * inputL; float tan_phi = std::tan(m_Conf.Angle / 360 * 2 * M_PI); float part_multiplicator = tan_phi * 
m_Conf.TimeSpacing * m_Conf.SpeedOfSound / m_Conf.Pitch * m_Conf.ReconstructionLines / m_Conf.TransducerElements; float apod_mult = 1; float mult = 0; short usedLines = (maxLine - minLine); //quadratic delay l_i = line / outputL * inputL; for (short sample = 0; sample < outputS; ++sample) { s_i = sample / outputS * inputS / 2; part = part_multiplicator*s_i; if (part < 1) part = 1; maxLine = (short)std::min((l_i + part) + 1, inputL); minLine = (short)std::max((l_i - part), 0.0f); usedLines = (maxLine - minLine); apod_mult = apodArraySize / (maxLine - minLine); delayMultiplicator = pow((1 / (m_Conf.TimeSpacing*m_Conf.SpeedOfSound) * (m_Conf.Pitch*m_Conf.TransducerElements) / inputL), 2) / s_i / 2; //calculate the AddSamples beforehand to save some time short* AddSample = new short[maxLine - minLine]; for (short l_s = 0; l_s < maxLine - minLine; ++l_s) { AddSample[l_s] = (short)(delayMultiplicator * pow((minLine + l_s - l_i), 2) + s_i) + (1 - m_Conf.Photoacoustic)*s_i; } for (short l_s1 = minLine; l_s1 < maxLine - 1; ++l_s1) { if (AddSample[l_s1 - minLine] < (short)inputS && AddSample[l_s1 - minLine] >= 0) { for (short l_s2 = l_s1 + 1; l_s2 < maxLine; ++l_s2) { if (AddSample[l_s2 - minLine] < inputS && AddSample[l_s2 - minLine] >= 0) { mult = input[l_s2 + AddSample[l_s2 - minLine] * (short)inputL] * apodisation[(short)((l_s2 - minLine)*apod_mult)] * input[l_s1 + AddSample[l_s1 - minLine] * (short)inputL] * apodisation[(short)((l_s1 - minLine)*apod_mult)]; output[sample*(short)outputL + line] += sqrt(abs(mult)) * ((mult > 0) - (mult < 0)); } } } else --usedLines; } output[sample*(short)outputL + line] = 10 * output[sample*(short)outputL + line] / (pow(usedLines, 2) - (usedLines - 1)); delete[] AddSample; } } void mitk::BeamformingFilter::DMASSphericalLine(float* input, float* output, float inputDim[2], float outputDim[2], const short& line, float* apodisation, const short& apodArraySize) { float& inputS = inputDim[1]; float& inputL = inputDim[0]; float& outputS = outputDim[1]; float& outputL = outputDim[0]; short maxLine = 0; short minLine = 0; float l_i = 0; float s_i = 0; float part = 0.07 * inputL; float tan_phi = std::tan(m_Conf.Angle / 360 * 2 * M_PI); float part_multiplicator = tan_phi * m_Conf.TimeSpacing * m_Conf.SpeedOfSound / m_Conf.Pitch * m_Conf.ReconstructionLines / m_Conf.TransducerElements; float apod_mult = 1; float mult = 0; short usedLines = (maxLine - minLine); //exact delay l_i = line / outputL * inputL; for (short sample = 0; sample < outputS; ++sample) { s_i = sample / outputS * inputS / 2; part = part_multiplicator*s_i; if (part < 1) part = 1; maxLine = (short)std::min((l_i + part) + 1, inputL); minLine = (short)std::max((l_i - part), 0.0f); usedLines = (maxLine - minLine); apod_mult = apodArraySize / (maxLine - minLine); //calculate the AddSamples beforehand to save some time short* AddSample = new short[maxLine - minLine]; for (short l_s = 0; l_s < maxLine - minLine; ++l_s) { AddSample[l_s] = (short)sqrt( pow(s_i, 2) + pow((1 / (m_Conf.TimeSpacing*m_Conf.SpeedOfSound) * ((minLine + l_s - l_i)*m_Conf.Pitch*m_Conf.TransducerElements) / inputL), 2) ) + (1 - m_Conf.Photoacoustic)*s_i; } for (short l_s1 = minLine; l_s1 < maxLine - 1; ++l_s1) { if (AddSample[l_s1 - minLine] < inputS && AddSample[l_s1 - minLine] >= 0) { for (short l_s2 = l_s1 + 1; l_s2 < maxLine; ++l_s2) { if (AddSample[l_s2 - minLine] < inputS && AddSample[l_s2 - minLine] >= 0) { mult = input[l_s2 + AddSample[l_s2 - minLine] * (short)inputL] * apodisation[(int)((l_s2 - minLine)*apod_mult)] * input[l_s1 + 
AddSample[l_s1 - minLine] * (short)inputL] * apodisation[(int)((l_s1 - minLine)*apod_mult)]; output[sample*(short)outputL + line] += sqrt(abs(mult)) * ((mult > 0) - (mult < 0)); } } } else --usedLines; } output[sample*(short)outputL + line] = 10 * output[sample*(short)outputL + line] / (pow(usedLines, 2) - (usedLines - 1)); delete[] AddSample; } } diff --git a/Modules/PhotoacousticsAlgorithms/Algorithms/mitkPhotoacousticBeamformingFilter.h b/Modules/PhotoacousticsAlgorithms/Algorithms/mitkPhotoacousticBeamformingFilter.h index e2654fc7fd..3851aac704 100644 --- a/Modules/PhotoacousticsAlgorithms/Algorithms/mitkPhotoacousticBeamformingFilter.h +++ b/Modules/PhotoacousticsAlgorithms/Algorithms/mitkPhotoacousticBeamformingFilter.h @@ -1,108 +1,107 @@ /*=================================================================== The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center, Division of Medical and Biological Informatics. All rights reserved. This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See LICENSE.txt or http://www.mitk.org for details. ===================================================================*/ - #ifndef MITK_PHOTOACOUSTICS_BEAMFORMING_FILTER #define MITK_PHOTOACOUSTICS_BEAMFORMING_FILTER #include "mitkImageToImageFilter.h" #include namespace mitk { //##Documentation //## @brief //## @ingroup Process class BeamformingFilter : public ImageToImageFilter { public: mitkClassMacro(BeamformingFilter, ImageToImageFilter); itkFactorylessNewMacro(Self) itkCloneMacro(Self) struct beamformingSettings { float Pitch = 0.0003; // [m] float SpeedOfSound = 1540; // [m/s] unsigned int SamplesPerLine = 2048; unsigned int ReconstructionLines = 128; float RecordTime = 0.00006; // [s] float TimeSpacing = 0.0000000000001; // [s] unsigned int TransducerElements = 128; bool partial = false; unsigned int CropBounds[2] = { 0,0 }; bool UseGPU = true; enum DelayCalc {QuadApprox, Spherical}; DelayCalc DelayCalculationMethod = QuadApprox; enum Apodization {Hamm, Hann, Box}; Apodization Apod = Hann; enum BeamformingAlgorithm {DMAS, DAS}; BeamformingAlgorithm Algorithm = DAS; float Angle = 10; bool Photoacoustic = true; float BPHighPass = 50; float BPLowPass = 50; bool UseBP = false; }; void Configure(beamformingSettings settings); void SetProgressHandle(std::function progressHandle); protected: BeamformingFilter(); ~BeamformingFilter(); virtual void GenerateInputRequestedRegion() override; virtual void GenerateOutputInformation() override; virtual void GenerateData() override; //##Description //## @brief Time when Header was last initialized itk::TimeStamp m_TimeOfHeaderInitialization; std::function m_ProgressHandle; float* VonHannFunction(int samples); float* HammFunction(int samples); float* BoxFunction(int samples); void DASQuadraticLine(float* input, float* output, float inputDim[2], float outputDim[2], const short& line, float* apodisation, const short& apodArraySize); void DASSphericalLine(float* input, float* output, float inputDim[2], float outputDim[2], const short& line, float* apodisation, const short& apodArraySize); void DMASQuadraticLine(float* input, float* output, float inputDim[2], float outputDim[2], const short& line, float* apodisation, const short& apodArraySize); void DMASSphericalLine(float* input, float* output, float inputDim[2], float outputDim[2], const short& line, float* apodisation, const short& apodArraySize); float* m_OutputData; float* m_InputData; 
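// working pointers for the CPU beamforming path: m_InputData points to the current slice of the raw image, m_OutputData holds the reconstructed slice (allocated per slice in GenerateData)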
float* m_InputDataPuffer; beamformingSettings m_Conf; }; } // namespace mitk #endif //MITK_PHOTOACOUSTICS_BEAMFORMING_FILTER diff --git a/Modules/PhotoacousticsAlgorithms/mitkPhotoacousticImage.h b/Modules/PhotoacousticsAlgorithms/mitkPhotoacousticImage.h index daab4a43cf..40bad188c7 100644 --- a/Modules/PhotoacousticsAlgorithms/mitkPhotoacousticImage.h +++ b/Modules/PhotoacousticsAlgorithms/mitkPhotoacousticImage.h @@ -1,53 +1,52 @@ /*=================================================================== The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center, Division of Medical and Biological Informatics. All rights reserved. This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See LICENSE.txt or http://www.mitk.org for details. ===================================================================*/ - #ifndef mitkPhotoacousticImage_H_HEADER_INCLUDED #define mitkPhotoacousticImage_H_HEADER_INCLUDED #include "itkObject.h" #include "mitkCommon.h" #include "mitkImage.h" #include #include "mitkOclImageToImageFilter.h" #include "mitkPhotoacousticBeamformingFilter.h" #include "MitkPhotoacousticsAlgorithmsExports.h" namespace mitk { class MITKPHOTOACOUSTICSALGORITHMS_EXPORT PhotoacousticImage : public itk::Object { public: mitkClassMacroItkParent(mitk::PhotoacousticImage, itk::Object); itkFactorylessNewMacro(Self); enum BModeMethod { ShapeDetection, Abs }; mitk::Image::Pointer ApplyBmodeFilter(mitk::Image::Pointer inputImage, BModeMethod method = BModeMethod::Abs, bool UseLogFilter = false, float resampleSpacing = 0.15); // mitk::Image::Pointer ApplyScatteringCompensation(mitk::Image::Pointer inputImage, int scatteringCoefficient); mitk::Image::Pointer ApplyResampling(mitk::Image::Pointer inputImage, unsigned int outputSize[2]); mitk::Image::Pointer ApplyBeamforming(mitk::Image::Pointer inputImage, BeamformingFilter::beamformingSettings config, int cutoff, std::function progressHandle = [](int, std::string) {}); mitk::Image::Pointer ApplyCropping(mitk::Image::Pointer inputImage, int above, int below, int right, int left, int minSlice, int maxSlice); mitk::Image::Pointer BandpassFilter(mitk::Image::Pointer data, float recordTime, float BPHighPass, float BPLowPass, float alpha); protected: PhotoacousticImage(); virtual ~PhotoacousticImage(); itk::Image::Pointer BPFunction(mitk::Image::Pointer reference, int cutoffFrequencyPixelHighPass, int cutoffFrequencyPixelLowPass, float alpha); }; } // namespace mitk #endif /* mitkPhotoacousticImage_H_HEADER_INCLUDED */ diff --git a/Plugins/org.mitk.gui.qt.photoacoustics.imageprocessing/CMakeLists.txt b/Plugins/org.mitk.gui.qt.photoacoustics.imageprocessing/CMakeLists.txt index ce750d05d7..0b44d554e6 100644 --- a/Plugins/org.mitk.gui.qt.photoacoustics.imageprocessing/CMakeLists.txt +++ b/Plugins/org.mitk.gui.qt.photoacoustics.imageprocessing/CMakeLists.txt @@ -1,7 +1,11 @@ project(org_mitk_gui_qt_photoacoustics_imageprocessing) mitk_create_plugin( EXPORT_DIRECTIVE IMAGEPROCESSING_EXPORT EXPORTED_INCLUDE_SUFFIXES src MODULE_DEPENDS MitkQtWidgetsExt MitkPhotoacousticsAlgorithms ) + +IF(MITK_USE_OpenCL) + add_definitions(-DPHOTOACOUSTICS_USE_GPU) +ENDIF(MITK_USE_OpenCL) \ No newline at end of file diff --git a/Plugins/org.mitk.gui.qt.photoacoustics.imageprocessing/src/internal/PAImageProcessing.cpp b/Plugins/org.mitk.gui.qt.photoacoustics.imageprocessing/src/internal/PAImageProcessing.cpp index 008a208181..50c0182c5c 100644 --- 
a/Plugins/org.mitk.gui.qt.photoacoustics.imageprocessing/src/internal/PAImageProcessing.cpp +++ b/Plugins/org.mitk.gui.qt.photoacoustics.imageprocessing/src/internal/PAImageProcessing.cpp @@ -1,1092 +1,1105 @@ /*=================================================================== The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center, Division of Medical and Biological Informatics. All rights reserved. This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See LICENSE.txt or http://www.mitk.org for details. ===================================================================*/ // Blueberry #include #include // Qmitk #include "PAImageProcessing.h" // Qt #include #include #include #include //mitk image #include #include "mitkPhotoacousticImage.h" #include "mitkPhotoacousticBeamformingFilter.h" //other #include #include #include const std::string PAImageProcessing::VIEW_ID = "org.mitk.views.paimageprocessing"; PAImageProcessing::PAImageProcessing() : m_ResampleSpacing(0), m_UseLogfilter(false) { qRegisterMetaType(); qRegisterMetaType(); } void PAImageProcessing::SetFocus() { m_Controls.buttonApplyBModeFilter->setFocus(); } void PAImageProcessing::CreateQtPartControl(QWidget *parent) { // create GUI widgets from the Qt Designer's .ui file m_Controls.setupUi(parent); connect(m_Controls.buttonApplyBModeFilter, SIGNAL(clicked()), this, SLOT(StartBmodeThread())); connect(m_Controls.DoResampling, SIGNAL(clicked()), this, SLOT(UseResampling())); connect(m_Controls.Logfilter, SIGNAL(clicked()), this, SLOT(UseLogfilter())); connect(m_Controls.ResamplingValue, SIGNAL(valueChanged(double)), this, SLOT(SetResampling())); connect(m_Controls.buttonApplyBeamforming, SIGNAL(clicked()), this, SLOT(StartBeamformingThread())); connect(m_Controls.buttonApplyCropFilter, SIGNAL(clicked()), this, SLOT(StartCropThread())); connect(m_Controls.buttonApplyBandpass, SIGNAL(clicked()), this, SLOT(StartBandpassThread())); connect(m_Controls.UseImageSpacing, SIGNAL(clicked()), this, SLOT(UseImageSpacing())); connect(m_Controls.ScanDepth, SIGNAL(valueChanged(double)), this, SLOT(UpdateImageInfo())); connect(m_Controls.SpeedOfSound, SIGNAL(valueChanged(double)), this, SLOT(UpdateImageInfo())); connect(m_Controls.SpeedOfSound, SIGNAL(valueChanged(double)), this, SLOT(ChangedSOSBeamforming())); connect(m_Controls.BPSpeedOfSound, SIGNAL(valueChanged(double)), this, SLOT(ChangedSOSBandpass())); connect(m_Controls.Samples, SIGNAL(valueChanged(int)), this, SLOT(UpdateImageInfo())); connect(m_Controls.UseImageSpacing, SIGNAL(clicked()), this, SLOT(UpdateImageInfo())); connect(m_Controls.boundLow, SIGNAL(valueChanged(int)), this, SLOT(UpdateBounds())); connect(m_Controls.boundHigh, SIGNAL(valueChanged(int)), this, SLOT(UpdateBounds())); connect(m_Controls.Partial, SIGNAL(clicked()), this, SLOT(UpdateBounds())); connect(m_Controls.BatchProcessing, SIGNAL(clicked()), this, SLOT(BatchProcessing())); connect(m_Controls.StepBeamforming, SIGNAL(clicked()), this, SLOT(UpdateSaveBoxes())); connect(m_Controls.StepCropping, SIGNAL(clicked()), this, SLOT(UpdateSaveBoxes())); connect(m_Controls.StepBandpass, SIGNAL(clicked()), this, SLOT(UpdateSaveBoxes())); connect(m_Controls.StepBMode, SIGNAL(clicked()), this, SLOT(UpdateSaveBoxes())); UpdateSaveBoxes(); m_Controls.DoResampling->setChecked(false); m_Controls.ResamplingValue->setEnabled(false); m_Controls.progressBar->setMinimum(0); m_Controls.progressBar->setMaximum(100); 
m_Controls.progressBar->setVisible(false); m_Controls.UseImageSpacing->setToolTip("Image spacing of y-Axis must be in us, x-Axis in mm."); m_Controls.UseImageSpacing->setToolTipDuration(5000); m_Controls.ProgressInfo->setVisible(false); m_Controls.UseBP->hide(); + #ifndef PHOTOACOUSTICS_USE_GPU + m_Controls.UseGPU->setEnabled(false); + m_Controls.UseGPU->setChecked(false); + #endif + UseImageSpacing(); } void PAImageProcessing::ChangedSOSBandpass() { m_Controls.SpeedOfSound->setValue(m_Controls.BPSpeedOfSound->value()); } void PAImageProcessing::ChangedSOSBeamforming() { m_Controls.BPSpeedOfSound->setValue(m_Controls.SpeedOfSound->value()); } std::vector splitpath( const std::string& str , const std::set delimiters) { std::vector result; char const* pch = str.c_str(); char const* start = pch; for (; *pch; ++pch) { if (delimiters.find(*pch) != delimiters.end()) { if (start != pch) { std::string str(start, pch); result.push_back(str); } else { result.push_back(""); } start = pch + 1; } } result.push_back(start); return result; } void PAImageProcessing::UpdateSaveBoxes() { if (m_Controls.StepBeamforming->isChecked()) m_Controls.SaveBeamforming->setEnabled(true); else m_Controls.SaveBeamforming->setEnabled(false); if (m_Controls.StepCropping->isChecked()) m_Controls.SaveCropping->setEnabled(true); else m_Controls.SaveCropping->setEnabled(false); if (m_Controls.StepBandpass->isChecked()) m_Controls.SaveBandpass->setEnabled(true); else m_Controls.SaveBandpass->setEnabled(false); if (m_Controls.StepBMode->isChecked()) m_Controls.SaveBMode->setEnabled(true); else m_Controls.SaveBMode->setEnabled(false); } void PAImageProcessing::BatchProcessing() { QFileDialog LoadDialog(nullptr, "Select Files to be processed"); LoadDialog.setFileMode(QFileDialog::FileMode::ExistingFiles); LoadDialog.setNameFilter(tr("Images (*.nrrd)")); LoadDialog.setViewMode(QFileDialog::Detail); QStringList fileNames; if (LoadDialog.exec()) fileNames = LoadDialog.selectedFiles(); QString saveDir = QFileDialog::getExistingDirectory(nullptr, tr("Select Directory To Save To"), "", QFileDialog::ShowDirsOnly | QFileDialog::DontResolveSymlinks); DisableControls(); std::set delims{'/'}; mitk::PhotoacousticImage::Pointer filterbank = mitk::PhotoacousticImage::New(); bool doSteps[] = { m_Controls.StepBeamforming->isChecked(), m_Controls.StepCropping->isChecked() , m_Controls.StepBandpass->isChecked(), m_Controls.StepBMode->isChecked() }; bool saveSteps[] = { m_Controls.SaveBeamforming->isChecked(), m_Controls.SaveCropping->isChecked() , m_Controls.SaveBandpass->isChecked(), m_Controls.SaveBMode->isChecked() }; for (int fileNumber = 0; fileNumber < fileNames.size(); ++fileNumber) { m_Controls.progressBar->setValue(0); m_Controls.progressBar->setVisible(true); m_Controls.ProgressInfo->setVisible(true); m_Controls.ProgressInfo->setText("loading file"); QString filename = fileNames.at(fileNumber); auto split = splitpath(filename.toStdString(), delims); std::string imageName = split.at(split.size()-1); // remove ".nrrd" imageName = imageName.substr(0, imageName.size()-5); mitk::Image::Pointer image = mitk::IOUtil::LoadImage(filename.toStdString().c_str()); UpdateBFSettings(image); // Beamforming if (doSteps[0]) { std::function progressHandle = [this](int progress, std::string progressInfo) { this->UpdateProgress(progress, progressInfo); }; m_Controls.progressBar->setValue(100); image = filterbank->ApplyBeamforming(image, BFconfig, m_Controls.Cutoff->value(), progressHandle); if (saveSteps[0]) { std::string saveFileName = 
saveDir.toStdString() + "/" + imageName + " beamformed" + ".nrrd"; mitk::IOUtil::Save(image, saveFileName); } } // Cropping if (doSteps[1]) { m_Controls.ProgressInfo->setText("cropping image"); image = filterbank->ApplyCropping(image, m_Controls.CutoffAbove->value(), m_Controls.CutoffBelow->value(), 0, 0, 0, image->GetDimension(2) - 1); if (saveSteps[1]) { std::string saveFileName = saveDir.toStdString() + "/" + imageName + " cropped" + ".nrrd"; mitk::IOUtil::Save(image, saveFileName); } } // Bandpass if (doSteps[2]) { m_Controls.ProgressInfo->setText("applying bandpass"); float recordTime = image->GetDimension(1)*image->GetGeometry()->GetSpacing()[1] / 1000 / m_Controls.BPSpeedOfSound->value(); // add a safeguard so the program does not chrash when applying a Bandpass that reaches out of the bounds of the image float maxFrequency = 1 / (recordTime / image->GetDimension(1)) * image->GetDimension(1) / 2 / 2 / 1000; float BPHighPass = 1000000 * m_Controls.BPhigh->value(); // [Hz] float BPLowPass = maxFrequency - 1000000 * m_Controls.BPlow->value(); // [Hz] if (BPLowPass > maxFrequency && m_Controls.UseBP->isChecked()) { QMessageBox Msgbox; Msgbox.setText("LowPass too low, disabled it."); Msgbox.exec(); BPLowPass = 0; } if (BPLowPass < 0 && m_Controls.UseBP->isChecked()) { QMessageBox Msgbox; Msgbox.setText("LowPass too high, disabled it."); Msgbox.exec(); BPLowPass = 0; } if (BPHighPass > maxFrequency && m_Controls.UseBP->isChecked()) { QMessageBox Msgbox; Msgbox.setText("HighPass too high, disabled it."); Msgbox.exec(); BPHighPass = 0; } if (BPHighPass > maxFrequency - BFconfig.BPLowPass) { QMessageBox Msgbox; Msgbox.setText("HighPass higher than LowPass, disabled both."); Msgbox.exec(); BPHighPass = 0; BPLowPass = 0; } image = filterbank->BandpassFilter(image, recordTime, BPHighPass, BPLowPass, m_Controls.BPFalloff->value()); if (saveSteps[2]) { std::string saveFileName = saveDir.toStdString() + "/" + imageName + " bandpassed" + ".nrrd"; mitk::IOUtil::Save(image, saveFileName); } } // Bmode if (doSteps[3]) { m_Controls.ProgressInfo->setText("applying bmode filter"); if (m_Controls.BModeMethod->currentText() == "Simple Abs Filter") image = filterbank->ApplyBmodeFilter(image, mitk::PhotoacousticImage::BModeMethod::Abs, m_UseLogfilter, m_ResampleSpacing); else if (m_Controls.BModeMethod->currentText() == "Shape Detection") image = filterbank->ApplyBmodeFilter(image, mitk::PhotoacousticImage::BModeMethod::ShapeDetection, m_UseLogfilter, m_ResampleSpacing); if (saveSteps[3]) { std::string saveFileName = saveDir.toStdString() + "/" + imageName + " bmode" + ".nrrd"; mitk::IOUtil::Save(image, saveFileName); } } m_Controls.progressBar->setVisible(false); } EnableControls(); } void PAImageProcessing::StartBeamformingThread() { QList nodes = this->GetDataManagerSelection(); if (nodes.empty()) return; mitk::DataStorage::Pointer storage = this->GetDataStorage(); mitk::DataNode::Pointer node = nodes.front(); if (!node) { // Nothing selected. 
Inform the user and return QMessageBox::information(NULL, "Template", "Please load and select an image before starting image processing."); return; } mitk::BaseData* data = node->GetData(); if (data) { // test if this data item is an image or not (could also be a surface or something totally different) mitk::Image* image = dynamic_cast(data); if (image) { UpdateBFSettings(image); std::stringstream message; std::string name; message << "Performing beamforming for image "; if (node->GetName(name)) { // a property called "name" was found for this DataNode message << "'" << name << "'"; m_OldNodeName = name; } else m_OldNodeName = " "; message << "."; MITK_INFO << message.str(); m_Controls.progressBar->setValue(0); m_Controls.progressBar->setVisible(true); m_Controls.ProgressInfo->setVisible(true); m_Controls.ProgressInfo->setText("started"); m_Controls.buttonApplyBeamforming->setText("working..."); DisableControls(); BeamformingThread *thread = new BeamformingThread(); connect(thread, &BeamformingThread::result, this, &PAImageProcessing::HandleBeamformingResults); connect(thread, &BeamformingThread::updateProgress, this, &PAImageProcessing::UpdateProgress); connect(thread, &BeamformingThread::finished, thread, &QObject::deleteLater); thread->setConfig(BFconfig); thread->setCutoff(m_Controls.Cutoff->value()); thread->setInputImage(image); MITK_INFO << "Started new thread for Beamforming"; thread->start(); } } } void PAImageProcessing::HandleBeamformingResults(mitk::Image::Pointer image) { auto newNode = mitk::DataNode::New(); newNode->SetData(image); // name the new Data node std::stringstream newNodeName; newNodeName << m_OldNodeName << " "; if (BFconfig.Algorithm == mitk::BeamformingFilter::beamformingSettings::BeamformingAlgorithm::DAS) newNodeName << "DAS bf, "; else if (BFconfig.Algorithm == mitk::BeamformingFilter::beamformingSettings::BeamformingAlgorithm::DMAS) newNodeName << "DMAS bf, "; if (BFconfig.DelayCalculationMethod == mitk::BeamformingFilter::beamformingSettings::DelayCalc::QuadApprox) newNodeName << "q. delay"; if (BFconfig.DelayCalculationMethod == mitk::BeamformingFilter::beamformingSettings::DelayCalc::Spherical) newNodeName << "s. delay"; newNode->SetName(newNodeName.str()); // update level window for the current dynamic range mitk::LevelWindow levelWindow; newNode->GetLevelWindow(levelWindow); auto data = newNode->GetData(); levelWindow.SetAuto(dynamic_cast(data), true, true); newNode->SetLevelWindow(levelWindow); // add new node to data storage this->GetDataStorage()->Add(newNode); // disable progress bar m_Controls.progressBar->setVisible(false); m_Controls.ProgressInfo->setVisible(false); m_Controls.buttonApplyBeamforming->setText("Apply Beamforming"); EnableControls(); // update rendering mitk::RenderingManager::GetInstance()->InitializeViews( dynamic_cast(data)->GetGeometry(), mitk::RenderingManager::REQUEST_UPDATE_ALL, true); mitk::RenderingManager::GetInstance()->RequestUpdateAll(); } void PAImageProcessing::StartBmodeThread() { QList nodes = this->GetDataManagerSelection(); if (nodes.empty()) return; mitk::DataStorage::Pointer storage = this->GetDataStorage(); mitk::DataNode::Pointer node = nodes.front(); if (!node) { // Nothing selected. 
Inform the user and return QMessageBox::information(NULL, "Template", "Please load and select an image before starting image processing."); return; } mitk::BaseData* data = node->GetData(); if (data) { // test if this data item is an image or not (could also be a surface or something totally different) mitk::Image* image = dynamic_cast(data); if (image) { UpdateBFSettings(image); std::stringstream message; std::string name; message << "Performing image processing for image "; if (node->GetName(name)) { // a property called "name" was found for this DataNode message << "'" << name << "'"; m_OldNodeName = name; } else m_OldNodeName = " "; message << "."; MITK_INFO << message.str(); m_Controls.buttonApplyBModeFilter->setText("working..."); DisableControls(); BmodeThread *thread = new BmodeThread(); connect(thread, &BmodeThread::result, this, &PAImageProcessing::HandleBmodeResults); connect(thread, &BmodeThread::finished, thread, &QObject::deleteLater); if(m_Controls.BModeMethod->currentText() == "Simple Abs Filter") thread->setConfig(m_UseLogfilter, m_ResampleSpacing, mitk::PhotoacousticImage::BModeMethod::Abs); else if(m_Controls.BModeMethod->currentText() == "Shape Detection") thread->setConfig(m_UseLogfilter, m_ResampleSpacing, mitk::PhotoacousticImage::BModeMethod::ShapeDetection); thread->setInputImage(image); MITK_INFO << "Started new thread for Image Processing"; thread->start(); } } } void PAImageProcessing::HandleBmodeResults(mitk::Image::Pointer image) { auto newNode = mitk::DataNode::New(); newNode->SetData(image); // name the new Data node std::stringstream newNodeName; newNodeName << m_OldNodeName << " "; newNodeName << "B-Mode"; newNode->SetName(newNodeName.str()); // update level window for the current dynamic range mitk::LevelWindow levelWindow; newNode->GetLevelWindow(levelWindow); auto data = newNode->GetData(); levelWindow.SetAuto(dynamic_cast(data), true, true); newNode->SetLevelWindow(levelWindow); // add new node to data storage this->GetDataStorage()->Add(newNode); // disable progress bar m_Controls.progressBar->setVisible(false); m_Controls.buttonApplyBModeFilter->setText("Apply B-mode Filter"); EnableControls(); // update rendering mitk::RenderingManager::GetInstance()->InitializeViews( dynamic_cast(data)->GetGeometry(), mitk::RenderingManager::REQUEST_UPDATE_ALL, true); mitk::RenderingManager::GetInstance()->RequestUpdateAll(); } void PAImageProcessing::StartCropThread() { QList nodes = this->GetDataManagerSelection(); if (nodes.empty()) return; mitk::DataStorage::Pointer storage = this->GetDataStorage(); mitk::DataNode::Pointer node = nodes.front(); if (!node) { // Nothing selected. 
Inform the user and return QMessageBox::information(NULL, "Template", "Please load and select an image before starting image cropping."); return; } mitk::BaseData* data = node->GetData(); if (data) { // test if this data item is an image or not (could also be a surface or something totally different) mitk::Image* image = dynamic_cast(data); if (image) { UpdateBFSettings(image); std::stringstream message; std::string name; message << "Performing image cropping for image "; if (node->GetName(name)) { // a property called "name" was found for this DataNode message << "'" << name << "'"; m_OldNodeName = name; } else m_OldNodeName = " "; message << "."; MITK_INFO << message.str(); m_Controls.buttonApplyCropFilter->setText("working..."); DisableControls(); CropThread *thread = new CropThread(); connect(thread, &CropThread::result, this, &PAImageProcessing::HandleCropResults); connect(thread, &CropThread::finished, thread, &QObject::deleteLater); thread->setConfig(m_Controls.CutoffAbove->value(), m_Controls.CutoffBelow->value()); thread->setInputImage(image); MITK_INFO << "Started new thread for Image Cropping"; thread->start(); } } } void PAImageProcessing::HandleCropResults(mitk::Image::Pointer image) { auto newNode = mitk::DataNode::New(); newNode->SetData(image); // name the new Data node std::stringstream newNodeName; newNodeName << m_OldNodeName << " "; newNodeName << "Cropped"; newNode->SetName(newNodeName.str()); // update level window for the current dynamic range mitk::LevelWindow levelWindow; newNode->GetLevelWindow(levelWindow); auto data = newNode->GetData(); levelWindow.SetAuto(dynamic_cast(data), true, true); newNode->SetLevelWindow(levelWindow); // add new node to data storage this->GetDataStorage()->Add(newNode); m_Controls.buttonApplyCropFilter->setText("Apply Crop Filter"); EnableControls(); // update rendering mitk::RenderingManager::GetInstance()->InitializeViews( dynamic_cast(data)->GetGeometry(), mitk::RenderingManager::REQUEST_UPDATE_ALL, true); mitk::RenderingManager::GetInstance()->RequestUpdateAll(); } void PAImageProcessing::StartBandpassThread() { QList nodes = this->GetDataManagerSelection(); if (nodes.empty()) return; mitk::DataStorage::Pointer storage = this->GetDataStorage(); mitk::DataNode::Pointer node = nodes.front(); if (!node) { // Nothing selected. 
Inform the user and return QMessageBox::information(NULL, "Template", "Please load and select an image before starting image cropping."); return; } mitk::BaseData* data = node->GetData(); if (data) { // test if this data item is an image or not (could also be a surface or something totally different) mitk::Image* image = dynamic_cast(data); if (image) { UpdateBFSettings(image); std::stringstream message; std::string name; message << "Performing Bandpass filter on image "; if (node->GetName(name)) { // a property called "name" was found for this DataNode message << "'" << name << "'"; m_OldNodeName = name; } else m_OldNodeName = " "; message << "."; MITK_INFO << message.str(); m_Controls.buttonApplyBandpass->setText("working..."); DisableControls(); BandpassThread *thread = new BandpassThread(); connect(thread, &BandpassThread::result, this, &PAImageProcessing::HandleBandpassResults); connect(thread, &BandpassThread::finished, thread, &QObject::deleteLater); float recordTime = image->GetDimension(1)*image->GetGeometry()->GetSpacing()[1] / 1000 / m_Controls.BPSpeedOfSound->value(); // add a safeguard so the program does not chrash when applying a Bandpass that reaches out of the bounds of the image float maxFrequency = 1 / (recordTime / image->GetDimension(1)) * image->GetDimension(1) / 2 / 2 / 1000; float BPHighPass = 1000000 * m_Controls.BPhigh->value(); // [Hz] float BPLowPass = maxFrequency - 1000000 * m_Controls.BPlow->value(); // [Hz] if (BPLowPass > maxFrequency && m_Controls.UseBP->isChecked()) { QMessageBox Msgbox; Msgbox.setText("LowPass too low, disabled it."); Msgbox.exec(); BPLowPass = 0; } if (BPLowPass < 0 && m_Controls.UseBP->isChecked()) { QMessageBox Msgbox; Msgbox.setText("LowPass too high, disabled it."); Msgbox.exec(); BPLowPass = 0; } if (BPHighPass > maxFrequency && m_Controls.UseBP->isChecked()) { QMessageBox Msgbox; Msgbox.setText("HighPass too high, disabled it."); Msgbox.exec(); BPHighPass = 0; } if (BPHighPass > maxFrequency - BFconfig.BPLowPass) { QMessageBox Msgbox; Msgbox.setText("HighPass higher than LowPass, disabled both."); Msgbox.exec(); BPHighPass = 0; BPLowPass = 0; } thread->setConfig(BPHighPass, BPLowPass, m_Controls.BPFalloff->value(), recordTime); thread->setInputImage(image); MITK_INFO << "Started new thread for Bandpass filter"; thread->start(); } } } void PAImageProcessing::HandleBandpassResults(mitk::Image::Pointer image) { auto newNode = mitk::DataNode::New(); newNode->SetData(image); // name the new Data node std::stringstream newNodeName; newNodeName << m_OldNodeName << " "; newNodeName << "Bandpassed"; newNode->SetName(newNodeName.str()); // update level window for the current dynamic range mitk::LevelWindow levelWindow; newNode->GetLevelWindow(levelWindow); auto data = newNode->GetData(); levelWindow.SetAuto(dynamic_cast(data), true, true); newNode->SetLevelWindow(levelWindow); // add new node to data storage this->GetDataStorage()->Add(newNode); m_Controls.buttonApplyBandpass->setText("Apply Bandpass"); EnableControls(); // update rendering mitk::RenderingManager::GetInstance()->InitializeViews( dynamic_cast(data)->GetGeometry(), mitk::RenderingManager::REQUEST_UPDATE_ALL, true); mitk::RenderingManager::GetInstance()->RequestUpdateAll(); } void PAImageProcessing::UpdateBounds() { if (!m_Controls.Partial->isChecked()) { m_Controls.boundLow->setEnabled(false); m_Controls.boundHigh->setEnabled(false); BFconfig.partial = false; return; } else { m_Controls.boundLow->setEnabled(true); m_Controls.boundHigh->setEnabled(true); BFconfig.partial = 
true; } if(m_Controls.boundLow->value()>m_Controls.boundHigh->value()) { MITK_INFO << "high bound < low bound -> setting both to beamform only first slice"; m_Controls.boundLow->setValue(0); m_Controls.boundHigh->setValue(0); BFconfig.CropBounds[0] = 0; BFconfig.CropBounds[1] = 0; } else { BFconfig.CropBounds[0] = m_Controls.boundLow->value(); BFconfig.CropBounds[1] = m_Controls.boundHigh->value(); } } void PAImageProcessing::UpdateProgress(int progress, std::string progressInfo) { if (progress < 100) m_Controls.progressBar->setValue(progress); else m_Controls.progressBar->setValue(100); m_Controls.ProgressInfo->setText(progressInfo.c_str()); qApp->processEvents(); } void PAImageProcessing::UpdateImageInfo() { QList nodes = this->GetDataManagerSelection(); if (nodes.empty()) return; mitk::DataNode::Pointer node = nodes.front(); if (!node) { // Nothing selected return; } mitk::BaseData* data = node->GetData(); if (data) { // test if this data item is an image or not (could also be a surface or something totally different) mitk::Image* image = dynamic_cast(data); if (image) { // beamforming configs if (m_Controls.UseImageSpacing->isChecked()) { m_Controls.ElementCount->setValue(image->GetDimension(0)); m_Controls.Pitch->setValue(image->GetGeometry()->GetSpacing()[0]); m_Controls.boundLow->setMaximum(image->GetDimension(2) - 1); } UpdateRecordTime(image); // bandpass configs float speedOfSound = m_Controls.BPSpeedOfSound->value(); // [m/s] float recordTime = image->GetDimension(1)*image->GetGeometry()->GetSpacing()[1] / 1000 / speedOfSound; std::stringstream frequency; float maxFrequency = 1 / (recordTime / image->GetDimension(1)) * image->GetDimension(1) / 2 / 2 / 1000; frequency << maxFrequency / 1000000; //[MHz] frequency << "MHz"; m_Controls.BPhigh->setMaximum(maxFrequency / 1000000); m_Controls.BPlow->setMaximum(maxFrequency / 1000000); frequency << " is the maximal allowed frequency for the selected image."; m_Controls.BPhigh->setToolTip(frequency.str().c_str()); m_Controls.BPlow->setToolTip(frequency.str().c_str()); m_Controls.BPhigh->setToolTipDuration(5000); m_Controls.BPlow->setToolTipDuration(5000); } } } void PAImageProcessing::OnSelectionChanged( berry::IWorkbenchPart::Pointer /*source*/, const QList& nodes ) { // iterate all selected objects, adjust warning visibility foreach( mitk::DataNode::Pointer node, nodes ) { if( node.IsNotNull() && dynamic_cast(node->GetData()) ) { m_Controls.labelWarning->setVisible( false ); m_Controls.buttonApplyBModeFilter->setEnabled( true ); m_Controls.labelWarning2->setVisible(false); m_Controls.buttonApplyCropFilter->setEnabled(true); m_Controls.labelWarning3->setVisible(false); m_Controls.buttonApplyBandpass->setEnabled(true); UpdateImageInfo(); return; } } m_Controls.labelWarning->setVisible( true ); m_Controls.buttonApplyBModeFilter->setEnabled( false ); m_Controls.labelWarning2->setVisible(true); m_Controls.buttonApplyCropFilter->setEnabled(false); m_Controls.labelWarning3->setVisible(true); m_Controls.buttonApplyBandpass->setEnabled(false); } void PAImageProcessing::UseResampling() { if (m_Controls.DoResampling->isChecked()) { m_Controls.ResamplingValue->setEnabled(true); m_ResampleSpacing = m_Controls.ResamplingValue->value(); } else { m_Controls.ResamplingValue->setEnabled(false); m_ResampleSpacing = 0; } } void PAImageProcessing::UseLogfilter() { m_UseLogfilter = m_Controls.Logfilter->isChecked(); } void PAImageProcessing::SetResampling() { m_ResampleSpacing = m_Controls.ResamplingValue->value(); } void 
PAImageProcessing::UpdateBFSettings(mitk::Image::Pointer image) { if ("DAS" == m_Controls.BFAlgorithm->currentText()) BFconfig.Algorithm = mitk::BeamformingFilter::beamformingSettings::BeamformingAlgorithm::DAS; else if ("DMAS" == m_Controls.BFAlgorithm->currentText()) BFconfig.Algorithm = mitk::BeamformingFilter::beamformingSettings::BeamformingAlgorithm::DMAS; if ("Quad. Approx." == m_Controls.DelayCalculation->currentText()) { BFconfig.DelayCalculationMethod = mitk::BeamformingFilter::beamformingSettings::DelayCalc::QuadApprox; } else if ("Spherical Wave" == m_Controls.DelayCalculation->currentText()) { BFconfig.DelayCalculationMethod = mitk::BeamformingFilter::beamformingSettings::DelayCalc::Spherical; } if ("Von Hann" == m_Controls.Apodization->currentText()) { BFconfig.Apod = mitk::BeamformingFilter::beamformingSettings::Apodization::Hann; } else if ("Hamming" == m_Controls.Apodization->currentText()) { BFconfig.Apod = mitk::BeamformingFilter::beamformingSettings::Apodization::Hamm; } else if ("Box" == m_Controls.Apodization->currentText()) { BFconfig.Apod = mitk::BeamformingFilter::beamformingSettings::Apodization::Box; } BFconfig.Pitch = m_Controls.Pitch->value() / 1000; // [m] BFconfig.SpeedOfSound = m_Controls.SpeedOfSound->value(); // [m/s] BFconfig.SamplesPerLine = m_Controls.Samples->value(); BFconfig.ReconstructionLines = m_Controls.Lines->value(); BFconfig.TransducerElements = m_Controls.ElementCount->value(); BFconfig.Angle = m_Controls.Angle->value(); // [deg] BFconfig.UseBP = m_Controls.UseBP->isChecked(); BFconfig.UseGPU = m_Controls.UseGPU->isChecked(); UpdateRecordTime(image); UpdateBounds(); } void PAImageProcessing::UpdateRecordTime(mitk::Image::Pointer image) { if (m_Controls.UseImageSpacing->isChecked()) { BFconfig.RecordTime = image->GetDimension(1)*image->GetGeometry()->GetSpacing()[1] / 1000000; // [s] BFconfig.TimeSpacing = image->GetGeometry()->GetSpacing()[1] / 1000000; MITK_INFO << "Calculated Scan Depth of " << BFconfig.RecordTime * BFconfig.SpeedOfSound * 100 << "cm"; } else { BFconfig.RecordTime = 2 * m_Controls.ScanDepth->value() / 1000 / BFconfig.SpeedOfSound; // [s] BFconfig.TimeSpacing = BFconfig.RecordTime / image->GetDimension(1); } if ("US Image" == m_Controls.ImageType->currentText()) { BFconfig.Photoacoustic = false; } else if ("PA Image" == m_Controls.ImageType->currentText()) { BFconfig.Photoacoustic = true; } } void PAImageProcessing::EnableControls() { m_Controls.DoResampling->setEnabled(true); UseResampling(); m_Controls.Logfilter->setEnabled(true); m_Controls.buttonApplyBModeFilter->setEnabled(true); m_Controls.CutoffAbove->setEnabled(true); m_Controls.CutoffBelow->setEnabled(true); m_Controls.Cutoff->setEnabled(true); m_Controls.buttonApplyCropFilter->setEnabled(true); m_Controls.buttonApplyBandpass->setEnabled(true); m_Controls.BFAlgorithm->setEnabled(true); m_Controls.DelayCalculation->setEnabled(true); m_Controls.ImageType->setEnabled(true); m_Controls.Apodization->setEnabled(true); m_Controls.UseBP->setEnabled(true); - m_Controls.UseGPU->setEnabled(true); + + #ifdef PHOTOACOUSTICS_USE_GPU + m_Controls.UseGPU->setEnabled(true); + #endif + m_Controls.BPhigh->setEnabled(true); m_Controls.BPlow->setEnabled(true); m_Controls.BPFalloff->setEnabled(true); m_Controls.UseImageSpacing->setEnabled(true); UseImageSpacing(); m_Controls.Pitch->setEnabled(true); m_Controls.ElementCount->setEnabled(true); m_Controls.SpeedOfSound->setEnabled(true); m_Controls.Samples->setEnabled(true); m_Controls.Lines->setEnabled(true); 
void PAImageProcessing::EnableControls()
{
  m_Controls.DoResampling->setEnabled(true);
  UseResampling();
  m_Controls.Logfilter->setEnabled(true);
  m_Controls.buttonApplyBModeFilter->setEnabled(true);
  m_Controls.CutoffAbove->setEnabled(true);
  m_Controls.CutoffBelow->setEnabled(true);
  m_Controls.Cutoff->setEnabled(true);
  m_Controls.buttonApplyCropFilter->setEnabled(true);
  m_Controls.buttonApplyBandpass->setEnabled(true);
  m_Controls.BFAlgorithm->setEnabled(true);
  m_Controls.DelayCalculation->setEnabled(true);
  m_Controls.ImageType->setEnabled(true);
  m_Controls.Apodization->setEnabled(true);
  m_Controls.UseBP->setEnabled(true);
-  m_Controls.UseGPU->setEnabled(true);
+
+  #ifdef PHOTOACOUSTICS_USE_GPU
+  m_Controls.UseGPU->setEnabled(true);
+  #endif
+
  m_Controls.BPhigh->setEnabled(true);
  m_Controls.BPlow->setEnabled(true);
  m_Controls.BPFalloff->setEnabled(true);
  m_Controls.UseImageSpacing->setEnabled(true);
  UseImageSpacing();
  m_Controls.Pitch->setEnabled(true);
  m_Controls.ElementCount->setEnabled(true);
  m_Controls.SpeedOfSound->setEnabled(true);
  m_Controls.Samples->setEnabled(true);
  m_Controls.Lines->setEnabled(true);
  m_Controls.Angle->setEnabled(true);
  m_Controls.buttonApplyBeamforming->setEnabled(true);
}

void PAImageProcessing::DisableControls()
{
  m_Controls.DoResampling->setEnabled(false);
  m_Controls.ResamplingValue->setEnabled(false);
  m_Controls.Logfilter->setEnabled(false);
  m_Controls.buttonApplyBModeFilter->setEnabled(false);
  m_Controls.CutoffAbove->setEnabled(false);
  m_Controls.CutoffBelow->setEnabled(false);
  m_Controls.Cutoff->setEnabled(false);
  m_Controls.buttonApplyCropFilter->setEnabled(false);
  m_Controls.buttonApplyBandpass->setEnabled(false);
  m_Controls.BFAlgorithm->setEnabled(false);
  m_Controls.DelayCalculation->setEnabled(false);
  m_Controls.ImageType->setEnabled(false);
  m_Controls.Apodization->setEnabled(false);
  m_Controls.UseBP->setEnabled(false);
-  m_Controls.UseGPU->setEnabled(false);
+
+  #ifdef PHOTOACOUSTICS_USE_GPU
+  m_Controls.UseGPU->setEnabled(false);
+  #endif
+
  m_Controls.BPhigh->setEnabled(false);
  m_Controls.BPlow->setEnabled(false);
  m_Controls.BPFalloff->setEnabled(false);
  m_Controls.UseImageSpacing->setEnabled(false);
  m_Controls.ScanDepth->setEnabled(false);
  m_Controls.Pitch->setEnabled(false);
  m_Controls.ElementCount->setEnabled(false);
  m_Controls.SpeedOfSound->setEnabled(false);
  m_Controls.Samples->setEnabled(false);
  m_Controls.Lines->setEnabled(false);
  m_Controls.Angle->setEnabled(false);
  m_Controls.buttonApplyBeamforming->setEnabled(false);
}

void PAImageProcessing::UseImageSpacing()
{
  if (m_Controls.UseImageSpacing->isChecked())
  {
    m_Controls.ScanDepth->setDisabled(true);
  }
  else
  {
    m_Controls.ScanDepth->setEnabled(true);
  }
}

void BeamformingThread::run()
{
  mitk::Image::Pointer resultImage;
  mitk::PhotoacousticImage::Pointer filterbank = mitk::PhotoacousticImage::New();

  std::function<void(int, std::string)> progressHandle = [this](int progress, std::string progressInfo)
  {
    emit updateProgress(progress, progressInfo);
  };

  resultImage = filterbank->ApplyBeamforming(m_InputImage, m_BFconfig, m_Cutoff, progressHandle);
  emit result(resultImage);
}

void BeamformingThread::setConfig(mitk::BeamformingFilter::beamformingSettings BFconfig)
{
  m_BFconfig = BFconfig;
}

void BeamformingThread::setInputImage(mitk::Image::Pointer image)
{
  m_InputImage = image;
}

void BeamformingThread::setCutoff(int cutoff)
{
  m_Cutoff = cutoff;
}

void BmodeThread::run()
{
  mitk::Image::Pointer resultImage;
  mitk::PhotoacousticImage::Pointer filterbank = mitk::PhotoacousticImage::New();

  resultImage = filterbank->ApplyBmodeFilter(m_InputImage, m_Method, m_UseLogfilter, m_ResampleSpacing);
  emit result(resultImage);
}

void BmodeThread::setConfig(bool useLogfilter, double resampleSpacing, mitk::PhotoacousticImage::BModeMethod method)
{
  m_UseLogfilter = useLogfilter;
  m_ResampleSpacing = resampleSpacing;
  m_Method = method;
}

void BmodeThread::setInputImage(mitk::Image::Pointer image)
{
  m_InputImage = image;
}

void CropThread::run()
{
  mitk::Image::Pointer resultImage;
  mitk::PhotoacousticImage::Pointer filterbank = mitk::PhotoacousticImage::New();

  resultImage = filterbank->ApplyCropping(m_InputImage, m_CutAbove, m_CutBelow, 0, 0, 0, m_InputImage->GetDimension(2) - 1);
  emit result(resultImage);
}

void CropThread::setConfig(unsigned int CutAbove, unsigned int CutBelow)
{
  m_CutAbove = CutAbove;
  m_CutBelow = CutBelow;
}

void CropThread::setInputImage(mitk::Image::Pointer image)
{
  m_InputImage = image;
}
void BandpassThread::run()
{
  mitk::Image::Pointer resultImage;
  mitk::PhotoacousticImage::Pointer filterbank = mitk::PhotoacousticImage::New();

  resultImage = filterbank->BandpassFilter(m_InputImage, m_RecordTime, m_BPHighPass, m_BPLowPass, m_TukeyAlpha);
  emit result(resultImage);
}

void BandpassThread::setConfig(float BPHighPass, float BPLowPass, float TukeyAlpha, float recordTime)
{
  m_BPHighPass = BPHighPass;
  m_BPLowPass = BPLowPass;
  m_TukeyAlpha = TukeyAlpha;
  m_RecordTime = recordTime;
}

void BandpassThread::setInputImage(mitk::Image::Pointer image)
{
  m_InputImage = image;
}
diff --git a/Plugins/org.mitk.gui.qt.photoacoustics.imageprocessing/src/internal/PAImageProcessing.h b/Plugins/org.mitk.gui.qt.photoacoustics.imageprocessing/src/internal/PAImageProcessing.h
index d97ed84f8a..17a8caec9d 100644
--- a/Plugins/org.mitk.gui.qt.photoacoustics.imageprocessing/src/internal/PAImageProcessing.h
+++ b/Plugins/org.mitk.gui.qt.photoacoustics.imageprocessing/src/internal/PAImageProcessing.h
@@ -1,185 +1,185 @@
/*===================================================================

The Medical Imaging Interaction Toolkit (MITK)

Copyright (c) German Cancer Research Center,
Division of Medical and Biological Informatics.
All rights reserved.

This software is distributed WITHOUT ANY WARRANTY; without
even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE.

See LICENSE.txt or http://www.mitk.org for details.

===================================================================*/
-
#ifndef PAImageProcessing_h
#define PAImageProcessing_h

#include
+
#include
#include

#include "ui_PAImageProcessingControls.h"

#include "mitkPhotoacousticBeamformingFilter.h"

Q_DECLARE_METATYPE(mitk::Image::Pointer)
Q_DECLARE_METATYPE(std::string)

class PAImageProcessing : public QmitkAbstractView
{
  // this is needed for all Qt objects that should have a Qt meta-object
  // (everything that derives from QObject and wants to have signal/slots)
  Q_OBJECT

  public:
    static const std::string VIEW_ID;

    PAImageProcessing();

  protected slots:
    /// \brief Called when the user clicks the GUI button
    void UpdateBounds();

    void UseResampling();
    void UseLogfilter();
    void SetResampling();
    void UseImageSpacing();
    void UpdateImageInfo();
    void UpdateRecordTime(mitk::Image::Pointer image);

    void HandleBeamformingResults(mitk::Image::Pointer image);
    void StartBeamformingThread();

    void HandleBmodeResults(mitk::Image::Pointer image);
    void StartBmodeThread();

    void HandleCropResults(mitk::Image::Pointer image);
    void StartCropThread();

    void HandleBandpassResults(mitk::Image::Pointer image);
    void StartBandpassThread();

    void UpdateProgress(int progress, std::string progressInfo);

    void BatchProcessing();
    void UpdateSaveBoxes();

    void ChangedSOSBandpass();
    void ChangedSOSBeamforming();

  protected:
    virtual void CreateQtPartControl(QWidget *parent) override;

    virtual void SetFocus() override;

    /// \brief called by QmitkFunctionality when DataManager's selection has changed
    virtual void OnSelectionChanged( berry::IWorkbenchPart::Pointer source,
                                     const QList<mitk::DataNode::Pointer>& nodes ) override;

    Ui::PAImageProcessingControls m_Controls;

    float m_ResampleSpacing;
    bool m_UseLogfilter;
    std::string m_OldNodeName;

    mitk::BeamformingFilter::beamformingSettings BFconfig;

    void UpdateBFSettings(mitk::Image::Pointer image);

    void EnableControls();
    void DisableControls();
};

class BeamformingThread : public QThread
{
  Q_OBJECT
  void run() Q_DECL_OVERRIDE;

  signals:
    void result(mitk::Image::Pointer);
    void updateProgress(int, std::string);

  public:
    void setConfig(mitk::BeamformingFilter::beamformingSettings BFconfig);
    void setInputImage(mitk::Image::Pointer image);
    void setCutoff(int cutoff);

  protected:
    mitk::BeamformingFilter::beamformingSettings m_BFconfig;
    mitk::Image::Pointer m_InputImage;
    int m_Cutoff;
};
class BmodeThread : public QThread
{
  Q_OBJECT
  void run() Q_DECL_OVERRIDE;

  signals:
    void result(mitk::Image::Pointer);

  public:
    enum BModeMethod { ShapeDetection, Abs };

    void setConfig(bool useLogfilter, double resampleSpacing, mitk::PhotoacousticImage::BModeMethod method);
    void setInputImage(mitk::Image::Pointer image);

  protected:
    mitk::Image::Pointer m_InputImage;
    mitk::PhotoacousticImage::BModeMethod m_Method;
    bool m_UseLogfilter;
    double m_ResampleSpacing;
};

class CropThread : public QThread
{
  Q_OBJECT
  void run() Q_DECL_OVERRIDE;

  signals:
    void result(mitk::Image::Pointer);

  public:
    void setConfig(unsigned int CutAbove, unsigned int CutBelow);
    void setInputImage(mitk::Image::Pointer image);

  protected:
    mitk::Image::Pointer m_InputImage;
    unsigned int m_CutAbove;
    unsigned int m_CutBelow;
};

class BandpassThread : public QThread
{
  Q_OBJECT
  void run() Q_DECL_OVERRIDE;

  signals:
    void result(mitk::Image::Pointer);

  public:
    void setConfig(float BPHighPass, float BPLowPass, float TukeyAlpha, float recordTime);
    void setInputImage(mitk::Image::Pointer image);

  protected:
    mitk::Image::Pointer m_InputImage;
    float m_BPHighPass;
    float m_BPLowPass;
    float m_TukeyAlpha;
    float m_RecordTime;
};

#endif // PAImageProcessing_h
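A minimal sketch of how these worker threads are presumably driven from the plugin (the handler names come from the slots declared above, but the exact wiring is an assumption, not taken from the diff): result and updateProgress cross a thread boundary, which is why mitk::Image::Pointer and std::string are declared as Qt metatypes in the header; queued connections additionally need a runtime qRegisterMetaType call, presumably done during plugin initialization.

// Hypothetical usage inside a PAImageProcessing member function such as StartBeamformingThread().
qRegisterMetaType<mitk::Image::Pointer>();
qRegisterMetaType<std::string>();

BeamformingThread *thread = new BeamformingThread();
connect(thread, &BeamformingThread::result, this, &PAImageProcessing::HandleBeamformingResults);
connect(thread, &BeamformingThread::updateProgress, this, &PAImageProcessing::UpdateProgress);
connect(thread, &BeamformingThread::finished, thread, &QObject::deleteLater);

thread->setConfig(BFconfig);
thread->setInputImage(image);                   // image: the currently selected mitk::Image
thread->setCutoff(m_Controls.Cutoff->value());
thread->start();                                // run() executes ApplyBeamforming off the GUI thread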