diff --git a/Modules/PhotoacousticsAlgorithms/MitkPABeamformingTool/PABeamformingTool.cpp b/Modules/PhotoacousticsAlgorithms/MitkPABeamformingTool/PABeamformingTool.cpp
index e773b3467f..1a9ce11188 100644
--- a/Modules/PhotoacousticsAlgorithms/MitkPABeamformingTool/PABeamformingTool.cpp
+++ b/Modules/PhotoacousticsAlgorithms/MitkPABeamformingTool/PABeamformingTool.cpp
@@ -1,229 +1,230 @@
/*===================================================================

The Medical Imaging Interaction Toolkit (MITK)

Copyright (c) German Cancer Research Center,
Division of Medical and Biological Informatics.
All rights reserved.

This software is distributed WITHOUT ANY WARRANTY; without
even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE.

See LICENSE.txt or http://www.mitk.org for details.

===================================================================*/

#include
#include
#include
#include
#include
#include
#include
#include
#include

struct InputParameters
{
  mitk::Image::Pointer inputImage;
  std::string outputFilename;
  bool verbose;
  float speedOfSound;
  unsigned int cutoff;
  float angle;
  unsigned int samples;
  mitk::BeamformingSettings::BeamformingAlgorithm algorithm;
};

InputParameters parseInput(int argc, char* argv[])
{
  mitkCommandLineParser parser;
  parser.setCategory("MITK-Photoacoustics");
  parser.setTitle("Mitk Photoacoustics Beamforming Tool");
  parser.setDescription("Reads a nrrd file as input and applies a beamforming method as set with the parameters.");
  parser.setContributor("Computer Assisted Medical Interventions, DKFZ");

  parser.setArgumentPrefix("--", "-");

  parser.beginGroup("Required parameters");
  parser.addArgument(
    "inputImage", "i", mitkCommandLineParser::InputImage,
    "Input image (mitk::Image)", "input image (.nrrd file)",
    us::Any(), false);
  parser.addArgument(
    "output", "o", mitkCommandLineParser::OutputFile,
    "Output filename", "output image (.nrrd file)",
    us::Any(), false);
  parser.endGroup();

  parser.beginGroup("Optional parameters");
  parser.addArgument(
    "verbose", "v", mitkCommandLineParser::Bool,
    "Verbose Output", "Whether to produce verbose (debug) output (default: false).");
  parser.addArgument(
    "speed-of-sound", "sos", mitkCommandLineParser::Float,
    "Speed of Sound [m/s]", "The average speed of sound as assumed for the reconstruction in [m/s] (default: 1500).");
  parser.addArgument(
    "cutoff", "co", mitkCommandLineParser::Int,
    "cutoff margin on the top of the image [pixels]", "The number of pixels to be ignored for this filter in [pixels] (default: 0).");
  parser.addArgument(
    "angle", "a", mitkCommandLineParser::Float,
    "field of view of the transducer elements [degrees]", "The field of view of each individual transducer element [degrees] (default: 27).");
  parser.addArgument(
    "samples", "s", mitkCommandLineParser::Int,
    "samples per reconstruction line [pixels]", "The pixels along the y axis in the beamformed image [pixels] (default: 2048).");
  parser.addArgument(
    "algorithm", "alg", mitkCommandLineParser::String,
    "one of [\"DAS\", \"DMAS\", \"sDMAS\"]", "The beamforming algorithm to be used for reconstruction (default: DAS).");
  parser.endGroup();

  InputParameters input;

  std::map<std::string, us::Any> parsedArgs = parser.parseArguments(argc, argv);
  if (parsedArgs.size() == 0)
    exit(-1);

  if (parsedArgs.count("verbose"))
  {
    input.verbose = true;
  }
  else
  {
    input.verbose = false;
  }
  MITK_INFO(input.verbose) << "### VERBOSE OUTPUT ENABLED ###";

  if (parsedArgs.count("inputImage"))
  {
    MITK_INFO(input.verbose) << "Reading input image...";
    input.inputImage = mitk::IOUtil::Load<mitk::Image>(us::any_cast<std::string>(parsedArgs["inputImage"]));
    MITK_INFO(input.verbose) << "Reading input image...[Done]";
  }
  else
  {
    mitkThrow() << "No input image given.";
  }

  if (parsedArgs.count("output"))
  {
    input.outputFilename = us::any_cast<std::string>(parsedArgs["output"]);
  }
  else
  {
    mitkThrow() << "No output image path given.";
  }

  if (parsedArgs.count("speed-of-sound"))
  {
    input.speedOfSound = us::any_cast<float>(parsedArgs["speed-of-sound"]);
  }
  else
  {
    input.speedOfSound = 1500;
  }

  if (parsedArgs.count("cutoff"))
  {
    input.cutoff = us::any_cast<int>(parsedArgs["cutoff"]);
  }
  else
  {
    input.cutoff = 0;
  }

  if (parsedArgs.count("angle"))
  {
    input.angle = us::any_cast<float>(parsedArgs["angle"]);
  }
  else
  {
    input.angle = 27;
  }

  if (parsedArgs.count("samples"))
  {
    input.samples = us::any_cast<int>(parsedArgs["samples"]);
  }
  else
  {
    input.samples = 2048;
  }

  if (parsedArgs.count("algorithm"))
  {
    std::string algorithm = us::any_cast<std::string>(parsedArgs["algorithm"]);
    MITK_INFO(input.verbose) << "Parsing algorithm: " << algorithm;
    if (algorithm == "DAS")
      input.algorithm = mitk::BeamformingSettings::BeamformingAlgorithm::DAS;
    else if (algorithm == "DMAS")
      input.algorithm = mitk::BeamformingSettings::BeamformingAlgorithm::DMAS;
    else if (algorithm == "sDMAS")
      input.algorithm = mitk::BeamformingSettings::BeamformingAlgorithm::sDMAS;
    else
    {
      MITK_INFO(input.verbose) << "Not a valid beamforming algorithm: " << algorithm << "; reverting to DAS";
      input.algorithm = mitk::BeamformingSettings::BeamformingAlgorithm::DAS;
    }
    MITK_INFO(input.verbose) << "Successfully set algorithm: " << algorithm;
  }
  else
  {
    input.algorithm = mitk::BeamformingSettings::BeamformingAlgorithm::DAS;
    MITK_INFO(input.verbose) << "No algorithm given. Using DAS.";
  }

  return input;
}

mitk::BeamformingSettings::Pointer ParseSettings(InputParameters &input)
{
  mitk::BeamformingSettings::Pointer outputSettings = mitk::BeamformingSettings::New(
    (float)(input.inputImage->GetGeometry()->GetSpacing()[0] / 1000),
    (float)(input.speedOfSound),
    (float)(input.inputImage->GetGeometry()->GetSpacing()[1] / 1000000),
    input.angle,
    true,
    input.inputImage->GetDimension(1),
    input.inputImage->GetDimension(0),
    input.inputImage->GetDimensions(),
+    0.04,
    false,
    16,
    mitk::BeamformingSettings::DelayCalc::Spherical,
    mitk::BeamformingSettings::Apodization::Box,
    input.inputImage->GetDimension(0),
    input.algorithm
  );

  return outputSettings;
}

int main(int argc, char * argv[])
{
  auto input = parseInput(argc, argv);

  MITK_INFO(input.verbose) << "Beamforming input image...";
  mitk::PhotoacousticFilterService::Pointer m_BeamformingService = mitk::PhotoacousticFilterService::New();

  mitk::BeamformingSettings::Pointer settings = ParseSettings(input);

  mitk::CastToFloatImageFilter::Pointer castFilter = mitk::CastToFloatImageFilter::New();
  castFilter->SetInput(input.inputImage);
  castFilter->Update();
  auto floatImage = castFilter->GetOutput();

  auto output = m_BeamformingService->ApplyBeamforming(floatImage, settings);

  MITK_INFO(input.verbose) << "Applying BModeFilter to image...";
  auto output2 = m_BeamformingService->ApplyBmodeFilter(output, mitk::PhotoacousticFilterService::EnvelopeDetection, false);
  MITK_INFO(input.verbose) << "Applying BModeFilter to image...[Done]";

  MITK_INFO(input.verbose) << "Saving image...";
  mitk::IOUtil::Save(output2, input.outputFilename);
  MITK_INFO(input.verbose) << "Saving image...[Done]";

  MITK_INFO(input.verbose) << "Beamforming input image...[Done]";
}
diff --git a/Modules/PhotoacousticsAlgorithms/include/mitkBeamformingSettings.h b/Modules/PhotoacousticsAlgorithms/include/mitkBeamformingSettings.h
index 02e30c32fd..5b76798dee 100644
--- a/Modules/PhotoacousticsAlgorithms/include/mitkBeamformingSettings.h
+++ b/Modules/PhotoacousticsAlgorithms/include/mitkBeamformingSettings.h
@@ -1,223 +1,223 @@
/*===================================================================

The Medical Imaging Interaction Toolkit (MITK)

Copyright (c) German Cancer Research Center,
Division of Medical and Biological Informatics.
All rights reserved.

This software is distributed WITHOUT ANY WARRANTY; without
even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE.

See LICENSE.txt or http://www.mitk.org for details.

===================================================================*/

#ifndef MITK_BEAMFORMING_SETTINGS
#define MITK_BEAMFORMING_SETTINGS

#include
#include
#include
#include

namespace mitk {
  /*!
  * \brief Class holding the configuration data for the beamforming filters mitk::BeamformingFilter and mitk::PhotoacousticOCLBeamformingFilter
  *
  * A detailed description can be seen below. All parameters should be set manually for successful beamforming.
  */
  class MITKPHOTOACOUSTICSALGORITHMS_EXPORT BeamformingSettings : public itk::Object
  {
  public:
    mitkClassMacroItkParent(BeamformingSettings, itk::Object);
    mitkNewMacro1Param(BeamformingSettings, std::string);
    itkCloneMacro(Self);

    /** \brief Available delay calculation methods:
    * - Spherical delay for best results.
    * - DEPRECATED quadratic Taylor approximation for slightly faster results with hardly any quality loss.
    */
    enum DelayCalc { QuadApprox, Spherical };

    /** \brief Available apodization functions:
    * - Hamming function.
    * - Von-Hann function.
    * - Box function.
    */
    enum Apodization { Hamm, Hann, Box };

    /** \brief Available beamforming algorithms:
    * - DAS (Delay and sum).
    * - DMAS (Delay multiply and sum).
    * - sDMAS (signed Delay multiply and sum).
    */
    enum BeamformingAlgorithm { DMAS, DAS, sDMAS };

    itkGetConstMacro(PitchInMeters, float);
    itkGetConstMacro(SpeedOfSound, float);
    itkGetConstMacro(TimeSpacing, float);
    itkGetConstMacro(Angle, float);
    itkGetConstMacro(IsPhotoacousticImage, bool);
    itkGetConstMacro(TransducerElements, unsigned int);
    itkGetConstMacro(SamplesPerLine, unsigned int);
    itkGetConstMacro(ReconstructionLines, unsigned int);
    itkGetConstMacro(InputDim, const unsigned int*);
    itkGetConstMacro(UseGPU, bool);
    itkGetConstMacro(GPUBatchSize, unsigned int);
    itkGetConstMacro(DelayCalculationMethod, DelayCalc);
    itkGetConstMacro(ApodizationFunction, const float*);
    itkGetConstMacro(Apod, Apodization);
    itkGetConstMacro(ApodizationArraySize, int);
    itkGetConstMacro(Algorithm, BeamformingAlgorithm);
    itkGetConstMacro(ReconstructionDepth, float);

    /** \brief Function for mitk::PhotoacousticOCLBeamformingFilter to check whether buffers need to be updated;
    * this method only checks parameters relevant for the OpenCL implementation.
    */
    static bool SettingsChangedOpenCL(const BeamformingSettings::Pointer lhs, const BeamformingSettings::Pointer rhs)
    {
      return !((abs(lhs->GetAngle() - rhs->GetAngle()) < 0.01f) && // 0.01 degree error margin
        (lhs->GetApod() == rhs->GetApod()) &&
        (lhs->GetDelayCalculationMethod() == rhs->GetDelayCalculationMethod()) &&
        (lhs->GetIsPhotoacousticImage() == rhs->GetIsPhotoacousticImage()) &&
        (abs(lhs->GetPitchInMeters() - rhs->GetPitchInMeters()) < 0.000001f) && // 0.001 mm error margin
        (lhs->GetReconstructionLines() == rhs->GetReconstructionLines()) &&
        (lhs->GetSamplesPerLine() == rhs->GetSamplesPerLine()) &&
        (lhs->GetReconstructionDepth() == rhs->GetReconstructionDepth()) &&
        (abs(lhs->GetSpeedOfSound() - rhs->GetSpeedOfSound()) < 0.01f) &&
        (abs(lhs->GetTimeSpacing() - rhs->GetTimeSpacing()) < 0.00000000001f) && // 0.01 ns error margin
        (lhs->GetTransducerElements() == rhs->GetTransducerElements()));
    }

    static Pointer New(float pitchInMeters,
      float speedOfSound,
      float timeSpacing,
      float angle,
      bool isPhotoacousticImage,
      unsigned int samplesPerLine,
      unsigned int reconstructionLines,
      unsigned int* inputDim,
      float reconstructionDepth,
      bool useGPU,
      unsigned int GPUBatchSize,
      DelayCalc delayCalculationMethod,
      Apodization apod,
      unsigned int apodizationArraySize,
      BeamformingAlgorithm algorithm)
    {
      Pointer smartPtr = new BeamformingSettings(pitchInMeters,
        speedOfSound,
        timeSpacing,
        angle,
        isPhotoacousticImage,
        samplesPerLine,
        reconstructionLines,
        inputDim,
        reconstructionDepth,
        useGPU,
        GPUBatchSize,
        delayCalculationMethod,
        apod,
        apodizationArraySize,
        algorithm);
      smartPtr->UnRegister();
      return smartPtr;
    }

  protected:
    /**
    */
    BeamformingSettings(std::string xmlFile);

    /**
    */
    BeamformingSettings(float pitchInMeters,
      float speedOfSound,
      float timeSpacing,
      float angle,
      bool isPhotoacousticImage,
      unsigned int samplesPerLine,
      unsigned int reconstructionLines,
      unsigned int* inputDim,
      float reconstructionDepth,
      bool useGPU,
      unsigned int GPUBatchSize,
      DelayCalc delayCalculationMethod,
      Apodization apod,
      unsigned int apodizationArraySize,
      BeamformingAlgorithm algorithm
    );

    ~BeamformingSettings();

    /** \brief Pitch of the used transducer in [m].
    */
    float m_PitchInMeters;

    /** \brief Speed of sound in the used medium in [m/s].
    */
    float m_SpeedOfSound;

    /** \brief The time spacing of the input image
    */
    float m_TimeSpacing; // [s]

    /** \brief The angle of the transducer elements
    */
    float m_Angle;

    /** \brief Flag whether the processed image is a photoacoustic image or an ultrasound image
    */
    bool m_IsPhotoacousticImage;

    /** \brief How many transducer elements the used transducer had.
    */
    unsigned int m_TransducerElements;

    /** \brief How many vertical samples should be used in the final image.
    */
    unsigned int m_SamplesPerLine;

    /** \brief How many lines should be reconstructed in the final image.
    */
    unsigned int m_ReconstructionLines;

    /** \brief Sets the dimensions of the inputImage.
    */
    const unsigned int* m_InputDim;

+    /** \brief The Depth up to which the filter should reconstruct the image [m]
+    */
+    float m_ReconstructionDepth;
+
    /** \brief Decides whether GPU computing should be used
    */
    bool m_UseGPU;

    /** \brief Sets the amount of image slices in batches when GPU is used
    */
    unsigned int m_GPUBatchSize;

    /** \brief Sets how the delays for beamforming should be calculated.
    */
    DelayCalc m_DelayCalculationMethod;

    const float* m_ApodizationFunction;

    /** \brief Sets the used apodization function.
    */
    Apodization m_Apod;

    /** \brief Sets the resolution of the apodization array (must be greater than 0).
    */
    int m_ApodizationArraySize;

    /** \brief Sets the used beamforming algorithm.
    */
    BeamformingAlgorithm m_Algorithm;
-
-    /** \brief The Depth up to which the filter should reconstruct the image [m]
-    */
-    float m_ReconstructionDepth;
  };
}
#endif //MITK_BEAMFORMING_SETTINGS
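Since GetReconstructionDepth() participates in SettingsChangedOpenCL() above, a change of the reconstruction depth now counts as a settings change for the OpenCL beamformer. A minimal usage sketch follows; the function name UpdateBuffersIfNeeded and the variable names are placeholders, only the static comparison call comes from the header above.

// Sketch: rebuild OpenCL-side resources only when a relevant parameter changed.
#include <mitkBeamformingSettings.h>

void UpdateBuffersIfNeeded(mitk::BeamformingSettings::Pointer oldSettings,
                           mitk::BeamformingSettings::Pointer newSettings)
{
  if (mitk::BeamformingSettings::SettingsChangedOpenCL(oldSettings, newSettings))
  {
    // A differing reconstruction depth, pitch, angle, etc. ends up here:
    // re-create the buffers / delay tables for the new settings.
  }
}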