diff --git a/CMake/PackageDepends/MITK_OpenCV_Config.cmake b/CMake/PackageDepends/MITK_OpenCV_Config.cmake index e8e59a903c..32b3c5888a 100644 --- a/CMake/PackageDepends/MITK_OpenCV_Config.cmake +++ b/CMake/PackageDepends/MITK_OpenCV_Config.cmake @@ -1,7 +1,12 @@ -list(APPEND ALL_LIBRARIES ${OpenCV_LIBS}) -list(APPEND ALL_INCLUDE_DIRECTORIES ${OpenCV_INCLUDE_DIRS}) +foreach(opencv_module ${OpenCV_REQUIRED_COMPONENTS_BY_MODULE}) + if(NOT opencv_module MATCHES "^opencv_") + set(opencv_module "opencv_${opencv_module}") + endif() + list(APPEND _opencv_required_components_by_module ${opencv_module}) +endforeach() -# adding option for videoinput library on windows (for directshow based frame grabbing) -if(WIN32) - option(MITK_USE_videoInput "Use videoInput (DirectShow wrapper) library" OFF) -endif(WIN32) +find_package(OpenCV COMPONENTS ${_opencv_required_components_by_module} REQUIRED) + +foreach(opencv_module ${_opencv_required_components_by_module}) + list(APPEND ALL_LIBRARIES ${opencv_module}) +endforeach() diff --git a/CMakeExternals/OpenCV.cmake b/CMakeExternals/OpenCV.cmake index 36aacee775..65d61e2f88 100644 --- a/CMakeExternals/OpenCV.cmake +++ b/CMakeExternals/OpenCV.cmake @@ -1,85 +1,72 @@ #----------------------------------------------------------------------------- # OpenCV #----------------------------------------------------------------------------- if(MITK_USE_OpenCV) # Sanity checks if(DEFINED OpenCV_DIR AND NOT EXISTS ${OpenCV_DIR}) message(FATAL_ERROR "OpenCV_DIR variable is defined but corresponds to non-existing directory") endif() set(proj OpenCV) set(proj_DEPENDENCIES) set(OpenCV_DEPENDS ${proj}) if(NOT DEFINED OpenCV_DIR) - set(additional_cmake_args - -DBUILD_opencv_java:BOOL=OFF - -DBUILD_opencv_ts:BOOL=OFF - -DBUILD_PERF_TESTS:BOOL=OFF - -DBUILD_opencv_python:BOOL=OFF - -DBUILD_opencv_python3:BOOL=OFF - -DBUILD_opencv_python_bindings_generator:BOOL=OFF - #-DBUILD_NEW_PYTHON_SUPPORT:BOOL=OFF - ) - - if(MITK_USE_Qt5) - list(APPEND 
additional_cmake_args - -DWITH_QT:BOOL=OFF - -DWITH_QT_OPENGL:BOOL=OFF - -DQT_QMAKE_EXECUTABLE:FILEPATH=${QT_QMAKE_EXECUTABLE} - ) - endif() + set(additional_cmake_args) if(CTEST_USE_LAUNCHERS) list(APPEND additional_cmake_args "-DCMAKE_PROJECT_${proj}_INCLUDE:FILEPATH=${CMAKE_ROOT}/Modules/CTestUseLaunchers.cmake" - ) + ) endif() mitk_query_custom_ep_vars() - set(opencv_url ${MITK_THIRDPARTY_DOWNLOAD_PREFIX_URL}/opencv-3.4.16.tar.gz) - set(opencv_url_md5 ce69441a75d3358e22fbfb579fab52a9) - ExternalProject_Add(${proj} + GIT_REPOSITORY https://github.com/opencv/opencv.git + GIT_TAG 4.6.0 LIST_SEPARATOR ${sep} - URL ${opencv_url} - URL_MD5 ${opencv_url_md5} CMAKE_GENERATOR ${gen} CMAKE_GENERATOR_PLATFORM ${gen_platform} CMAKE_ARGS ${ep_common_args} + -DBUILD_JAVA:BOOL=OFF + -DBUILD_opencv_ts:BOOL=OFF + -DBUILD_PERF_TESTS:BOOL=OFF + -DBUILD_opencv_python3:BOOL=OFF + -DBUILD_opencv_python_bindings_generator:BOOL=OFF + -DBUILD_opencv_python_tests:BOOL=OFF + -DWITH_QT:BOOL=OFF -DBUILD_TESTS:BOOL=OFF -DBUILD_DOCS:BOOL=OFF -DBUILD_EXAMPLES:BOOL=OFF - -DBUILD_DOXYGEN_DOCS:BOOL=OFF -DWITH_CUDA:BOOL=OFF -DWITH_VTK:BOOL=OFF -DENABLE_CXX11:BOOL=ON -DWITH_IPP:BOOL=OFF -DBUILD_IPP_IW:BOOL=OFF -DENABLE_PRECOMPILED_HEADERS:BOOL=OFF ${additional_cmake_args} ${${proj}_CUSTOM_CMAKE_ARGS} CMAKE_CACHE_ARGS ${ep_common_cache_args} ${${proj}_CUSTOM_CMAKE_CACHE_ARGS} CMAKE_CACHE_DEFAULT_ARGS ${ep_common_cache_default_args} ${${proj}_CUSTOM_CMAKE_CACHE_DEFAULT_ARGS} DEPENDS ${proj_DEPENDENCIES} ) set(OpenCV_DIR ${ep_prefix}) mitkFunctionInstallExternalCMakeProject(${proj}) else() mitkMacroEmptyExternalProject(${proj} "${proj_DEPENDENCIES}") endif() endif() diff --git a/Modules/CameraCalibration/CMakeLists.txt b/Modules/CameraCalibration/CMakeLists.txt index 361c50a164..bf8e292636 100644 --- a/Modules/CameraCalibration/CMakeLists.txt +++ b/Modules/CameraCalibration/CMakeLists.txt @@ -1,7 +1,7 @@ MITK_CREATE_MODULE( DEPENDS MitkIGT - PACKAGE_DEPENDS PUBLIC OpenCV + PACKAGE_DEPENDS PUBLIC 
OpenCV|calib3d ) # add testing dir add_subdirectory(Testing) diff --git a/Modules/OpenCVVideoSupport/CMakeLists.txt b/Modules/OpenCVVideoSupport/CMakeLists.txt index 2040203b4a..10db853b78 100644 --- a/Modules/OpenCVVideoSupport/CMakeLists.txt +++ b/Modules/OpenCVVideoSupport/CMakeLists.txt @@ -1,19 +1,24 @@ -set(dependencies OpenCV) +set(dependencies OpenCV|videoio+calib3d) + +# adding option for videoinput library on windows (for directshow based frame grabbing) +if(WIN32 AND MITK_USE_OpenCV) + option(MITK_USE_videoInput "Use videoInput (DirectShow wrapper) library" OFF) +endif() + if(MITK_USE_videoInput) - set(dependencies ${dependencies} videoInput) -endif(MITK_USE_videoInput) + list(APPEND dependencies videoInput) +endif() mitk_create_module( INCLUDE_DIRS Commands DEPENDS MitkAlgorithmsExt PACKAGE_DEPENDS PUBLIC ${dependencies} - ADDITIONAL_LIBS ${OPENCVVIDEOSUPPORT_ADDITIONAL_LIBS} ) if(MODULE_IS_ENABLED) if(MITK_USE_Qt5) add_subdirectory(UI) endif() endif() add_subdirectory(Testing) diff --git a/Modules/OpenCVVideoSupport/Commands/mitkConvertGrayscaleOpenCVImageFilter.cpp b/Modules/OpenCVVideoSupport/Commands/mitkConvertGrayscaleOpenCVImageFilter.cpp index 56a6241f47..91385c18c3 100644 --- a/Modules/OpenCVVideoSupport/Commands/mitkConvertGrayscaleOpenCVImageFilter.cpp +++ b/Modules/OpenCVVideoSupport/Commands/mitkConvertGrayscaleOpenCVImageFilter.cpp @@ -1,33 +1,34 @@ /*============================================================================ The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center (DKFZ) All rights reserved. Use of this source code is governed by a 3-clause BSD license that can be found in the LICENSE file. 
============================================================================*/ #include "mitkConvertGrayscaleOpenCVImageFilter.h" #include +#include namespace mitk { bool ConvertGrayscaleOpenCVImageFilter::OnFilterImage( cv::Mat& image ) { // there is nothing to do if the image is grayscale already if (image.channels() == 1) { return true; } cv::Mat buffer; cv::cvtColor(image, buffer, CV_RGB2GRAY, 1); // content of buffer should now be the content of image buffer.copyTo(image); return true; } } // namespace mitk diff --git a/Modules/OpenCVVideoSupport/Commands/mitkGrabCutOpenCVImageFilter.cpp b/Modules/OpenCVVideoSupport/Commands/mitkGrabCutOpenCVImageFilter.cpp index 294c802013..dd911780a9 100644 --- a/Modules/OpenCVVideoSupport/Commands/mitkGrabCutOpenCVImageFilter.cpp +++ b/Modules/OpenCVVideoSupport/Commands/mitkGrabCutOpenCVImageFilter.cpp @@ -1,391 +1,392 @@ /*============================================================================ The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center (DKFZ) All rights reserved. Use of this source code is governed by a 3-clause BSD license that can be found in the LICENSE file. ============================================================================*/ // mitk headers #include "mitkGrabCutOpenCVImageFilter.h" #include "mitkPointSet.h" #include +#include // This is a magic number defined in "grabcut.cpp" of OpenCV. // GrabCut function crashes if less than this number of model // points are given. There must be at least as much model points // as components of the Gaussian Mixture Model. 
#define GMM_COMPONENTS_COUNT 5 mitk::GrabCutOpenCVImageFilter::GrabCutOpenCVImageFilter() : m_ModelPointsDilationSize(0), m_UseOnlyRegionAroundModelPoints(false), m_CurrentProcessImageNum(0), m_InputImageId(AbstractOpenCVImageFilter::INVALID_IMAGE_ID), m_ResultImageId(AbstractOpenCVImageFilter::INVALID_IMAGE_ID), m_StopThread(false) { m_Thread = std::thread(&GrabCutOpenCVImageFilter::SegmentationWorker, this); } mitk::GrabCutOpenCVImageFilter::~GrabCutOpenCVImageFilter() { // terminate worker thread on destruction m_StopThread = true; m_WorkerBarrier.notify_all(); if (m_Thread.joinable()) m_Thread.detach(); } bool mitk::GrabCutOpenCVImageFilter::OnFilterImage( cv::Mat& image ) { if ( image.empty() ) { MITK_WARN << "Filtering empty image?"; return false; } // make sure that the image is an rgb image as needed // by the GrabCut algorithm if (image.type() != CV_8UC3) { cv::Mat tmp = image.clone(); cv::cvtColor(tmp, image, CV_GRAY2RGB); } // set image as the current input image, guarded by // a mutex as the worker thread reads this imagr m_ImageMutex.lock(); m_InputImage = image.clone(); m_InputImageId = this->GetCurrentImageId(); m_ImageMutex.unlock(); // wake up the worker thread if there was an image set // and foreground model points are available if ( ! 
m_ForegroundPoints.empty()) { m_WorkerBarrier.notify_all(); } return true; } void mitk::GrabCutOpenCVImageFilter::SetModelPoints(ModelPointsList foregroundPoints) { m_PointSetsMutex.lock(); m_ForegroundPoints = foregroundPoints; m_PointSetsMutex.unlock(); } void mitk::GrabCutOpenCVImageFilter::SetModelPoints(ModelPointsList foregroundPoints, ModelPointsList backgroundPoints) { m_PointSetsMutex.lock(); m_BackgroundPoints = backgroundPoints; m_ForegroundPoints = foregroundPoints; m_PointSetsMutex.unlock(); } void mitk::GrabCutOpenCVImageFilter::SetModelPoints(cv::Mat foregroundMask) { m_PointSetsMutex.lock(); m_ForegroundPoints = this->ConvertMaskToModelPointsList(foregroundMask); m_PointSetsMutex.unlock(); } void mitk::GrabCutOpenCVImageFilter::SetModelPoints(cv::Mat foregroundMask, cv::Mat backgroundMask) { m_PointSetsMutex.lock(); m_ForegroundPoints = this->ConvertMaskToModelPointsList(foregroundMask); m_BackgroundPoints = this->ConvertMaskToModelPointsList(backgroundMask); m_PointSetsMutex.unlock(); } void mitk::GrabCutOpenCVImageFilter::SetModelPointsDilationSize(int modelPointsDilationSize) { if ( modelPointsDilationSize < 0 ) { MITK_ERROR("AbstractOpenCVImageFilter")("GrabCutOpenCVImageFilter") << "Model points dilation size must not be smaller then zero."; mitkThrow() << "Model points dilation size must not be smaller then zero."; } m_ModelPointsDilationSize = modelPointsDilationSize; } void mitk::GrabCutOpenCVImageFilter::SetUseOnlyRegionAroundModelPoints(unsigned int additionalWidth) { m_UseOnlyRegionAroundModelPoints = true; m_AdditionalWidth = additionalWidth; } void mitk::GrabCutOpenCVImageFilter::SetUseFullImage() { m_UseOnlyRegionAroundModelPoints = false; } cv::Rect mitk::GrabCutOpenCVImageFilter::GetRegionAroundModelPoints() { return m_BoundingBox; } int mitk::GrabCutOpenCVImageFilter::GetResultImageId() { return m_ResultImageId; } cv::Mat mitk::GrabCutOpenCVImageFilter::GetResultMask() { cv::Mat result; m_ResultMutex.lock(); result = 
m_ResultMask.clone(); m_ResultMutex.unlock(); return result; } std::vector mitk::GrabCutOpenCVImageFilter::GetResultContours() { std::vector > cvContours; std::vector hierarchy; std::vector contourPoints; cv::Mat resultMask = this->GetResultMask(); if (resultMask.empty()) { return contourPoints; } cv::findContours(resultMask, cvContours, hierarchy, cv::RETR_LIST, cv::CHAIN_APPROX_NONE); // convert cvContours to vector of ModelPointsLists for ( unsigned int i = 0; i < cvContours.size(); ++i ) { mitk::GrabCutOpenCVImageFilter::ModelPointsList curContourPoints; for ( auto it = cvContours[i].begin(); it != cvContours[i].end(); ++it) { itk::Index<2> index; index.SetElement(0, it->x); index.SetElement(1, it->y); curContourPoints.push_back(index); } contourPoints.push_back(curContourPoints); } return contourPoints; } mitk::GrabCutOpenCVImageFilter::ModelPointsList mitk::GrabCutOpenCVImageFilter::GetResultContourWithPixel(itk::Index<2> pixelIndex) { cv::Mat mask = this->GetResultMask(); if (mask.empty()) { return mitk::GrabCutOpenCVImageFilter::ModelPointsList(); } // return empty model point list if given pixel is outside the image borders if (pixelIndex.GetElement(0) < 0 || pixelIndex.GetElement(0) >= mask.size().height || pixelIndex.GetElement(1) < 0 || pixelIndex.GetElement(1) >= mask.size().width) { MITK_WARN("AbstractOpenCVImageFilter")("GrabCutOpenCVImageFilter") << "Given pixel index ("<< pixelIndex.GetElement(0) << ", " << pixelIndex.GetElement(1) << ") is outside the image (" << mask.size().height << ", " << mask.size().width << ")."; return mitk::GrabCutOpenCVImageFilter::ModelPointsList(); } // create a mask where the segmentation around the given pixel index is // set (done by flood filling the result mask using the pixel as seed) cv::floodFill(mask, cv::Point(pixelIndex.GetElement(0), pixelIndex.GetElement(1)), 5); cv::Mat foregroundMask; cv::compare(mask, 5, foregroundMask, cv::CMP_EQ); // find the contour on the flood filled image (there can be only one 
now) std::vector > cvContours; std::vector hierarchy; cv::findContours(foregroundMask, cvContours, hierarchy, cv::RETR_LIST, cv::CHAIN_APPROX_NONE); ModelPointsList contourPoints; // convert cvContours to ModelPointsList for ( auto it = cvContours[0].begin(); it != cvContours[0].end(); ++it) { itk::Index<2> index; index.SetElement(0, it->x); index.SetElement(1, it->y); contourPoints.push_back(index); } return contourPoints; } cv::Mat mitk::GrabCutOpenCVImageFilter::GetMaskFromPointSets() { // initialize mask with values of propably background cv::Mat mask(m_InputImage.size().height, m_InputImage.size().width, CV_8UC1, cv::GC_PR_BGD); // get foreground and background points (guarded by mutex) m_PointSetsMutex.lock(); ModelPointsList pointsLists[2] = {ModelPointsList(m_ForegroundPoints), ModelPointsList(m_BackgroundPoints)}; m_PointSetsMutex.unlock(); // define values for foreground and background pixels unsigned int pixelValues[2] = {cv::GC_FGD, cv::GC_BGD}; for (unsigned int n = 0; n < 2; ++n) { for (auto it = pointsLists[n].begin(); it != pointsLists[n].end(); ++it) { // set pixels around current pixel to the same value (size of this // area is specified by ModelPointsDilationSize) for ( int i = -m_ModelPointsDilationSize; i <= m_ModelPointsDilationSize; ++i ) { for ( int j = -m_ModelPointsDilationSize; j <= m_ModelPointsDilationSize; ++j) { int x = it->GetElement(1) + i; int y = it->GetElement(0) + j; if ( x >= 0 && y >= 0 && x < mask.cols && y < mask.rows) { mask.at(x, y) = pixelValues[n]; } } } } } return mask; } cv::Rect mitk::GrabCutOpenCVImageFilter::GetBoundingRectFromMask(cv::Mat mask) { cv::Mat nonPropablyBackgroundMask, modelPoints; cv::compare(mask, cv::GC_PR_BGD, nonPropablyBackgroundMask, cv::CMP_NE); cv::findNonZero(nonPropablyBackgroundMask, modelPoints); if (modelPoints.empty()) { MITK_WARN("AbstractOpenCVImageFilter")("GrabCutOpenCVImageFilter") << "Cannot find any foreground points. 
Returning full image size as bounding rectangle."; return cv::Rect(0, 0, mask.rows, mask.cols); } // calculate bounding rect around the model points cv::Rect boundingRect = cv::boundingRect(modelPoints); // substract additional width to x and y value (and make sure that they aren't outside the image then) boundingRect.x = static_cast(boundingRect.x) > m_AdditionalWidth ? boundingRect.x - m_AdditionalWidth : 0; boundingRect.y = static_cast(boundingRect.y) > m_AdditionalWidth ? boundingRect.y - m_AdditionalWidth : 0; // add additional width to width of bounding rect (twice as x value was moved before) // and make sure that the bounding rect will stay inside the image borders) if ( static_cast(boundingRect.x + boundingRect.width) + 2 * m_AdditionalWidth < static_cast(mask.size().width) ) { boundingRect.width += 2 * m_AdditionalWidth; } else { boundingRect.width = mask.size().width - boundingRect.x - 1; } // add additional width to height of bounding rect (twice as y value was moved before) // and make sure that the bounding rect will stay inside the image borders) if ( static_cast(boundingRect.y + boundingRect.height) + 2 * m_AdditionalWidth < static_cast(mask.size().height) ) { boundingRect.height += 2 * m_AdditionalWidth; } else { boundingRect.height = mask.size().height - boundingRect.y - 1; } assert(boundingRect.x + boundingRect.width < mask.size().width); assert(boundingRect.y + boundingRect.height < mask.size().height); return boundingRect; } cv::Mat mitk::GrabCutOpenCVImageFilter::RunSegmentation(cv::Mat input, cv::Mat mask) { // test if foreground and background models are large enough for GrabCut cv::Mat compareFgResult, compareBgResult; cv::compare(mask, cv::GC_FGD, compareFgResult, cv::CMP_EQ); cv::compare(mask, cv::GC_PR_BGD, compareBgResult, cv::CMP_EQ); if ( cv::countNonZero(compareFgResult) < GMM_COMPONENTS_COUNT || cv::countNonZero(compareBgResult) < GMM_COMPONENTS_COUNT) { // return result mask with no pixels set to foreground return 
cv::Mat::zeros(mask.size(), mask.type()); } // do the actual grab cut segmentation (initialized with the mask) cv::Mat bgdModel, fgdModel; cv::grabCut(input, mask, cv::Rect(), bgdModel, fgdModel, 1, cv::GC_INIT_WITH_MASK); // set propably foreground pixels to white on result mask cv::Mat result; cv::compare(mask, cv::GC_PR_FGD, result, cv::CMP_EQ); // set foreground pixels to white on result mask cv::Mat foregroundMat; cv::compare(mask, cv::GC_FGD, foregroundMat, cv::CMP_EQ); foregroundMat.copyTo(result, foregroundMat); return result; // now the result mask can be returned } mitk::GrabCutOpenCVImageFilter::ModelPointsList mitk::GrabCutOpenCVImageFilter::ConvertMaskToModelPointsList(cv::Mat mask) { cv::Mat points; cv::findNonZero(mask, points); // push extracted points into a vector of itk indices ModelPointsList pointsVector; for ( size_t n = 0; n < points.total(); ++n) { itk::Index<2> index; index.SetElement(0, points.at(n).x); index.SetElement(1, points.at(n).y); pointsVector.push_back(index); } return pointsVector; } void mitk::GrabCutOpenCVImageFilter::SegmentationWorker() { std::mutex mutex; std::unique_lock lock(mutex); while (true) { m_WorkerBarrier.wait(lock, [this] { return !m_StopThread; }); m_ImageMutex.lock(); cv::Mat image = m_InputImage.clone(); int inputImageId = m_InputImageId; m_ImageMutex.unlock(); cv::Mat mask = this->GetMaskFromPointSets(); cv::Mat result; if (m_UseOnlyRegionAroundModelPoints) { result = cv::Mat(mask.rows, mask.cols, mask.type(), 0.0); m_BoundingBox = this->GetBoundingRectFromMask(mask); RunSegmentation(image(m_BoundingBox), mask(m_BoundingBox)).copyTo(result(m_BoundingBox)); } else { result = this->RunSegmentation(image, mask); } // save result to member attribute m_ResultMutex.lock(); m_ResultMask = result; m_ResultImageId = inputImageId; m_ResultMutex.unlock(); } } diff --git a/Modules/OpenCVVideoSupport/Testing/mitkBasicCombinationOpenCVImageFilterTest.cpp 
b/Modules/OpenCVVideoSupport/Testing/mitkBasicCombinationOpenCVImageFilterTest.cpp index 215ea78ac6..bd2ab53c9f 100644 --- a/Modules/OpenCVVideoSupport/Testing/mitkBasicCombinationOpenCVImageFilterTest.cpp +++ b/Modules/OpenCVVideoSupport/Testing/mitkBasicCombinationOpenCVImageFilterTest.cpp @@ -1,88 +1,89 @@ /*============================================================================ The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center (DKFZ) All rights reserved. Use of this source code is governed by a 3-clause BSD license that can be found in the LICENSE file. ============================================================================*/ #include "mitkBasicCombinationOpenCVImageFilter.h" #include "mitkConvertGrayscaleOpenCVImageFilter.h" #include "mitkCropOpenCVImageFilter.h" #include -#include +#include +#include #include static bool ImagesAreEqualInGray(const cv::Mat& img1, const cv::Mat& img2) { cv::Mat grayImg1; cv::Mat grayImg2; cv::cvtColor(img1, grayImg1, CV_RGB2GRAY, 1); cv::cvtColor(img2, grayImg2, CV_RGB2GRAY, 1); return cv::countNonZero(grayImg1 != grayImg2) == 0; } static void ConvertTestLoadedImage(std::string mitkImagePath) { cv::Mat image = cv::imread(mitkImagePath.c_str()); cv::Mat compareImg = image.clone(); mitk::BasicCombinationOpenCVImageFilter::Pointer combinationFilter = mitk::BasicCombinationOpenCVImageFilter::New(); MITK_TEST_CONDITION(combinationFilter->FilterImage(image), "Filtering with empty filter list is ok."); MITK_TEST_CONDITION(ImagesAreEqualInGray(image, compareImg), "Image must not be changed after filtering with empty filter list."); mitk::ConvertGrayscaleOpenCVImageFilter::Pointer grayscaleFilter = mitk::ConvertGrayscaleOpenCVImageFilter::New(); combinationFilter->PushFilter(grayscaleFilter.GetPointer()); MITK_TEST_CONDITION(combinationFilter->FilterImage(image), "Filtering with grayscale filter should be ok."); MITK_TEST_CONDITION(image.channels() == 1, "Image must not have more than 
one channel after grayscale conversion."); image.release(); image = compareImg.clone(); mitk::CropOpenCVImageFilter::Pointer cropFilter = mitk::CropOpenCVImageFilter::New(); combinationFilter->PushFilter(cropFilter.GetPointer()); MITK_TEST_CONDITION( ! combinationFilter->FilterImage(image), "Filter function must return false if an filter returns false."); MITK_TEST_CONDITION(combinationFilter->PopFilter() == cropFilter, "Last added filter is equal to returned filter."); image.release(); image = compareImg.clone(); MITK_TEST_CONDITION(combinationFilter->FilterImage(image), "Filter function should return true again after removing incorrect filter."); MITK_TEST_CONDITION(combinationFilter->RemoveFilter(grayscaleFilter.GetPointer()), "Filter must be found."); image.release(); image = compareImg.clone(); MITK_TEST_CONDITION(combinationFilter->FilterImage(image), "Filter function should still return true."); MITK_TEST_CONDITION(ImagesAreEqualInGray(image, compareImg), "Image must not be changed after all filters were removed."); } /**Documentation * test for the class "ConvertGrayscaleOpenCVImageFilter". */ int mitkBasicCombinationOpenCVImageFilterTest(int argc, char* argv[]) { MITK_TEST_BEGIN("BasicCombinationOpenCVImageFilter") MITK_TEST_CONDITION_REQUIRED(argc == 2, "Two parameters are needed for this test.") ConvertTestLoadedImage(argv[1]); MITK_TEST_END(); // always end with this! 
} diff --git a/Modules/OpenCVVideoSupport/Testing/mitkConvertGrayscaleOpenCVImageFilterTest.cpp b/Modules/OpenCVVideoSupport/Testing/mitkConvertGrayscaleOpenCVImageFilterTest.cpp index 734589e1d4..cb063aa307 100644 --- a/Modules/OpenCVVideoSupport/Testing/mitkConvertGrayscaleOpenCVImageFilterTest.cpp +++ b/Modules/OpenCVVideoSupport/Testing/mitkConvertGrayscaleOpenCVImageFilterTest.cpp @@ -1,53 +1,54 @@ /*============================================================================ The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center (DKFZ) All rights reserved. Use of this source code is governed by a 3-clause BSD license that can be found in the LICENSE file. ============================================================================*/ #include "mitkConvertGrayscaleOpenCVImageFilter.h" #include #include +#include #include static void ConvertTestLoadedImage(std::string mitkImagePath, std::string mitkGrayscaleImagePath) { cv::Mat image = cv::imread(mitkImagePath.c_str()); cv::Mat compareImg = cv::imread(mitkGrayscaleImagePath.c_str()); // directly convert the image for comparison cv::Mat comparisonImg; cv::cvtColor(compareImg, comparisonImg, CV_RGB2GRAY, 1); mitk::ConvertGrayscaleOpenCVImageFilter::Pointer grayscaleFilter = mitk::ConvertGrayscaleOpenCVImageFilter::New(); MITK_TEST_CONDITION(grayscaleFilter->FilterImage(image), "Filtering should return true for success."); MITK_TEST_CONDITION(image.channels() == 1, "Image must not have more than one channel after grayscale conversion."); MITK_TEST_CONDITION(cv::countNonZero(image != comparisonImg) == 0, "All pixel values must be the same between the two converted images."); MITK_TEST_CONDITION_REQUIRED(grayscaleFilter->FilterImage(image), "Image conversion should be no problem if image is a grayscale image already.") } /**Documentation * test for the class "ConvertGrayscaleOpenCVImageFilter". 
*/ int mitkConvertGrayscaleOpenCVImageFilterTest(int argc, char* argv[]) { MITK_TEST_BEGIN("ConvertGrayscaleOpenCVImageFilter") MITK_TEST_CONDITION_REQUIRED(argc > 2, "At least three parameters needed for this test.") ConvertTestLoadedImage(argv[1], argv[2]); MITK_TEST_END(); // always end with this! } diff --git a/Modules/OpenCVVideoSupport/Testing/mitkCropOpenCVImageFilterTest.cpp b/Modules/OpenCVVideoSupport/Testing/mitkCropOpenCVImageFilterTest.cpp index 954b9c2791..d9f59c6bbd 100644 --- a/Modules/OpenCVVideoSupport/Testing/mitkCropOpenCVImageFilterTest.cpp +++ b/Modules/OpenCVVideoSupport/Testing/mitkCropOpenCVImageFilterTest.cpp @@ -1,97 +1,98 @@ /*============================================================================ The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center (DKFZ) All rights reserved. Use of this source code is governed by a 3-clause BSD license that can be found in the LICENSE file. ============================================================================*/ #include "mitkCropOpenCVImageFilter.h" #include #include +#include #include static bool ImagesAreEqualInGray(const cv::Mat& img1, const cv::Mat& img2) { cv::Mat grayImg1; cv::Mat grayImg2; cv::cvtColor(img1, grayImg1, CV_RGB2GRAY, 1); cv::cvtColor(img2, grayImg2, CV_RGB2GRAY, 1); return cv::countNonZero(grayImg1 != grayImg2) == 0; } static void CropTestLoadedImage(std::string mitkImagePath, std::string mitkCroppedImagePath) { cv::Mat image = cv::imread(mitkImagePath.c_str()); cv::Mat croppedImage = cv::imread(mitkCroppedImagePath.c_str()); MITK_INFO << mitkImagePath.c_str(); MITK_INFO << mitkCroppedImagePath.c_str(); mitk::CropOpenCVImageFilter::Pointer cropFilter = mitk::CropOpenCVImageFilter::New(); // try to crop without setting a region of interest cv::Mat testImage = image.clone(); MITK_TEST_CONDITION( ! 
cropFilter->FilterImage(testImage), "Filter function must return false if no region of interest is set."); MITK_TEST_CONDITION(ImagesAreEqualInGray(testImage, image), "Image should not be changed yet."); // set region of interst now and then try to crop again cv::Rect roi = cv::Rect(0,0, testImage.cols, testImage.rows); cropFilter->SetCropRegion(roi); MITK_TEST_CONDITION(cropFilter->FilterImage(testImage), "Filter function should return successfully."); MITK_TEST_CONDITION(ImagesAreEqualInGray(testImage, image), "Image should not be changed if cropping with a roi of the whole image."); // test if filter corrects negative roi position cv::Rect roiWrong = cv::Rect(-1,-1, 2, 2); roi = cv::Rect(0,0,2,2); cropFilter->SetCropRegion(roiWrong); MITK_TEST_CONDITION(cropFilter->FilterImage(testImage), "Filter function should return successfully."); MITK_TEST_CONDITION(ImagesAreEqualInGray(testImage, image(roi)), "Image should be equal to directly cropped image whith correct roi."); // test whith "normal" roi testImage = image.clone(); roi = cv::Rect( 150,100,100,100 ); cropFilter->SetCropRegion(roi); MITK_TEST_CONDITION(cropFilter->FilterImage(testImage), "Filter function should return successfully."); MITK_TEST_CONDITION(ImagesAreEqualInGray(testImage, croppedImage), "Image should be equal to cropped image (loaded from data directory)."); // test with not correctable roi roiWrong = cv::Rect( 5,5,-1,-1 ); MITK_TEST_FOR_EXCEPTION(mitk::Exception, cropFilter->SetCropRegion(roiWrong)); // test with rois where the top left corner is outside the image boundaries roiWrong = cv::Rect( testImage.cols,0,1,1 ); cropFilter->SetCropRegion(roiWrong); MITK_TEST_CONDITION(!cropFilter->FilterImage(testImage), "Filter function should return unsuccessfully if top left corner is outside image boundary (cols)."); roiWrong = cv::Rect( 0,testImage.rows,1,1 ); cropFilter->SetCropRegion(roiWrong); MITK_TEST_CONDITION(!cropFilter->FilterImage(testImage), "Filter function should return unsuccessfully 
if top left corner is outside image boundary (rows)."); roiWrong = cv::Rect( testImage.cols,testImage.rows,1,1 ); cropFilter->SetCropRegion(roiWrong); MITK_TEST_CONDITION(!cropFilter->FilterImage(testImage), "Filter function should return unsuccessfully if top left corner is outside image boundary (cols+rows)."); } /**Documentation * test for the class "CropOpenCVImageFilter". */ int mitkCropOpenCVImageFilterTest(int argc, char* argv[]) { MITK_TEST_BEGIN("CropOpenCVImageFilter") MITK_TEST_CONDITION_REQUIRED(argc > 2, "At least three parameters needed for this test."); CropTestLoadedImage(argv[1], argv[2]); MITK_TEST_END(); // always end with this! } diff --git a/Modules/OpenCVVideoSupport/Testing/mitkGrabCutOpenCVImageFilterTest.cpp b/Modules/OpenCVVideoSupport/Testing/mitkGrabCutOpenCVImageFilterTest.cpp index be50a4358c..4d9288e879 100644 --- a/Modules/OpenCVVideoSupport/Testing/mitkGrabCutOpenCVImageFilterTest.cpp +++ b/Modules/OpenCVVideoSupport/Testing/mitkGrabCutOpenCVImageFilterTest.cpp @@ -1,177 +1,178 @@ /*============================================================================ The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center (DKFZ) All rights reserved. Use of this source code is governed by a 3-clause BSD license that can be found in the LICENSE file. 
============================================================================*/ #include "mitkGrabCutOpenCVImageFilter.h" #include #include "itkIndex.h" #include #include +#include #include #include "mitkOpenCVToMitkImageFilter.h" static void GrabCutTestLoadedImage(std::string imagePath, std::string maskPath, std::string resultMaskPath) { // load test images cv::Mat image = cv::imread(imagePath.c_str()); cv::Mat maskImage = cv::imread(maskPath.c_str()); cv::Mat resultMaskImage = cv::imread(resultMaskPath.c_str()); // make sure that the loaded mask is a gray scale image cv::Mat maskImageGray; cv::cvtColor(maskImage, maskImageGray, CV_RGB2GRAY, 1); // make sure that the loaded reference image is a gray scale image cv::Mat resultMaskImageGray; cv::cvtColor(resultMaskImage, resultMaskImageGray, CV_RGB2GRAY, 1); // extract foreground points from loaded mask image cv::Mat foregroundMask, foregroundPoints; cv::compare(maskImageGray, 250, foregroundMask, cv::CMP_GE); cv::findNonZero(foregroundMask, foregroundPoints); // push extracted forground points into a vector of itk indices std::vector > foregroundPointsVector; for ( size_t n = 0; n < foregroundPoints.total(); ++n) { itk::Index<2> index; index.SetElement(0, foregroundPoints.at(n).x); index.SetElement(1, foregroundPoints.at(n).y); foregroundPointsVector.push_back(index); } mitk::GrabCutOpenCVImageFilter::Pointer grabCutFilter = mitk::GrabCutOpenCVImageFilter::New(); int currentImageId = 0; // test filtering with image set but no model points set { MITK_TEST_CONDITION(grabCutFilter->FilterImage(image), "Filtering should return true for sucess.") cv::Mat resultMask = grabCutFilter->GetResultMask(); MITK_TEST_CONDITION(resultMask.empty(), "Result mask must be empty when no foreground points are set.") } // test filtering with very little model points set { std::vector > littleForegroundPointsSet(foregroundPointsVector.begin(), foregroundPointsVector.begin()+3); grabCutFilter->SetModelPoints(littleForegroundPointsSet); 
grabCutFilter->FilterImage(image, ++currentImageId); cv::Mat resultMask; // wait up to ten seconds for the segmentation to finish for (unsigned int n = 0; n < 100; ++n) { if ( grabCutFilter->GetResultImageId() == currentImageId ) { resultMask = grabCutFilter->GetResultMask(); break; } itksys::SystemTools::Delay(100); } MITK_TEST_CONDITION(!resultMask.empty(), "Result mask must not be empty when little (" << littleForegroundPointsSet.size() <<") foreground points are set."); } // test filtering with image and model points set { grabCutFilter->SetModelPoints(foregroundPointsVector); MITK_TEST_CONDITION(grabCutFilter->FilterImage(image, ++currentImageId), "Filtering should return true for sucess.") cv::Mat resultMask; // wait up to ten seconds for the segmentation to finish for (unsigned int n = 0; n < 100; ++n) { if ( grabCutFilter->GetResultImageId() == currentImageId ) { resultMask = grabCutFilter->GetResultMask(); break; } itksys::SystemTools::Delay(100); } MITK_TEST_CONDITION( ! resultMask.empty() && cv::countNonZero(resultMask != resultMaskImageGray) == 0, "Filtered image should match reference image.") // adding new image should still work MITK_TEST_CONDITION(grabCutFilter->FilterImage(image), "Adding new image should still work.") } // test filtering with using only region around model points // (but with really big additional width so that whole image should be used again) { grabCutFilter->SetUseOnlyRegionAroundModelPoints(image.cols); grabCutFilter->FilterImage(image, ++currentImageId); cv::Mat resultMask; // wait up to ten seconds for the segmentation to finish for (unsigned int n = 0; n < 100; ++n) { if (grabCutFilter->GetResultImageId() == currentImageId) { resultMask = grabCutFilter->GetResultMask(); break; } itksys::SystemTools::Delay(100); } MITK_TEST_CONDITION( ! 
resultMask.empty() && cv::countNonZero(resultMask != resultMaskImageGray) == 0, "Filtered image with really big region used should match reference image again.") } // test filtering with using only region around model points { grabCutFilter->SetUseOnlyRegionAroundModelPoints(0); grabCutFilter->FilterImage(image, ++currentImageId); cv::Mat resultMask; // wait up to ten seconds for the segmentation to finish for (unsigned int n = 0; n < 100; ++n) { if (grabCutFilter->GetResultImageId() == currentImageId) { resultMask = grabCutFilter->GetResultMask(); break; } itksys::SystemTools::Delay(100); } cv::Mat nonPropablyBackgroundMask, modelPoints; cv::compare(maskImageGray, 250, nonPropablyBackgroundMask, cv::CMP_GE); cv::findNonZero(nonPropablyBackgroundMask, modelPoints); cv::Rect boundingRect = cv::boundingRect(modelPoints); cv::Mat compareMask(resultMask.rows, resultMask.cols, resultMask.type(), 0.0); resultMaskImageGray(boundingRect).copyTo(compareMask(boundingRect)); MITK_TEST_CONDITION( ! resultMask.empty() && cv::countNonZero(resultMask != compareMask) == 0, "Filtered image with region just around the model points used should match reference image again.") } } int mitkGrabCutOpenCVImageFilterTest(int argc, char* argv[]) { MITK_TEST_BEGIN("GrabCutOpenCVImageFilter") MITK_TEST_CONDITION_REQUIRED(argc == 4, "Test needs four command line parameters.") GrabCutTestLoadedImage(argv[1], argv[2], argv[3]); MITK_TEST_END() // always end with this! 
} diff --git a/Modules/OpenCVVideoSupport/Testing/mitkOpenCVMitkConversionTest.cpp b/Modules/OpenCVVideoSupport/Testing/mitkOpenCVMitkConversionTest.cpp index a5c5b0a068..0ed282d3a3 100644 --- a/Modules/OpenCVVideoSupport/Testing/mitkOpenCVMitkConversionTest.cpp +++ b/Modules/OpenCVVideoSupport/Testing/mitkOpenCVMitkConversionTest.cpp @@ -1,247 +1,246 @@ /*============================================================================ The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center (DKFZ) All rights reserved. Use of this source code is governed by a 3-clause BSD license that can be found in the LICENSE file. ============================================================================*/ // mitk includes #include "mitkImageToOpenCVImageFilter.h" #include "mitkOpenCVToMitkImageFilter.h" #include #include #include #include #include "mitkImageReadAccessor.h" #include "mitkImageSliceSelector.h" // itk includes #include #include -#include // define test pixel indexes and intensities and other values typedef itk::RGBPixel< unsigned char > TestUCRGBPixelType; cv::Size testImageSize; cv::Point pos1; cv::Point pos2; cv::Point pos3; cv::Vec3b color1; cv::Vec3b color2; cv::Vec3b color3; uchar greyValue1; uchar greyValue2; uchar greyValue3; /*! Documentation * Test for image conversion of OpenCV images and mitk::Images. It tests the classes * OpenCVToMitkImageFilter and ImageToOpenCVImageFilter */ // Some declarations template void ComparePixels( itk::Image,VImageDimension>* image ); void ReadImageDataAndConvertForthAndBack(std::string imageFileName); void ConvertCVMatForthAndBack(mitk::Image::Pointer inputForCVMat, std::string imageFileName); // Begin the test for mitkImage to OpenCV image conversion and back. int mitkOpenCVMitkConversionTest(int argc, char* argv[]) { MITK_TEST_BEGIN("ImageToOpenCVImageFilter") // the first part of this test checks the conversion of a cv::Mat style OpenCV image. 
// we build an cv::Mat image MITK_INFO << "setting test values"; testImageSize = cv::Size(11,11); pos1 = cv::Point(0,0); pos2 = cv::Point(5,5); pos3 = cv::Point(10,10); color1 = cv::Vec3b(50,0,0); color2 = cv::Vec3b(0,128,0); color3 = cv::Vec3b(0,0,255); greyValue1 = 0; greyValue2 = 128; greyValue3 = 255; MITK_INFO << "generating test OpenCV image (RGB)"; cv::Mat testRGBImage = cv::Mat::zeros( testImageSize, CV_8UC3 ); // generate some test intensity values testRGBImage.at(pos1)= color1; testRGBImage.at(pos2)= color2; testRGBImage.at(pos3)= color3; // convert it to a mitk::Image MITK_INFO << "converting OpenCV test image to mitk image and comparing scalar rgb values"; mitk::OpenCVToMitkImageFilter::Pointer openCvToMitkFilter = mitk::OpenCVToMitkImageFilter::New(); openCvToMitkFilter->SetOpenCVMat( testRGBImage ); openCvToMitkFilter->Update(); mitk::Image::Pointer mitkImage = openCvToMitkFilter->GetOutput(); AccessFixedTypeByItk(mitkImage.GetPointer(), ComparePixels, (itk::RGBPixel), // rgb image (2) ); // convert it back to OpenCV image MITK_INFO << "converting mitk image to OpenCV image and comparing scalar rgb values"; mitk::ImageToOpenCVImageFilter::Pointer mitkToOpenCv = mitk::ImageToOpenCVImageFilter::New(); mitkToOpenCv->SetImage( mitkImage ); cv::Mat openCvImage = mitkToOpenCv->GetOpenCVMat(); // and test equality of the sentinel pixel cv::Vec3b convertedColor1 = openCvImage.at(pos1); cv::Vec3b convertedColor2 = openCvImage.at(pos2); cv::Vec3b convertedColor3 = openCvImage.at(pos3); MITK_TEST_CONDITION( color1 == convertedColor1, "Testing if initially created color values " << static_cast( color1[0] ) << ", " << static_cast( color1[1] ) << ", " << static_cast( color1[2] ) << " matches the color values " << static_cast( convertedColor1[0] ) << ", " << static_cast( convertedColor1[1] ) << ", " << static_cast( convertedColor1[2] ) << " at the same position " << pos1.x << ", " << pos1.y << " in the back converted OpenCV image" ) MITK_TEST_CONDITION( color2 == 
convertedColor2, "Testing if initially created color values " << static_cast( color2[0] ) << ", " << static_cast( color2[1] ) << ", " << static_cast( color2[2] ) << " matches the color values " << static_cast( convertedColor2[0] ) << ", " << static_cast( convertedColor2[1] ) << ", " << static_cast( convertedColor2[2] ) << " at the same position " << pos2.x << ", " << pos2.y << " in the back converted OpenCV image" ) MITK_TEST_CONDITION( color3 == convertedColor3, "Testing if initially created color values " << static_cast( color3[0] ) << ", " << static_cast( color3[1] ) << ", " << static_cast( color3[2] ) << " matches the color values " << static_cast( convertedColor3[0] ) << ", " << static_cast( convertedColor3[1] ) << ", " << static_cast( convertedColor3[2] ) << " at the same position " << pos3.x << ", " << pos3.y << " in the back converted OpenCV image" ) // the second part of this test checks the conversion of mitk::Image to cv::Mat and back. for (int i = 1; i < argc; ++i) { ReadImageDataAndConvertForthAndBack(argv[i]); } MITK_TEST_END(); } template void ComparePixels( itk::Image,VImageDimension>* image ) { typedef itk::RGBPixel PixelType; typedef itk::Image ImageType; typename ImageType::IndexType pixelIndex; pixelIndex[0] = pos1.x; pixelIndex[1] = pos1.y; PixelType onePixel = image->GetPixel( pixelIndex ); MITK_TEST_CONDITION( color1[0] == onePixel.GetBlue(), "Testing if blue value (= " << static_cast(color1[0]) << ") at postion " << pos1.x << ", " << pos1.y << " in OpenCV image is " << "equals the blue value (= " << static_cast(onePixel.GetBlue()) << ")" << " in the generated mitk image"); pixelIndex[0] = pos2.x; pixelIndex[1] = pos2.y; onePixel = image->GetPixel( pixelIndex ); MITK_TEST_CONDITION( color2[1] == onePixel.GetGreen(), "Testing if green value (= " << static_cast(color2[1]) << ") at postion " << pos2.x << ", " << pos2.y << " in OpenCV image is " << "equals the green value (= " << static_cast(onePixel.GetGreen()) << ")" << " in the generated mitk 
image"); pixelIndex[0] = pos3.x; pixelIndex[1] = pos3.y; onePixel = image->GetPixel( pixelIndex ); MITK_TEST_CONDITION( color3[2] == onePixel.GetRed(), "Testing if red value (= " << static_cast(color3[2]) << ") at postion " << pos3.x << ", " << pos3.y << " in OpenCV image is " << "equals the red value (= " << static_cast(onePixel.GetRed()) << ")" << " in the generated mitk image"); } void ReadImageDataAndConvertForthAndBack(std::string imageFileName) { // first we load an mitk::Image from the data repository mitk::Image::Pointer mitkTestImage = mitk::IOUtil::Load(imageFileName); // some format checking mitk::Image::Pointer resultImg = nullptr; if( mitkTestImage->GetDimension() <= 3 ) { if( mitkTestImage->GetDimension() > 2 && mitkTestImage->GetDimension(2) == 1 ) { mitk::ImageSliceSelector::Pointer sliceSelector = mitk::ImageSliceSelector::New(); sliceSelector->SetInput(mitkTestImage); sliceSelector->SetSliceNr(0); sliceSelector->Update(); resultImg = sliceSelector->GetOutput()->Clone(); } else if(mitkTestImage->GetDimension() < 3) { resultImg = mitkTestImage; } else { return; // 3D images are not supported, except with just one slice. } } else { return; // 4D images are not supported! 
} ConvertCVMatForthAndBack(resultImg, imageFileName); } void ConvertCVMatForthAndBack(mitk::Image::Pointer inputForCVMat, std::string) { // now we convert it to OpenCV Mat mitk::ImageToOpenCVImageFilter::Pointer toOCvConverter = mitk::ImageToOpenCVImageFilter::New(); toOCvConverter->SetImage(inputForCVMat); cv::Mat cvmatTestImage = toOCvConverter->GetOpenCVMat(); MITK_TEST_CONDITION_REQUIRED( !cvmatTestImage.empty(), "Conversion to cv::Mat successful!"); mitk::OpenCVToMitkImageFilter::Pointer toMitkConverter = mitk::OpenCVToMitkImageFilter::New(); toMitkConverter->SetOpenCVMat(cvmatTestImage); toMitkConverter->Update(); // initialize the image with the input image, since we want to test equality and OpenCV does not feature geometries and spacing mitk::Image::Pointer result = inputForCVMat->Clone(); mitk::ImageReadAccessor resultAcc(toMitkConverter->GetOutput(), toMitkConverter->GetOutput()->GetSliceData()); result->SetImportSlice(const_cast(resultAcc.GetData())); if( result->GetPixelType().GetNumberOfComponents() == 1 ) { MITK_ASSERT_EQUAL( result, inputForCVMat, "Testing equality of input and output image of cv::Mat conversion" ); } else if( result->GetPixelType().GetNumberOfComponents() == 3 ) { MITK_ASSERT_EQUAL( result, inputForCVMat, "Testing equality of input and output image of cv::Mat conversion" ); } else { MITK_WARN << "Unhandled number of components used to test equality, please enhance test!"; } // change OpenCV image to test if the filter gets updated cv::Mat changedcvmatTestImage = cvmatTestImage.clone(); if (result->GetPixelType().GetBitsPerComponent() == sizeof(char)*8) { changedcvmatTestImage.at(0,0) = cvmatTestImage.at(0,0) != 0 ? 0 : 1; } else if (result->GetPixelType().GetBitsPerComponent() == sizeof(float)*8) { changedcvmatTestImage.at(0,0) = cvmatTestImage.at(0,0) != 0 ? 
0 : 1; } toMitkConverter->SetOpenCVMat(changedcvmatTestImage); toMitkConverter->Update(); MITK_TEST_NOT_EQUAL(toMitkConverter->GetOutput(), inputForCVMat, "Converted image must not be the same as before."); } diff --git a/Modules/OpenCVVideoSupport/UI/QmitkOpenCVVideoControls.cpp b/Modules/OpenCVVideoSupport/UI/QmitkOpenCVVideoControls.cpp index 41dc3fef68..7127bd0068 100644 --- a/Modules/OpenCVVideoSupport/UI/QmitkOpenCVVideoControls.cpp +++ b/Modules/OpenCVVideoSupport/UI/QmitkOpenCVVideoControls.cpp @@ -1,444 +1,446 @@ /*============================================================================ The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center (DKFZ) All rights reserved. Use of this source code is governed by a 3-clause BSD license that can be found in the LICENSE file. ============================================================================*/ #include "QmitkOpenCVVideoControls.h" #include #include #include #include +#include + class QmitkOpenCVVideoControlsPrivate { public: QmitkOpenCVVideoControlsPrivate(QmitkOpenCVVideoControls* q, const std::string& id) : q(q) , m_Id(id) {} /// /// muellerm: persitence service implementation /// PERSISTENCE_GET_SERVICE_METHOD_MACRO QmitkOpenCVVideoControls* q; /// /// muellerm: a unique id for the prop list /// std::string m_Id; void ToPropertyList(); void FromPropertyList(); }; QmitkOpenCVVideoControls::QmitkOpenCVVideoControls(QmitkVideoBackground* _VideoBackground , QmitkRenderWindow* _RenderWindow , QWidget * parent, Qt::WindowFlags f) : QWidget(parent, f) , m_VideoBackground(nullptr) , m_RenderWindow(nullptr) , m_VideoSource(nullptr) , m_Controls(new Ui::QmitkOpenCVVideoControls) , m_SliderCurrentlyMoved(false) , d(new QmitkOpenCVVideoControlsPrivate(this, "QmitkOpenCVVideoControls")) { m_Controls->setupUi(this); m_Controls->FileChooser->SetFileMustExist(true); m_Controls->FileChooser->SetSelectDir(false); this->SetRenderWindow(_RenderWindow); 
this->SetVideoBackground(_VideoBackground); d->FromPropertyList(); mitk::IPersistenceService* persistenceService = d->GetPersistenceService(); if (persistenceService != nullptr) { persistenceService->AddPropertyListReplacedObserver(this); } else { MITK_WARN << "No Persistence Service available in constructor"; } } QmitkOpenCVVideoControls::~QmitkOpenCVVideoControls() { if (m_VideoSource != nullptr && m_VideoSource->IsCapturingEnabled()) { this->Stop(); // emulate stop } mitk::IPersistenceService* persistenceService = d->GetPersistenceService(); if (persistenceService != nullptr) { persistenceService->RemovePropertyListReplacedObserver(this); } else { MITK_WARN << "No Persistence Service available in destructor"; } d->ToPropertyList(); } void QmitkOpenCVVideoControls::on_VideoProgressSlider_valueChanged(int /*value*/) { //Fixes T23169 on_VideoProgressSlider_sliderReleased(); } void QmitkOpenCVVideoControls::on_UseGrabbingDeviceButton_clicked(bool /*checked=false*/) { m_Controls->GrabbingDevicePanel->setEnabled(true); m_Controls->VideoFilePanel->setEnabled(false); } void QmitkOpenCVVideoControls::on_UseVideoFileButton_clicked(bool /*checked=false*/) { m_Controls->GrabbingDevicePanel->setEnabled(false); m_Controls->VideoFilePanel->setEnabled(true); m_Controls->FileChooser->setEnabled(true); } void QmitkOpenCVVideoControls::on_VideoProgressSlider_sliderPressed() { m_SliderCurrentlyMoved = true; // temporary pause the video while sliding if (!m_VideoSource->GetCapturePaused()) m_VideoSource->PauseCapturing(); } void QmitkOpenCVVideoControls::on_VideoProgressSlider_sliderReleased() { double progressRatio = static_cast(m_Controls->VideoProgressSlider->value()) / static_cast(m_Controls->VideoProgressSlider->maximum()); m_VideoSource->SetVideoCaptureProperty(CV_CAP_PROP_POS_FRAMES, progressRatio*m_VideoSource->GetVideoCaptureProperty(CV_CAP_PROP_FRAME_COUNT)); // resume the video ( if it was not paused by the user) if (m_VideoSource->GetCapturePaused() && 
m_Controls->PlayButton->isChecked()) m_VideoSource->PauseCapturing(); m_SliderCurrentlyMoved = false; } void QmitkOpenCVVideoControls::on_RepeatVideoButton_clicked(bool checked) { MITK_INFO << "repeat video clicked"; m_VideoSource->SetRepeatVideo(checked); } void QmitkOpenCVVideoControls::on_PlayButton_clicked(bool checked) { MITK_INFO << "play button clicked"; if (checked) { this->Play(); } else { // show pause button this->IsPlaying(true); m_VideoSource->PauseCapturing(); } } void QmitkOpenCVVideoControls::on_StopButton_clicked(bool /*checked=false*/) { this->Stop(); } void QmitkOpenCVVideoControls::Play() { if (m_VideoSource->GetCapturePaused()) { this->IsPlaying(false); m_VideoSource->PauseCapturing(); } else { if (m_Controls->UseGrabbingDeviceButton->isChecked()) { m_VideoSource->SetVideoCameraInput(m_Controls->GrabbingDeviceNumber->text().toInt(), false); m_Controls->VideoFileControls->setEnabled(false); } else { m_VideoSource->SetVideoFileInput(m_Controls->FileChooser->GetFile().c_str(), m_Controls->RepeatVideoButton->isChecked(), false); m_VideoSource->SetRepeatVideo(m_Controls->RepeatVideoButton->isChecked()); m_Controls->VideoProgressSlider->setValue(0); } m_VideoSource->StartCapturing(); if (!m_VideoSource->IsCapturingEnabled()) { MITK_ERROR << "Video could not be initialized!"; m_Controls->PlayButton->setChecked(false); } else { int hertz = m_Controls->UpdateRate->text().toInt(); int updateTime = itk::Math::Round(1000.0 / hertz); // resets the whole background m_VideoBackground->SetTimerDelay(updateTime); m_VideoBackground->AddRenderWindow(m_RenderWindow->GetVtkRenderWindow()); this->connect(m_VideoBackground, SIGNAL(NewFrameAvailable(mitk::VideoSource*)) , this, SLOT(NewFrameAvailable(mitk::VideoSource*))); this->connect(m_VideoBackground, SIGNAL(EndOfVideoSourceReached(mitk::VideoSource*)) , this, SLOT(EndOfVideoSourceReached(mitk::VideoSource*))); m_VideoBackground->Enable(); this->m_Controls->StopButton->setEnabled(true); // show video file controls 
if (m_Controls->UseVideoFileButton->isChecked()) { m_Controls->VideoFileControls->setEnabled(true); m_Controls->RepeatVideoButton->setEnabled(true); m_Controls->VideoProgressSlider->setEnabled(true); } // show pause button this->IsPlaying(false); // disable other controls m_Controls->GrabbingDevicePanel->setEnabled(false); m_Controls->VideoFilePanel->setEnabled(false); m_Controls->UseGrabbingDeviceButton->setEnabled(false); m_Controls->UseVideoFileButton->setEnabled(false); m_Controls->UpdateRatePanel->setEnabled(false); } } } void QmitkOpenCVVideoControls::Stop() { // disable video file controls, stop button and show play button again m_Controls->UseGrabbingDeviceButton->setEnabled(true); m_Controls->UseVideoFileButton->setEnabled(true); if (m_Controls->UseGrabbingDeviceButton->isChecked()) on_UseGrabbingDeviceButton_clicked(true); else on_UseVideoFileButton_clicked(true); m_Controls->UpdateRatePanel->setEnabled(true); m_Controls->VideoProgressSlider->setValue(0); m_Controls->VideoFileControls->setEnabled(false); this->m_Controls->StopButton->setEnabled(false); this->IsPlaying(true); if (m_VideoBackground) { m_VideoBackground->Disable(); if (m_RenderWindow) m_VideoBackground->RemoveRenderWindow(m_RenderWindow->GetVtkRenderWindow()); this->disconnect(m_VideoBackground, SIGNAL(NewFrameAvailable(mitk::VideoSource*)) , this, SLOT(NewFrameAvailable(mitk::VideoSource*))); } if (m_VideoSource != nullptr) m_VideoSource->StopCapturing(); } void QmitkOpenCVVideoControls::Reset() { this->Stop(); } void QmitkOpenCVVideoControls::IsPlaying(bool paused) { if (paused) { m_Controls->PlayButton->setText("Play"); m_Controls->PlayButton->setIcon(QIcon(":/OpenCVVideoSupportUI/media-playback-start.png")); m_Controls->PlayButton->setChecked(false); } else { m_Controls->PlayButton->setText("Pause"); m_Controls->PlayButton->setIcon(QIcon(":/OpenCVVideoSupportUI/media-playback-pause.png")); m_Controls->PlayButton->setChecked(true); } } void 
QmitkOpenCVVideoControls::NewFrameAvailable(mitk::VideoSource* /*videoSource*/) { emit NewOpenCVFrameAvailable(m_VideoSource->GetCurrentFrame()); if (!m_SliderCurrentlyMoved) { m_Controls->VideoProgressSlider->setValue(itk::Math::Round(m_VideoSource->GetVideoCaptureProperty(CV_CAP_PROP_POS_FRAMES) *(1 / m_VideoSource->GetVideoCaptureProperty(CV_CAP_PROP_FRAME_COUNT) *m_Controls->VideoProgressSlider->maximum()))); } } void QmitkOpenCVVideoControls::EndOfVideoSourceReached(mitk::VideoSource* /*videoSource*/) { if (m_Controls->RepeatVideoButton->isChecked()) { this->Reset(); this->Play(); } else { this->Stop(); } } void QmitkOpenCVVideoControls::SetRenderWindow(QmitkRenderWindow* _RenderWindow) { if (m_RenderWindow == _RenderWindow) return; // In Reset() m_MultiWidget is used, set it to 0 now for avoiding errors if (_RenderWindow == nullptr) m_RenderWindow = nullptr; this->Reset(); m_RenderWindow = _RenderWindow; if (m_RenderWindow == nullptr) { this->setEnabled(false); } else { this->setEnabled(true); } } QmitkRenderWindow* QmitkOpenCVVideoControls::GetRenderWindow() const { return m_RenderWindow; } void QmitkOpenCVVideoControls::SetVideoBackground(QmitkVideoBackground* _VideoBackground) { if (m_VideoBackground == _VideoBackground) return; if (m_VideoBackground != nullptr) this->disconnect(m_VideoBackground, SIGNAL(destroyed(QObject*)) , this, SLOT(QObjectDestroyed(QObject*))); this->Reset(); m_VideoBackground = _VideoBackground; if (m_VideoBackground == nullptr) { m_VideoSource = nullptr; MITK_WARN << "m_MultiWidget is 0"; this->setEnabled(false); } else { this->setEnabled(true); m_VideoSource = dynamic_cast(m_VideoBackground->GetVideoSource()); // preset form entries if (m_VideoSource != nullptr) { if (!m_VideoSource->GetVideoFileName().empty()) { m_Controls->FileChooser->SetFile(m_VideoSource->GetVideoFileName()); on_UseGrabbingDeviceButton_clicked(false); } else if (m_VideoSource->GetGrabbingDeviceNumber() >= 0) 
m_Controls->GrabbingDeviceNumber->setValue(m_VideoSource->GetGrabbingDeviceNumber()); m_Controls->UpdateRate->setValue(m_VideoBackground->GetTimerDelay()); this->connect(m_VideoBackground, SIGNAL(destroyed(QObject*)) , this, SLOT(QObjectDestroyed(QObject*))); } else { MITK_WARN << "m_VideoSource is 0"; this->setEnabled(false); } } } QmitkVideoBackground* QmitkOpenCVVideoControls::GetVideoBackground() const { return m_VideoBackground; } void QmitkOpenCVVideoControls::QObjectDestroyed(QObject * obj /*= 0 */) { if (m_VideoBackground == obj) { m_VideoSource = nullptr; this->SetVideoBackground(nullptr); } } void QmitkOpenCVVideoControlsPrivate::ToPropertyList() { mitk::IPersistenceService* persistenceService = this->GetPersistenceService(); if (persistenceService != nullptr) { mitk::PropertyList::Pointer propList = persistenceService->GetPropertyList(m_Id); propList->Set("deviceType", q->m_Controls->UseGrabbingDeviceButton->isChecked() ? 0 : 1); propList->Set("grabbingDeviceNumber", q->m_Controls->GrabbingDeviceNumber->value()); propList->Set("updateRate", q->m_Controls->UpdateRate->value()); propList->Set("repeatVideo", q->m_Controls->RepeatVideoButton->isChecked()); } else { MITK_WARN << "Persistence Service not available."; } } void QmitkOpenCVVideoControlsPrivate::FromPropertyList() { mitk::IPersistenceService* persistenceService = this->GetPersistenceService(); if (persistenceService != nullptr) { mitk::PropertyList::Pointer propList = persistenceService->GetPropertyList(m_Id); bool repeatVideo = false; propList->Get("repeatVideo", repeatVideo); q->m_Controls->RepeatVideoButton->setChecked(repeatVideo); int updateRate = 25; propList->Get("updateRate", updateRate); q->m_Controls->UpdateRate->setValue(updateRate); int grabbingDeviceNumber = 0; propList->Get("grabbingDeviceNumber", grabbingDeviceNumber); q->m_Controls->GrabbingDeviceNumber->setValue(grabbingDeviceNumber); int deviceType = 0; propList->Get("deviceType", deviceType); if (deviceType == 0) { 
q->m_Controls->UseGrabbingDeviceButton->setChecked(true); } else { q->m_Controls->UseVideoFileButton->setChecked(true); } } else { MITK_WARN << "Persistence Service not available."; } } void QmitkOpenCVVideoControls::AfterPropertyListReplaced(const std::string& id, mitk::PropertyList* /*propertyList*/) { if (id == d->m_Id) d->FromPropertyList(); } diff --git a/Modules/OpenCVVideoSupport/UI/QmitkOpenCVVideoControls.h b/Modules/OpenCVVideoSupport/UI/QmitkOpenCVVideoControls.h index ed5ea696f2..d70660f536 100644 --- a/Modules/OpenCVVideoSupport/UI/QmitkOpenCVVideoControls.h +++ b/Modules/OpenCVVideoSupport/UI/QmitkOpenCVVideoControls.h @@ -1,116 +1,117 @@ /*============================================================================ The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center (DKFZ) All rights reserved. Use of this source code is governed by a 3-clause BSD license that can be found in the LICENSE file. ============================================================================*/ #ifndef QmitkOpenCVVideoControls_h #define QmitkOpenCVVideoControls_h #include #include #include -#include "opencv2/core.hpp" +#include +#include class QmitkRenderWindow; class QmitkVideoBackground; namespace mitk { class VideoSource; class OpenCVVideoSource; } class QmitkOpenCVVideoControlsPrivate; /// /// \brief Offers widgets to play/pause/stop a video on a certain render window with /// the use of an !initialized! QmitkVideoBackground. 
The QmitkVideoBackground should /// contain an OpenCVVideoSource is then owned by this widget (and deleted) /// class MITKOPENCVVIDEOSUPPORTUI_EXPORT QmitkOpenCVVideoControls : public QWidget, public mitk::PropertyListReplacedObserver { Q_OBJECT public: /// /// Construct the widget with the given render window and the given preset values /// QmitkOpenCVVideoControls(QmitkVideoBackground* _VideoBackground, QmitkRenderWindow* _RenderWindow , QWidget* parent = nullptr, Qt::WindowFlags f = nullptr); /// /// call reset if video playback is enabled here /// ~QmitkOpenCVVideoControls() override; /// /// sets the render window for this video player /// void SetRenderWindow(QmitkRenderWindow* _RenderWindow); /// /// returns the current render window /// QmitkRenderWindow* GetRenderWindow() const; /// /// sets the qmitkvideobackground for this /// void SetVideoBackground(QmitkVideoBackground* _VideoBackground); /// /// returns the current QmitkVideoBackground /// QmitkVideoBackground* GetVideoBackground() const; /// /// calls FromPropertyList /// void AfterPropertyListReplaced(const std::string& id, mitk::PropertyList* propertyList) override; signals: /// /// When playback is started this informs when a new frame was grabbed /// void NewOpenCVFrameAvailable(const IplImage*); protected slots: void on_UseGrabbingDeviceButton_clicked(bool checked = false); void on_UseVideoFileButton_clicked(bool checked = false); void on_VideoProgressSlider_sliderPressed(); void on_VideoProgressSlider_sliderReleased(); void on_VideoProgressSlider_valueChanged(int value); void on_RepeatVideoButton_clicked(bool checked = false); void on_PlayButton_clicked(bool checked = false); void on_StopButton_clicked(bool checked = false); void Play(); void Stop(); void Reset(); void IsPlaying(bool paused); void QObjectDestroyed(QObject * obj = nullptr); void NewFrameAvailable(mitk::VideoSource* videoSource); void EndOfVideoSourceReached(mitk::VideoSource* videoSource); protected: QmitkVideoBackground* 
m_VideoBackground; QmitkRenderWindow* m_RenderWindow; mitk::OpenCVVideoSource* m_VideoSource; Ui::QmitkOpenCVVideoControls* m_Controls; bool m_SliderCurrentlyMoved; private: friend class QmitkOpenCVVideoControlsPrivate; QScopedPointer d; }; #endif diff --git a/Modules/OpenCVVideoSupport/mitkMovieGeneratorOpenCV.cpp b/Modules/OpenCVVideoSupport/mitkMovieGeneratorOpenCV.cpp index 44a9a8dfa2..a150980125 100644 --- a/Modules/OpenCVVideoSupport/mitkMovieGeneratorOpenCV.cpp +++ b/Modules/OpenCVVideoSupport/mitkMovieGeneratorOpenCV.cpp @@ -1,129 +1,130 @@ /*============================================================================ The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center (DKFZ) All rights reserved. Use of this source code is governed by a 3-clause BSD license that can be found in the LICENSE file. ============================================================================*/ #include "mitkMovieGeneratorOpenCV.h" //#include #include "mitkGL.h" #include +#include mitk::MovieGeneratorOpenCV::MovieGeneratorOpenCV() { m_initialized = false; m_aviWriter = nullptr; m_dwRate = 20; m_FourCCCodec = nullptr; m_RemoveColouredFrame = true; } void mitk::MovieGeneratorOpenCV::SetFileName( const char *fileName ) { m_sFile = fileName; } void mitk::MovieGeneratorOpenCV::SetFrameRate(unsigned int rate) { m_dwRate = static_cast(rate); } void mitk::MovieGeneratorOpenCV::SetRemoveColouredFrame(bool RemoveColouredFrame) { m_RemoveColouredFrame = RemoveColouredFrame; } bool mitk::MovieGeneratorOpenCV::InitGenerator() { m_width = m_renderer->GetRenderWindow()->GetSize()[0]; // changed from glGetIntegerv( GL_VIEWPORT, viewport ); m_height = m_renderer->GetRenderWindow()->GetSize()[1]; // due to sometimes strange dimensions if(m_RemoveColouredFrame) { m_width -= 10; //remove colored boarders around renderwindows m_height -= 10; } m_width -= m_width % 4; // some video codecs have prerequisites to the image dimensions m_height -= m_height % 4; 
m_currentFrame = cvCreateImage(cvSize(m_width,m_height),8,3); // creating image with widget size, 8 bit per pixel and 3 channel r,g,b m_currentFrame->origin = 1; // avoid building a video with bottom up /* m_sFile = Name of the output video file. CV_FOURCC = 4-character code of codec used to compress the frames. For example, CV_FOURCC('P','I','M','1') is MPEG-1 codec, CV_FOURCC('M','J','P','G') is motion-jpeg codec etc. CV_FOURCC('P','I','M','1') = MPEG-1 codec CV_FOURCC('M','J','P','G') = motion-jpeg codec (does not work well) CV_FOURCC('M', 'P', '4', '2') = MPEG-4.2 codec CV_FOURCC('D', 'I', 'V', '3') = MPEG-4.3 codec CV_FOURCC('D', 'I', 'V', 'X') = MPEG-4 codec CV_FOURCC('U', '2', '6', '3') = H263 codec CV_FOURCC('I', '2', '6', '3') = H263I codec CV_FOURCC('F', 'L', 'V', '1') = FLV1 codec Under Win32 it is possible to pass -1 in order to choose compression method and additional compression parameters from dialog. m_dwRate = Framerate of the created video stream. frame_size Size of video frames. 
InitGenerator 1 = If it is not zero, the encoder will expect and encode color frames, otherwise it will work with grayscale frames (the flag is currently supported on Windows only).*/ if(m_FourCCCodec != nullptr) { #ifdef WIN32 m_aviWriter = cvCreateVideoWriter(m_sFile.c_str(),CV_FOURCC(m_FourCCCodec[0],m_FourCCCodec[1],m_FourCCCodec[2], m_FourCCCodec[3]),m_dwRate,cvSize(m_width,m_height),1); //initializing video writer #else m_aviWriter = cvCreateVideoWriter(m_sFile.c_str(),CV_FOURCC(m_FourCCCodec[0],m_FourCCCodec[1],m_FourCCCodec[2], m_FourCCCodec[3]),m_dwRate,cvSize(m_width,m_height)); //initializing video writer #endif } else { #ifdef WIN32 m_aviWriter = cvCreateVideoWriter(m_sFile.c_str(),-1,m_dwRate,cvSize(m_width,m_height),1); //initializing video writer #else m_aviWriter = cvCreateVideoWriter(m_sFile.c_str(),CV_FOURCC('X','V','I','D'),m_dwRate,cvSize(m_width,m_height)); //initializing video writer #endif } if(!m_aviWriter) { std::cout << "errors initializing video writer...correct video file path? on linux: ffmpeg must be included in OpenCV."; return false; } return true; } bool mitk::MovieGeneratorOpenCV::AddFrame( void *data ) { //cvSetImageData(m_currentFrame,data,m_width*3); memcpy(m_currentFrame->imageData,data,m_width*m_height*3); cvWriteFrame(m_aviWriter,m_currentFrame); return true; } bool mitk::MovieGeneratorOpenCV::TerminateGenerator() { if (m_aviWriter) { cvReleaseVideoWriter(&m_aviWriter); } return true; } diff --git a/Modules/OpenCVVideoSupport/mitkMovieGeneratorOpenCV.h b/Modules/OpenCVVideoSupport/mitkMovieGeneratorOpenCV.h index f677abe127..9ff24df620 100755 --- a/Modules/OpenCVVideoSupport/mitkMovieGeneratorOpenCV.h +++ b/Modules/OpenCVVideoSupport/mitkMovieGeneratorOpenCV.h @@ -1,85 +1,85 @@ /*============================================================================ The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center (DKFZ) All rights reserved. 
Use of this source code is governed by a 3-clause BSD license that can be found in the LICENSE file. ============================================================================*/ #ifndef MovieGeneratorOpenCV_H_HEADER_INCLUDED #define MovieGeneratorOpenCV_H_HEADER_INCLUDED #include "mitkMovieGenerator.h" #include #include #include // OpenCV includes -#include "cv.h" -#include "highgui.h" +#include +#include namespace mitk { class MITKOPENCVVIDEOSUPPORT_EXPORT MovieGeneratorOpenCV : public MovieGenerator { public: mitkClassMacro(MovieGeneratorOpenCV, MovieGenerator); itkFactorylessNewMacro(Self); itkCloneMacro(Self); void SetFileName( const char *fileName ) override; void SetFourCCCodec(char* codec) { m_FourCCCodec = codec; } void SetFrameRate(unsigned int rate) override; /// /// if true the movie will be cutted by a 10 pixel margin /// in order to remove the standard mitk coloured borders /// default: true /// void SetRemoveColouredFrame(bool); protected: MovieGeneratorOpenCV(); //! called directly before the first frame is added bool InitGenerator() override; //! used to add a frame bool AddFrame( void *data ) override; //! called after the last frame is added bool TerminateGenerator() override; //! name of output file std::string m_sFile; //! frame rate int m_dwRate; private: CvVideoWriter* m_aviWriter; IplImage * m_currentFrame; char * m_FourCCCodec; bool m_RemoveColouredFrame; }; } // namespace mitk #endif /* MovieGeneratorOpenCV_H_HEADER_INCLUDED */ diff --git a/Modules/OpenCVVideoSupport/mitkOpenCVToMitkImageFilter.h b/Modules/OpenCVVideoSupport/mitkOpenCVToMitkImageFilter.h index c92612cadd..e0ecd28678 100644 --- a/Modules/OpenCVVideoSupport/mitkOpenCVToMitkImageFilter.h +++ b/Modules/OpenCVVideoSupport/mitkOpenCVToMitkImageFilter.h @@ -1,96 +1,97 @@ /*============================================================================ The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center (DKFZ) All rights reserved. 
Use of this source code is governed by a 3-clause BSD license that can be found in the LICENSE file. ============================================================================*/ #ifndef mitkOpenCVToMitkImageFilter_h #define mitkOpenCVToMitkImageFilter_h // mitk includes #include #include #include // itk includes #include #include // OpenCV includes #include +#include #include namespace mitk { /// /// \brief Filter for creating MITK RGB Images from an OpenCV image /// class MITKOPENCVVIDEOSUPPORT_EXPORT OpenCVToMitkImageFilter : public ImageSource { public: typedef itk::RGBPixel< unsigned char > UCRGBPixelType; typedef itk::RGBPixel< unsigned short > USRGBPixelType; typedef itk::RGBPixel< float > FloatRGBPixelType; typedef itk::RGBPixel< double > DoubleRGBPixelType; /// /// the static function for the conversion /// template static Image::Pointer ConvertCVMatToMitkImage(const cv::Mat input); mitkClassMacro(OpenCVToMitkImageFilter, ImageSource); itkFactorylessNewMacro(Self); itkCloneMacro(Self); /// /// sets an iplimage as input /// void SetOpenCVImage(const IplImage* image); //itkGetMacro(OpenCVImage, const IplImage*); /// /// sets an opencv mat as input (will be used if OpenCVImage Ipl image is 0) /// void SetOpenCVMat(const cv::Mat& image); itkGetMacro(OpenCVMat, cv::Mat); OutputImageType* GetOutput(void); //##Documentation //## @brief Convenient method to insert an openCV image as a slice at a //## certain time step into a 3D or 4D mitk::Image. //## //## @param openCVImage - the image that is inserted into the mitk Image //## @param mitkImage - pointer to the mitkImage, which is changed by this method! //## @param timeStep - the time step, at which the openCVImage is inserted //## //## @attention The parameter mitkImage will be changed! 
void InsertOpenCVImageAsMitkTimeSlice(const cv::Mat openCVImage, Image::Pointer mitkImage, int timeStep); protected: OpenCVToMitkImageFilter(); // purposely hidden ~OpenCVToMitkImageFilter() override; void GenerateData() override; protected: Image::Pointer m_Image; cv::Mat m_OpenCVMat; std::mutex m_ImageMutex; std::mutex m_OpenCVMatMutex; }; } // namespace mitk #endif // mitkOpenCVToMitkImageFilter_h diff --git a/Modules/OpenCVVideoSupport/mitkOpenCVVideoSource.cpp b/Modules/OpenCVVideoSupport/mitkOpenCVVideoSource.cpp index 1fb0d2a342..b63d9da6cf 100644 --- a/Modules/OpenCVVideoSupport/mitkOpenCVVideoSource.cpp +++ b/Modules/OpenCVVideoSupport/mitkOpenCVVideoSource.cpp @@ -1,424 +1,426 @@ /*============================================================================ The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center (DKFZ) All rights reserved. Use of this source code is governed by a 3-clause BSD license that can be found in the LICENSE file. ============================================================================*/ #include "mitkOpenCVVideoSource.h" #include #include +#include + mitk::OpenCVVideoSource::OpenCVVideoSource() : m_VideoCapture(nullptr), m_CurrentImage(nullptr), m_CurrentVideoTexture(nullptr), m_PauseImage(nullptr), m_GrabbingDeviceNumber(-1), m_RepeatVideo(false), m_UseCVCAMLib(false), m_UndistortImage(false), m_FlipXAxisEnabled(false), m_FlipYAxisEnabled(false) { } mitk::OpenCVVideoSource::~OpenCVVideoSource() { this->Reset(); } void mitk::OpenCVVideoSource::SetVideoFileInput(const char * filename, bool repeatVideo, bool /*useCVCAMLib*/) { this->Reset(); m_VideoFileName = filename; m_VideoCapture = cvCaptureFromFile(filename); if(!m_VideoCapture) MITK_WARN << "Error in initializing video file input!"; m_RepeatVideo = repeatVideo; //m_CurrentImage = cvCreateImage(cvSize(m_CaptureWidth,m_CaptureHeight),8,3); this->Modified(); } void mitk::OpenCVVideoSource::SetVideoCameraInput(int cameraindex, bool 
/*useCVCAMLib*/) { this->Reset(); m_GrabbingDeviceNumber = cameraindex; m_VideoCapture = cvCaptureFromCAM(m_GrabbingDeviceNumber); if(!m_VideoCapture) MITK_ERROR << "Error in initializing CVHighGUI video camera!"<< std::endl; this->Modified(); } double mitk::OpenCVVideoSource::GetVideoCaptureProperty(int property_id) { return cvGetCaptureProperty(m_VideoCapture, property_id); } int mitk::OpenCVVideoSource::SetVideoCaptureProperty(int property_id, double value) { return cvSetCaptureProperty(m_VideoCapture, property_id, value); } //method extended for "static video feature" if enabled unsigned char* mitk::OpenCVVideoSource::GetVideoTexture() { // Fetch Frame and return pointer to opengl texture FetchFrame(); if (m_FlipXAxisEnabled || m_FlipYAxisEnabled) { //rotate the image to get a static video m_CurrentImage = this->FlipImage(m_CurrentImage); } //transfer the image to a texture this->UpdateVideoTexture(); return this->m_CurrentVideoTexture; this->Modified(); } cv::Mat mitk::OpenCVVideoSource::GetImage() { if(m_CurrentImage) { cv::Mat copy = cv::cvarrToMat( m_CurrentImage, false ); return copy.clone(); } return cv::Mat(); } const IplImage * mitk::OpenCVVideoSource::GetCurrentFrame() { return m_CurrentImage; } void mitk::OpenCVVideoSource::GetCurrentFrameAsOpenCVImage(IplImage * image) { // get last captured frame for processing the image data if(m_CurrentImage) { if(image) { image->origin = m_CurrentImage->origin; memcpy(image->imageData,m_CurrentImage->imageData,m_CurrentImage->width*m_CurrentImage->height*m_CurrentImage->nChannels); } } } void mitk::OpenCVVideoSource::FetchFrame() { // main procedure for updating video data if(m_CapturingInProcess) { if(m_VideoCapture) // we use highgui { if(!m_CapturePaused) { // release old image here m_CurrentImage = cvQueryFrame(m_VideoCapture); ++m_FrameCount; } if(m_CurrentImage == nullptr) // do we need to repeat the video if it is from video file? 
{ double framePos = this->GetVideoCaptureProperty(CV_CAP_PROP_POS_AVI_RATIO); MITK_DEBUG << "End of video file found. framePos: " << framePos; if(m_RepeatVideo && framePos >= 0.99) { MITK_DEBUG << "Restarting video file playback."; this->SetVideoCaptureProperty(CV_CAP_PROP_POS_AVI_RATIO, 0); m_FrameCount = 0; m_CurrentImage = cvQueryFrame(m_VideoCapture); } else { std::ostringstream s; s << "End of video file " << m_VideoFileName; std::logic_error err( s.str() ); throw err; } } else { // only undistort if not paused if(m_UndistortImage && m_UndistortCameraImage.IsNotNull()) m_UndistortCameraImage->UndistortImageFast(m_CurrentImage, nullptr); } if(m_CaptureWidth == 0 || m_CaptureHeight == 0) { MITK_DEBUG << "Trying to set m_CaptureWidth & m_CaptureHeight."; m_CaptureWidth = m_CurrentImage->width; m_CaptureHeight = m_CurrentImage->height; MITK_INFO << "frame width: " << m_CaptureWidth << ", height: " << m_CaptureHeight; m_CurrentImage->origin = 0; } } } } void mitk::OpenCVVideoSource::UpdateVideoTexture() { //write the grabbed frame into an opengl compatible array, that means flip it and swap channel order if(!m_CurrentImage) return; if(m_CurrentVideoTexture == nullptr) m_CurrentVideoTexture = new unsigned char[m_CaptureWidth*m_CaptureHeight*3]; int width = m_CurrentImage->width; int height = m_CurrentImage->height; int widthStep = m_CurrentImage->widthStep; int nChannels = m_CurrentImage->nChannels; unsigned char* tex = m_CurrentVideoTexture; char* data = m_CurrentImage->imageData; char* currentData = m_CurrentImage->imageData; int hIndex=0; int wIndex=0; int iout,jout; for(int i=0;i= width) { wIndex=0; hIndex++; } // vertically flip the image iout = -hIndex+height-1; jout = wIndex; currentData = data + iout*widthStep; tex[i+2] = currentData[jout*nChannels + 0]; // B tex[i+1] = currentData[jout*nChannels + 1]; // G tex[i] = currentData[jout*nChannels + 2]; // R } } void mitk::OpenCVVideoSource::StartCapturing() { if(m_VideoCapture != nullptr) m_CapturingInProcess = 
true; else m_CapturingInProcess = false; } void mitk::OpenCVVideoSource::StopCapturing() { m_CapturePaused = false; m_CapturingInProcess = false; } bool mitk::OpenCVVideoSource::OnlineImageUndistortionEnabled() const { return m_UndistortCameraImage; } void mitk::OpenCVVideoSource::PauseCapturing() { m_CapturePaused = !m_CapturePaused; if(m_CapturePaused) { m_PauseImage = cvCloneImage(m_CurrentImage); // undistort this pause image if necessary if(m_UndistortImage) m_UndistortCameraImage->UndistortImageFast(m_PauseImage, nullptr); m_CurrentImage = m_PauseImage; } else { cvReleaseImage( &m_PauseImage ); // release old pause image if necessary m_CurrentImage = nullptr; m_PauseImage = nullptr; } } void mitk::OpenCVVideoSource::EnableOnlineImageUndistortion(mitk::Point3D focal, mitk::Point3D principal, mitk::Point4D distortion) { // Initialize Undistortion m_UndistortImage = true; float kc[4]; kc[0] = distortion[0]; kc[1] = distortion[1]; kc[2] = distortion[2]; kc[3] = distortion[3]; if(m_CaptureWidth == 0 || m_CaptureHeight == 0) FetchFrame(); m_UndistortCameraImage = mitk::UndistortCameraImage::New(); m_UndistortCameraImage->SetUndistortImageFastInfo(focal[0], focal[1], principal[0], principal[1], kc, (float)m_CaptureWidth, (float)m_CaptureHeight); } void mitk::OpenCVVideoSource::DisableOnlineImageUndistortion() { m_UndistortImage = false; } // functions for compatibility with ITK segmentation only void mitk::OpenCVVideoSource::GetCurrentFrameAsItkHSVPixelImage(HSVPixelImageType::Pointer &Image) { FetchFrame(); // Prepare iteration HSVConstIteratorType itImage( Image, Image->GetLargestPossibleRegion()); itImage.GoToBegin(); HSVPixelType pixel; int rowsize = 3 * m_CaptureWidth; char* bufferend; char* picture; picture = this->m_CurrentImage->imageData; bufferend = this->m_CurrentImage->imageData + 3*(m_CaptureHeight*m_CaptureWidth); float r,g,b,h,s,v; try { // we have to flip the image for(char* datapointer = bufferend - rowsize;datapointer >= picture; datapointer -= 
rowsize) { for(char* current = datapointer; current < datapointer + rowsize; current++) { b = *current; current++; g = *current; current++; r = *current; RGBtoHSV(r,g,b,h,s,v); pixel[0] = h; pixel[1] = s; pixel[2] = v; itImage.Set(pixel); ++itImage; } } } catch( ... ) { std::cout << "Exception raised mitkOpenCVVideoSource: get hsv itk image conversion error." << std::endl; } } void mitk::OpenCVVideoSource::RGBtoHSV(float r, float g, float b, float &h, float &s, float &v) { if(r > 1.0) r = r/255; if(b > 1.0) b = b/255; if(g > 1.0) g = g/255; float mn=r,mx=r; int maxVal=0; if (g > mx){ mx=g;maxVal=1;} if (b > mx){ mx=b;maxVal=2;} if (g < mn) mn=g; if (b < mn) mn=b; float delta = mx - mn; v = mx; if( mx != 0 ) s = delta / mx; else { s = 0; h = 0; return; } if (s==0.0f) { h=-1; return; } else { switch (maxVal) { case 0:{h = ( g - b ) / delta;break;} // yel < h < mag case 1:{h = 2 + ( b - r ) / delta;break;} // cyan < h < yel case 2:{h = 4 + ( r - g ) / delta;break;} // mag < h < cyan } } h *= 60; if( h < 0 ) h += 360; } /* * Rotate input image according to rotation angle around the viewing direction. * Angle is supposed to be calculated in QmitkARRotationComponet in the update() method. */ IplImage* mitk::OpenCVVideoSource::FlipImage(IplImage* input) { if(input == nullptr) { //warn the user and quit std::cout<<"openCVVideoSource: Current video image is null! 
"<< std::endl; return input; } if(m_FlipXAxisEnabled && !m_FlipYAxisEnabled) { cvFlip(input,nullptr,0); } if(!m_FlipXAxisEnabled && m_FlipYAxisEnabled) { cvFlip(input,nullptr,1); } if(m_FlipXAxisEnabled && m_FlipYAxisEnabled) { cvFlip(input,nullptr,-1); } return input; } void mitk::OpenCVVideoSource::Reset() { // set capturing to false this->StopCapturing(); this->m_FrameCount = 0; if(m_VideoCapture) cvReleaseCapture(&m_VideoCapture); m_VideoCapture = nullptr; m_CurrentImage = nullptr; m_CaptureWidth = 0; m_CaptureHeight = 0; delete m_CurrentVideoTexture; m_CurrentVideoTexture = nullptr; if(m_PauseImage) cvReleaseImage(&m_PauseImage); m_PauseImage = nullptr; m_CapturePaused = false; m_VideoFileName.clear(); m_GrabbingDeviceNumber = -1; // do not touch repeat video //m_RepeatVideo = false; m_UseCVCAMLib = false; // do not touch undistort settings // bool m_UndistortImage; } void mitk::OpenCVVideoSource::SetEnableXAxisFlip(bool enable) { this->m_FlipXAxisEnabled = enable; this->Modified(); } /* FIX(review): copy-paste bug — the Y-axis setter previously assigned m_FlipXAxisEnabled, so Y-axis flipping could never be enabled independently of X */ void mitk::OpenCVVideoSource::SetEnableYAxisFlip(bool enable) { this->m_FlipYAxisEnabled = enable; this->Modified(); } diff --git a/Modules/OpenCVVideoSupport/mitkUndistortCameraImage.cpp b/Modules/OpenCVVideoSupport/mitkUndistortCameraImage.cpp index 1d78e43af2..40844de3fc 100644 --- a/Modules/OpenCVVideoSupport/mitkUndistortCameraImage.cpp +++ b/Modules/OpenCVVideoSupport/mitkUndistortCameraImage.cpp @@ -1,223 +1,231 @@ /*============================================================================ The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center (DKFZ) All rights reserved. Use of this source code is governed by a 3-clause BSD license that can be found in the LICENSE file. ============================================================================*/ /// The list of internal camera parameters: /// * Focal length: The focal length in pixels is stored in m_fcX and m_fcY.
/// * Principal point: The principal point coordinates are stored in the m_ccX and m_ccY. /// * Skew coefficient: The skew coefficient defining the angle between the x and y pixel axes is stored in the scalar alpha_c = 0. /// * Distortions: The image distortion coefficients (radial and tangential distortions) are stored in the 4x1 vector. #include "mitkUndistortCameraImage.h" - +#include +#include mitk::UndistortCameraImage::UndistortCameraImage() { m_tempImage = nullptr; } mitk::UndistortCameraImage::~UndistortCameraImage() { if(m_tempImage != nullptr) cvReleaseImage(&m_tempImage); } /// undistort one pixel coordinate using floating point accuracy... mitk::Point2D mitk::UndistortCameraImage::UndistortPixel(const mitk::Point2D& src) { float r_2 = 0; // radial distance squared const mitk::Point2D old_src = src; // copy of the original distorted point // distortion coefficients float k1 = m_distortionMatrixData[0]; float k2 = m_distortionMatrixData[1]; float p1 = m_distortionMatrixData[2]; float p2 = m_distortionMatrixData[3]; // Shift points to principal point and use focal length mitk::Point2D dstd; dstd[0] = (src[0] - m_ccX) / m_fcX; dstd[1] = (src[1] - m_ccY) / m_fcY; mitk::Point2D desPnt = dstd; // Compensate lens distortion float x = dstd[0]; float y = dstd[1]; for (int iter = 0; iter < 5; iter++) { r_2 = x*x + y*y; const float k_radial = 1 + k1 * r_2 + k2 * r_2 * r_2; const float delta_x = 2 * p1*x*y + p2 * (r_2 + 2*x*x); const float delta_y = 2 * p2*x*y + p1 * (r_2 + 2*y*y); x = (desPnt[0] - delta_x) / k_radial; y = (desPnt[1] - delta_y) / k_radial; } dstd[0] = x; dstd[1] = y; dstd[0] *= m_fcX; dstd[1] *= m_fcY; dstd[0] += m_ccX; dstd[1] += m_ccY; // ready // const mitk::Point2D dst = dstd; // do a sanity check to make sure this ideal point translates properly to the distorted point // this does the reverse of the above. 
It maps ideal undistorted to distorted image coordinates x = dstd[0] - m_ccX; y = dstd[1] - m_ccY; x /= m_fcX; y /= m_fcY; r_2 = x*x + y*y; float distx = x + x*(k1*r_2 + k2*r_2*r_2) + (2*p1*x*y + p2*(r_2 + 2*x*x)); float disty = y + y*(k1*r_2 + k2*r_2*r_2) + (2*p2*x*y + p1*(r_2 + 2*y*y)); distx *= m_fcX; disty *= m_fcY; distx += m_ccX; disty += m_ccY; // this should never be more than .2 pixels... const float diffx = old_src[0] - distx; const float diffy = old_src[1] - disty; if (fabs(diffx) > .1 || fabs(diffy) > .1) { std::cout << "undistort sanity check error: diffx =" << diffx << " , diffy = " << diffy; } return dstd; } void mitk::UndistortCameraImage::UndistortImage(IplImage *src, IplImage *dst) { // init intrinsic camera matrix [fx 0 cx; 0 fy cy; 0 0 1]. m_intrinsicMatrixData[0] = (double)m_fcX; m_intrinsicMatrixData[1] = 0.0; m_intrinsicMatrixData[2] = (double)m_ccX; m_intrinsicMatrixData[3] = 0.0; m_intrinsicMatrixData[4] = (double)m_fcY; m_intrinsicMatrixData[5] = (double)m_ccY; m_intrinsicMatrixData[6] = 0.0; m_intrinsicMatrixData[7] = 0.0; m_intrinsicMatrixData[8] = 1.0; m_intrinsicMatrix = cvMat(3, 3, CV_32FC1, m_intrinsicMatrixData); // init distortion matrix m_distortionMatrix = cvMat(1, 4, CV_32F, m_distortionMatrixData); // undistort - cvUndistort2(src,dst, &m_intrinsicMatrix, &m_distortionMatrix); + auto srcMat = cv::cvarrToMat(src); + auto dstMat = cv::cvarrToMat(dst); + auto intrinsicMat = cv::cvarrToMat(&m_intrinsicMatrix); + auto distortionMat = cv::cvarrToMat(&m_distortionMatrix); + cv::undistort(srcMat, dstMat, intrinsicMat, distortionMat); } // FAST METHODS FOR UNDISTORTION IN REALTIME // void mitk::UndistortCameraImage::UndistortImageFast(IplImage * src, IplImage* dst) { if(!src) return; /*if(dst == nullptr) dst = src; if(src->nChannels == 3) { IplImage *r = cvCreateImage(cvGetSize(src),src->depth,1);//subpixel IplImage *g = cvCreateImage(cvGetSize(src),src->depth,1);//subpixel IplImage *b = 
cvCreateImage(cvGetSize(src),src->depth,1);//subpixel cvSplit(src, r,g,b, nullptr); cvRemap( r, r, m_mapX, m_mapY ); // Undistort image cvRemap( g, g, m_mapX, m_mapY ); // Undistort image cvRemap( b, b, m_mapX, m_mapY ); // Undistort image cvMerge(r,g,b, nullptr, dst); } else { cvRemap(src, dst, m_mapX, m_mapY); }*/ /*if(m_tempImage == nullptr) m_tempImage = cvCreateImage(cvSize(src->width,src->height),src->depth,src->nChannels);*/ /*if(dst == nullptr) dst = cvCreateImage(cvSize(src->width,src->height),src->depth,src->nChannels);*/ if(!dst) { m_tempImage = cvCloneImage( src ); cvRemap(m_tempImage, src, m_mapX, m_mapY, CV_INTER_CUBIC); cvReleaseImage( &m_tempImage ); m_tempImage = nullptr; /*memcpy( src->imageData, m_tempImage->imageData, m_tempImage->imageSize ); cvReleaseImage( &m_tempImage );*/ } else { cvRemap(src, dst, m_mapX, m_mapY, CV_INTER_CUBIC); } /*m_tempImage->origin = src->origin; if(dst == nullptr) memcpy( src->imageData, m_tempImage->imageData, m_tempImage->imageSize ); else memcpy( dst->imageData, m_tempImage->imageData, m_tempImage->imageSize ); //cvUnDistort(m_srcImg, m_dstImg, m_undistMap,m_interpolationMode); //cvUndistort2(m_srcImg, m_dstImg, &m_intrinsicMatrix,&m_distortionMatrixDataCoefficients);*/ } void mitk::UndistortCameraImage::SetUndistortImageFastInfo(float in_dF1, float in_dF2, float in_dPrincipalX, float in_dPrincipalY, float in_Dist[4], float ImageSizeX, float ImageSizeY) { //create new matrix m_DistortionCoeffs = cvCreateMat(4, 1, CV_64FC1); m_CameraMatrix = cvCreateMat(3, 3, CV_64FC1); //set the camera matrix [fx 0 cx; 0 fy cy; 0 0 1]. 
cvSetReal2D(m_CameraMatrix, 0, 0, in_dF1); cvSetReal2D(m_CameraMatrix, 0, 1, 0.0); cvSetReal2D(m_CameraMatrix, 0, 2, in_dPrincipalX); cvSetReal2D(m_CameraMatrix, 1, 0, 0.0); cvSetReal2D(m_CameraMatrix, 1, 1, in_dF2); cvSetReal2D(m_CameraMatrix, 1, 2, in_dPrincipalY); cvSetReal2D(m_CameraMatrix, 2, 0, 0.0); cvSetReal2D(m_CameraMatrix, 2, 1, 0.0); cvSetReal2D(m_CameraMatrix, 2, 2, 1.0); //set distortions coefficients cvSetReal1D(m_DistortionCoeffs, 0, in_Dist[0]); cvSetReal1D(m_DistortionCoeffs, 1, in_Dist[1]); cvSetReal1D(m_DistortionCoeffs, 2, in_Dist[2]); cvSetReal1D(m_DistortionCoeffs, 3, in_Dist[3]); m_mapX = cvCreateMat(ImageSizeY, ImageSizeX, CV_32FC1); m_mapY = cvCreateMat(ImageSizeY, ImageSizeX, CV_32FC1); - //cv::initUndistortRectifyMap(m_CameraMatrix, m_DistortionCoeffs, m_mapX, m_mapY); - cvInitUndistortMap(m_CameraMatrix, m_DistortionCoeffs, m_mapX, m_mapY); + auto cameraMat = cv::cvarrToMat(m_CameraMatrix); + auto distortionCoeffs = cv::cvarrToMat(m_DistortionCoeffs); + auto mapX = cv::cvarrToMat(m_mapX); + auto mapY = cv::cvarrToMat(m_mapY); + cv::initUndistortRectifyMap(cameraMat, distortionCoeffs, cv::Mat(), cameraMat, mapX.size(), mapX.type(), mapX, mapY); } diff --git a/Modules/OpenCVVideoSupport/mitkUndistortCameraImage.h b/Modules/OpenCVVideoSupport/mitkUndistortCameraImage.h index 1f9509b827..cd678889e3 100644 --- a/Modules/OpenCVVideoSupport/mitkUndistortCameraImage.h +++ b/Modules/OpenCVVideoSupport/mitkUndistortCameraImage.h @@ -1,125 +1,125 @@ /*============================================================================ The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center (DKFZ) All rights reserved. Use of this source code is governed by a 3-clause BSD license that can be found in the LICENSE file. 
============================================================================*/ #ifndef __mitkUndistortCameraImage_h #define __mitkUndistortCameraImage_h #include "mitkCommon.h" #include #include "itkObject.h" #include "mitkPoint.h" -#include "opencv2/core.hpp" -#include "opencv2/imgproc.hpp" +#include +#include /*! \brief UndistortCameraImage This class is used to undistort camera images. Before any undistortion the class has to be initialized using the functions: SetFocalLength(),SetPrinzipalPoint() and SetCameraDistortion(). After this you can either use UndistortPixel() to undistort a single pixel's coordinates or UndistortImage() to undistort an OpenCV image. A faster version of UndistortImage() is UndistortImageFast(), however, it has to be initialized once with SetUndistortImageFastInfo() instead of the Set... methods before use. \ingroup Functionalities */ namespace mitk { class MITKOPENCVVIDEOSUPPORT_EXPORT UndistortCameraImage : public itk::Object { public: mitkClassMacroItkParent(UndistortCameraImage,itk::Object); itkFactorylessNewMacro(Self); itkCloneMacro(Self); /// Initialization /// /* * Set the camera's intrinsic focal length */ void SetFocalLength(float fc_x, float fc_y) { m_fcX = fc_x; m_fcY = fc_y; } /* * Set the camera's intrinsic principal point */ void SetPrincipalPoint(float cc_x, float cc_y) { m_ccX = cc_x; m_ccY = cc_y; } /* * Set the camera's intrinsic distortion parameters */ void SetCameraDistortion(float kc1, float kc2, float kc3, float kc4) { m_distortionMatrixData[0] = kc1; m_distortionMatrixData[1] = kc2; m_distortionMatrixData[2] = kc3; m_distortionMatrixData[3] = kc4; } /* * Pre-Calculates matrices for the later use of UndistortImageFast() */ void InitRemapUndistortion(int sizeX, int sizeY); /// USAGE /// /* * Undistort a single pixel, returns undistorted pixel */ mitk::Point2D UndistortPixel(const mitk::Point2D& src); /* * Complete undistortion of an OpenCV image, including all calculations */ void UndistortImage(IplImage* src, 
IplImage* dst); /* * Complete undistortion of an OpenCV image, using pre-calculated matrices from SetUndistortImageFastInfo() * The use of only a source parameter will cause the source to be overwritten. * NOTE: Using the Fast undistortion methods does not require a initialization via the Set... methods. */ void UndistortImageFast( IplImage * src, IplImage* dst = nullptr ); void SetUndistortImageFastInfo(float in_dF1, float in_dF2, float in_dPrincipalX, float in_dPrincipalY, float in_Dist[4], float ImageSizeX, float ImageSizeY); UndistortCameraImage(); ~UndistortCameraImage() override; protected: // principal point and focal length parameters float m_ccX, m_ccY, m_fcX, m_fcY; // undistortion parameters float m_distortionMatrixData[4]; // intrinsic camera parameters float m_intrinsicMatrixData[9]; // precalculated matrices for fast image undistortion with UndistortImageFast() CvMat * m_mapX, * m_mapY; // intrinsic and undistortion camera matrices CvMat m_intrinsicMatrix, m_distortionMatrix; // temp image IplImage * m_tempImage; CvMat *m_DistortionCoeffs; CvMat *m_CameraMatrix; }; } #endif diff --git a/Modules/Python/CMakeLists.txt b/Modules/Python/CMakeLists.txt index 14a48964fa..161bddb4c7 100644 --- a/Modules/Python/CMakeLists.txt +++ b/Modules/Python/CMakeLists.txt @@ -1,13 +1,8 @@ if(MITK_USE_Python3) - set(OpenCV_DEP ) - if(MITK_USE_OpenCV) - set(OpenCV_DEP OpenCV) - endif() - mitk_create_module( DEPENDS MitkCore PACKAGE_DEPENDS PUBLIC Python3|Python ) add_subdirectory(autoload/PythonService) endif() diff --git a/Modules/QtPython/CMakeLists.txt b/Modules/QtPython/CMakeLists.txt index d497d472ef..433b7ae4f4 100644 --- a/Modules/QtPython/CMakeLists.txt +++ b/Modules/QtPython/CMakeLists.txt @@ -1,18 +1,13 @@ if(MITK_USE_Python3) mitkFunctionCheckCompilerFlags("/wd4273" CMAKE_CXX_FLAGS) - set(OpenCV_DEP ) - if(MITK_USE_OpenCV) - set(OpenCV_DEP OpenCV) - endif() - mitk_create_module( DEPENDS MitkCore MitkQtWidgets MitkPython PACKAGE_DEPENDS PUBLIC Qt5|Widgets 
CTK|CTKScriptingPythonCore+CTKScriptingPythonWidgets ) if(BUILD_TESTING) add_subdirectory(Testing) endif() endif() diff --git a/Modules/ToFHardware/mitkToFOpenCVImageGrabber.cpp b/Modules/ToFHardware/mitkToFOpenCVImageGrabber.cpp index fd8b4e0cb7..561a6386ff 100644 --- a/Modules/ToFHardware/mitkToFOpenCVImageGrabber.cpp +++ b/Modules/ToFHardware/mitkToFOpenCVImageGrabber.cpp @@ -1,176 +1,178 @@ /*============================================================================ The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center (DKFZ) All rights reserved. Use of this source code is governed by a 3-clause BSD license that can be found in the LICENSE file. ============================================================================*/ #include "mitkToFOpenCVImageGrabber.h" // mitk includes #include "mitkImageDataItem.h" #include #include "mitkImageReadAccessor.h" #include "vtkSmartPointer.h" #include "vtkColorTransferFunction.h" #include "vtkFloatArray.h" +#include + namespace mitk { ToFOpenCVImageGrabber::ToFOpenCVImageGrabber() { m_CurrentOpenCVIntensityImage = nullptr; m_CurrentOpenCVAmplitudeImage = nullptr; m_CurrentOpenCVDistanceImage = nullptr; m_ImageType = 0; m_ImageDepth = IPL_DEPTH_32F; m_ImageGrabber = nullptr; } ToFOpenCVImageGrabber::~ToFOpenCVImageGrabber() { } cv::Mat ToFOpenCVImageGrabber::GetImage() { m_ImageGrabber->Update(); unsigned int numOfPixel = m_ImageGrabber->GetCaptureWidth()*m_ImageGrabber->GetCaptureHeight(); // copy current mitk images unsigned int dimensions[4]; dimensions[0] = this->m_ImageGrabber->GetCaptureWidth(); dimensions[1] = this->m_ImageGrabber->GetCaptureHeight(); dimensions[2] = 1; dimensions[3] = 1; // create single component float pixel type mitk::PixelType FloatType = MakeScalarPixelType(); ImageReadAccessor imgGrabAcc0(m_ImageGrabber->GetOutput(0), m_ImageGrabber->GetOutput(0)->GetSliceData()); ImageReadAccessor imgGrabAcc1(m_ImageGrabber->GetOutput(1), 
m_ImageGrabber->GetOutput(1)->GetSliceData()); ImageReadAccessor imgGrabAcc2(m_ImageGrabber->GetOutput(2), m_ImageGrabber->GetOutput(2)->GetSliceData()); mitk::Image::Pointer currentMITKIntensityImage = mitk::Image::New(); currentMITKIntensityImage->Initialize(FloatType, 2, dimensions); currentMITKIntensityImage->SetSlice((float*) imgGrabAcc2.GetData(),0,0,0); mitk::Image::Pointer currentMITKAmplitudeImage = mitk::Image::New(); currentMITKAmplitudeImage->Initialize(FloatType, 2, dimensions); currentMITKAmplitudeImage->SetSlice((float*)imgGrabAcc1.GetData(),0,0,0); mitk::Image::Pointer currentMITKDistanceImage = mitk::Image::New(); currentMITKDistanceImage->Initialize(FloatType, 2, dimensions); currentMITKDistanceImage->SetSlice((float*)imgGrabAcc0.GetData(),0,0,0); // copy mitk images to OpenCV images if (m_ImageDepth==IPL_DEPTH_32F) { if (m_ImageType==1) { ImageReadAccessor currentAmplAcc(currentMITKAmplitudeImage, currentMITKAmplitudeImage->GetSliceData(0, 0, 0)); float* amplitudeFloatData = (float*) currentAmplAcc.GetData(); memcpy(m_CurrentOpenCVAmplitudeImage->imageData,(unsigned char*)amplitudeFloatData,numOfPixel*sizeof(float)); return cv::cvarrToMat(m_CurrentOpenCVAmplitudeImage, false); } else if (m_ImageType==2) { ImageReadAccessor currentIntenAcc(currentMITKIntensityImage, currentMITKIntensityImage->GetSliceData(0, 0, 0)); float* intensityFloatData = (float*) currentIntenAcc.GetData(); memcpy(m_CurrentOpenCVIntensityImage->imageData,(unsigned char*)intensityFloatData,numOfPixel*sizeof(float)); return cv::cvarrToMat(m_CurrentOpenCVIntensityImage, false); } else { ImageReadAccessor currentDistAcc(currentMITKDistanceImage, currentMITKDistanceImage->GetSliceData(0, 0, 0)); float* distanceFloatData = (float*) currentDistAcc.GetData(); memcpy(m_CurrentOpenCVDistanceImage->imageData,(unsigned char*)distanceFloatData,numOfPixel*sizeof(float)); return cv::cvarrToMat(m_CurrentOpenCVDistanceImage, false); } } else { if (m_ImageType==1) { 
this->MapScalars(currentMITKAmplitudeImage, m_CurrentOpenCVAmplitudeImage); return cv::cvarrToMat(m_CurrentOpenCVAmplitudeImage, false); } else if (m_ImageType==2) { this->MapScalars(currentMITKIntensityImage, m_CurrentOpenCVIntensityImage); return cv::cvarrToMat(m_CurrentOpenCVIntensityImage, false); } else { this->MapScalars(currentMITKDistanceImage, m_CurrentOpenCVDistanceImage); return cv::cvarrToMat(m_CurrentOpenCVDistanceImage, false); } } } void ToFOpenCVImageGrabber::SetImageType(unsigned int imageType) { m_ImageType = imageType; } void ToFOpenCVImageGrabber::SetImageDepth(unsigned int imageDepth) { m_ImageDepth = imageDepth; } void ToFOpenCVImageGrabber::SetToFImageGrabber(ToFImageGrabber::Pointer imageGrabber) { m_ImageGrabber = imageGrabber; } ToFImageGrabber::Pointer ToFOpenCVImageGrabber::GetToFImageGrabber() { return m_ImageGrabber; } void ToFOpenCVImageGrabber::StartCapturing() { if (m_ImageGrabber.IsNotNull()) { m_ImageGrabber->ConnectCamera(); //Initialize cv Images after the camera is conneceted and we know the resolution m_CurrentOpenCVIntensityImage = cvCreateImage(cvSize(m_ImageGrabber->GetCaptureWidth(), m_ImageGrabber->GetCaptureHeight()), m_ImageDepth, 1); m_CurrentOpenCVAmplitudeImage = cvCreateImage(cvSize(m_ImageGrabber->GetCaptureWidth(), m_ImageGrabber->GetCaptureHeight()), m_ImageDepth, 1); m_CurrentOpenCVDistanceImage = cvCreateImage(cvSize(m_ImageGrabber->GetCaptureWidth(), m_ImageGrabber->GetCaptureHeight()), m_ImageDepth, 1); m_ImageGrabber->StartCamera(); } } void ToFOpenCVImageGrabber::StopCapturing() { if (m_ImageGrabber.IsNotNull()) { m_ImageGrabber->StopCamera(); m_ImageGrabber->DisconnectCamera(); } } void ToFOpenCVImageGrabber::MapScalars( mitk::Image::Pointer mitkImage, IplImage* openCVImage) { unsigned int numOfPixel = m_ImageGrabber->GetCaptureWidth()*m_ImageGrabber->GetCaptureHeight(); ImageReadAccessor imgAcc(mitkImage, mitkImage->GetSliceData(0, 0, 0)); float* floatData = (float*)imgAcc.GetData(); vtkSmartPointer 
colorTransferFunction = vtkSmartPointer::New(); vtkSmartPointer floatArrayInt = vtkSmartPointer::New(); floatArrayInt->Initialize(); floatArrayInt->SetArray(floatData, numOfPixel, 0); mitk::ScalarType min = mitkImage->GetStatistics()->GetScalarValueMin(); mitk::ScalarType max = mitkImage->GetStatistics()->GetScalarValueMaxNoRecompute(); MITK_INFO<<"Minimum: "<RemoveAllPoints(); colorTransferFunction->AddRGBPoint(min, 0, 0, 0); colorTransferFunction->AddRGBPoint(max, 1, 1, 1); colorTransferFunction->SetColorSpaceToHSV(); colorTransferFunction->MapScalarsThroughTable(floatArrayInt, (unsigned char*)openCVImage->imageData, VTK_LUMINANCE); } } // end namespace mitk diff --git a/Modules/ToFHardware/mitkToFOpenCVImageGrabber.h b/Modules/ToFHardware/mitkToFOpenCVImageGrabber.h index 03a64cadd7..292ba8176b 100644 --- a/Modules/ToFHardware/mitkToFOpenCVImageGrabber.h +++ b/Modules/ToFHardware/mitkToFOpenCVImageGrabber.h @@ -1,102 +1,104 @@ /*============================================================================ The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center (DKFZ) All rights reserved. Use of this source code is governed by a 3-clause BSD license that can be found in the LICENSE file. ============================================================================*/ #ifndef __mitkToFOpenCVImageGrabber_h #define __mitkToFOpenCVImageGrabber_h #include #include "mitkCommon.h" #include "mitkOpenCVImageSource.h" #include "mitkToFImageGrabber.h" #include "itkObject.h" #include "itkObjectFactory.h" +#include + namespace mitk { /** * @brief TofImageGrabber class providing OpenCV images * * * @ingroup ToFHardware */ class MITKTOFHARDWARE_EXPORT ToFOpenCVImageGrabber : public mitk::OpenCVImageSource { public: ToFOpenCVImageGrabber(); ~ToFOpenCVImageGrabber() override; mitkClassMacro( ToFOpenCVImageGrabber , OpenCVImageSource ); itkFactorylessNewMacro(Self); itkCloneMacro(Self); /*! \brief Get current ToF image. 
Specify image you want to grab with SetImageType() */ cv::Mat GetImage() override; /*! \brief set type of image you want to grab. 0: Distance image (Default) 1: Amplitude image 2: Intensity image */ void SetImageType(unsigned int imageType); /*! \brief set the depth of the image. Some functions of OpenCV do not support IPL_DEPTH_32F. Warning: changing from default results in a mapping of the pixel value through a lookup table IPL_DEPTH_1U 1 IPL_DEPTH_8U 8 IPL_DEPTH_16U 16 IPL_DEPTH_32F 32 (Default) */ void SetImageDepth(unsigned int imageDepth); /*! \brief set the ImageGrabber used for fetching image data from the camera */ void SetToFImageGrabber(mitk::ToFImageGrabber::Pointer imageGrabber); /*! \brief get the ImageGrabber used for fetching image data from the camera */ mitk::ToFImageGrabber::Pointer GetToFImageGrabber(); void StartCapturing(); void StopCapturing(); protected: /*! \brief map scalars through lookup table \param mitkImage current MITK image \param openCVImage */ void MapScalars(mitk::Image::Pointer mitkImage, IplImage* openCVImage); mitk::ToFImageGrabber::Pointer m_ImageGrabber; ///< ImageGrabber used for fetching ToF image data from the camera unsigned int m_ImageType; ///< type of image currently supplied by this image source /*! \brief image depth currently used by this image source. 
Warning: Changing from default (IPL_DEPTH_32F) results in a mapping of the pixel value through a lookup table */ unsigned int m_ImageDepth; IplImage* m_CurrentOpenCVIntensityImage; ///< OpenCV image holding the current intensity data IplImage* m_CurrentOpenCVAmplitudeImage; ///< OpenCV image holding the current amplitude data IplImage* m_CurrentOpenCVDistanceImage; ///< OpenCV image holding the current distance data private: }; } //END mitk namespace #endif diff --git a/Modules/ToFProcessing/CMakeLists.txt b/Modules/ToFProcessing/CMakeLists.txt index 5e2e9cc973..47c6001b6a 100644 --- a/Modules/ToFProcessing/CMakeLists.txt +++ b/Modules/ToFProcessing/CMakeLists.txt @@ -1,12 +1,11 @@ MITK_CREATE_MODULE( DEPENDS MitkCameraCalibration - PACKAGE_DEPENDS OpenCV WARNINGS_NO_ERRORS ) if(BUILD_TESTING) add_subdirectory(Testing) endif(BUILD_TESTING) diff --git a/Modules/ToFProcessing/mitkToFCompositeFilter.cpp b/Modules/ToFProcessing/mitkToFCompositeFilter.cpp index e7306d8edc..f86fc47521 100644 --- a/Modules/ToFProcessing/mitkToFCompositeFilter.cpp +++ b/Modules/ToFProcessing/mitkToFCompositeFilter.cpp @@ -1,398 +1,398 @@ /*============================================================================ The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center (DKFZ) All rights reserved. Use of this source code is governed by a 3-clause BSD license that can be found in the LICENSE file. 
============================================================================*/ #include #include #include "mitkImageReadAccessor.h" #include -#include "opencv2/imgproc.hpp" +#include mitk::ToFCompositeFilter::ToFCompositeFilter() : m_SegmentationMask(nullptr), m_ImageWidth(0), m_ImageHeight(0), m_ImageSize(0), m_IplDistanceImage(nullptr), m_IplOutputImage(nullptr), m_ItkInputImage(nullptr), m_ApplyTemporalMedianFilter(false), m_ApplyAverageFilter(false), m_ApplyMedianFilter(false), m_ApplyThresholdFilter(false), m_ApplyMaskSegmentation(false), m_ApplyBilateralFilter(false), m_DataBuffer(nullptr), m_DataBufferCurrentIndex(0), m_DataBufferMaxSize(0), m_TemporalMedianFilterNumOfFrames(10), m_ThresholdFilterMin(1), m_ThresholdFilterMax(7000), m_BilateralFilterDomainSigma(2), m_BilateralFilterRangeSigma(60), m_BilateralFilterKernelRadius(0) { } mitk::ToFCompositeFilter::~ToFCompositeFilter() { cvReleaseImage(&(this->m_IplDistanceImage)); cvReleaseImage(&(this->m_IplOutputImage)); if (m_DataBuffer!=nullptr) { delete [] m_DataBuffer; } } void mitk::ToFCompositeFilter::SetInput( const InputImageType* distanceImage ) { this->SetInput(0, distanceImage); } void mitk::ToFCompositeFilter::SetInput( unsigned int idx, const InputImageType* distanceImage ) { if ((distanceImage == nullptr) && (idx == this->GetNumberOfInputs() - 1)) // if the last input is set to nullptr, reduce the number of inputs by one { this->SetNumberOfIndexedInputs(this->GetNumberOfInputs() - 1); } else { if (idx==0) //create IPL image holding distance data { if (!distanceImage->IsEmpty()) { this->m_ImageWidth = distanceImage->GetDimension(0); this->m_ImageHeight = distanceImage->GetDimension(1); this->m_ImageSize = this->m_ImageWidth * this->m_ImageHeight * sizeof(float); if (this->m_IplDistanceImage != nullptr) { cvReleaseImage(&(this->m_IplDistanceImage)); } ImageReadAccessor distImgAcc(distanceImage, distanceImage->GetSliceData(0,0,0)); float* distanceFloatData = (float*) distImgAcc.GetData(); 
this->m_IplDistanceImage = cvCreateImage(cvSize(this->m_ImageWidth, this->m_ImageHeight), IPL_DEPTH_32F, 1); memcpy(this->m_IplDistanceImage->imageData, (void*)distanceFloatData, this->m_ImageSize); if (this->m_IplOutputImage != nullptr) { cvReleaseImage(&(this->m_IplOutputImage)); } this->m_IplOutputImage = cvCreateImage(cvSize(this->m_ImageWidth, this->m_ImageHeight), IPL_DEPTH_32F, 1); CreateItkImage(this->m_ItkInputImage); } } this->ProcessObject::SetNthInput(idx, const_cast(distanceImage)); // Process object is not const-correct so the const_cast is required here } this->CreateOutputsForAllInputs(); } mitk::Image* mitk::ToFCompositeFilter::GetInput() { return this->GetInput(0); } mitk::Image* mitk::ToFCompositeFilter::GetInput( unsigned int idx ) { if (this->GetNumberOfInputs() < 1) return nullptr; //TODO: geeignete exception werfen return static_cast< mitk::Image*>(this->ProcessObject::GetInput(idx)); } void mitk::ToFCompositeFilter::GenerateData() { // copy input 1...n to output 1...n for (unsigned int idx=0; idxGetNumberOfOutputs(); idx++) { mitk::Image::Pointer outputImage = this->GetOutput(idx); mitk::Image::Pointer inputImage = this->GetInput(idx); if (outputImage.IsNotNull()&&inputImage.IsNotNull()) { ImageReadAccessor inputAcc(inputImage, inputImage->GetSliceData()); outputImage->CopyInformation(inputImage); outputImage->Initialize(inputImage->GetPixelType(),inputImage->GetDimension(),inputImage->GetDimensions()); outputImage->SetSlice(inputAcc.GetData()); } } //mitk::Image::Pointer outputDistanceImage = this->GetOutput(); ImageReadAccessor outputAcc(this->GetOutput(), this->GetOutput()->GetSliceData(0, 0, 0) ); float* outputDistanceFloatData = (float*) outputAcc.GetData(); //mitk::Image::Pointer inputDistanceImage = this->GetInput(); ImageReadAccessor inputAcc(this->GetInput(), this->GetInput()->GetSliceData(0, 0, 0) ); // copy initial distance image to ipl image float* distanceFloatData = (float*)inputAcc.GetData(); 
memcpy(this->m_IplDistanceImage->imageData, (void*)distanceFloatData, this->m_ImageSize); if (m_ApplyThresholdFilter||m_ApplyMaskSegmentation) { ProcessSegmentation(this->m_IplDistanceImage); } if (this->m_ApplyTemporalMedianFilter||this->m_ApplyAverageFilter) { ProcessStreamedQuickSelectMedianImageFilter(this->m_IplDistanceImage); } if (this->m_ApplyMedianFilter) { ProcessCVMedianFilter(this->m_IplDistanceImage, this->m_IplOutputImage); memcpy( this->m_IplDistanceImage->imageData, this->m_IplOutputImage->imageData, this->m_ImageSize ); } if (this->m_ApplyBilateralFilter) { float* itkFloatData = this->m_ItkInputImage->GetBufferPointer(); memcpy(itkFloatData, this->m_IplDistanceImage->imageData, this->m_ImageSize ); ItkImageType2D::Pointer itkOutputImage = ProcessItkBilateralFilter(this->m_ItkInputImage); memcpy( this->m_IplDistanceImage->imageData, itkOutputImage->GetBufferPointer(), this->m_ImageSize ); //ProcessCVBilateralFilter(this->m_IplDistanceImage, this->m_OutputIplImage, domainSigma, rangeSigma, kernelRadius); //memcpy( distanceFloatData, this->m_OutputIplImage->imageData, distanceImageSize ); } memcpy( outputDistanceFloatData, this->m_IplDistanceImage->imageData, this->m_ImageSize ); } void mitk::ToFCompositeFilter::CreateOutputsForAllInputs() { this->SetNumberOfIndexedOutputs(this->GetNumberOfInputs()); // create outputs for all inputs for (unsigned int idx = 0; idx < this->GetNumberOfIndexedInputs(); ++idx) { if (this->GetOutput(idx) == nullptr) { DataObjectPointer newOutput = this->MakeOutput(idx); this->SetNthOutput(idx, newOutput); } } this->Modified(); } void mitk::ToFCompositeFilter::GenerateOutputInformation() { mitk::Image::ConstPointer input = this->GetInput(); mitk::Image::Pointer output = this->GetOutput(); if (output->IsInitialized()) return; itkDebugMacro(<<"GenerateOutputInformation()"); output->Initialize(input->GetPixelType(), *input->GetTimeGeometry()); output->SetPropertyList(input->GetPropertyList()->Clone()); } void 
mitk::ToFCompositeFilter::ProcessSegmentation(IplImage* inputIplImage) { char* segmentationMask; if (m_SegmentationMask.IsNotNull()) { ImageReadAccessor segMaskAcc(m_SegmentationMask, m_SegmentationMask->GetSliceData(0,0,0)); segmentationMask = (char*)segMaskAcc.GetData(); } else { segmentationMask = nullptr; } float *f = (float*)inputIplImage->imageData; for(int i=0; im_ImageWidth*this->m_ImageHeight; i++) { if (this->m_ApplyThresholdFilter) { if (f[i]<=m_ThresholdFilterMin) { f[i] = 0.0; } else if (f[i]>=m_ThresholdFilterMax) { f[i] = 0.0; } } if (this->m_ApplyMaskSegmentation) { if (segmentationMask) { if (segmentationMask[i]==0) { f[i] = 0.0; } } } } } ItkImageType2D::Pointer mitk::ToFCompositeFilter::ProcessItkBilateralFilter(ItkImageType2D::Pointer inputItkImage) { ItkImageType2D::Pointer outputItkImage; BilateralFilterType::Pointer bilateralFilter = BilateralFilterType::New(); bilateralFilter->SetInput(inputItkImage); bilateralFilter->SetDomainSigma(m_BilateralFilterDomainSigma); bilateralFilter->SetRangeSigma(m_BilateralFilterRangeSigma); //bilateralFilter->SetRadius(m_BilateralFilterKernelRadius); outputItkImage = bilateralFilter->GetOutput(); outputItkImage->Update(); return outputItkImage; } void mitk::ToFCompositeFilter::ProcessCVBilateralFilter(IplImage* inputIplImage, IplImage* outputIplImage) { int diameter = m_BilateralFilterKernelRadius; double sigmaColor = m_BilateralFilterRangeSigma; double sigmaSpace = m_BilateralFilterDomainSigma; cvSmooth(inputIplImage, outputIplImage, CV_BILATERAL, diameter, 0, sigmaColor, sigmaSpace); } void mitk::ToFCompositeFilter::ProcessCVMedianFilter(IplImage* inputIplImage, IplImage* outputIplImage, int radius) { cvSmooth(inputIplImage, outputIplImage, CV_MEDIAN, radius, 0, 0, 0); } void mitk::ToFCompositeFilter::ProcessStreamedQuickSelectMedianImageFilter(IplImage* inputIplImage) { float* data = (float*)inputIplImage->imageData; int imageSize = inputIplImage->width * inputIplImage->height; float* tmpArray; if 
(this->m_TemporalMedianFilterNumOfFrames == 0) { return; } if (m_TemporalMedianFilterNumOfFrames != this->m_DataBufferMaxSize) // reset { //delete current buffer for( int i=0; im_DataBufferMaxSize; i++ ) { delete[] this->m_DataBuffer[i]; } if (this->m_DataBuffer != nullptr) { delete[] this->m_DataBuffer; } this->m_DataBufferMaxSize = m_TemporalMedianFilterNumOfFrames; // create new buffer with current size this->m_DataBuffer = new float*[this->m_DataBufferMaxSize]; for(int i=0; im_DataBufferMaxSize; i++) { this->m_DataBuffer[i] = nullptr; } this->m_DataBufferCurrentIndex = 0; } int currentBufferSize = this->m_DataBufferMaxSize; tmpArray = new float[this->m_DataBufferMaxSize]; // copy data to buffer if (this->m_DataBuffer[this->m_DataBufferCurrentIndex] == nullptr) { this->m_DataBuffer[this->m_DataBufferCurrentIndex] = new float[imageSize]; currentBufferSize = this->m_DataBufferCurrentIndex + 1; } for(int j=0; jm_DataBuffer[this->m_DataBufferCurrentIndex][j] = data[j]; } float tmpValue = 0.0f; for(int i=0; im_DataBuffer[j][i]; } data[i] = tmpValue/currentBufferSize; } else if (m_ApplyTemporalMedianFilter) { for(int j=0; jm_DataBuffer[j][i]; } data[i] = quick_select(tmpArray, currentBufferSize); } } this->m_DataBufferCurrentIndex = (this->m_DataBufferCurrentIndex + 1) % this->m_DataBufferMaxSize; delete[] tmpArray; } #define ELEM_SWAP(a,b) { float t=(a);(a)=(b);(b)=t; } float mitk::ToFCompositeFilter::quick_select(float arr[], int n) { int low = 0; int high = n-1; int median = (low + high)/2; int middle = 0; int ll = 0; int hh = 0; for (;;) { if (high <= low) /* One element only */ return arr[median] ; if (high == low + 1) { /* Two elements only */ if (arr[low] > arr[high]) ELEM_SWAP(arr[low], arr[high]) ; return arr[median] ; } /* Find median of low, middle and high items; swap into position low */ middle = (low + high) / 2; if (arr[middle] > arr[high]) ELEM_SWAP(arr[middle], arr[high]) ; if (arr[low] > arr[high]) ELEM_SWAP(arr[low], arr[high]) ; if (arr[middle] > 
arr[low]) ELEM_SWAP(arr[middle], arr[low]) ; /* Swap low item (now in position middle) into position (low+1) */ ELEM_SWAP(arr[middle], arr[low+1]) ; /* Nibble from each end towards middle, swapping items when stuck */ ll = low + 1; hh = high; for (;;) { do ll++; while (arr[low] > arr[ll]) ; do hh--; while (arr[hh] > arr[low]) ; if (hh < ll) break; ELEM_SWAP(arr[ll], arr[hh]) ; } /* Swap middle item (in position low) back into correct position */ ELEM_SWAP(arr[low], arr[hh]) ; /* Re-set active partition */ if (hh <= median) low = ll; if (hh >= median) high = hh - 1; } } #undef ELEM_SWAP void mitk::ToFCompositeFilter::SetTemporalMedianFilterParameter(int tmporalMedianFilterNumOfFrames) { this->m_TemporalMedianFilterNumOfFrames = tmporalMedianFilterNumOfFrames; } void mitk::ToFCompositeFilter::SetThresholdFilterParameter(int min, int max) { if (min > max) { min = max; } this->m_ThresholdFilterMin = min; this->m_ThresholdFilterMax = max; } void mitk::ToFCompositeFilter::SetBilateralFilterParameter(double domainSigma, double rangeSigma, int kernelRadius = 0) { this->m_BilateralFilterDomainSigma = domainSigma; this->m_BilateralFilterRangeSigma = rangeSigma; this->m_BilateralFilterKernelRadius = kernelRadius; } void mitk::ToFCompositeFilter::CreateItkImage(ItkImageType2D::Pointer &itkInputImage) { itkInputImage = ItkImageType2D::New(); ItkImageType2D::IndexType startIndex; startIndex[0] = 0; // first index on X startIndex[1] = 0; // first index on Y ItkImageType2D::SizeType size; size[0] = this->m_ImageWidth; // size along X size[1] = this->m_ImageHeight; // size along Y ItkImageType2D::RegionType region; region.SetSize( size ); region.SetIndex( startIndex ); itkInputImage->SetRegions( region ); itkInputImage->Allocate(); } diff --git a/Modules/ToFProcessing/mitkToFCompositeFilter.h b/Modules/ToFProcessing/mitkToFCompositeFilter.h index 81ce0f269b..eab524fa67 100644 --- a/Modules/ToFProcessing/mitkToFCompositeFilter.h +++ b/Modules/ToFProcessing/mitkToFCompositeFilter.h 
@@ -1,193 +1,193 @@ /*============================================================================ The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center (DKFZ) All rights reserved. Use of this source code is governed by a 3-clause BSD license that can be found in the LICENSE file. ============================================================================*/ #ifndef __mitkToFCompositeFilter_h #define __mitkToFCompositeFilter_h #include #include "mitkImageToImageFilter.h" #include #include -#include "opencv2/core.hpp" +#include typedef itk::Image ItkImageType2D; typedef itk::Image ItkImageType3D; typedef itk::BilateralImageFilter BilateralFilterType; namespace mitk { /** * @brief Applies a common filter-pipeline to the first input of this filter * * This class intends to allow quick preprocessing of (ToF) range data. Input 0 of this filter, holding the range image, * is processed using the following image processing filters: * - threshold filter * - mask segmentation * - temporal median filter * - spatial median filter * - bilateral filter * * @ingroup ToFProcessing */ class MITKTOFPROCESSING_EXPORT ToFCompositeFilter : public ImageToImageFilter { public: mitkClassMacro( ToFCompositeFilter , ImageToImageFilter ); itkFactorylessNewMacro(Self); itkCloneMacro(Self); itkSetMacro(SegmentationMask,mitk::Image::Pointer); itkSetMacro(ApplyTemporalMedianFilter,bool); itkGetConstMacro(ApplyTemporalMedianFilter,bool); itkSetMacro(ApplyAverageFilter,bool); itkGetConstMacro(ApplyAverageFilter,bool); itkSetMacro(ApplyMedianFilter,bool); itkGetConstMacro(ApplyMedianFilter,bool); itkSetMacro(ApplyThresholdFilter,bool); itkGetConstMacro(ApplyThresholdFilter,bool); itkSetMacro(ApplyMaskSegmentation,bool); itkGetConstMacro(ApplyMaskSegmentation,bool); itkSetMacro(ApplyBilateralFilter,bool); itkGetConstMacro(ApplyBilateralFilter,bool); using itk::ProcessObject::SetInput; /*! 
\brief sets the input of this filter \param distanceImage input is the distance image of e.g. a ToF camera */ void SetInput( const InputImageType* distanceImage) override; /*! \brief sets the input of this filter at idx \param idx number of the current input \param distanceImage input is the distance image of e.g. a ToF camera */ void SetInput(unsigned int idx, const InputImageType* distanceImage) override; /*! \brief returns the input of this filter */ Image* GetInput(); /*! \brief returns the input with id idx of this filter */ Image* GetInput(unsigned int idx); /*! \brief Sets the parameter of the temporal median filter \param tmporalMedianFilterNumOfFrames number of frames to be considered for calulating the temporal median */ void SetTemporalMedianFilterParameter(int tmporalMedianFilterNumOfFrames); /*! \brief Sets the parameters (lower, upper threshold) of the threshold filter \param min lower threshold of the threshold filter \param max upper threshold of the threshold filter */ void SetThresholdFilterParameter(int min, int max); /*! \brief Sets the parameters (domain sigma, range sigma, kernel radius) of the bilateral filter \param domainSigma Parameter controlling the smoothing effect of the bilateral filter. Default value: 2 \param rangeSigma Parameter controlling the edge preserving effect of the bilateral filter. Default value: 60 \param kernelRadius radius of the filter mask of the bilateral filter */ void SetBilateralFilterParameter(double domainSigma, double rangeSigma, int kernelRadius); protected: /*! \brief standard constructor */ ToFCompositeFilter(); /*! \brief standard destructor */ ~ToFCompositeFilter() override; void GenerateOutputInformation() override; /*! \brief method generating the output of this filter. Called in the updated process of the pipeline. 
This method generates the output of the ToFSurfaceSource: The generated surface of the 3d points */ void GenerateData() override; /** * \brief Create an output for each input * * This Method sets the number of outputs to the number of inputs * and creates missing outputs objects. * \warning any additional outputs that exist before the method is called are deleted */ void CreateOutputsForAllInputs(); /*! \brief Applies a mask and/or threshold segmentation to the input image. All pixels with values outside the mask, below the lower threshold (min) and above the upper threshold (max) are assigned the pixel value 0 */ void ProcessSegmentation(IplImage* inputIplImage); /*! \brief Applies the ITK bilateral filter to the input image */ ItkImageType2D::Pointer ProcessItkBilateralFilter(ItkImageType2D::Pointer inputItkImage); /*! \brief Applies the OpenCV bilateral filter to the input image. */ void ProcessCVBilateralFilter(IplImage* inputIplImage, IplImage* outputIplImage); /*! \brief Applies the OpenCV median filter to the input image. */ void ProcessCVMedianFilter(IplImage* inputIplImage, IplImage* outputIplImage, int radius = 3); /*! \brief Performs temporal median filter on an image given the number of frames to be considered */ void ProcessStreamedQuickSelectMedianImageFilter(IplImage* inputIplImage); /*! \brief Quickselect algorithm * This Quickselect routine is based on the algorithm described in * "Numerical recipes in C", Second Edition, * Cambridge University Press, 1992, Section 8.5, ISBN 0-521-43108-5 * This code by Nicolas Devillard - 1998. Public domain. */ float quick_select(float arr[], int n); /*! 
\brief Initialize and allocate a 2D ITK image of dimension m_ImageWidth*m_ImageHeight */ void CreateItkImage(ItkImageType2D::Pointer &itkInputImage); mitk::Image::Pointer m_SegmentationMask; ///< mask image used for segmenting the image int m_ImageWidth; ///< x-dimension of the image int m_ImageHeight; ///< y-dimension of the image int m_ImageSize; ///< size of the image in bytes IplImage* m_IplDistanceImage; ///< OpenCV-representation of the distance image IplImage* m_IplOutputImage; ///< OpenCV-representation of the output image ItkImageType2D::Pointer m_ItkInputImage; ///< ITK representation of the distance image bool m_ApplyTemporalMedianFilter; ///< Flag indicating if the temporal median filter is currently active for processing the distance image bool m_ApplyAverageFilter; ///< Flag indicating if the average filter is currently active for processing the distance image bool m_ApplyMedianFilter; ///< Flag indicating if the spatial median filter is currently active for processing the distance image bool m_ApplyThresholdFilter; ///< Flag indicating if the threshold filter is currently active for processing the distance image bool m_ApplyMaskSegmentation; ///< Flag indicating if a mask segmentation is performed bool m_ApplyBilateralFilter; ///< Flag indicating if the bilateral filter is currently active for processing the distance image float** m_DataBuffer; ///< Buffer used for calculating the pixel-wise median over the last n (m_TemporalMedianFilterNumOfFrames) number of frames int m_DataBufferCurrentIndex; ///< Current index in the buffer of the temporal median filter int m_DataBufferMaxSize; ///< Maximal size for the buffer of the temporal median filter (m_DataBuffer) int m_TemporalMedianFilterNumOfFrames; ///< Number of frames to be used in the calculation of the temporal median int m_ThresholdFilterMin; ///< Lower threshold of the threshold filter. 
Pixels with values below will be assigned value 0 when applying the threshold filter int m_ThresholdFilterMax; ///< Lower threshold of the threshold filter. Pixels with values above will be assigned value 0 when applying the threshold filter double m_BilateralFilterDomainSigma; ///< Parameter of the bilateral filter controlling the smoothing effect of the filter. Default value: 2 double m_BilateralFilterRangeSigma; ///< Parameter of the bilateral filter controlling the edge preserving effect of the filter. Default value: 60 int m_BilateralFilterKernelRadius; ///< Kernel radius of the bilateral filter mask }; } //END mitk namespace #endif diff --git a/Modules/ToFProcessing/mitkToFDistanceImageToSurfaceFilter.h b/Modules/ToFProcessing/mitkToFDistanceImageToSurfaceFilter.h index 1a65814578..ce5d418fce 100644 --- a/Modules/ToFProcessing/mitkToFDistanceImageToSurfaceFilter.h +++ b/Modules/ToFProcessing/mitkToFDistanceImageToSurfaceFilter.h @@ -1,190 +1,192 @@ /*============================================================================ The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center (DKFZ) All rights reserved. Use of this source code is governed by a 3-clause BSD license that can be found in the LICENSE file. ============================================================================*/ #ifndef __mitkToFDistanceImageToSurfaceFilter_h #define __mitkToFDistanceImageToSurfaceFilter_h #include #include #include #include #include #include "mitkCameraIntrinsics.h" #include #include #include +#include + namespace mitk { /** * @brief Converts a Time-of-Flight (ToF) distance image to a 3D surface using the pinhole camera model for coordinate computation. * The intrinsic parameters of the camera (FocalLength, PrincipalPoint, InterPixelDistance) are set via SetCameraIntrinsics(). The * measured distance for each pixel corresponds to the distance between the object point and the corresponding image point on the * image plane. 
* * The coordinate conversion follows the model of a common pinhole camera where the origin of the camera * coordinate system (world coordinates) is at the pinhole * \image html Modules/ToFProcessing/Documentation/PinholeCameraModel.png * The definition of the image plane and its coordinate systems (pixel and mm) is depicted in the following image * \image html Modules/ToFProcessing/Documentation/ImagePlane.png * * @ingroup SurfaceFilters * @ingroup ToFProcessing */ class MITKTOFPROCESSING_EXPORT ToFDistanceImageToSurfaceFilter : public SurfaceSource { public: mitkClassMacro( ToFDistanceImageToSurfaceFilter , SurfaceSource ); itkFactorylessNewMacro(Self); itkCloneMacro(Self); itkSetMacro(CameraIntrinsics, mitk::CameraIntrinsics::Pointer); itkGetMacro(CameraIntrinsics, mitk::CameraIntrinsics::Pointer); itkSetMacro(InterPixelDistance,ToFProcessingCommon::ToFPoint2D); itkGetMacro(InterPixelDistance,ToFProcessingCommon::ToFPoint2D); itkSetMacro(TextureIndex,int); /** * @brief SetTriangulationThreshold Sets a triangulation threshold in order * to remove unusually huge faces from the surface. If this value is set, * the filter will check whether the distance between two neighboring vertices * exceeds the triangulation threshold. If yes, there vertices will not be * triangulated (connected with lines). The vertices will still be added to * the surface, but only as single point (if they have no other neighbors). * @param triangulationThreshold The triangulationThreshold in mm. (not mm*mm!) * @note vtkMath::Distance2BetweenPoints returns the squared distance * between two points and hence we square m_TriangulationThreshold in * order to save run-time. 
*/ void SetTriangulationThreshold( double triangulationThreshold ); itkGetMacro(TriangulationThreshold, double); itkSetMacro(VertexIdList, vtkSmartPointer); itkGetMacro(VertexIdList, vtkSmartPointer); itkSetMacro(GenerateTriangularMesh,bool); itkGetMacro(GenerateTriangularMesh,bool); /** * @brief The ReconstructionModeType enum: Defines the reconstruction mode, if using no interpixeldistances and focal lenghts in pixel units or interpixeldistances and focal length in mm. The Kinect option defines a special reconstruction mode for the kinect. */ enum ReconstructionModeType{ WithOutInterPixelDistance = 1, WithInterPixelDistance = 2, Kinect = 3}; itkSetEnumMacro(ReconstructionMode,ReconstructionModeType); itkGetEnumMacro(ReconstructionMode,ReconstructionModeType); /*! \brief Set scalar image used as texture of the surface. \param iplScalarImage OpenCV image for texturing */ void SetScalarImage(IplImage* iplScalarImage); /*! \brief Set scalar image used as texture of the surface. \return OpenCV image for texturing */ IplImage* GetScalarImage(); /*! \brief Set width of the scalar image used for texturing the surface \param width width (x-dimension) of the texture image */ void SetTextureImageWidth(int width); /*! \brief Set height of the scalar image used for texturing the surface \param height height (y-dimension) of the texture image */ void SetTextureImageHeight(int height); using itk::ProcessObject::SetInput; /*! \brief Sets the input of this filter \param distanceImage input is the distance image of e.g. a ToF camera */ virtual void SetInput( Image* distanceImage); /*! \brief Sets the input of this filter and the intrinsic parameters \param distanceImage input is the distance image of e.g. a ToF camera \param cameraIntrinsics */ virtual void SetInput( Image* distanceImage, mitk::CameraIntrinsics::Pointer cameraIntrinsics ); /*! \brief Sets the input of this filter at idx \param idx number of the current input \param distanceImage input is the distance image of e.g. 
a ToF camera */ virtual void SetInput(unsigned int idx, Image* distanceImage); /*! \brief Sets the input of this filter at idx and the intrinsic parameters \param idx number of the current input \param distanceImage input is the distance image of e.g. a ToF camera \param cameraIntrinsics This is the camera model which holds parameters like focal length, pixel size, etc. which are needed for the reconstruction of the surface. */ virtual void SetInput( unsigned int idx, Image* distanceImage, mitk::CameraIntrinsics::Pointer cameraIntrinsics ); /*! \brief Returns the input of this filter */ Image* GetInput(); /*! \brief Returns the input with id idx of this filter */ Image* GetInput(unsigned int idx); protected: /*! \brief Standard constructor */ ToFDistanceImageToSurfaceFilter(); /*! \brief Standard destructor */ ~ToFDistanceImageToSurfaceFilter() override; void GenerateOutputInformation() override; /*! \brief Method generating the output of this filter. Called in the updated process of the pipeline. This method generates the output of the ToFSurfaceSource: The generated surface of the 3d points */ void GenerateData() override; /** * \brief Create an output for each input * * This Method sets the number of outputs to the number of inputs * and creates missing outputs objects. * \warning any additional outputs that exist before the method is called are deleted */ void CreateOutputsForAllInputs(); IplImage* m_IplScalarImage; ///< Scalar image used for surface texturing mitk::CameraIntrinsics::Pointer m_CameraIntrinsics; ///< Specifies the intrinsic parameters int m_TextureImageWidth; ///< Width (x-dimension) of the texture image int m_TextureImageHeight; ///< Height (y-dimension) of the texture image ToFProcessingCommon::ToFPoint2D m_InterPixelDistance; ///< distance in mm between two adjacent pixels on the ToF camera chip int m_TextureIndex; ///< Index of the input used as texture image when no scalar image was set via SetIplScalarImage(). 
0 = Distance, 1 = Amplitude, 2 = Intensity bool m_GenerateTriangularMesh; ReconstructionModeType m_ReconstructionMode; ///< The ReconstructionModeType enum: Defines the reconstruction mode, if using no interpixeldistances and focal lenghts in pixel units or interpixeldistances and focal length in mm. The Kinect option defines a special reconstruction mode for the kinect. vtkSmartPointer m_VertexIdList; ///< Make a vtkIdList to save the ID's of the polyData corresponding to the image pixel ID's. This can be accessed after generate data to obtain the mapping. double m_TriangulationThreshold; }; } //END mitk namespace #endif diff --git a/Modules/US/USFilters/mitkUSImageVideoSource.cpp b/Modules/US/USFilters/mitkUSImageVideoSource.cpp index 539158691c..f95ff55eed 100644 --- a/Modules/US/USFilters/mitkUSImageVideoSource.cpp +++ b/Modules/US/USFilters/mitkUSImageVideoSource.cpp @@ -1,226 +1,226 @@ /*============================================================================ The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center (DKFZ) All rights reserved. Use of this source code is governed by a 3-clause BSD license that can be found in the LICENSE file. 
============================================================================*/ // MITK HEADER #include "mitkUSImageVideoSource.h" #include "mitkImage.h" //Other #include -#include +#include mitk::USImageVideoSource::USImageVideoSource() : m_VideoCapture(new cv::VideoCapture()), m_IsVideoReady(false), m_IsGreyscale(false), m_IsCropped(false), m_ResolutionOverrideWidth(0), m_ResolutionOverrideHeight(0), m_ResolutionOverride(false), m_GrayscaleFilter(mitk::ConvertGrayscaleOpenCVImageFilter::New()), m_CropFilter(mitk::CropOpenCVImageFilter::New()) { } mitk::USImageVideoSource::~USImageVideoSource() { m_VideoCapture->release(); delete m_VideoCapture; } void mitk::USImageVideoSource::SetVideoFileInput(std::string path) { m_VideoCapture->open(path.c_str()); // check if we succeeded if(!m_VideoCapture->isOpened()) { m_IsVideoReady = false; } else { m_IsVideoReady = true; } // if Override is enabled, use it if (m_ResolutionOverride) { m_VideoCapture->set(CV_CAP_PROP_FRAME_WIDTH, this->m_ResolutionOverrideWidth); m_VideoCapture->set(CV_CAP_PROP_FRAME_HEIGHT, this->m_ResolutionOverrideHeight); } } void mitk::USImageVideoSource::SetCameraInput(int deviceID) { m_VideoCapture->open(deviceID); if(!m_VideoCapture->isOpened()) // check if we succeeded m_IsVideoReady = false; else m_IsVideoReady = true; // if Override is enabled, use it if (m_ResolutionOverride) { m_VideoCapture->set(CV_CAP_PROP_FRAME_WIDTH, this->m_ResolutionOverrideWidth); m_VideoCapture->set(CV_CAP_PROP_FRAME_HEIGHT, this->m_ResolutionOverrideHeight); } } void mitk::USImageVideoSource::ReleaseInput() { m_VideoCapture->release(); delete m_VideoCapture; m_VideoCapture = new cv::VideoCapture(); } void mitk::USImageVideoSource::SetColorOutput(bool isColor){ if ( ! isColor && ! 
m_IsGreyscale ) { this->PushFilter(m_GrayscaleFilter.GetPointer()); } else if ( isColor && m_IsGreyscale ) { this->RemoveFilter(m_GrayscaleFilter.GetPointer()); } m_IsGreyscale = !isColor; } int mitk::USImageVideoSource::GetImageHeight() { if (m_VideoCapture) { return m_VideoCapture->get(CV_CAP_PROP_FRAME_HEIGHT); } else { return 0; } } int mitk::USImageVideoSource::GetImageWidth() { if (m_VideoCapture) { return m_VideoCapture->get(CV_CAP_PROP_FRAME_WIDTH); } else { return 0; } } bool mitk::USImageVideoSource::GetIsReady() { if (!m_VideoCapture) { return false; } return m_VideoCapture->isOpened(); } void mitk::USImageVideoSource::SetRegionOfInterest(int topLeftX, int topLeftY, int bottomRightX, int bottomRightY) { m_CropFilter->SetCropRegion(topLeftX, topLeftY, bottomRightX, bottomRightY); if (! m_IsCropped && ! m_CropFilter->GetIsCropRegionEmpty()) { this->PushFilter(m_CropFilter.GetPointer()); m_IsCropped = true; } } void mitk::USImageVideoSource::SetRegionOfInterest(USImageRoi roi) { this->SetRegionOfInterest(roi.topLeftX, roi.topLeftY, roi.bottomRightX, roi.bottomRightY); } void mitk::USImageVideoSource::SetCropping(USImageCropping cropping) { int width = this->GetImageWidth(); int height = this->GetImageHeight(); this->SetRegionOfInterest(cropping.left, cropping.top, width - cropping.right, height - cropping.bottom); } mitk::USImageVideoSource::USImageCropping mitk::USImageVideoSource::GetCropping() { cv::Rect cropRect = m_CropFilter->GetCropRegion(); USImageCropping cropping; cropping.left = cropRect.x; cropping.top = cropRect.y; if ( cropRect.height == 0 ) { cropping.bottom = 0; } else { cropping.bottom = this->GetImageHeight() - (cropRect.y + cropRect.height); } if ( cropRect.width == 0 ) { cropping.right = 0; } else { cropping.right = this->GetImageWidth() - (cropRect.x + cropRect.width); } return cropping; } mitk::USImageVideoSource::USImageRoi mitk::USImageVideoSource::GetRegionOfInterest() { cv::Rect cropRect = m_CropFilter->GetCropRegion(); return 
USImageRoi(cropRect.x, cropRect.y, cropRect.x + cropRect.width, cropRect.y + cropRect.height); } void mitk::USImageVideoSource::RemoveRegionOfInterest() { this->RemoveFilter(m_CropFilter.GetPointer()); m_IsCropped = false; } void mitk::USImageVideoSource::GetNextRawImage(std::vector& image ) { // loop video if necessary //Commented out because setting and getting of these properties is not supported. Therefore on Linux //you'll always get some Highgui errors from OpenCV /*if (m_VideoCapture->get(CV_CAP_PROP_POS_FRAMES) == m_VideoCapture->get(CV_CAP_PROP_FRAME_COUNT)) { m_VideoCapture->set(CV_CAP_PROP_POS_FRAMES, 0); }*/ if (image.size() != 1) image.resize(1); // retrieve image *m_VideoCapture >> image[0]; // get a new frame from camera } void mitk::USImageVideoSource::GetNextRawImage(std::vector& image ) { if (image.size() != 1) image.resize(1); std::vector cv_img; this->GetNextRawImage(cv_img); // convert to MITK-Image IplImage ipl_img = cvIplImage(cv_img[0]); this->m_OpenCVToMitkFilter->SetOpenCVImage(&ipl_img); this->m_OpenCVToMitkFilter->Update(); // OpenCVToMitkImageFilter returns a standard mitk::image. We then transform it into an USImage image[0] = this->m_OpenCVToMitkFilter->GetOutput(); // clean up cv_img[0].release(); } void mitk::USImageVideoSource::OverrideResolution(int width, int height) { this->m_ResolutionOverrideHeight = height; this->m_ResolutionOverrideWidth = width; if (m_VideoCapture != nullptr) { m_VideoCapture->set(CV_CAP_PROP_FRAME_WIDTH, width); m_VideoCapture->set(CV_CAP_PROP_FRAME_HEIGHT, height); } }