diff --git a/CMake/PackageDepends/MITK_OpenCV_Config.cmake b/CMake/PackageDepends/MITK_OpenCV_Config.cmake index 9a5e92493a..e8e59a903c 100644 --- a/CMake/PackageDepends/MITK_OpenCV_Config.cmake +++ b/CMake/PackageDepends/MITK_OpenCV_Config.cmake @@ -1,56 +1,7 @@ -macro(REMOVE_LAST_PATH_ELEMENT_IF_EQUALS INPUT_VAR TARGET_VALUE) - STRING( REPLACE "/" ";" INPUT_VARLIST ${${INPUT_VAR}} ) - LIST( LENGTH INPUT_VARLIST INPUT_VARLIST_LENGTH ) - - SET(INPUT_VARLIST_LAST_INDEX ${INPUT_VARLIST_LENGTH}) - MATH( EXPR INPUT_VARLIST_LAST_INDEX "${INPUT_VARLIST_LENGTH}-1" ) - LIST( GET INPUT_VARLIST ${INPUT_VARLIST_LAST_INDEX} INPUT_VARLAST_ELEMENT ) - - if(INPUT_VARLAST_ELEMENT MATCHES "^${TARGET_VALUE}$") - LIST( REMOVE_AT INPUT_VARLIST ${INPUT_VARLIST_LAST_INDEX} ) - endif() - - set(NEW_VAR "") - foreach(folderPart ${INPUT_VARLIST}) - if(NOT NEW_VAR) - if(NOT WIN32) - set(folderPart "/${folderPart}") - endif() - set(NEW_VAR "${folderPart}") - else() - set(NEW_VAR "${NEW_VAR}/${folderPart}") - endif() - endforeach() - - set(${INPUT_VAR} ${NEW_VAR}) -endmacro() - -# bug in opencv 2.4.2 and mitk: if we have a submodule OpenCV_LIB_DIR_OPT and -# OpenCV_LIB_DIR_DBG is cached when find_package is called -# resulting in wrong windows output paths, e.g. C:\OpenCV\lib\Release\Release -# removing last Release and Debug string manually to fix it -if( OpenCV_LIB_DIR ) - - # remove the Release/Debug in \Release and \Debug - # of OpenCV_LIB_DIR_OPT and OpenCV_LIB_DIR_DBG here - REMOVE_LAST_PATH_ELEMENT_IF_EQUALS(OpenCV_LIB_DIR_DBG Debug) - #message("OpenCV_LIB_DIR_DBG: ${OpenCV_LIB_DIR_DBG}") - - set(OpenCV_LIB_DIR_OPT ${OpenCV_LIB_DIR_DBG}) - #message("OpenCV_LIB_DIR_OPT: ${OpenCV_LIB_DIR_OPT}") - - REMOVE_LAST_PATH_ELEMENT_IF_EQUALS(OpenCV_3RDPARTY_LIB_DIR_DBG Debug) - #message("OpenCV_3RDPARTY_LIB_DIR_DBG: ${OpenCV_3RDPARTY_LIB_DIR_DBG}") - - set(OpenCV_3RDPARTY_LIB_DIR_OPT ${OpenCV_3RDPARTY_LIB_DIR_DBG}) - #message("OpenCV_3RDPARTY_LIB_DIR_OPT: ${OpenCV_3RDPARTY_LIB_DIR_OPT}") - -endif() - list(APPEND ALL_LIBRARIES ${OpenCV_LIBS}) list(APPEND ALL_INCLUDE_DIRECTORIES ${OpenCV_INCLUDE_DIRS}) # adding option for videoinput library on windows (for directshow based frame grabbing) if(WIN32) option(MITK_USE_videoInput "Use videoInput (DirectShow wrapper) library" OFF) endif(WIN32) diff --git a/CMake/mitkFunctionGetLibrarySearchPaths.cmake b/CMake/mitkFunctionGetLibrarySearchPaths.cmake index 7df47de0c3..faeb5e64ca 100644 --- a/CMake/mitkFunctionGetLibrarySearchPaths.cmake +++ b/CMake/mitkFunctionGetLibrarySearchPaths.cmake @@ -1,162 +1,152 @@ function(mitkFunctionGetLibrarySearchPaths search_path intermediate_dir) set(_dir_candidates "${MITK_CMAKE_RUNTIME_OUTPUT_DIRECTORY}" "${MITK_CMAKE_RUNTIME_OUTPUT_DIRECTORY}/plugins" "${MITK_CMAKE_LIBRARY_OUTPUT_DIRECTORY}" "${MITK_CMAKE_LIBRARY_OUTPUT_DIRECTORY}/plugins" ) if(MITK_EXTERNAL_PROJECT_PREFIX) list(APPEND _dir_candidates "${MITK_EXTERNAL_PROJECT_PREFIX}/bin" "${MITK_EXTERNAL_PROJECT_PREFIX}/lib" ) endif() # Determine the Qt5 library installation prefix set(_qmake_location ) if(MITK_USE_Qt5 AND TARGET ${Qt5Core_QMAKE_EXECUTABLE}) get_property(_qmake_location TARGET ${Qt5Core_QMAKE_EXECUTABLE} PROPERTY IMPORT_LOCATION) endif() if(_qmake_location) if(NOT _qt_install_libs) if(WIN32) execute_process(COMMAND ${_qmake_location} -query QT_INSTALL_BINS OUTPUT_VARIABLE _qt_install_libs OUTPUT_STRIP_TRAILING_WHITESPACE) else() execute_process(COMMAND ${_qmake_location} -query QT_INSTALL_LIBS OUTPUT_VARIABLE _qt_install_libs OUTPUT_STRIP_TRAILING_WHITESPACE) endif() 
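# Note: `qmake -query <property>` simply prints the requested install path on stdout,
# e.g. (illustrative output only, the actual path depends on the local Qt installation):
#   $ qmake -query QT_INSTALL_LIBS
#   /opt/Qt/5.11/gcc_64/lib
# The result is cached below (CACHE INTERNAL) so qmake is only queried once per configure run.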
file(TO_CMAKE_PATH "${_qt_install_libs}" _qt_install_libs) set(_qt_install_libs ${_qt_install_libs} CACHE INTERNAL "Qt library installation prefix" FORCE) endif() if(_qt_install_libs) list(APPEND _dir_candidates ${_qt_install_libs}) endif() elseif(MITK_USE_Qt5) message(WARNING "The qmake executable could not be found.") endif() get_property(_additional_paths GLOBAL PROPERTY MITK_ADDITIONAL_LIBRARY_SEARCH_PATHS) if(MITK_USE_HDF5) FIND_PACKAGE(HDF5 COMPONENTS C HL NO_MODULE REQUIRED shared) get_target_property(_location hdf5-shared LOCATION) get_filename_component(_location ${_location} PATH) list(APPEND _additional_paths ${_location}) # This is a work-around. The hdf5-config.cmake file is not robust enough # to be included several times via find_pakcage calls. set(HDF5_LIBRARIES ${HDF5_LIBRARIES} PARENT_SCOPE) endif() if(MITK_USE_Vigra) # we cannot use _find_package(Vigra) here because the vigra-config.cmake file # always includes the target-exports files without using an include guard. This # would lead to errors when another find_package(Vigra) call is processed. The # (bad) assumption here is that for the time being, only the Classification module # is using Vigra. if(UNIX) list(APPEND _additional_paths ${Vigra_DIR}/lib) else() list(APPEND _additional_paths ${Vigra_DIR}/bin) endif() endif() if(_additional_paths) list(APPEND _dir_candidates ${_additional_paths}) endif() # The code below is sub-optimal. It makes assumptions about # the structure of the build directories, pointed to by # the *_DIR variables. Instead, we should rely on package # specific "LIBRARY_DIRS" variables, if they exist. if(WIN32) list(APPEND _dir_candidates "${ITK_DIR}/bin") endif() if(MITK_USE_MatchPoint) if(WIN32) list(APPEND _dir_candidates "${MatchPoint_DIR}/bin") else() list(APPEND _dir_candidates "${MatchPoint_DIR}/lib") endif() endif() if(OpenCV_DIR) - set(_opencv_link_directories - "${OpenCV_LIB_DIR_DBG}" - "${OpenCV_LIB_DIR_OPT}" - "${OpenCV_3RDPARTY_LIB_DIR_DBG}" - "${OpenCV_3RDPARTY_LIB_DIR_OPT}") - list(REMOVE_DUPLICATES _opencv_link_directories) if(WIN32) - foreach(_opencv_link_directory ${_opencv_link_directories}) - list(APPEND _dir_candidates "${_opencv_link_directory}/../bin") - endforeach() - else() - list(APPEND _dir_candidates ${_opencv_link_directories}) + list(APPEND _dir_candidates "${OpenCV_LIB_PATH}/../bin") endif() endif() if(MITK_USE_Python) list(APPEND _dir_candidates "${CTK_DIR}/CMakeExternals/Install/bin") get_filename_component(_python_dir ${PYTHON_EXECUTABLE} DIRECTORY) list(APPEND _dir_candidates "${_python_dir}") endif() if(MITK_USE_TOF_PMDO3 OR MITK_USE_TOF_PMDCAMCUBE OR MITK_USE_TOF_PMDCAMBOARD) list(APPEND _dir_candidates "${MITK_PMD_SDK_DIR}/plugins" "${MITK_PMD_SDK_DIR}/bin") endif() if(MITK_USE_CTK) list(APPEND _dir_candidates "${CTK_LIBRARY_DIRS}") foreach(_ctk_library ${CTK_LIBRARIES}) if(${_ctk_library}_LIBRARY_DIRS) list(APPEND _dir_candidates "${${_ctk_library}_LIBRARY_DIRS}") endif() endforeach() endif() if(MITK_USE_BLUEBERRY) if(DEFINED CTK_PLUGIN_RUNTIME_OUTPUT_DIRECTORY) if(IS_ABSOLUTE "${CTK_PLUGIN_RUNTIME_OUTPUT_DIRECTORY}") list(APPEND _dir_candidates "${CTK_PLUGIN_RUNTIME_OUTPUT_DIRECTORY}") else() list(APPEND _dir_candidates "${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/${CTK_PLUGIN_RUNTIME_OUTPUT_DIRECTORY}") endif() endif() endif() if(MITK_LIBRARY_DIRS) list(APPEND _dir_candidates ${MITK_LIBRARY_DIRS}) endif() list(REMOVE_DUPLICATES _dir_candidates) set(_search_dirs ) foreach(_dir ${_dir_candidates}) if(EXISTS "${_dir}/${intermediate_dir}") list(APPEND _search_dirs 
"${_dir}/${intermediate_dir}") else() list(APPEND _search_dirs "${_dir}") endif() endforeach() # Special handling for "internal" search dirs. The intermediate directory # might not have been created yet, so we can't check for its existence. # Hence we just add it for Windows without checking. set(_internal_search_dirs "${CMAKE_RUNTIME_OUTPUT_DIRECTORY}" "${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/plugins") if(WIN32) foreach(_dir ${_internal_search_dirs}) set(_search_dirs "${_dir}/${intermediate_dir}" ${_search_dirs}) endforeach() else() set(_search_dirs ${_internal_search_dirs} ${_search_dirs}) endif() list(REMOVE_DUPLICATES _search_dirs) set(${search_path} ${_search_dirs} PARENT_SCOPE) endfunction() diff --git a/CMakeExternals/OpenCV.cmake b/CMakeExternals/OpenCV.cmake index 2835bad833..fcc38cd141 100644 --- a/CMakeExternals/OpenCV.cmake +++ b/CMakeExternals/OpenCV.cmake @@ -1,96 +1,98 @@ #----------------------------------------------------------------------------- # OpenCV #----------------------------------------------------------------------------- if(MITK_USE_OpenCV) # Sanity checks if(DEFINED OpenCV_DIR AND NOT EXISTS ${OpenCV_DIR}) message(FATAL_ERROR "OpenCV_DIR variable is defined but corresponds to non-existing directory") endif() set(proj OpenCV) set(proj_DEPENDENCIES) set(OpenCV_DEPENDS ${proj}) if(NOT DEFINED OpenCV_DIR) set(additional_cmake_args -DBUILD_opencv_java:BOOL=OFF -DBUILD_opencv_ts:BOOL=OFF -DBUILD_PERF_TESTS:BOOL=OFF ) if(MITK_USE_Python) set(CV_PACKAGE_PATH -DPYTHON_PACKAGES_PATH:PATH=${ep_prefix}/lib/python${PYTHON_VERSION_MAJOR}.${PYTHON_VERSION_MINOR}/site-packages) list(APPEND additional_cmake_args -DBUILD_opencv_python:BOOL=ON - -DBUILD_NEW_PYTHON_SUPPORT:BOOL=ON + -DBUILD_opencv_python3:BOOL=ON + #-DBUILD_NEW_PYTHON_SUPPORT:BOOL=ON -DPYTHON_DEBUG_LIBRARY:FILEPATH=${PYTHON_DEBUG_LIBRARY} -DPYTHON_EXECUTABLE:FILEPATH=${PYTHON_EXECUTABLE} -DPYTHON_INCLUDE_DIR:PATH=${PYTHON_INCLUDE_DIR} -DPYTHON_INCLUDE_DIR2:PATH=${PYTHON_INCLUDE_DIR2} -DPYTHON_LIBRARY:FILEPATH=${PYTHON_LIBRARY} ${CV_PACKAGE_PATH} #-DPYTHON_LIBRARIES=${PYTHON_LIBRARY} #-DPYTHON_DEBUG_LIBRARIES=${PYTHON_DEBUG_LIBRARIES} ) else() list(APPEND additional_cmake_args -DBUILD_opencv_python:BOOL=OFF - -DBUILD_NEW_PYTHON_SUPPORT:BOOL=OFF + -DBUILD_opencv_python3:BOOL=OFF + -DBUILD_opencv_python_bindings_generator:BOOL=OFF + #-DBUILD_NEW_PYTHON_SUPPORT:BOOL=OFF ) endif() # 12-05-02, muellerm, added QT usage by OpenCV if QT is used in MITK # 12-09-11, muellerm, removed automatic usage again, since this will struggle with the MITK Qt application object if(MITK_USE_Qt5) list(APPEND additional_cmake_args -DWITH_QT:BOOL=OFF -DWITH_QT_OPENGL:BOOL=OFF -DQT_QMAKE_EXECUTABLE:FILEPATH=${QT_QMAKE_EXECUTABLE} ) endif() if(CTEST_USE_LAUNCHERS) list(APPEND additional_cmake_args "-DCMAKE_PROJECT_${proj}_INCLUDE:FILEPATH=${CMAKE_ROOT}/Modules/CTestUseLaunchers.cmake" ) endif() - set(opencv_url ${MITK_THIRDPARTY_DOWNLOAD_PREFIX_URL}/opencv-2.4.13.5.tar.gz) - set(opencv_url_md5 6cbe56ffb9ab1424fc2f5e78f46c82a8) - + set(opencv_url ${MITK_THIRDPARTY_DOWNLOAD_PREFIX_URL}/opencv-3.4.1.zip) + set(opencv_url_md5 8464ce888f4c283895626950bada1e44) ExternalProject_Add(${proj} LIST_SEPARATOR ${sep} URL ${opencv_url} URL_MD5 ${opencv_url_md5} - # Related bug: http://bugs.mitk.org/show_bug.cgi?id=5912 - PATCH_COMMAND ${PATCH_COMMAND} -N -p1 -i ${CMAKE_CURRENT_LIST_DIR}/OpenCV.patch CMAKE_GENERATOR ${gen} CMAKE_ARGS ${ep_common_args} -DBUILD_TESTS:BOOL=OFF -DBUILD_DOCS:BOOL=OFF -DBUILD_EXAMPLES:BOOL=OFF -DBUILD_DOXYGEN_DOCS:BOOL=OFF 
-DWITH_CUDA:BOOL=OFF + -DWITH_VTK:BOOL=OFF + -DENABLE_CXX11:BOOL=ON ${additional_cmake_args} CMAKE_CACHE_ARGS ${ep_common_cache_args} CMAKE_CACHE_DEFAULT_ARGS ${ep_common_cache_default_args} DEPENDS ${proj_DEPENDENCIES} ) set(OpenCV_DIR ${ep_prefix}) mitkFunctionInstallExternalCMakeProject(${proj}) else() mitkMacroEmptyExternalProject(${proj} "${proj_DEPENDENCIES}") endif() endif() diff --git a/Modules/CameraCalibration/mitkEndoMacros.h b/Modules/CameraCalibration/mitkEndoMacros.h index ff43f90e17..707572b3e1 100644 --- a/Modules/CameraCalibration/mitkEndoMacros.h +++ b/Modules/CameraCalibration/mitkEndoMacros.h @@ -1,100 +1,98 @@ /*=================================================================== The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center, Division of Medical and Biological Informatics. All rights reserved. This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See LICENSE.txt or http://www.mitk.org for details. ===================================================================*/ #ifndef mitkEndoMacros_h #define mitkEndoMacros_h /// /// COLLECTION OF MACROS FOR THE ENDOTRACKING MODULE /// /// /// multiplexing for cv mats /// #define endoAccessCvMat(function, T, arg1, arg2) \ if( arg2.type() == cv::DataType::type ) \ function( arg1, arg2 ); \ else if( arg2.type() == cv::DataType::type ) \ function( arg1, arg2 ); \ -else if( arg2.type() == cv::DataType::type ) \ - function( arg1, arg2 ); \ else if( arg2.type() == cv::DataType::type ) \ function( arg1, arg2 ); \ else if( arg2.type() == cv::DataType::type ) \ function( arg1, arg2 ); \ else if( arg2.type() == cv::DataType::type ) \ function( arg1, arg2 ); \ else if( arg2.type() == cv::DataType::type ) \ function( arg1, arg2 ); \ else if( arg2.type() == cv::DataType::type ) \ function( arg1, arg2 ); \ else \ throw std::invalid_argument("Unknown type for cv::Mat"); /// /// exec an algorithm with 1 output argument /// #define endoExec(macroAlgName, macroOutputType, macroOutputVarName, ...)\ macroOutputType macroOutputVarName;\ { \ macroAlgName _macroAlgName(__VA_ARGS__, &macroOutputVarName);\ _macroAlgName.Update();\ } /// /// exec an algorithm with 2 output arguments /// #define endoExec2(macroAlgName, macroOutputType1, macroOutputVarName1, macroOutputType2, macroOutputVarName2, ...)\ macroOutputType1 macroOutputVarName1;\ macroOutputType2 macroOutputVarName2;\ { \ macroAlgName _macroAlgName(__VA_ARGS__, &macroOutputVarName1, &macroOutputVarName2);\ _macroAlgName.Update();\ } /// /// definition of the corresponding directory separator /// #ifdef WIN32 static const std::string DIR_SEPARATOR = "\\"; #else static const std::string DIR_SEPARATOR = "/"; #endif #define endoSetInput(name, type) \ public: \ virtual void Set##name (const type _arg) \ { \ if ( this->m_##name != _arg ) \ { \ this->m_##name = _arg; \ } \ } \ protected: \ const type m_##name; #define endoSetOutput(name, type) \ public: \ virtual void Set##name (type _arg) \ { \ if ( this->m_##name != _arg ) \ { \ this->m_##name = _arg; \ } \ } \ protected: \ type m_##name; #endif // mitkEndoMacros_h diff --git a/Modules/CameraCalibration/mitkTransform.h b/Modules/CameraCalibration/mitkTransform.h index 43c5ac9485..9644e5a718 100644 --- a/Modules/CameraCalibration/mitkTransform.h +++ b/Modules/CameraCalibration/mitkTransform.h @@ -1,305 +1,305 @@ /*=================================================================== The Medical Imaging Interaction Toolkit (MITK) Copyright
(c) German Cancer Research Center, Division of Medical and Biological Informatics. All rights reserved. This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See LICENSE.txt or http://www.mitk.org for details. ===================================================================*/ #ifndef MITKTRANSFORM_H #define MITKTRANSFORM_H #include -#include +#include #include #include #include #include #include #include #include #include namespace mitk { /// /// \brief class representing a transfrom in 3D /// /// internally it stores a mitk navigation data. this is more /// or less a wrapper for navigation data for easy casting /// between opencv/vnl/mitk/xml representations of transform /// data /// class MITKCAMERACALIBRATION_EXPORT Transform: public itk::Object, public XMLSerializable { public: mitkClassMacroItkParent(Transform, itk::Object); itkFactorylessNewMacro(Transform); mitkNewMacro1Param(Transform, const mitk::NavigationData*); mitkNewMacro1Param(Transform, const std::string&); /// /// constants describing the type of transform /// represented here /// static const std::string UNKNOWN_TYPE; static const std::string ENDOSCOPE_SCOPE_TOOL; static const std::string ENDOSCOPE_CAM_TOOL; static const std::string CHESSBOARD_TOOL; static const std::string POINTER_TOOL; static const std::string POINTER_TO_CHESSBOARD_ORIGIN; static const std::string POINTER_TO_CHESSBOARD_X_SUPPORT_POINT; static const std::string POINTER_TO_CHESSBOARD_Y_SUPPORT_POINT; static const std::string BOARD_TO_BOARD_TOOL; static const std::string REFERENCE_CAMERA_TRANSFORM; static const std::string REFERENCE_SCOPE_TRANSFORM; static const std::string EYE_TO_HAND_TRANSFORM; static const std::string CAMERA_EXTRINSICS; itkGetConstMacro(Type, std::string); itkSetMacro(Type, std::string&); /// /// Copies the content of transform to this /// instance /// void Copy( const mitk::Transform* transform ); /// /// Copies the content of transform to this /// instance /// void Copy( const mitk::NavigationData* transform ); /// /// Inverts the rotation of this transform /// (Polaris navigation Data have inverted rotation /// so you may want to call this function when using /// polaris data) /// void TransposeRotation(); /// /// get a copy of this transform /// mitk::Transform::Pointer Clone() const; /// /// concatenate this transform with the given one, /// i.e. this transform is done first, then transform /// ( if x is this transform, y is transform, then this will be y*x) /// post multiply semantics! 
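/// A minimal usage sketch of these post-multiply semantics (hypothetical
/// transforms, names are illustrative only):
///   mitk::Transform::Pointer x = mitk::Transform::New(); // applied first
///   mitk::Transform::Pointer y = mitk::Transform::New(); // applied second
///   x->Concatenate(y.GetPointer());                       // x now represents y*x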
/// \see vtkTransform /// void Concatenate( mitk::Transform* transform ); /// /// same as above with vnl mat argument /// void Concatenate( const vnl_matrix_fixed& transform ); /// /// same as above with vtk mat argument /// void Concatenate( const vtkMatrix4x4* transform ); /// /// invert this transform /// void Invert(); /// /// resets the internal variables except type /// void Reset(); /// /// read from xml /// void FromXML(TiXmlElement* elem) override; /// /// read csv file /// void FromCSVFile(const std::string& file); /// /// grafts the data from naviData to this transform /// void SetNavigationData( const mitk::NavigationData* naviData ); /// /// method to set orientation quat /// void SetOrientation( const vnl_quaternion& orientation); /// /// method to set float valued orientation quat /// void SetOrientation( const vnl_quaternion& orientation); /// /// method to set translation /// void SetTranslation( const vnl_vector_fixed& transl); /// /// method to set a vector of doubles as translation /// void SetTranslation( const vnl_vector& transl); /// /// method to set a mitk::Point3D as position /// void SetPosition( const mitk::Point3D& transl); /// /// sets rotation with a rotation matrix /// void SetRotation( vnl_matrix_fixed& mat); /// /// sets rotation with a non fixed rotation matrix /// void SetRotation( vnl_matrix& mat); /// /// sets rotation and translation with a transformation matrix /// void SetMatrix( const vnl_matrix_fixed& mat); /// /// sets rotation and translation with a vtk transformation matrix /// void SetMatrix( const vtkMatrix4x4* mat); /// /// sets translation from a POD vector /// void SetTranslation( float* array ); /// /// sets translation from a POD vector. this must be a /// 3x3=9 sized vector in row major format (first row = first /// three elements) /// void SetRotation( float* array ); /// /// sets translation from a POD vector /// void SetTranslation( double array[3] ); /// /// sets translation from a POD vector /// void SetRotation( double array[3][3] ); /// /// method to set translation by cv vector /// void SetTranslation( const cv::Mat& transl); /// /// sets rotation with a rotation matrix /// void SetRotation( const cv::Mat& mat ); /// /// sets rotation with a rodrigues rotation vector /// void SetRotationVector( const cv::Mat& rotVec); /// /// \return the navigation data that stores all information /// mitk::NavigationData::Pointer GetNavigationData() const; /// /// calls navigationdata::GetPosition() /// mitk::Point3D GetPosition() const; /// /// same as GetPosition /// mitk::Point3D GetTranslation() const; /// /// calls navigationdata::IsValid() /// bool IsValid() const; /// /// calls navigationdata::SetValid() /// void SetValid(bool valid); /// /// calls navigationdata::GetOrientation() /// mitk::Quaternion GetOrientation() const; /// /// \return the homogeneous matrix representing this transform /// vnl_matrix_fixed GetMatrix() const; /// /// \return the homogeneous vtk matrix representing this transform /// void GetMatrix(vtkMatrix4x4* matrix) const; /// /// \return the homogeneous vtk matrix representing this transform /// in !OpenGL! left handed coordinate system /// void GetVtkOpenGlMatrix(vtkMatrix4x4* matrix) const; mitk::Point3D TransformPoint(mitk::Point3D point) const; /// /// create xml representation /// void ToXML(TiXmlElement* elem) const override; /// /// create string representation /// std::string ToString() const; /// /// create string csv representation (only the transformation values!!!!) 
/// std::string ToCSVString() const; /// /// create matlab representation /// std::string ToMatlabString(const std::string& varname="transform", bool printLastRow=true) const; /// /// write csv representation to file (only the transformation values!!!!) /// void ToCSVFile(const std::string& file) const; /// /// write matlab representation to file /// void ToMatlabFile(const std::string& file , const std::string& varname="transform") const; /// /// conversion to cv types /// cv::Mat GetCvTranslation() const; cv::Mat GetCvRotationVector() const; cv::Mat GetCvRotationMatrix() const; cv::Mat GetCvMatrix() const; /// /// conversion to vnl types /// vnl_vector_fixed GetVnlTranslation() const; vnl_vector_fixed GetVnlDoubleTranslation() const; vnl_quaternion GetVnlDoubleQuaternion() const; vnl_matrix_fixed GetVnlRotationMatrix() const; vnl_matrix_fixed GetVnlDoubleMatrix() const; protected: Transform(); Transform(const mitk::NavigationData* nd); Transform(const std::string& s); // everything is stored here mitk::NavigationData::Pointer m_NavData; /// /// saves the type of the transform (Default is UNKNOWN_TYPE) /// std::string m_Type; }; } // namespace mitk MITKCAMERACALIBRATION_EXPORT std::ostream& operator<< (std::ostream& os, mitk::Transform::Pointer p); #endif // MITKTRANSFORM_H diff --git a/Modules/CameraCalibration/mitkVnlMatrixFromCvMat.h b/Modules/CameraCalibration/mitkVnlMatrixFromCvMat.h index 9886c65725..d8edca11ca 100644 --- a/Modules/CameraCalibration/mitkVnlMatrixFromCvMat.h +++ b/Modules/CameraCalibration/mitkVnlMatrixFromCvMat.h @@ -1,78 +1,78 @@ /*=================================================================== The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center, Division of Medical and Biological Informatics. All rights reserved. This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See LICENSE.txt or http://www.mitk.org for details. 
===================================================================*/ #ifndef mitkVnlMatrixFromCvMat_h #define mitkVnlMatrixFromCvMat_h #include -#include +#include #include #include #include namespace mitk { /// /// create a vnl_matrix from a cv mat /// template class VnlMatrixFromCvMat: virtual public Algorithm { public: /// /// init default values and save references /// VnlMatrixFromCvMat( const cv::Mat* _CvMat, vnl_matrix* _VnlMatrix): m_CvMat(_CvMat), m_VnlMatrix(_VnlMatrix) { } /// /// cv mat to vnl matrix with known cv type /// template void ToVnlMatrix( vnl_matrix& vnlMat, const cv::Mat& mat ) { vnlMat.set_size( mat.rows, mat.cols ); for(int i=0; i( mat.at(i,j) ); } /// /// executes the Algorithm /// void Update() override { endoAccessCvMat( ToVnlMatrix, T, (*m_VnlMatrix), (*m_CvMat) ); } private: /// /// VnlMatrixFromCvMat input member variable /// const cv::Mat* m_CvMat; /// /// VnlMatrixFromCvMat output member variable /// vnl_matrix* m_VnlMatrix; }; } // namespace mitk #endif // mitkVnlMatrixFromCvMat_h diff --git a/Modules/OpenCVVideoSupport/Commands/mitkConvertGrayscaleOpenCVImageFilter.cpp b/Modules/OpenCVVideoSupport/Commands/mitkConvertGrayscaleOpenCVImageFilter.cpp index ade3c3d1e4..271a7b0f0c 100644 --- a/Modules/OpenCVVideoSupport/Commands/mitkConvertGrayscaleOpenCVImageFilter.cpp +++ b/Modules/OpenCVVideoSupport/Commands/mitkConvertGrayscaleOpenCVImageFilter.cpp @@ -1,37 +1,37 @@ /*=================================================================== The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center, Division of Medical and Biological Informatics. All rights reserved. This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See LICENSE.txt or http://www.mitk.org for details. ===================================================================*/ #include "mitkConvertGrayscaleOpenCVImageFilter.h" -#include "cv.h" +#include namespace mitk { bool ConvertGrayscaleOpenCVImageFilter::OnFilterImage( cv::Mat& image ) { // there is nothing to do if the image is grayscale already if (image.channels() == 1) { return true; } cv::Mat buffer; cv::cvtColor(image, buffer, CV_RGB2GRAY, 1); // content of buffer should now be the content of image buffer.copyTo(image); return true; } } // namespace mitk diff --git a/Modules/OpenCVVideoSupport/Commands/mitkGrabCutOpenCVImageFilter.cpp b/Modules/OpenCVVideoSupport/Commands/mitkGrabCutOpenCVImageFilter.cpp index 4870035ee1..db8b011fb9 100644 --- a/Modules/OpenCVVideoSupport/Commands/mitkGrabCutOpenCVImageFilter.cpp +++ b/Modules/OpenCVVideoSupport/Commands/mitkGrabCutOpenCVImageFilter.cpp @@ -1,414 +1,416 @@ /*=================================================================== The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center, Division of Medical and Biological Informatics. All rights reserved. This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See LICENSE.txt or http://www.mitk.org for details. ===================================================================*/ // mitk headers #include "mitkGrabCutOpenCVImageFilter.h" #include "mitkPointSet.h" // itk headers #include "itkMultiThreader.h" #include "itkFastMutexLock.h" #include "itkConditionVariable.h" +#include + // This is a magic number defined in "grabcut.cpp" of OpenCV. 
// GrabCut function crashes if less than this number of model // points are given. There must be at least as many model points // as components of the Gaussian Mixture Model. #define GMM_COMPONENTS_COUNT 5 mitk::GrabCutOpenCVImageFilter::GrabCutOpenCVImageFilter() : m_ModelPointsDilationSize(0), m_UseOnlyRegionAroundModelPoints(false), m_CurrentProcessImageNum(0), m_InputImageId(AbstractOpenCVImageFilter::INVALID_IMAGE_ID), m_ResultImageId(AbstractOpenCVImageFilter::INVALID_IMAGE_ID), m_ThreadId(-1), m_StopThread(false), m_MultiThreader(itk::MultiThreader::New()), m_WorkerBarrier(itk::ConditionVariable::New()), m_ImageMutex(itk::FastMutexLock::New()), m_ResultMutex(itk::FastMutexLock::New()), m_PointSetsMutex(itk::FastMutexLock::New()) { m_ThreadId = m_MultiThreader->SpawnThread(this->SegmentationWorker, this); } mitk::GrabCutOpenCVImageFilter::~GrabCutOpenCVImageFilter() { // terminate worker thread on destruction m_StopThread = true; m_WorkerBarrier->Broadcast(); if ( m_ThreadId >= 0) { m_MultiThreader->TerminateThread(m_ThreadId); } } bool mitk::GrabCutOpenCVImageFilter::OnFilterImage( cv::Mat& image ) { if ( image.empty() ) { MITK_WARN << "Filtering empty image?"; return false; } // make sure that the image is an rgb image as needed // by the GrabCut algorithm if (image.type() != CV_8UC3) { cv::Mat tmp = image.clone(); cv::cvtColor(tmp, image, CV_GRAY2RGB); } // set image as the current input image, guarded by // a mutex as the worker thread reads this image m_ImageMutex->Lock(); m_InputImage = image.clone(); m_InputImageId = this->GetCurrentImageId(); m_ImageMutex->Unlock(); // wake up the worker thread if there was an image set // and foreground model points are available if ( ! m_ForegroundPoints.empty()) { m_WorkerBarrier->Broadcast(); } return true; } void mitk::GrabCutOpenCVImageFilter::SetModelPoints(ModelPointsList foregroundPoints) { m_PointSetsMutex->Lock(); m_ForegroundPoints = foregroundPoints; m_PointSetsMutex->Unlock(); } void mitk::GrabCutOpenCVImageFilter::SetModelPoints(ModelPointsList foregroundPoints, ModelPointsList backgroundPoints) { m_PointSetsMutex->Lock(); m_BackgroundPoints = backgroundPoints; m_ForegroundPoints = foregroundPoints; m_PointSetsMutex->Unlock(); } void mitk::GrabCutOpenCVImageFilter::SetModelPoints(cv::Mat foregroundMask) { m_PointSetsMutex->Lock(); m_ForegroundPoints = this->ConvertMaskToModelPointsList(foregroundMask); m_PointSetsMutex->Unlock(); } void mitk::GrabCutOpenCVImageFilter::SetModelPoints(cv::Mat foregroundMask, cv::Mat backgroundMask) { m_PointSetsMutex->Lock(); m_ForegroundPoints = this->ConvertMaskToModelPointsList(foregroundMask); m_BackgroundPoints = this->ConvertMaskToModelPointsList(backgroundMask); m_PointSetsMutex->Unlock(); } void mitk::GrabCutOpenCVImageFilter::SetModelPointsDilationSize(int modelPointsDilationSize) { if ( modelPointsDilationSize < 0 ) { MITK_ERROR("AbstractOpenCVImageFilter")("GrabCutOpenCVImageFilter") << "Model points dilation size must not be smaller than zero."; mitkThrow() << "Model points dilation size must not be smaller than zero."; } m_ModelPointsDilationSize = modelPointsDilationSize; } void mitk::GrabCutOpenCVImageFilter::SetUseOnlyRegionAroundModelPoints(unsigned int additionalWidth) { m_UseOnlyRegionAroundModelPoints = true; m_AdditionalWidth = additionalWidth; } void mitk::GrabCutOpenCVImageFilter::SetUseFullImage() { m_UseOnlyRegionAroundModelPoints = false; } cv::Rect mitk::GrabCutOpenCVImageFilter::GetRegionAroundModelPoints() { return m_BoundingBox; } int
mitk::GrabCutOpenCVImageFilter::GetResultImageId() { return m_ResultImageId; } cv::Mat mitk::GrabCutOpenCVImageFilter::GetResultMask() { cv::Mat result; m_ResultMutex->Lock(); result = m_ResultMask.clone(); m_ResultMutex->Unlock(); return result; } std::vector mitk::GrabCutOpenCVImageFilter::GetResultContours() { std::vector > cvContours; std::vector hierarchy; std::vector contourPoints; cv::Mat resultMask = this->GetResultMask(); if (resultMask.empty()) { return contourPoints; } cv::findContours(resultMask, cvContours, hierarchy, cv::RETR_LIST, cv::CHAIN_APPROX_NONE); // convert cvContours to vector of ModelPointsLists for ( unsigned int i = 0; i < cvContours.size(); ++i ) { mitk::GrabCutOpenCVImageFilter::ModelPointsList curContourPoints; for ( auto it = cvContours[i].begin(); it != cvContours[i].end(); ++it) { itk::Index<2> index; index.SetElement(0, it->x); index.SetElement(1, it->y); curContourPoints.push_back(index); } contourPoints.push_back(curContourPoints); } return contourPoints; } mitk::GrabCutOpenCVImageFilter::ModelPointsList mitk::GrabCutOpenCVImageFilter::GetResultContourWithPixel(itk::Index<2> pixelIndex) { cv::Mat mask = this->GetResultMask(); if (mask.empty()) { return mitk::GrabCutOpenCVImageFilter::ModelPointsList(); } // return empty model point list if given pixel is outside the image borders if (pixelIndex.GetElement(0) < 0 || pixelIndex.GetElement(0) >= mask.size().height || pixelIndex.GetElement(1) < 0 || pixelIndex.GetElement(1) >= mask.size().width) { MITK_WARN("AbstractOpenCVImageFilter")("GrabCutOpenCVImageFilter") << "Given pixel index ("<< pixelIndex.GetElement(0) << ", " << pixelIndex.GetElement(1) << ") is outside the image (" << mask.size().height << ", " << mask.size().width << ")."; return mitk::GrabCutOpenCVImageFilter::ModelPointsList(); } // create a mask where the segmentation around the given pixel index is // set (done by flood filling the result mask using the pixel as seed) cv::floodFill(mask, cv::Point(pixelIndex.GetElement(0), pixelIndex.GetElement(1)), 5); cv::Mat foregroundMask; cv::compare(mask, 5, foregroundMask, cv::CMP_EQ); // find the contour on the flood filled image (there can be only one now) std::vector > cvContours; std::vector hierarchy; cv::findContours(foregroundMask, cvContours, hierarchy, cv::RETR_LIST, cv::CHAIN_APPROX_NONE); ModelPointsList contourPoints; // convert cvContours to ModelPointsList for ( auto it = cvContours[0].begin(); it != cvContours[0].end(); ++it) { itk::Index<2> index; index.SetElement(0, it->x); index.SetElement(1, it->y); contourPoints.push_back(index); } return contourPoints; } cv::Mat mitk::GrabCutOpenCVImageFilter::GetMaskFromPointSets() { // initialize mask with values of propably background cv::Mat mask(m_InputImage.size().height, m_InputImage.size().width, CV_8UC1, cv::GC_PR_BGD); // get foreground and background points (guarded by mutex) m_PointSetsMutex->Lock(); ModelPointsList pointsLists[2] = {ModelPointsList(m_ForegroundPoints), ModelPointsList(m_BackgroundPoints)}; m_PointSetsMutex->Unlock(); // define values for foreground and background pixels unsigned int pixelValues[2] = {cv::GC_FGD, cv::GC_BGD}; for (unsigned int n = 0; n < 2; ++n) { for (auto it = pointsLists[n].begin(); it != pointsLists[n].end(); ++it) { // set pixels around current pixel to the same value (size of this // area is specified by ModelPointsDilationSize) for ( int i = -m_ModelPointsDilationSize; i <= m_ModelPointsDilationSize; ++i ) { for ( int j = -m_ModelPointsDilationSize; j <= m_ModelPointsDilationSize; ++j) { int x 
= it->GetElement(1) + i; int y = it->GetElement(0) + j; if ( x >= 0 && y >= 0 && x < mask.cols && y < mask.rows) { mask.at(x, y) = pixelValues[n]; } } } } } return mask; } cv::Rect mitk::GrabCutOpenCVImageFilter::GetBoundingRectFromMask(cv::Mat mask) { cv::Mat nonPropablyBackgroundMask, modelPoints; cv::compare(mask, cv::GC_PR_BGD, nonPropablyBackgroundMask, cv::CMP_NE); cv::findNonZero(nonPropablyBackgroundMask, modelPoints); if (modelPoints.empty()) { MITK_WARN("AbstractOpenCVImageFilter")("GrabCutOpenCVImageFilter") << "Cannot find any foreground points. Returning full image size as bounding rectangle."; return cv::Rect(0, 0, mask.rows, mask.cols); } // calculate bounding rect around the model points cv::Rect boundingRect = cv::boundingRect(modelPoints); // substract additional width to x and y value (and make sure that they aren't outside the image then) boundingRect.x = static_cast(boundingRect.x) > m_AdditionalWidth ? boundingRect.x - m_AdditionalWidth : 0; boundingRect.y = static_cast(boundingRect.y) > m_AdditionalWidth ? boundingRect.y - m_AdditionalWidth : 0; // add additional width to width of bounding rect (twice as x value was moved before) // and make sure that the bounding rect will stay inside the image borders) if ( static_cast(boundingRect.x + boundingRect.width) + 2 * m_AdditionalWidth < static_cast(mask.size().width) ) { boundingRect.width += 2 * m_AdditionalWidth; } else { boundingRect.width = mask.size().width - boundingRect.x - 1; } // add additional width to height of bounding rect (twice as y value was moved before) // and make sure that the bounding rect will stay inside the image borders) if ( static_cast(boundingRect.y + boundingRect.height) + 2 * m_AdditionalWidth < static_cast(mask.size().height) ) { boundingRect.height += 2 * m_AdditionalWidth; } else { boundingRect.height = mask.size().height - boundingRect.y - 1; } assert(boundingRect.x + boundingRect.width < mask.size().width); assert(boundingRect.y + boundingRect.height < mask.size().height); return boundingRect; } cv::Mat mitk::GrabCutOpenCVImageFilter::RunSegmentation(cv::Mat input, cv::Mat mask) { // test if foreground and background models are large enough for GrabCut cv::Mat compareFgResult, compareBgResult; cv::compare(mask, cv::GC_FGD, compareFgResult, cv::CMP_EQ); cv::compare(mask, cv::GC_PR_BGD, compareBgResult, cv::CMP_EQ); if ( cv::countNonZero(compareFgResult) < GMM_COMPONENTS_COUNT || cv::countNonZero(compareBgResult) < GMM_COMPONENTS_COUNT) { // return result mask with no pixels set to foreground return cv::Mat::zeros(mask.size(), mask.type()); } // do the actual grab cut segmentation (initialized with the mask) cv::Mat bgdModel, fgdModel; cv::grabCut(input, mask, cv::Rect(), bgdModel, fgdModel, 1, cv::GC_INIT_WITH_MASK); // set propably foreground pixels to white on result mask cv::Mat result; cv::compare(mask, cv::GC_PR_FGD, result, cv::CMP_EQ); // set foreground pixels to white on result mask cv::Mat foregroundMat; cv::compare(mask, cv::GC_FGD, foregroundMat, cv::CMP_EQ); foregroundMat.copyTo(result, foregroundMat); return result; // now the result mask can be returned } mitk::GrabCutOpenCVImageFilter::ModelPointsList mitk::GrabCutOpenCVImageFilter::ConvertMaskToModelPointsList(cv::Mat mask) { cv::Mat points; cv::findNonZero(mask, points); // push extracted points into a vector of itk indices ModelPointsList pointsVector; for ( size_t n = 0; n < points.total(); ++n) { itk::Index<2> index; index.SetElement(0, points.at(n).x); index.SetElement(1, points.at(n).y); 
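// store the converted point; the OpenCV x/y coordinates become components 0 and 1 of the itk::Index<2>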
pointsVector.push_back(index); } return pointsVector; } ITK_THREAD_RETURN_TYPE mitk::GrabCutOpenCVImageFilter::SegmentationWorker(void* pInfoStruct) { // extract this pointer from thread info structure struct itk::MultiThreader::ThreadInfoStruct * pInfo = (struct itk::MultiThreader::ThreadInfoStruct*)pInfoStruct; mitk::GrabCutOpenCVImageFilter* thisObject = static_cast(pInfo->UserData); itk::SimpleMutexLock mutex; mutex.Lock(); while (true) { if (thisObject->m_StopThread) { break; } thisObject->m_WorkerBarrier->Wait(&mutex); if (thisObject->m_StopThread) { break; } thisObject->m_ImageMutex->Lock(); cv::Mat image = thisObject->m_InputImage.clone(); int inputImageId = thisObject->m_InputImageId; thisObject->m_ImageMutex->Unlock(); cv::Mat mask = thisObject->GetMaskFromPointSets(); cv::Mat result; if (thisObject->m_UseOnlyRegionAroundModelPoints) { result = cv::Mat(mask.rows, mask.cols, mask.type(), 0.0); thisObject->m_BoundingBox = thisObject->GetBoundingRectFromMask(mask); thisObject->RunSegmentation(image(thisObject->m_BoundingBox), mask(thisObject->m_BoundingBox)).copyTo(result(thisObject->m_BoundingBox)); } else { result = thisObject->RunSegmentation(image, mask); } // save result to member attribute thisObject->m_ResultMutex->Lock(); thisObject->m_ResultMask = result; thisObject->m_ResultImageId = inputImageId; thisObject->m_ResultMutex->Unlock(); } mutex.Unlock(); return ITK_THREAD_RETURN_VALUE; } diff --git a/Modules/OpenCVVideoSupport/Commands/mitkGrabCutOpenCVImageFilter.h b/Modules/OpenCVVideoSupport/Commands/mitkGrabCutOpenCVImageFilter.h index 17963c5785..26161cce9a 100644 --- a/Modules/OpenCVVideoSupport/Commands/mitkGrabCutOpenCVImageFilter.h +++ b/Modules/OpenCVVideoSupport/Commands/mitkGrabCutOpenCVImageFilter.h @@ -1,289 +1,289 @@ /*=================================================================== The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center, Division of Medical and Biological Informatics. All rights reserved. This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See LICENSE.txt or http://www.mitk.org for details. ===================================================================*/ #ifndef MITKGRABCUTOPENCVIMAGEFILTER_H #define MITKGRABCUTOPENCVIMAGEFILTER_H // mitk headers #include "mitkAbstractOpenCVImageFilter.h" #include "mitkVector.h" // itk headers #include "itkObjectFactory.h" #include "itkMutexLock.h" // opencv headers -#include "cv.h" +#include namespace itk { template class Index; template class SmartPointer; class MultiThreader; class ConditionVariable; class FastMutexLock; } namespace mitk { class PointSet; /** * \brief Makes the OpenCV GrabCut filter available as OpenCVImageFilter. * * Image filtering is done asynchronously by using a worker thread, as GrabCut segmentation * can take up to a few seconds. Calling the mitk::GrabCutOpenCVImageFilter::OnFilterImage() * method just sets the input image and wakes up the worker thread. It is not guaranteed * that every image gets segmented. If multiple new images were set before a segmentation * was finished, only the last new image gets segmented afterwards. * * At least foreground model points have to be set by * mitk::GrabCutOpenCVImageFilter::SetModelPoints() before a segmentation can be performed. * The worker thread will not be woken up before any model points have been set. * * When a new segmentation is ready, mitk::GrabCutOpenCVImageFilter::GetCurrentImageId() * returns a new image id.
The segmentation can be got then by calling * mitk::GrabCutOpenCVImageFilter::GetResultMask(), * mitk::GrabCutOpenCVImageFilter::GetResultContours() or * mitk::GrabCutOpenCVImageFilter::GetResultContourWithPixel(). */ class MITKOPENCVVIDEOSUPPORT_EXPORT GrabCutOpenCVImageFilter : public AbstractOpenCVImageFilter { public: /** \brief List holding image indices of the model points. */ typedef std::vector > ModelPointsList; mitkClassMacro(GrabCutOpenCVImageFilter, AbstractOpenCVImageFilter) itkFactorylessNewMacro(Self) itkCloneMacro(Self) GrabCutOpenCVImageFilter(); ~GrabCutOpenCVImageFilter() override; /** * \brief Implementation of the virtual image filtering method. * The input image is copied to a member attribute, but the actual filtering is not done * in this method. Instead a worker thread is waken up every time this method is called, * if foreground model points were set before. * * \param OpenCV image to be segmentated * \return false if an empty image was set, true otherwise */ bool OnFilterImage( cv::Mat& image ) override; /** * \brief Sets a list of image indices as foreground model points. * \param foregroundPoints List of image indices which definitely belong to the foreground. */ void SetModelPoints(ModelPointsList foregroundPoints); /** * \brief Sets a list of image indices as foreground and another list as background model points. * \param foregroundPoints List of image indices which definitely belong to the foreground. * \param backgroundPoints List of image indices which definitely belong to the background. */ void SetModelPoints(ModelPointsList foregroundPoints, ModelPointsList backgroundPoints); /** * \brief Sets a mask where every non-zero pixel is treated as a foreground model pixel. */ void SetModelPoints(cv::Mat foregroundMask); /** * \brief Sets masks specifying foreground and background points. * \param foregroundMask every non-zero pixel is treated as a foreground model pixel * \param backgroundMask every non-zero pixel is treated as a background model pixel */ void SetModelPoints(cv::Mat foregroundMask, cv::Mat backgroundMask); /** * \brief Set a size of which each model point is dilated before image filtering. * The more color information of the foreground object the GrabCut filter gets the better * the result will be. Therefore the foreground pixels can be dilated before filtering. The * caller is responsible for setting a dilation size so that no foreground model pixels will * be indeed part of the background after dilation. * * Dilation is done to the background model pixles as well, if there are any set for the * filter. * * \param modelPointsDilationSize how many pixels are added in every direction, 0 sets back to no dilation */ void SetModelPointsDilationSize(int modelPointsDilationSize); /** * \brief Use only the region around the foreground model points for the segmentation. * * This is mainly for reasons of segmentation speed and has the drawback that the foreground * model points (plus the given additional border) have to cover the whole foreground object. * * The segmentation filter can be set back to using the whole image by calling * mitk::GrabCutOpenCVImageFilter::SetUseFullImage(). * * \param additionalBorder size of the border around the foreground points which will be used for segmentation, too */ void SetUseOnlyRegionAroundModelPoints(unsigned int additionalBorder); /** * \brief The full image is used as input for the segmentation. 
* This method sets the behaviour back to the default behaviour in case * mitk::GrabCutOpenCVImageFilter::SetUseOnlyRegionAroundModelPoints() was * called before. */ void SetUseFullImage(); /** * \brief Getter for the rectangle used for the area of segmentation. * See mitk::GrabCutOpenCVImageFilter::SetUseOnlyRegionAroundModelPoints(). * This method is mainly for debugging purposes and may be removed in * the future. */ cv::Rect GetRegionAroundModelPoints(); /** * \brief Getter for an ascending id of the current result image. * The id will be increased for every segmentation that is produced by the worker thread. * It can be used to determine if a new segmentation was produced since the last time a * segmentation was got from this filter. * * int lastResultImageId = grabCutFilter->GetResultImageId(); * // do something * if ( lastResultImageId != grabCutFilter->GetResultImageId() ) * // get new segmentation */ int GetResultImageId(); /** * \brief Getter for the result mask of the current segmentation. * The result of this method is not necessarily consistent with the result of * mitk::GrabCutOpenCVImageFilter::GetResultContours() if they are called afterwards. * The segmentation may have changed in the meantime. One should decide if he needs * a mask or a contour or convert one into the other on his own. * \return image of the size of the input image where all pixels segmented as foreground are non-zero */ cv::Mat GetResultMask(); /** * \brief Getter for the contours of the current segmentation. * * A segmentation can consist of multiple regions therefore a list of contours * is returned. If one needs only one specific region he can call * mitk::GrabCutOpenCVImageFilter::GetResultContourWithPixel(). * * This result of this method is not necessarily consistent with the result of * mitk::GrabCutOpenCVImageFilter::GetResultContours() if they are called afterwards. * The segmentation may have changed in the meantime. One should decide if he needs * a mask or a contour or convert one into the other on his own. * * \return List containing lists of pixel indices for every contour. */ std::vector GetResultContours(); /** * \brief Getter for one specific contour of the current segmentation. * * Can be used if only one (of possible multiple contours) is needed. A pixel index * must be given to select from the contours. This could be one of the foreground * model pixels for example. If other criteria are needed to distinguish the contours * mitk::GrabCutOpenCVImageFilter::GetResultContours() can be used instead and therefore * contour selection can be done by hand then. * * This result of this method is not necessarily consistent with the result of * mitk::GrabCutOpenCVImageFilter::GetResultContours() if they are called afterwards. * The segmentation may have changed in the meantime. One should decide if he needs * a mask or a contour or convert one into the other on his own. * * \param pixelIndex index of a pixel which lies inside the contour * \return list of pixel indices for the selected contour */ ModelPointsList GetResultContourWithPixel(itk::Index<2> pixelIndex); protected: /** \brief Creates an image mask for GrabCut algorithm by using the foreground and background point sets. * Background and foreground points will be dilated by the size set by * mitk::GrabCutOpenCVImageFilter::SetModelPointsDilationSize(). */ cv::Mat GetMaskFromPointSets(); /** * \brief Creates a bounding box around all pixels which aren't propably background. 
* The bounding box is widened as specified by * mitk::GrabCutOpenCVImageFilter::SetUseOnlyRegionAroundModelPoints(). */ cv::Rect GetBoundingRectFromMask(cv::Mat mask); /** * \brief Performs a GrabCut segmentation of the given input image. * \param input image on which the segmentation will be performed * \param mask foreground and background pixels used as basis for segmentation * \return mask with every pixel of the segmented foreground object set non-zero */ cv::Mat RunSegmentation(cv::Mat input, cv::Mat mask); /** * \brief Creates a list of points from every non-zero pixel of the given mask. */ ModelPointsList ConvertMaskToModelPointsList(cv::Mat mask); int m_ModelPointsDilationSize; bool m_UseOnlyRegionAroundModelPoints; unsigned int m_AdditionalWidth; cv::Rect m_BoundingBox; ModelPointsList m_ForegroundPoints; ModelPointsList m_BackgroundPoints; cv::Mat m_InputImage; cv::Mat m_GrabCutMask; cv::Mat m_ResultMask; unsigned int m_CurrentProcessImageNum; /** \brief id of the image currently set as m_InputImage */ int m_InputImageId; /** \brief id of the image which segmentation result is currently present in m_ResultMask */ int m_ResultImageId; private: /** * \brief Worker thread for doing the segmentation. * It blocks every time a image was segmented and will be waken up again by * the mitk::GrabCutOpenCVImageFilter::OnFilterImage() method. * * \param pInfoStruct pointer to the GrabCutOpenCVImageFilter object * \return */ static ITK_THREAD_RETURN_TYPE SegmentationWorker(void* pInfoStruct); int m_ThreadId; /** \brief worker thread will terminate after the next wakeup if set to true */ bool m_StopThread; itk::SmartPointer m_MultiThreader; itk::SmartPointer m_WorkerBarrier; /** \brief mutex for guarding m_InputImage and m_InputImageId */ itk::SmartPointer m_ImageMutex; /** \brief mutex for guarding m_ResultMask and m_ResultImageId */ itk::SmartPointer m_ResultMutex; /** \brief mutex for guarding m_ForegroundPoints and m_BackgroundPoints */ itk::SmartPointer m_PointSetsMutex; }; } // namespace mitk #endif // MITKGRABCUTOPENCVIMAGEFILTER_H diff --git a/Modules/OpenCVVideoSupport/Testing/mitkBasicCombinationOpenCVImageFilterTest.cpp b/Modules/OpenCVVideoSupport/Testing/mitkBasicCombinationOpenCVImageFilterTest.cpp index e7853b4336..b7784f9733 100644 --- a/Modules/OpenCVVideoSupport/Testing/mitkBasicCombinationOpenCVImageFilterTest.cpp +++ b/Modules/OpenCVVideoSupport/Testing/mitkBasicCombinationOpenCVImageFilterTest.cpp @@ -1,93 +1,92 @@ /*=================================================================== The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center, Division of Medical and Biological Informatics. All rights reserved. This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See LICENSE.txt or http://www.mitk.org for details. 
===================================================================*/ #include "mitkBasicCombinationOpenCVImageFilter.h" #include "mitkConvertGrayscaleOpenCVImageFilter.h" #include "mitkCropOpenCVImageFilter.h" #include -#include -#include +#include +#include static bool ImagesAreEqualInGray(const cv::Mat& img1, const cv::Mat& img2) { cv::Mat grayImg1; cv::Mat grayImg2; cv::cvtColor(img1, grayImg1, CV_RGB2GRAY, 1); cv::cvtColor(img2, grayImg2, CV_RGB2GRAY, 1); return cv::countNonZero(grayImg1 != grayImg2) == 0; } static void ConvertTestLoadedImage(std::string mitkImagePath) { - cv::Mat image = cvLoadImage(mitkImagePath.c_str()); + cv::Mat image = cv::imread(mitkImagePath.c_str()); cv::Mat compareImg = image.clone(); - mitk::BasicCombinationOpenCVImageFilter::Pointer combinationFilter = mitk::BasicCombinationOpenCVImageFilter::New(); MITK_TEST_CONDITION(combinationFilter->FilterImage(image), "Filtering with empty filter list is ok."); MITK_TEST_CONDITION(ImagesAreEqualInGray(image, compareImg), "Image must not be changed after filtering with empty filter list."); mitk::ConvertGrayscaleOpenCVImageFilter::Pointer grayscaleFilter = mitk::ConvertGrayscaleOpenCVImageFilter::New(); combinationFilter->PushFilter(grayscaleFilter.GetPointer()); MITK_TEST_CONDITION(combinationFilter->FilterImage(image), "Filtering with grayscale filter should be ok."); MITK_TEST_CONDITION(image.channels() == 1, "Image must not have more than one channel after grayscale conversion."); image.release(); image = compareImg.clone(); mitk::CropOpenCVImageFilter::Pointer cropFilter = mitk::CropOpenCVImageFilter::New(); combinationFilter->PushFilter(cropFilter.GetPointer()); MITK_TEST_CONDITION( ! combinationFilter->FilterImage(image), "Filter function must return false if an filter returns false."); MITK_TEST_CONDITION(combinationFilter->PopFilter() == cropFilter, "Last added filter is equal to returned filter."); image.release(); image = compareImg.clone(); MITK_TEST_CONDITION(combinationFilter->FilterImage(image), "Filter function should return true again after removing incorrect filter."); MITK_TEST_CONDITION(combinationFilter->RemoveFilter(grayscaleFilter.GetPointer()), "Filter must be found."); image.release(); image = compareImg.clone(); MITK_TEST_CONDITION(combinationFilter->FilterImage(image), "Filter function should still return true."); MITK_TEST_CONDITION(ImagesAreEqualInGray(image, compareImg), "Image must not be changed after all filters were removed."); } /**Documentation * test for the class "ConvertGrayscaleOpenCVImageFilter". */ int mitkBasicCombinationOpenCVImageFilterTest(int argc, char* argv[]) { MITK_TEST_BEGIN("BasicCombinationOpenCVImageFilter") MITK_TEST_CONDITION_REQUIRED(argc == 2, "Two parameters are needed for this test.") ConvertTestLoadedImage(argv[1]); MITK_TEST_END(); // always end with this! } diff --git a/Modules/OpenCVVideoSupport/Testing/mitkConvertGrayscaleOpenCVImageFilterTest.cpp b/Modules/OpenCVVideoSupport/Testing/mitkConvertGrayscaleOpenCVImageFilterTest.cpp index f98f7c46ec..c50de232e5 100644 --- a/Modules/OpenCVVideoSupport/Testing/mitkConvertGrayscaleOpenCVImageFilterTest.cpp +++ b/Modules/OpenCVVideoSupport/Testing/mitkConvertGrayscaleOpenCVImageFilterTest.cpp @@ -1,57 +1,57 @@ /*=================================================================== The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center, Division of Medical and Biological Informatics. All rights reserved. 
This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See LICENSE.txt or http://www.mitk.org for details. ===================================================================*/ #include "mitkConvertGrayscaleOpenCVImageFilter.h" #include -#include -#include +#include +#include static void ConvertTestLoadedImage(std::string mitkImagePath, std::string mitkGrayscaleImagePath) { - cv::Mat image = cvLoadImage(mitkImagePath.c_str()); - cv::Mat compareImg = cvLoadImage(mitkGrayscaleImagePath.c_str()); + cv::Mat image = cv::imread(mitkImagePath.c_str()); + cv::Mat compareImg = cv::imread(mitkGrayscaleImagePath.c_str()); // directly convert the image for comparison cv::Mat comparisonImg; cv::cvtColor(compareImg, comparisonImg, CV_RGB2GRAY, 1); mitk::ConvertGrayscaleOpenCVImageFilter::Pointer grayscaleFilter = mitk::ConvertGrayscaleOpenCVImageFilter::New(); MITK_TEST_CONDITION(grayscaleFilter->FilterImage(image), "Filtering should return true for success."); MITK_TEST_CONDITION(image.channels() == 1, "Image must not have more than one channel after grayscale conversion."); MITK_TEST_CONDITION(cv::countNonZero(image != comparisonImg) == 0, "All pixel values must be the same between the two converted images."); MITK_TEST_CONDITION_REQUIRED(grayscaleFilter->FilterImage(image), "Image conversion should be no problem if image is a grayscale image already.") } /**Documentation * test for the class "ConvertGrayscaleOpenCVImageFilter". */ int mitkConvertGrayscaleOpenCVImageFilterTest(int argc, char* argv[]) { MITK_TEST_BEGIN("ConvertGrayscaleOpenCVImageFilter") MITK_TEST_CONDITION_REQUIRED(argc > 2, "At least three parameters needed for this test.") ConvertTestLoadedImage(argv[1], argv[2]); MITK_TEST_END(); // always end with this! } diff --git a/Modules/OpenCVVideoSupport/Testing/mitkCropOpenCVImageFilterTest.cpp b/Modules/OpenCVVideoSupport/Testing/mitkCropOpenCVImageFilterTest.cpp index 6650c73f43..1338c70869 100644 --- a/Modules/OpenCVVideoSupport/Testing/mitkCropOpenCVImageFilterTest.cpp +++ b/Modules/OpenCVVideoSupport/Testing/mitkCropOpenCVImageFilterTest.cpp @@ -1,101 +1,101 @@ /*=================================================================== The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center, Division of Medical and Biological Informatics. All rights reserved. This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See LICENSE.txt or http://www.mitk.org for details. 
===================================================================*/ #include "mitkCropOpenCVImageFilter.h" #include -#include -#include +#include +#include static bool ImagesAreEqualInGray(const cv::Mat& img1, const cv::Mat& img2) { cv::Mat grayImg1; cv::Mat grayImg2; cv::cvtColor(img1, grayImg1, CV_RGB2GRAY, 1); cv::cvtColor(img2, grayImg2, CV_RGB2GRAY, 1); return cv::countNonZero(grayImg1 != grayImg2) == 0; } static void CropTestLoadedImage(std::string mitkImagePath, std::string mitkCroppedImagePath) { - cv::Mat image = cvLoadImage(mitkImagePath.c_str()); - cv::Mat croppedImage = cvLoadImage(mitkCroppedImagePath.c_str()); + cv::Mat image = cv::imread(mitkImagePath.c_str()); + cv::Mat croppedImage = cv::imread(mitkCroppedImagePath.c_str()); MITK_INFO << mitkImagePath.c_str(); MITK_INFO << mitkCroppedImagePath.c_str(); mitk::CropOpenCVImageFilter::Pointer cropFilter = mitk::CropOpenCVImageFilter::New(); // try to crop without setting a region of interest cv::Mat testImage = image.clone(); MITK_TEST_CONDITION( ! cropFilter->FilterImage(testImage), "Filter function must return false if no region of interest is set."); MITK_TEST_CONDITION(ImagesAreEqualInGray(testImage, image), "Image should not be changed yet."); // set region of interst now and then try to crop again cv::Rect roi = cv::Rect(0,0, testImage.cols, testImage.rows); cropFilter->SetCropRegion(roi); MITK_TEST_CONDITION(cropFilter->FilterImage(testImage), "Filter function should return successfully."); MITK_TEST_CONDITION(ImagesAreEqualInGray(testImage, image), "Image should not be changed if cropping with a roi of the whole image."); // test if filter corrects negative roi position cv::Rect roiWrong = cv::Rect(-1,-1, 2, 2); roi = cv::Rect(0,0,2,2); cropFilter->SetCropRegion(roiWrong); MITK_TEST_CONDITION(cropFilter->FilterImage(testImage), "Filter function should return successfully."); MITK_TEST_CONDITION(ImagesAreEqualInGray(testImage, image(roi)), "Image should be equal to directly cropped image whith correct roi."); // test whith "normal" roi testImage = image.clone(); roi = cv::Rect( 150,100,100,100 ); cropFilter->SetCropRegion(roi); MITK_TEST_CONDITION(cropFilter->FilterImage(testImage), "Filter function should return successfully."); MITK_TEST_CONDITION(ImagesAreEqualInGray(testImage, croppedImage), "Image should be equal to cropped image (loaded from data directory)."); // test with not correctable roi roiWrong = cv::Rect( 5,5,-1,-1 ); MITK_TEST_FOR_EXCEPTION(mitk::Exception, cropFilter->SetCropRegion(roiWrong)); // test with rois where the top left corner is outside the image boundaries roiWrong = cv::Rect( testImage.cols,0,1,1 ); cropFilter->SetCropRegion(roiWrong); MITK_TEST_CONDITION(!cropFilter->FilterImage(testImage), "Filter function should return unsuccessfully if top left corner is outside image boundary (cols)."); roiWrong = cv::Rect( 0,testImage.rows,1,1 ); cropFilter->SetCropRegion(roiWrong); MITK_TEST_CONDITION(!cropFilter->FilterImage(testImage), "Filter function should return unsuccessfully if top left corner is outside image boundary (rows)."); roiWrong = cv::Rect( testImage.cols,testImage.rows,1,1 ); cropFilter->SetCropRegion(roiWrong); MITK_TEST_CONDITION(!cropFilter->FilterImage(testImage), "Filter function should return unsuccessfully if top left corner is outside image boundary (cols+rows)."); } /**Documentation * test for the class "CropOpenCVImageFilter". 
*/ int mitkCropOpenCVImageFilterTest(int argc, char* argv[]) { MITK_TEST_BEGIN("CropOpenCVImageFilter") MITK_TEST_CONDITION_REQUIRED(argc > 2, "At least three parameters needed for this test."); CropTestLoadedImage(argv[1], argv[2]); MITK_TEST_END(); // always end with this! } diff --git a/Modules/OpenCVVideoSupport/Testing/mitkGrabCutOpenCVImageFilterTest.cpp b/Modules/OpenCVVideoSupport/Testing/mitkGrabCutOpenCVImageFilterTest.cpp index 5329d25798..0e18bee408 100644 --- a/Modules/OpenCVVideoSupport/Testing/mitkGrabCutOpenCVImageFilterTest.cpp +++ b/Modules/OpenCVVideoSupport/Testing/mitkGrabCutOpenCVImageFilterTest.cpp @@ -1,181 +1,181 @@ /*=================================================================== The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center, Division of Medical and Biological Informatics. All rights reserved. This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See LICENSE.txt or http://www.mitk.org for details. ===================================================================*/ #include "mitkGrabCutOpenCVImageFilter.h" #include #include "itkIndex.h" #include -#include -#include +#include +#include #include "mitkOpenCVToMitkImageFilter.h" static void GrabCutTestLoadedImage(std::string imagePath, std::string maskPath, std::string resultMaskPath) { // load test images - cv::Mat image = cvLoadImage(imagePath.c_str()); - cv::Mat maskImage = cvLoadImage(maskPath.c_str()); - cv::Mat resultMaskImage = cvLoadImage(resultMaskPath.c_str()); + cv::Mat image = cv::imread(imagePath.c_str()); + cv::Mat maskImage = cv::imread(maskPath.c_str()); + cv::Mat resultMaskImage = cv::imread(resultMaskPath.c_str()); // make sure that the loaded mask is a gray scale image cv::Mat maskImageGray; cv::cvtColor(maskImage, maskImageGray, CV_RGB2GRAY, 1); // make sure that the loaded reference image is a gray scale image cv::Mat resultMaskImageGray; cv::cvtColor(resultMaskImage, resultMaskImageGray, CV_RGB2GRAY, 1); // extract foreground points from loaded mask image cv::Mat foregroundMask, foregroundPoints; cv::compare(maskImageGray, 250, foregroundMask, cv::CMP_GE); cv::findNonZero(foregroundMask, foregroundPoints); // push extracted forground points into a vector of itk indices std::vector > foregroundPointsVector; for ( size_t n = 0; n < foregroundPoints.total(); ++n) { itk::Index<2> index; index.SetElement(0, foregroundPoints.at(n).x); index.SetElement(1, foregroundPoints.at(n).y); foregroundPointsVector.push_back(index); } mitk::GrabCutOpenCVImageFilter::Pointer grabCutFilter = mitk::GrabCutOpenCVImageFilter::New(); int currentImageId = 0; // test filtering with image set but no model points set { MITK_TEST_CONDITION(grabCutFilter->FilterImage(image), "Filtering should return true for sucess.") cv::Mat resultMask = grabCutFilter->GetResultMask(); MITK_TEST_CONDITION(resultMask.empty(), "Result mask must be empty when no foreground points are set.") } // test filtering with very little model points set { std::vector > littleForegroundPointsSet(foregroundPointsVector.begin(), foregroundPointsVector.begin()+3); grabCutFilter->SetModelPoints(littleForegroundPointsSet); grabCutFilter->FilterImage(image, ++currentImageId); cv::Mat resultMask; // wait up to ten seconds for the segmentation to finish for (unsigned int n = 0; n < 100; ++n) { if ( grabCutFilter->GetResultImageId() == currentImageId ) { resultMask = grabCutFilter->GetResultMask(); break; } 
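The GrabCut test above builds its list of foreground model points by thresholding a mask with cv::compare and collecting the coordinates with cv::findNonZero. A small stand-alone sketch of that mask-to-point-list pattern, using a synthetic mask instead of the test data:

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>

int main()
{
  // Synthetic 8-bit mask with a bright square in the middle.
  cv::Mat mask = cv::Mat::zeros(100, 100, CV_8UC1);
  cv::rectangle(mask, cv::Rect(40, 40, 20, 20), cv::Scalar(255), -1);

  // Keep only pixels with value >= 250 ("sure foreground" in the test).
  cv::Mat foregroundMask;
  cv::compare(mask, 250, foregroundMask, cv::CMP_GE);

  // findNonZero collects the coordinates of all non-zero pixels as cv::Point entries.
  cv::Mat foregroundPoints;
  cv::findNonZero(foregroundMask, foregroundPoints);

  // boundingRect gives the region spanned by those points, which is what the
  // filter uses when restricted to the area around the model points.
  cv::Rect bounds = cv::boundingRect(foregroundPoints);
  std::cout << foregroundPoints.total() << " foreground points inside "
            << bounds.width << "x" << bounds.height << " at "
            << bounds.x << "," << bounds.y << std::endl;

  if (foregroundPoints.total() > 0)
  {
    // Individual points are read back the same way the test fills its index vector.
    cv::Point p = foregroundPoints.at<cv::Point>(0);
    std::cout << "first point: " << p.x << "," << p.y << std::endl;
  }
  return 0;
}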
itksys::SystemTools::Delay(100); } MITK_TEST_CONDITION(!resultMask.empty(), "Result mask must not be empty when little (" << littleForegroundPointsSet.size() <<") foreground points are set."); } // test filtering with image and model points set { grabCutFilter->SetModelPoints(foregroundPointsVector); MITK_TEST_CONDITION(grabCutFilter->FilterImage(image, ++currentImageId), "Filtering should return true for sucess.") cv::Mat resultMask; // wait up to ten seconds for the segmentation to finish for (unsigned int n = 0; n < 100; ++n) { if ( grabCutFilter->GetResultImageId() == currentImageId ) { resultMask = grabCutFilter->GetResultMask(); break; } itksys::SystemTools::Delay(100); } MITK_TEST_CONDITION( ! resultMask.empty() && cv::countNonZero(resultMask != resultMaskImageGray) == 0, "Filtered image should match reference image.") // adding new image should still work MITK_TEST_CONDITION(grabCutFilter->FilterImage(image), "Adding new image should still work.") } // test filtering with using only region around model points // (but with really big additional width so that whole image should be used again) { grabCutFilter->SetUseOnlyRegionAroundModelPoints(image.cols); grabCutFilter->FilterImage(image, ++currentImageId); cv::Mat resultMask; // wait up to ten seconds for the segmentation to finish for (unsigned int n = 0; n < 100; ++n) { if (grabCutFilter->GetResultImageId() == currentImageId) { resultMask = grabCutFilter->GetResultMask(); break; } itksys::SystemTools::Delay(100); } MITK_TEST_CONDITION( ! resultMask.empty() && cv::countNonZero(resultMask != resultMaskImageGray) == 0, "Filtered image with really big region used should match reference image again.") } // test filtering with using only region around model points { grabCutFilter->SetUseOnlyRegionAroundModelPoints(0); grabCutFilter->FilterImage(image, ++currentImageId); cv::Mat resultMask; // wait up to ten seconds for the segmentation to finish for (unsigned int n = 0; n < 100; ++n) { if (grabCutFilter->GetResultImageId() == currentImageId) { resultMask = grabCutFilter->GetResultMask(); break; } itksys::SystemTools::Delay(100); } cv::Mat nonPropablyBackgroundMask, modelPoints; cv::compare(maskImageGray, 250, nonPropablyBackgroundMask, cv::CMP_GE); cv::findNonZero(nonPropablyBackgroundMask, modelPoints); cv::Rect boundingRect = cv::boundingRect(modelPoints); cv::Mat compareMask(resultMask.rows, resultMask.cols, resultMask.type(), 0.0); resultMaskImageGray(boundingRect).copyTo(compareMask(boundingRect)); MITK_TEST_CONDITION( ! resultMask.empty() && cv::countNonZero(resultMask != compareMask) == 0, "Filtered image with region just around the model points used should match reference image again.") } } int mitkGrabCutOpenCVImageFilterTest(int argc, char* argv[]) { MITK_TEST_BEGIN("GrabCutOpenCVImageFilter") MITK_TEST_CONDITION_REQUIRED(argc == 4, "Test needs four command line parameters.") GrabCutTestLoadedImage(argv[1], argv[2], argv[3]); MITK_TEST_END() // always end with this! 
} diff --git a/Modules/OpenCVVideoSupport/Testing/mitkOpenCVToMitkImageFilterTest.cpp b/Modules/OpenCVVideoSupport/Testing/mitkOpenCVToMitkImageFilterTest.cpp index e783522a8b..267a9e3c4b 100644 --- a/Modules/OpenCVVideoSupport/Testing/mitkOpenCVToMitkImageFilterTest.cpp +++ b/Modules/OpenCVVideoSupport/Testing/mitkOpenCVToMitkImageFilterTest.cpp @@ -1,235 +1,235 @@ /*=================================================================== The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center, Division of Medical and Biological Informatics. All rights reserved. This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See LICENSE.txt or http://www.mitk.org for details. ===================================================================*/ // mitk includes #include "mitkOpenCVToMitkImageFilter.h" #include #include #include #include #include #include -#include -#include +#include +#include /** Documentation * * @brief Objects of this class can start an internal thread by calling the Start() method. * The thread is then updateing the tested object until the method Stop() is called. The class * can be used to test if a filter is thread-save by using multiple objects and let * them update simuntanously. */ class mitkTestOpenCVToMITKImageFilterThread : public itk::Object { public: mitkClassMacroItkParent(mitkTestOpenCVToMITKImageFilterThread, itk::Object); mitkNewMacro1Param(mitkTestOpenCVToMITKImageFilterThread, itk::MultiThreader::Pointer); int NumberOfMessages; protected: mitkTestOpenCVToMITKImageFilterThread(itk::MultiThreader::Pointer MultiThreader) { ThreadID = -1; NumberOfMessages = 0; m_MultiThreader = MultiThreader; } bool ThreadRunning; int ThreadID; cv::Mat currentImage; mitk::OpenCVToMitkImageFilter::Pointer m_testedFilter; itk::MultiThreader::Pointer m_MultiThreader; void DoSomething() { while (ThreadRunning) { m_testedFilter->SetOpenCVMat(currentImage); m_testedFilter->Update(); mitk::Image::Pointer result; result = m_testedFilter->GetOutput(); //std::cout << "Thread " << ThreadID << " Update Call" << std::endl; } } static ITK_THREAD_RETURN_TYPE ThreadStartTracking(void* pInfoStruct) { /* extract this pointer from Thread Info structure */ struct itk::MultiThreader::ThreadInfoStruct * pInfo = (struct itk::MultiThreader::ThreadInfoStruct*)pInfoStruct; if (pInfo == nullptr) { return ITK_THREAD_RETURN_VALUE; } if (pInfo->UserData == nullptr) { return ITK_THREAD_RETURN_VALUE; } mitkTestOpenCVToMITKImageFilterThread *thisthread = (mitkTestOpenCVToMITKImageFilterThread*)pInfo->UserData; if (thisthread != nullptr) thisthread->DoSomething(); return ITK_THREAD_RETURN_VALUE; } public: int Start() { ThreadRunning = true; this->ThreadID = m_MultiThreader->SpawnThread(this->ThreadStartTracking, this); return ThreadID; } void Stop() { ThreadRunning = false; } void setFilter(mitk::OpenCVToMitkImageFilter::Pointer testedFilter) { m_testedFilter = testedFilter; } void setImage(cv::Mat image) { currentImage = image; } }; class mitkOpenCVToMitkImageFilterTestSuite : public mitk::TestFixture { CPPUNIT_TEST_SUITE(mitkOpenCVToMitkImageFilterTestSuite); MITK_TEST(TestInitialization); MITK_TEST(TestThreadSafety); CPPUNIT_TEST_SUITE_END(); private: cv::Mat image1,image2,image3,image4,image5; mitk::OpenCVToMitkImageFilter::Pointer testFilter; public: void setUp() override { - image1 = cvLoadImage(GetTestDataFilePath("NrrdWritingTestImage.jpg").c_str()); - image2 = 
cvLoadImage(GetTestDataFilePath("Png2D-bw.png").c_str()); - image3 = cvLoadImage(GetTestDataFilePath("OpenCV-Data/CroppedImage.png").c_str()); - image4 = cvLoadImage(GetTestDataFilePath("OpenCV-Data/GrabCutMask.png").c_str()); - image5 = cvLoadImage(GetTestDataFilePath("OpenCV-Data/GrabCutOutput.png").c_str()); + image1 = cv::imread(GetTestDataFilePath("NrrdWritingTestImage.jpg").c_str()); + image2 = cv::imread(GetTestDataFilePath("Png2D-bw.png").c_str()); + image3 = cv::imread(GetTestDataFilePath("OpenCV-Data/CroppedImage.png").c_str()); + image4 = cv::imread(GetTestDataFilePath("OpenCV-Data/GrabCutMask.png").c_str()); + image5 = cv::imread(GetTestDataFilePath("OpenCV-Data/GrabCutOutput.png").c_str()); testFilter = mitk::OpenCVToMitkImageFilter::New(); //change input testFilter->SetOpenCVMat(image1); } void tearDown() override { } void TestInitialization() { testFilter = mitk::OpenCVToMitkImageFilter::New(); MITK_TEST_OUTPUT(<<"Testing Initialization"); } void TestThreadSafety() { std::vector threadIDs; std::vector threads; itk::MultiThreader::Pointer multiThreader = itk::MultiThreader::New(); MITK_TEST_OUTPUT(<< "Testing Thread Safety with 2 Threads"); //create two threads mitkTestOpenCVToMITKImageFilterThread::Pointer newThread1 = mitkTestOpenCVToMITKImageFilterThread::New(multiThreader); newThread1->setFilter(testFilter); newThread1->setImage(image1); threads.push_back(newThread1); mitkTestOpenCVToMITKImageFilterThread::Pointer newThread2 = mitkTestOpenCVToMITKImageFilterThread::New(multiThreader); newThread2->setFilter(testFilter); newThread2->setImage(image1); threads.push_back(newThread2); //start both unsigned int id1 = newThread1->Start(); unsigned int id2 = newThread2->Start(); int delay = 1; for (int i = 0; i < 10000; i++) { //std::cout << "Run " << i << std::endl; //wait a bit itksys::SystemTools::Delay(delay); //change input newThread1->setImage(image2); newThread1->setImage(image3); //wait a bit itksys::SystemTools::Delay(delay); //change input newThread1->setImage(image4); newThread1->setImage(image5); //wait a bit itksys::SystemTools::Delay(delay); //change input newThread1->setImage(image2); newThread1->setImage(image2); //wait a bit itksys::SystemTools::Delay(delay); //change input newThread1->setImage(image3); newThread1->setImage(image3); //wait a bit itksys::SystemTools::Delay(delay); } //stop both threads newThread1->Stop(); newThread2->Stop(); multiThreader->TerminateThread(id1); multiThreader->TerminateThread(id2); MITK_TEST_OUTPUT(<< "Testing Thread Safety with 2 Threads"); } private: }; MITK_TEST_SUITE_REGISTRATION(mitkOpenCVToMitkImageFilter) diff --git a/Modules/OpenCVVideoSupport/mitkImageToOpenCVImageFilter.cpp b/Modules/OpenCVVideoSupport/mitkImageToOpenCVImageFilter.cpp index 6364f81a99..aefda85d50 100644 --- a/Modules/OpenCVVideoSupport/mitkImageToOpenCVImageFilter.cpp +++ b/Modules/OpenCVVideoSupport/mitkImageToOpenCVImageFilter.cpp @@ -1,117 +1,117 @@ /*=================================================================== The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center, Division of Medical and Biological Informatics. All rights reserved. This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See LICENSE.txt or http://www.mitk.org for details. 
===================================================================*/ #include "mitkImageToOpenCVImageFilter.h" #include #include #include namespace mitk{ ImageToOpenCVImageFilter::ImageToOpenCVImageFilter() : m_OpenCVImage(nullptr) { m_sliceSelector = ImageSliceSelector::New(); } ImageToOpenCVImageFilter::~ImageToOpenCVImageFilter() { m_OpenCVImage = nullptr; } void ImageToOpenCVImageFilter::SetImage( Image* _Image ) { m_Image = _Image; } Image* ImageToOpenCVImageFilter::GetImage() { return m_Image.Lock(); } bool ImageToOpenCVImageFilter::CheckImage( Image* image ) { if(image == nullptr) { MITK_WARN << "MITK Image is 0"; return false; } if(image->GetDimension() > 2 ) { MITK_WARN << "Only 2D Images allowed"; return false; } return true; } IplImage* ImageToOpenCVImageFilter::GetOpenCVImage() { auto image = m_Image.Lock(); if(!this->CheckImage(image)) return nullptr; m_OpenCVImage = (nullptr); try { AccessFixedTypeByItk(image.GetPointer(), ItkImageProcessing, MITK_ACCESSBYITK_PIXEL_TYPES_SEQ // gray image (UCRGBPixelType)(USRGBPixelType)(FloatRGBPixelType)(DoubleRGBPixelType), // rgb image (2) // dimensions ) } catch (const AccessByItkException& e) { std::cout << "Caught exception [from AccessFixedTypeByItk]: \n" << e.what() << "\n"; return nullptr; } return m_OpenCVImage; } cv::Mat ImageToOpenCVImageFilter::GetOpenCVMat() { IplImage* img = this->GetOpenCVImage(); cv::Mat mat; if( img ) { // do not copy data, then release just the header - mat = cv::Mat ( img, false ); + mat = cv::cvarrToMat(img, false); cvReleaseImageHeader( &img ); } return mat; } template void ImageToOpenCVImageFilter::ItkImageProcessing( itk::Image* image ) { m_OpenCVImage = itk::OpenCVImageBridge::ITKImageToIplImage(image); } void ImageToOpenCVImageFilter::SetInputFromTimeSlice(Image::Pointer mitkImage, int timeStep, int slice) { m_sliceSelector->SetInput(mitkImage); m_sliceSelector->SetSliceNr(slice); m_sliceSelector->SetTimeNr(timeStep); m_sliceSelector->Update(); this->SetImage(m_sliceSelector->GetOutput()); } } // end namespace mitk diff --git a/Modules/OpenCVVideoSupport/mitkImageToOpenCVImageFilter.h b/Modules/OpenCVVideoSupport/mitkImageToOpenCVImageFilter.h index 69372646d5..92853d67fe 100644 --- a/Modules/OpenCVVideoSupport/mitkImageToOpenCVImageFilter.h +++ b/Modules/OpenCVVideoSupport/mitkImageToOpenCVImageFilter.h @@ -1,107 +1,108 @@ /*=================================================================== The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center, Division of Medical and Biological Informatics. All rights reserved. This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See LICENSE.txt or http://www.mitk.org for details. 
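The GetOpenCVMat() hunk above swaps the removed cv::Mat(IplImage*, bool) constructor for cv::cvarrToMat, the supported bridge between the legacy C structures and cv::Mat. A minimal sketch of its copy semantics (image size and fill values are arbitrary):

#include <opencv2/core/core.hpp>
#include <opencv2/core/core_c.h>
#include <iostream>

int main()
{
  // Allocate a legacy 8-bit, 3-channel IplImage through the C API.
  IplImage* ipl = cvCreateImage(cvSize(64, 48), IPL_DEPTH_8U, 3);
  cvSet(ipl, cvScalar(10, 20, 30));

  // cvarrToMat(ipl, false) only wraps the existing pixel buffer in a cv::Mat
  // header; nothing is copied and the IplImage still owns the memory.
  cv::Mat view = cv::cvarrToMat(ipl, false);

  // clone() makes the cv::Mat own an independent copy, so the IplImage can be
  // released afterwards without invalidating the data.
  cv::Mat owned = view.clone();
  cvReleaseImage(&ipl);

  std::cout << owned.rows << "x" << owned.cols
            << ", channels = " << owned.channels() << std::endl;
  return 0;
}

Passing true as the second argument would copy immediately; the hunk instead keeps the no-copy wrap and, as its comment notes, releases only the IplImage header.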
===================================================================*/ #ifndef mitkImageToOpenCVImageFilter_h #define mitkImageToOpenCVImageFilter_h #include #include #include #include #include +#include #include "mitkImageSliceSelector.h" namespace mitk { /// /// \brief A pseudo-Filter for creating OpenCV images from MITK images with the option of copying data or referencing it /// class MITKOPENCVVIDEOSUPPORT_EXPORT ImageToOpenCVImageFilter : public itk::Object { public: typedef itk::RGBPixel< unsigned char > UCRGBPixelType; typedef itk::RGBPixel< unsigned short > USRGBPixelType; typedef itk::RGBPixel< float > FloatRGBPixelType; typedef itk::RGBPixel< double > DoubleRGBPixelType; mitkClassMacroItkParent(ImageToOpenCVImageFilter, itk::Object); itkFactorylessNewMacro(Self) itkCloneMacro(Self) /// /// \brief set the input MITK image /// void SetImage( mitk::Image* _Image ); /// /// \brief get the input MITK image /// mitk::Image* GetImage(); /// /// \brief get the input MITK image /// bool CheckImage(mitk::Image* image); /// /// RUNS the conversion and returns the produced OpenCVImage. /// !!!ATTENTION!!! Do not forget to release this image again with cvReleaseImage(). /// \return the produced OpenCVImage or 0 if an error occured! /// IplImage* GetOpenCVImage(); /// /// RUNS the conversion and returns the produced image as cv::Mat. /// \return the produced OpenCVImage or an empty image if an error occured /// cv::Mat GetOpenCVMat(); //##Documentation //## @brief Convenient method to set a certain slice of a 3D or 4D mitk::Image as input to convert it to an openCV image //## //## This methods sets the input. Call GetOpenCVMat() or GetOpenCVImage() to get the image. //## //## @param mitkImage - the image that should be converted to an openCVImage //## @param timeStep - the time step, which is converted to openCV //## @param slice - the slice which is converted to openCV void SetInputFromTimeSlice(Image::Pointer mitkImage, int timeStep, int slice); protected: /// /// the actual templated conversion method /// template void ItkImageProcessing( itk::Image* image ); ImageToOpenCVImageFilter(); ~ImageToOpenCVImageFilter() override; /// /// Saves if the filter should copy the data or just reference it /// mitk::WeakPointer m_Image; IplImage* m_OpenCVImage; private: ImageSliceSelector::Pointer m_sliceSelector; }; } // namespace #endif // mitkImageToOpenCVImageFilter_h diff --git a/Modules/OpenCVVideoSupport/mitkOpenCVToMitkImageFilter.cpp b/Modules/OpenCVVideoSupport/mitkOpenCVToMitkImageFilter.cpp index 4d1c39eb5a..89e57a6464 100644 --- a/Modules/OpenCVVideoSupport/mitkOpenCVToMitkImageFilter.cpp +++ b/Modules/OpenCVVideoSupport/mitkOpenCVToMitkImageFilter.cpp @@ -1,159 +1,159 @@ /*=================================================================== The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center, Division of Medical and Biological Informatics. All rights reserved. This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See LICENSE.txt or http://www.mitk.org for details. 
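The GenerateData() implementation in the following hunk selects the MITK pixel type from the combination of cv::Mat::depth() and cv::Mat::channels(). A condensed, stand-alone sketch of that inspection (the strings are only descriptive labels):

#include <opencv2/core/core.hpp>
#include <iostream>
#include <string>

// Describe the element type the same way the filter's depth()/channels()
// checks do when choosing a conversion template.
static std::string DescribePixelType(const cv::Mat& m)
{
  const int c = m.channels();
  switch (m.depth())
  {
    case CV_8U:  return c == 1 ? "unsigned char"  : "8-bit RGB";
    case CV_8S:  return c == 1 ? "char"           : "8-bit signed";
    case CV_16U: return c == 1 ? "unsigned short" : "16-bit RGB";
    case CV_32F: return c == 1 ? "float"          : "float RGB";
    case CV_64F: return c == 1 ? "double"         : "double RGB";
    default:     return "unsupported";
  }
}

int main()
{
  cv::Mat gray(4, 4, CV_16UC1);
  cv::Mat color(4, 4, CV_8UC3);
  std::cout << DescribePixelType(gray)  << std::endl;   // unsigned short
  std::cout << DescribePixelType(color) << std::endl;   // 8-bit RGB
  return 0;
}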
===================================================================*/ #include "mitkOpenCVToMitkImageFilter.h" #include #include #include #include #include #include "mitkImageToOpenCVImageFilter.h" namespace mitk{ OpenCVToMitkImageFilter::OpenCVToMitkImageFilter() { m_ImageMutex = itk::FastMutexLock::New(); m_OpenCVMatMutex = itk::FastMutexLock::New(); } OpenCVToMitkImageFilter::~OpenCVToMitkImageFilter() { } void OpenCVToMitkImageFilter::SetOpenCVMat(const cv::Mat &image) { m_OpenCVMatMutex->Lock(); m_OpenCVMat = image; m_OpenCVMatMutex->Unlock(); this->Modified(); } void OpenCVToMitkImageFilter::SetOpenCVImage(const IplImage* image) { - const cv::Mat cvMat = cv::Mat(image); + const cv::Mat cvMat = cv::cvarrToMat(image, false); this->SetOpenCVMat(cvMat); } void OpenCVToMitkImageFilter::GenerateData() { if (m_OpenCVMat.cols != 0 && m_OpenCVMat.rows != 0 && m_OpenCVMat.data) { // copy current cvMat m_OpenCVMatMutex->Lock(); const cv::Mat input = m_OpenCVMat; m_OpenCVMatMutex->Unlock(); // convert cvMat to mitk::Image m_ImageMutex->Lock(); // now convert rgb image if ((input.depth() >= 0) && ((unsigned int)input.depth() == CV_8S) && (input.channels() == 1)) { m_Image = ConvertCVMatToMitkImage< char, 2>(input); } else if (input.depth() == CV_8U && input.channels() == 1) { m_Image = ConvertCVMatToMitkImage< unsigned char, 2>(input); } else if (input.depth() == CV_8U && input.channels() == 3) { m_Image = ConvertCVMatToMitkImage< UCRGBPixelType, 2>(input); } else if (input.depth() == CV_16U && input.channels() == 1) { m_Image = ConvertCVMatToMitkImage< unsigned short, 2>(input); } else if (input.depth() == CV_16U && input.channels() == 3) { m_Image = ConvertCVMatToMitkImage< USRGBPixelType, 2>(input); } else if (input.depth() == CV_32F && input.channels() == 1) { m_Image = ConvertCVMatToMitkImage< float, 2>(input); } else if (input.depth() == CV_32F && input.channels() == 3) { m_Image = ConvertCVMatToMitkImage< FloatRGBPixelType, 2>(input); } else if (input.depth() == CV_64F && input.channels() == 1) { m_Image = ConvertCVMatToMitkImage< double, 2>(input); } else if (input.depth() == CV_64F && input.channels() == 3) { m_Image = ConvertCVMatToMitkImage< DoubleRGBPixelType, 2>(input); } else { MITK_WARN << "Unknown image depth and/or pixel type. Cannot convert OpenCV to MITK image."; return; } //inputMutex->Unlock(); m_ImageMutex->Unlock(); } else { MITK_WARN << "Cannot start filter. 
OpenCV Image not set."; return; } } ImageSource::OutputImageType* OpenCVToMitkImageFilter::GetOutput() { return m_Image; } /******************************************** * Converting from OpenCV image to ITK Image *********************************************/ template Image::Pointer mitk::OpenCVToMitkImageFilter::ConvertCVMatToMitkImage(const cv::Mat input) { typedef itk::Image< TPixel, VImageDimension > ImageType; typename ImageType::Pointer output = itk::OpenCVImageBridge::CVMatToITKImage(input); Image::Pointer mitkImage = Image::New(); mitkImage = GrabItkImageMemory(output); return mitkImage; } void OpenCVToMitkImageFilter::InsertOpenCVImageAsMitkTimeSlice(cv::Mat openCVImage, Image::Pointer mitkImage, int timeStep) { // convert it to an mitk::Image this->SetOpenCVMat(openCVImage); this->Modified(); this->Update(); //insert it as a timeSlice mitkImage->GetGeometry(timeStep)->SetSpacing(this->GetOutput()->GetGeometry()->GetSpacing()); mitkImage->GetGeometry(timeStep)->SetOrigin(this->GetOutput()->GetGeometry()->GetOrigin()); mitkImage->GetGeometry(timeStep)->SetIndexToWorldTransform(this->GetOutput()->GetGeometry()->GetIndexToWorldTransform()); mitk::ImageReadAccessor readAccess(this->GetOutput()); mitkImage->SetImportVolume(readAccess.GetData(), timeStep); mitkImage->Modified(); mitkImage->Update(); m_ImageMutex->Lock(); m_Image = mitkImage; m_ImageMutex->Unlock(); } } // end namespace mitk diff --git a/Modules/OpenCVVideoSupport/mitkOpenCVToMitkImageFilter.h b/Modules/OpenCVVideoSupport/mitkOpenCVToMitkImageFilter.h index 04722afa9b..71cfc55929 100644 --- a/Modules/OpenCVVideoSupport/mitkOpenCVToMitkImageFilter.h +++ b/Modules/OpenCVVideoSupport/mitkOpenCVToMitkImageFilter.h @@ -1,99 +1,99 @@ /*=================================================================== The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center, Division of Medical and Biological Informatics. All rights reserved. This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See LICENSE.txt or http://www.mitk.org for details. ===================================================================*/ #ifndef mitkOpenCVToMitkImageFilter_h #define mitkOpenCVToMitkImageFilter_h // mitk includes #include #include #include // itk includes #include #include #include // OpenCV includes -#include +#include namespace mitk { /// /// \brief Filter for creating MITK RGB Images from an OpenCV image /// class MITKOPENCVVIDEOSUPPORT_EXPORT OpenCVToMitkImageFilter : public ImageSource { public: typedef itk::RGBPixel< unsigned char > UCRGBPixelType; typedef itk::RGBPixel< unsigned short > USRGBPixelType; typedef itk::RGBPixel< float > FloatRGBPixelType; typedef itk::RGBPixel< double > DoubleRGBPixelType; /// /// the static function for the conversion /// template static Image::Pointer ConvertCVMatToMitkImage(const cv::Mat input); mitkClassMacro(OpenCVToMitkImageFilter, ImageSource); itkFactorylessNewMacro(Self) itkCloneMacro(Self) /// /// sets an iplimage as input /// void SetOpenCVImage(const IplImage* image); //itkGetMacro(OpenCVImage, const IplImage*); /// /// sets an opencv mat as input (will be used if OpenCVImage Ipl image is 0) /// void SetOpenCVMat(const cv::Mat& image); itkGetMacro(OpenCVMat, cv::Mat); OutputImageType* GetOutput(void); //##Documentation //## @brief Convenient method to insert an openCV image as a slice at a //## certain time step into a 3D or 4D mitk::Image. 
//## //## @param openCVImage - the image that is inserted into the mitk Image //## @param mitkImage - pointer to the mitkImage, which is changed by this method! //## @param timeStep - the time step, at which the openCVImage is inserted //## //## @attention The parameter mitkImage will be changed! void InsertOpenCVImageAsMitkTimeSlice(const cv::Mat openCVImage, Image::Pointer mitkImage, int timeStep); protected: OpenCVToMitkImageFilter(); // purposely hidden ~OpenCVToMitkImageFilter() override; void GenerateData() override; protected: Image::Pointer m_Image; cv::Mat m_OpenCVMat; itk::FastMutexLock::Pointer m_ImageMutex; itk::FastMutexLock::Pointer m_OpenCVMatMutex; }; } // namespace mitk #endif // mitkOpenCVToMitkImageFilter_h diff --git a/Modules/OpenCVVideoSupport/mitkOpenCVVideoSource.cpp b/Modules/OpenCVVideoSupport/mitkOpenCVVideoSource.cpp index 45de70d921..8543ee7d90 100644 --- a/Modules/OpenCVVideoSupport/mitkOpenCVVideoSource.cpp +++ b/Modules/OpenCVVideoSupport/mitkOpenCVVideoSource.cpp @@ -1,428 +1,428 @@ /*=================================================================== The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center, Division of Medical and Biological Informatics. All rights reserved. This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See LICENSE.txt or http://www.mitk.org for details. ===================================================================*/ #include "mitkOpenCVVideoSource.h" #include #include mitk::OpenCVVideoSource::OpenCVVideoSource() : m_VideoCapture(nullptr), m_CurrentImage(nullptr), m_CurrentVideoTexture(nullptr), m_PauseImage(nullptr), m_GrabbingDeviceNumber(-1), m_RepeatVideo(false), m_UseCVCAMLib(false), m_UndistortImage(false), m_FlipXAxisEnabled(false), m_FlipYAxisEnabled(false) { } mitk::OpenCVVideoSource::~OpenCVVideoSource() { this->Reset(); } void mitk::OpenCVVideoSource::SetVideoFileInput(const char * filename, bool repeatVideo, bool /*useCVCAMLib*/) { this->Reset(); m_VideoFileName = filename; m_VideoCapture = cvCaptureFromFile(filename); if(!m_VideoCapture) MITK_WARN << "Error in initializing video file input!"; m_RepeatVideo = repeatVideo; //m_CurrentImage = cvCreateImage(cvSize(m_CaptureWidth,m_CaptureHeight),8,3); this->Modified(); } void mitk::OpenCVVideoSource::SetVideoCameraInput(int cameraindex, bool /*useCVCAMLib*/) { this->Reset(); m_GrabbingDeviceNumber = cameraindex; m_VideoCapture = cvCaptureFromCAM(m_GrabbingDeviceNumber); if(!m_VideoCapture) MITK_ERROR << "Error in initializing CVHighGUI video camera!"<< std::endl; this->Modified(); } double mitk::OpenCVVideoSource::GetVideoCaptureProperty(int property_id) { return cvGetCaptureProperty(m_VideoCapture, property_id); } int mitk::OpenCVVideoSource::SetVideoCaptureProperty(int property_id, double value) { return cvSetCaptureProperty(m_VideoCapture, property_id, value); } //method extended for "static video feature" if enabled unsigned char* mitk::OpenCVVideoSource::GetVideoTexture() { // Fetch Frame and return pointer to opengl texture FetchFrame(); if (m_FlipXAxisEnabled || m_FlipYAxisEnabled) { //rotate the image to get a static video m_CurrentImage = this->FlipImage(m_CurrentImage); } //transfer the image to a texture this->UpdateVideoTexture(); return this->m_CurrentVideoTexture; this->Modified(); } cv::Mat mitk::OpenCVVideoSource::GetImage() { if(m_CurrentImage) { - cv::Mat copy( m_CurrentImage, false ); + cv::Mat copy = cv::cvarrToMat( 
m_CurrentImage, false ); return copy.clone(); } return cv::Mat(); } const IplImage * mitk::OpenCVVideoSource::GetCurrentFrame() { return m_CurrentImage; } void mitk::OpenCVVideoSource::GetCurrentFrameAsOpenCVImage(IplImage * image) { // get last captured frame for processing the image data if(m_CurrentImage) { if(image) { image->origin = m_CurrentImage->origin; memcpy(image->imageData,m_CurrentImage->imageData,m_CurrentImage->width*m_CurrentImage->height*m_CurrentImage->nChannels); } } } void mitk::OpenCVVideoSource::FetchFrame() { // main procedure for updating video data if(m_CapturingInProcess) { if(m_VideoCapture) // we use highgui { if(!m_CapturePaused) { // release old image here m_CurrentImage = cvQueryFrame(m_VideoCapture); ++m_FrameCount; } if(m_CurrentImage == nullptr) // do we need to repeat the video if it is from video file? { double framePos = this->GetVideoCaptureProperty(CV_CAP_PROP_POS_AVI_RATIO); MITK_DEBUG << "End of video file found. framePos: " << framePos; if(m_RepeatVideo && framePos >= 0.99) { MITK_DEBUG << "Restarting video file playback."; this->SetVideoCaptureProperty(CV_CAP_PROP_POS_AVI_RATIO, 0); m_FrameCount = 0; m_CurrentImage = cvQueryFrame(m_VideoCapture); } else { std::ostringstream s; s << "End of video file " << m_VideoFileName; std::logic_error err( s.str() ); throw err; } } else { // only undistort if not paused if(m_UndistortImage && m_UndistortCameraImage.IsNotNull()) m_UndistortCameraImage->UndistortImageFast(m_CurrentImage, nullptr); } if(m_CaptureWidth == 0 || m_CaptureHeight == 0) { MITK_DEBUG << "Trying to set m_CaptureWidth & m_CaptureHeight."; m_CaptureWidth = m_CurrentImage->width; m_CaptureHeight = m_CurrentImage->height; MITK_INFO << "frame width: " << m_CaptureWidth << ", height: " << m_CaptureHeight; m_CurrentImage->origin = 0; } } } } void mitk::OpenCVVideoSource::UpdateVideoTexture() { //write the grabbed frame into an opengl compatible array, that means flip it and swap channel order if(!m_CurrentImage) return; if(m_CurrentVideoTexture == nullptr) m_CurrentVideoTexture = new unsigned char[m_CaptureWidth*m_CaptureHeight*3]; int width = m_CurrentImage->width; int height = m_CurrentImage->height; int widthStep = m_CurrentImage->widthStep; int nChannels = m_CurrentImage->nChannels; unsigned char* tex = m_CurrentVideoTexture; char* data = m_CurrentImage->imageData; char* currentData = m_CurrentImage->imageData; int hIndex=0; int wIndex=0; int iout,jout; for(int i=0;i= width) { wIndex=0; hIndex++; } // vertically flip the image iout = -hIndex+height-1; jout = wIndex; currentData = data + iout*widthStep; tex[i+2] = currentData[jout*nChannels + 0]; // B tex[i+1] = currentData[jout*nChannels + 1]; // G tex[i] = currentData[jout*nChannels + 2]; // R } } void mitk::OpenCVVideoSource::StartCapturing() { if(m_VideoCapture != nullptr) m_CapturingInProcess = true; else m_CapturingInProcess = false; } void mitk::OpenCVVideoSource::StopCapturing() { m_CapturePaused = false; m_CapturingInProcess = false; } bool mitk::OpenCVVideoSource::OnlineImageUndistortionEnabled() const { return m_UndistortCameraImage; } void mitk::OpenCVVideoSource::PauseCapturing() { m_CapturePaused = !m_CapturePaused; if(m_CapturePaused) { m_PauseImage = cvCloneImage(m_CurrentImage); // undistort this pause image if necessary if(m_UndistortImage) m_UndistortCameraImage->UndistortImageFast(m_PauseImage, nullptr); m_CurrentImage = m_PauseImage; } else { cvReleaseImage( &m_PauseImage ); // release old pause image if necessary m_CurrentImage = nullptr; m_PauseImage = nullptr; } } 
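FetchFrame() above still drives the capture through the C API (CvCapture, cvQueryFrame, CV_CAP_PROP_POS_AVI_RATIO). For comparison only, and not part of this patch, the same playback-and-repeat logic written against cv::VideoCapture might look like this (the file name and loop bound are hypothetical):

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>

int main()
{
  cv::VideoCapture capture("example-video.avi");
  if (!capture.isOpened())
  {
    std::cerr << "Could not open video file." << std::endl;
    return 1;
  }

  const bool repeatVideo = true;
  cv::Mat frame;
  for (int i = 0; i < 1000; ++i)
  {
    if (!capture.read(frame))
    {
      // End of file: check the relative position and rewind if repeating,
      // mirroring the repeat logic of FetchFrame().
      double framePos = capture.get(CV_CAP_PROP_POS_AVI_RATIO);
      if (repeatVideo && framePos >= 0.99)
      {
        capture.set(CV_CAP_PROP_POS_AVI_RATIO, 0);
        continue;
      }
      break;
    }
    // ... process 'frame' here ...
  }
  return 0;
}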
void mitk::OpenCVVideoSource::EnableOnlineImageUndistortion(mitk::Point3D focal, mitk::Point3D principal, mitk::Point4D distortion) { // Initialize Undistortion m_UndistortImage = true; float kc[4]; kc[0] = distortion[0]; kc[1] = distortion[1]; kc[2] = distortion[2]; kc[3] = distortion[3]; if(m_CaptureWidth == 0 || m_CaptureHeight == 0) FetchFrame(); m_UndistortCameraImage = mitk::UndistortCameraImage::New(); m_UndistortCameraImage->SetUndistortImageFastInfo(focal[0], focal[1], principal[0], principal[1], kc, (float)m_CaptureWidth, (float)m_CaptureHeight); } void mitk::OpenCVVideoSource::DisableOnlineImageUndistortion() { m_UndistortImage = false; } // functions for compatibility with ITK segmentation only void mitk::OpenCVVideoSource::GetCurrentFrameAsItkHSVPixelImage(HSVPixelImageType::Pointer &Image) { FetchFrame(); // Prepare iteration HSVConstIteratorType itImage( Image, Image->GetLargestPossibleRegion()); itImage.GoToBegin(); HSVPixelType pixel; int rowsize = 3 * m_CaptureWidth; char* bufferend; char* picture; picture = this->m_CurrentImage->imageData; bufferend = this->m_CurrentImage->imageData + 3*(m_CaptureHeight*m_CaptureWidth); float r,g,b,h,s,v; try { // we have to flip the image for(char* datapointer = bufferend - rowsize;datapointer >= picture; datapointer -= rowsize) { for(char* current = datapointer; current < datapointer + rowsize; current++) { b = *current; current++; g = *current; current++; r = *current; RGBtoHSV(r,g,b,h,s,v); pixel[0] = h; pixel[1] = s; pixel[2] = v; itImage.Set(pixel); ++itImage; } } } catch( ... ) { std::cout << "Exception raised mitkOpenCVVideoSource: get hsv itk image conversion error." << std::endl; } } void mitk::OpenCVVideoSource::RGBtoHSV(float r, float g, float b, float &h, float &s, float &v) { if(r > 1.0) r = r/255; if(b > 1.0) b = b/255; if(g > 1.0) g = g/255; float mn=r,mx=r; int maxVal=0; if (g > mx){ mx=g;maxVal=1;} if (b > mx){ mx=b;maxVal=2;} if (g < mn) mn=g; if (b < mn) mn=b; float delta = mx - mn; v = mx; if( mx != 0 ) s = delta / mx; else { s = 0; h = 0; return; } if (s==0.0f) { h=-1; return; } else { switch (maxVal) { case 0:{h = ( g - b ) / delta;break;} // yel < h < mag case 1:{h = 2 + ( b - r ) / delta;break;} // cyan < h < yel case 2:{h = 4 + ( r - g ) / delta;break;} // mag < h < cyan } } h *= 60; if( h < 0 ) h += 360; } /* * Rotate input image according to rotation angle around the viewing direction. * Angle is supposed to be calculated in QmitkARRotationComponet in the update() method. */ IplImage* mitk::OpenCVVideoSource::FlipImage(IplImage* input) { if(input == nullptr) { //warn the user and quit std::cout<<"openCVVideoSource: Current video image is null! 
"<< std::endl; return input; } if(m_FlipXAxisEnabled && !m_FlipYAxisEnabled) { cvFlip(input,nullptr,0); } if(!m_FlipXAxisEnabled && m_FlipYAxisEnabled) { cvFlip(input,nullptr,1); } if(m_FlipXAxisEnabled && m_FlipYAxisEnabled) { cvFlip(input,nullptr,-1); } return input; } void mitk::OpenCVVideoSource::Reset() { // set capturing to false this->StopCapturing(); this->m_FrameCount = 0; if(m_VideoCapture) cvReleaseCapture(&m_VideoCapture); m_VideoCapture = nullptr; m_CurrentImage = nullptr; m_CaptureWidth = 0; m_CaptureHeight = 0; delete m_CurrentVideoTexture; m_CurrentVideoTexture = nullptr; if(m_PauseImage) cvReleaseImage(&m_PauseImage); m_PauseImage = nullptr; m_CapturePaused = false; m_VideoFileName.clear(); m_GrabbingDeviceNumber = -1; // do not touch repeat video //m_RepeatVideo = false; m_UseCVCAMLib = false; // do not touch undistort settings // bool m_UndistortImage; } void mitk::OpenCVVideoSource::SetEnableXAxisFlip(bool enable) { this->m_FlipXAxisEnabled = enable; this->Modified(); } void mitk::OpenCVVideoSource::SetEnableYAxisFlip(bool enable) { this->m_FlipXAxisEnabled = enable; this->Modified(); } diff --git a/Modules/OpenCVVideoSupport/mitkOpenCVVideoSource.h b/Modules/OpenCVVideoSupport/mitkOpenCVVideoSource.h index ebe562f66e..7aa1e6c18d 100644 --- a/Modules/OpenCVVideoSupport/mitkOpenCVVideoSource.h +++ b/Modules/OpenCVVideoSupport/mitkOpenCVVideoSource.h @@ -1,201 +1,201 @@ /*=================================================================== The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center, Division of Medical and Biological Informatics. All rights reserved. This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See LICENSE.txt or http://www.mitk.org for details. ===================================================================*/ #ifndef _mitk_OpenCVVideo_Source_h_ #define _mitk_OpenCVVideo_Source_h_ #include "mitkConfig.h" #include "mitkVideoSource.h" #include "mitkUndistortCameraImage.h" // HighGui camera interface: a convenient way for grabbing from a video capture (on windows VfW is used) -#include "highgui.h" +#include // For Providing ITK Image Interface #include "itkRGBPixel.h" #include "itkImage.h" #include "itkImageRegionIterator.h" #include "mitkOpenCVImageSource.h" namespace mitk { /** * Interface for acquiring video data using Intel's OPENCV library. * Video data may either be provided from a file or a grabbing device. * At the moment, OPENCV includes two separated modules for this grabbing, but only HighGui is * used here. * Initialize via SetVideoFileInput() or SetVideoCameraInput(), start processing with StartCapturing(); */ class MITKOPENCVVIDEOSUPPORT_EXPORT OpenCVVideoSource : virtual public VideoSource, virtual public OpenCVImageSource { public: typedef itk::RGBPixel< unsigned char > CharPixelType; typedef itk::FixedArray HSVPixelType; typedef itk::Image< CharPixelType , 2 > RGBPixelImageType; typedef itk::Image HSVPixelImageType; typedef itk::ImageRegionIterator< RGBPixelImageType > RGBConstIteratorType; typedef itk::ImageRegionIterator< HSVPixelImageType > HSVConstIteratorType; mitkClassMacro( OpenCVVideoSource, VideoSource ); itkFactorylessNewMacro(Self) itkCloneMacro(Self) ////##Documentation ////## @brief sets a video file as input device. One video frame is being processed by updating the renderwindow. ////## Notice: Which codecs and file formats are supported depends on the back end library. 
////## Common Function that currently uses HighGui Lib for video playback virtual void SetVideoFileInput(const char * filename, bool repeatVideo, bool useCVCAMLib = false); ////##Documentation ////##@brief Initializes capturing video from camera. ////## Common Function for use either with HIGHGUI or with CVCAM library ////## On windows: if you use CVCAM Library, you can pass -1 as camera index for a selection menu virtual void SetVideoCameraInput(int cameraindex, bool useCVCAMLib = false); ////##Documentation ////## The function GetVideoCaptureProperty retrieves the specified property of camera or video file from HIGHGUI LIBRARY. ////## Video input has to be initialized before call, that means: at least one frame has to be grabbed already. ////## The property_id identifier can be the following: ////## CV_CAP_PROP_POS_MSEC film current position in milliseconds or video capture timestamp ////## CV_CAP_PROP_POS_FRAMES 0-based index of the frame to be decoded/captured next ////## CV_CAP_PROP_POS_AVI_RATIO relative position of video file (0 - start of the film, 1 - end of the film) ////## CV_CAP_PROP_FRAME_WIDTH width of frames in the video stream ////## CV_CAP_PROP_FRAME_HEIGHT height of frames in the video stream ////## CV_CAP_PROP_FPS frame rate ////## CV_CAP_PROP_FOURCC 4-character code of codec ////## CV_CAP_PROP_FRAME_COUNT number of frames in video file ////## See OpenCV Highgui documentation for more details ( http://opencvlibrary.sourceforge.net/HighGui ) virtual double GetVideoCaptureProperty(int property_id); ////##Documentation ////## @brief sets the specified property of video capturing from HIGHGUI LIBRARY. ////## Notice: Some properties only can be set using a video file as input devices, others using a camera. ////## See OpenCV Highgui documentation for more details ( http://opencvlibrary.sourceforge.net/HighGui ) virtual int SetVideoCaptureProperty(int property_id, double value); virtual void GetCurrentFrameAsOpenCVImage(IplImage * image); /// /// \return a copy of the image as opencv 2 Mat /// cv::Mat GetImage() override; virtual const IplImage * GetCurrentFrame(); ////##Documentation ////## @brief returns the current video data as an ITK image. virtual void GetCurrentFrameAsItkHSVPixelImage(HSVPixelImageType::Pointer &Image); ////##Documentation ////## @brief assigns the grabbing devices for acquiring the next frame. void FetchFrame() override; ////##Documentation ////## @brief returns a pointer to the image data array for opengl rendering. unsigned char * GetVideoTexture() override; ////##Documentation ////## @brief starts the video capturing. void StartCapturing() override; ////##Documentation ////## @brief stops the video capturing. void StopCapturing() override; ////##Documentation ////## @brief rotate image according to the set angle. virtual IplImage* FlipImage(IplImage* input); ////##Documentation ////## @brief EnableOnlineImageUndistortion allows for an online image undistortion directly after capturing an image. ////## The function has to be called after setting up the video input; the result is made accessible via the normal ////## GetCurrentFrame... functions. virtual void EnableOnlineImageUndistortion(mitk::Point3D focal, mitk::Point3D principal, mitk::Point4D distortion); ////##Documentation ////## @brief DisableOnlineImageUndistortion is used to disable the automatic image undistortion. 
virtual void DisableOnlineImageUndistortion(); /// /// \return true if image undistorsion is enabled /// virtual bool OnlineImageUndistortionEnabled() const; void PauseCapturing() override; /// /// Returns the video file name (maybe empty if a grabbing device is used) /// itkGetConstMacro( VideoFileName, std::string ); virtual void SetEnableXAxisFlip(bool enable); virtual void SetEnableYAxisFlip(bool enable); /// /// Returns the GrabbingDeviceNumber (maybe -1 if a video file is used) /// itkGetConstMacro( GrabbingDeviceNumber, short ); itkGetMacro( RepeatVideo, bool ); itkSetMacro( RepeatVideo, bool ); protected: OpenCVVideoSource(); ~OpenCVVideoSource() override; /// /// Resets the whole class for capturing from a new device /// void Reset(); ////##Documentation ////## @brief internally used for converting the current video frame to a texture for opengl rendering, ////## so that GetVideoTexture() can be used. void UpdateVideoTexture(); // Helper functions void sleep(unsigned int ms); void RGBtoHSV(float r, float g, float b, float &h, float &s, float &v); // HighGUI Library capture device CvCapture * m_VideoCapture; // current Video image IplImage * m_CurrentImage; unsigned char* m_CurrentVideoTexture; IplImage * m_PauseImage; /// /// saves the video file name (is empty if a grabbing device is used or if this is not initialized) std::string m_VideoFileName; /// /// saves the grabbing device number (is -1 if a videofilename is used or if this is not initialized) short m_GrabbingDeviceNumber; // Repeat a video file bool m_RepeatVideo; // Switch between CVCAM Lib and HighGui Lib bool m_UseCVCAMLib; // On-the-fly undistortion of the captured video image bool m_UndistortImage; mitk::UndistortCameraImage::Pointer m_UndistortCameraImage; /** * Flag to enable or disable video flipping by X Axis. **/ bool m_FlipXAxisEnabled; /** * Flag to enable or disable video flipping by Y Axis. **/ bool m_FlipYAxisEnabled; }; } #endif // Header diff --git a/Modules/ToFHardware/mitkToFOpenCVImageGrabber.cpp b/Modules/ToFHardware/mitkToFOpenCVImageGrabber.cpp index 74ab967eb9..16a61d0ad7 100644 --- a/Modules/ToFHardware/mitkToFOpenCVImageGrabber.cpp +++ b/Modules/ToFHardware/mitkToFOpenCVImageGrabber.cpp @@ -1,186 +1,180 @@ /*=================================================================== The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center, Division of Medical and Biological Informatics. All rights reserved. This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See LICENSE.txt or http://www.mitk.org for details. 
===================================================================*/ #include "mitkToFOpenCVImageGrabber.h" // mitk includes #include "mitkImageDataItem.h" #include #include "mitkImageReadAccessor.h" #include "vtkSmartPointer.h" #include "vtkColorTransferFunction.h" #include "vtkFloatArray.h" namespace mitk { ToFOpenCVImageGrabber::ToFOpenCVImageGrabber() { m_CurrentOpenCVIntensityImage = nullptr; m_CurrentOpenCVAmplitudeImage = nullptr; m_CurrentOpenCVDistanceImage = nullptr; m_ImageType = 0; m_ImageDepth = IPL_DEPTH_32F; m_ImageGrabber = nullptr; } ToFOpenCVImageGrabber::~ToFOpenCVImageGrabber() { } cv::Mat ToFOpenCVImageGrabber::GetImage() { m_ImageGrabber->Update(); unsigned int numOfPixel = m_ImageGrabber->GetCaptureWidth()*m_ImageGrabber->GetCaptureHeight(); // copy current mitk images unsigned int dimensions[4]; dimensions[0] = this->m_ImageGrabber->GetCaptureWidth(); dimensions[1] = this->m_ImageGrabber->GetCaptureHeight(); dimensions[2] = 1; dimensions[3] = 1; // create single component float pixel type mitk::PixelType FloatType = MakeScalarPixelType(); ImageReadAccessor imgGrabAcc0(m_ImageGrabber->GetOutput(0), m_ImageGrabber->GetOutput(0)->GetSliceData()); ImageReadAccessor imgGrabAcc1(m_ImageGrabber->GetOutput(1), m_ImageGrabber->GetOutput(1)->GetSliceData()); ImageReadAccessor imgGrabAcc2(m_ImageGrabber->GetOutput(2), m_ImageGrabber->GetOutput(2)->GetSliceData()); mitk::Image::Pointer currentMITKIntensityImage = mitk::Image::New(); currentMITKIntensityImage->Initialize(FloatType, 2, dimensions); currentMITKIntensityImage->SetSlice((float*) imgGrabAcc2.GetData(),0,0,0); mitk::Image::Pointer currentMITKAmplitudeImage = mitk::Image::New(); currentMITKAmplitudeImage->Initialize(FloatType, 2, dimensions); currentMITKAmplitudeImage->SetSlice((float*)imgGrabAcc1.GetData(),0,0,0); mitk::Image::Pointer currentMITKDistanceImage = mitk::Image::New(); currentMITKDistanceImage->Initialize(FloatType, 2, dimensions); currentMITKDistanceImage->SetSlice((float*)imgGrabAcc0.GetData(),0,0,0); // copy mitk images to OpenCV images if (m_ImageDepth==IPL_DEPTH_32F) { if (m_ImageType==1) { ImageReadAccessor currentAmplAcc(currentMITKAmplitudeImage, currentMITKAmplitudeImage->GetSliceData(0, 0, 0)); float* amplitudeFloatData = (float*) currentAmplAcc.GetData(); memcpy(m_CurrentOpenCVAmplitudeImage->imageData,(unsigned char*)amplitudeFloatData,numOfPixel*sizeof(float)); - cv::Mat image(m_CurrentOpenCVAmplitudeImage); - return image; + return cv::cvarrToMat(m_CurrentOpenCVAmplitudeImage, false); } else if (m_ImageType==2) { ImageReadAccessor currentIntenAcc(currentMITKIntensityImage, currentMITKIntensityImage->GetSliceData(0, 0, 0)); float* intensityFloatData = (float*) currentIntenAcc.GetData(); memcpy(m_CurrentOpenCVIntensityImage->imageData,(unsigned char*)intensityFloatData,numOfPixel*sizeof(float)); - cv::Mat image(m_CurrentOpenCVIntensityImage); - return image; + return cv::cvarrToMat(m_CurrentOpenCVIntensityImage, false); } else { ImageReadAccessor currentDistAcc(currentMITKDistanceImage, currentMITKDistanceImage->GetSliceData(0, 0, 0)); float* distanceFloatData = (float*) currentDistAcc.GetData(); memcpy(m_CurrentOpenCVDistanceImage->imageData,(unsigned char*)distanceFloatData,numOfPixel*sizeof(float)); - cv::Mat image(m_CurrentOpenCVDistanceImage); - return image; + return cv::cvarrToMat(m_CurrentOpenCVDistanceImage, false); } } else { if (m_ImageType==1) { this->MapScalars(currentMITKAmplitudeImage, m_CurrentOpenCVAmplitudeImage); - cv::Mat image(m_CurrentOpenCVAmplitudeImage); - return 
image; + return cv::cvarrToMat(m_CurrentOpenCVAmplitudeImage, false); } else if (m_ImageType==2) { this->MapScalars(currentMITKIntensityImage, m_CurrentOpenCVIntensityImage); - cv::Mat image(m_CurrentOpenCVIntensityImage); - return image; + return cv::cvarrToMat(m_CurrentOpenCVIntensityImage, false); } else { this->MapScalars(currentMITKDistanceImage, m_CurrentOpenCVDistanceImage); - cv::Mat image(m_CurrentOpenCVDistanceImage); - return image; + return cv::cvarrToMat(m_CurrentOpenCVDistanceImage, false); } } } void ToFOpenCVImageGrabber::SetImageType(unsigned int imageType) { m_ImageType = imageType; } void ToFOpenCVImageGrabber::SetImageDepth(unsigned int imageDepth) { m_ImageDepth = imageDepth; } void ToFOpenCVImageGrabber::SetToFImageGrabber(ToFImageGrabber::Pointer imageGrabber) { m_ImageGrabber = imageGrabber; } ToFImageGrabber::Pointer ToFOpenCVImageGrabber::GetToFImageGrabber() { return m_ImageGrabber; } void ToFOpenCVImageGrabber::StartCapturing() { if (m_ImageGrabber.IsNotNull()) { m_ImageGrabber->ConnectCamera(); //Initialize cv Images after the camera is conneceted and we know the resolution m_CurrentOpenCVIntensityImage = cvCreateImage(cvSize(m_ImageGrabber->GetCaptureWidth(), m_ImageGrabber->GetCaptureHeight()), m_ImageDepth, 1); m_CurrentOpenCVAmplitudeImage = cvCreateImage(cvSize(m_ImageGrabber->GetCaptureWidth(), m_ImageGrabber->GetCaptureHeight()), m_ImageDepth, 1); m_CurrentOpenCVDistanceImage = cvCreateImage(cvSize(m_ImageGrabber->GetCaptureWidth(), m_ImageGrabber->GetCaptureHeight()), m_ImageDepth, 1); m_ImageGrabber->StartCamera(); } } void ToFOpenCVImageGrabber::StopCapturing() { if (m_ImageGrabber.IsNotNull()) { m_ImageGrabber->StopCamera(); m_ImageGrabber->DisconnectCamera(); } } void ToFOpenCVImageGrabber::MapScalars( mitk::Image::Pointer mitkImage, IplImage* openCVImage) { unsigned int numOfPixel = m_ImageGrabber->GetCaptureWidth()*m_ImageGrabber->GetCaptureHeight(); ImageReadAccessor imgAcc(mitkImage, mitkImage->GetSliceData(0, 0, 0)); float* floatData = (float*)imgAcc.GetData(); vtkSmartPointer colorTransferFunction = vtkSmartPointer::New(); vtkSmartPointer floatArrayInt = vtkSmartPointer::New(); floatArrayInt->Initialize(); floatArrayInt->SetArray(floatData, numOfPixel, 0); mitk::ScalarType min = mitkImage->GetStatistics()->GetScalarValueMin(); mitk::ScalarType max = mitkImage->GetStatistics()->GetScalarValueMaxNoRecompute(); MITK_INFO<<"Minimum: "<RemoveAllPoints(); colorTransferFunction->AddRGBPoint(min, 0, 0, 0); colorTransferFunction->AddRGBPoint(max, 1, 1, 1); colorTransferFunction->SetColorSpaceToHSV(); colorTransferFunction->MapScalarsThroughTable(floatArrayInt, (unsigned char*)openCVImage->imageData, VTK_LUMINANCE); } } // end namespace mitk diff --git a/Modules/US/USFilters/mitkUSImageVideoSource.h b/Modules/US/USFilters/mitkUSImageVideoSource.h index 994a925489..9e2d155f70 100644 --- a/Modules/US/USFilters/mitkUSImageVideoSource.h +++ b/Modules/US/USFilters/mitkUSImageVideoSource.h @@ -1,221 +1,221 @@ /*=================================================================== The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center, Division of Medical and Biological Informatics. All rights reserved. This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See LICENSE.txt or http://www.mitk.org for details. 
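The ToF grabber above keeps preallocated IplImage buffers and hands them out as cv::Mat via cv::cvarrToMat(..., false), i.e. without copying. The same zero-copy versus owned-copy trade-off can be illustrated with a plain float buffer, matching the 32-bit single-channel ToF data (buffer size and fill value are arbitrary):

#include <opencv2/core/core.hpp>
#include <vector>
#include <iostream>

int main()
{
  const int width = 8, height = 4;
  std::vector<float> distanceData(width * height, 1.5f);   // stand-in for a ToF frame

  // Wrap the externally owned buffer without copying; the cv::Mat header does
  // not take ownership, so distanceData must outlive 'wrapped'.
  cv::Mat wrapped(height, width, CV_32FC1, distanceData.data());

  // clone() when the matrix has to stay valid after the buffer is gone.
  cv::Mat owned = wrapped.clone();

  std::cout << "depth is CV_32F: " << (owned.depth() == CV_32F)
            << ", first value: " << owned.at<float>(0, 0) << std::endl;
  return 0;
}

Because the returned header aliases the grabber's internal buffer, cloning is advisable whenever a frame is kept beyond the next grab.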
===================================================================*/ #ifndef MITKUSImageVideoSource_H_HEADER_INCLUDED_ #define MITKUSImageVideoSource_H_HEADER_INCLUDED_ // ITK #include // MITK #include "mitkUSImageSource.h" #include "mitkConvertGrayscaleOpenCVImageFilter.h" #include "mitkCropOpenCVImageFilter.h" #include "mitkBasicCombinationOpenCVImageFilter.h" // OpenCV -#include +#include namespace mitk { /** * \brief This class can be pointed to a video file or a videodevice and delivers USImages. * * Images are in color by default, but can be set to greyscale via SetColorOutput(false), * which significantly improves performance. * * Images can also be cropped to a region of interest, further increasing performance. * * \ingroup US */ class MITKUS_EXPORT USImageVideoSource : public mitk::USImageSource { public: mitkClassMacroItkParent(USImageVideoSource, itk::ProcessObject); itkFactorylessNewMacro(Self) itkCloneMacro(Self) /** * \brief Defines a region of interest by top left and bottom right corner. */ struct USImageRoi { int topLeftX; int topLeftY; int bottomRightX; int bottomRightY; USImageRoi() : topLeftX(0), topLeftY(0), bottomRightX(0), bottomRightY(0) { }; USImageRoi(unsigned int topLeftX, unsigned int topLeftY, unsigned int bottomRightX, unsigned int bottomRightY) : topLeftX(topLeftX), topLeftY(topLeftY), bottomRightX(bottomRightX), bottomRightY(bottomRightY) { }; }; /** * \brief Defines a region of interest by distances to the four image borders. */ struct USImageCropping { unsigned int top; unsigned int bottom; unsigned int left; unsigned int right; USImageCropping() : top(0), bottom(0), left(0), right(0) { }; USImageCropping(unsigned int top, unsigned int bottom, unsigned int left, unsigned int right) : top(top), bottom(bottom), left(left), right(right) { }; }; /** * \brief Opens a video file for streaming. If nothing goes wrong, the * VideoSource is ready to deliver images after calling this function. */ void SetVideoFileInput(std::string path); /** * \brief Opens a video device for streaming. Takes the Device id. Try -1 for "grab the first you can get" * which works quite well if only one device is available. If nothing goes wrong, the * VideoSource is ready to deliver images after calling this function. */ void SetCameraInput(int deviceID); void ReleaseInput(); /** * \brief Sets the output image to rgb or grayscale. * Output is color by default * and can be set to color by passing true, or to grayscale again by passing false. */ void SetColorOutput(bool isColor); /** * \brief Defines the cropping area. * The rectangle will be justified to the image borders if the given * rectangle is larger than the video source. If a correct rectangle is * given, the dimensions of the output image will be equal to those of the * rectangle. */ void SetRegionOfInterest(int topLeftX, int topLeftY, int bottomRightX, int bottomRightY); /** * \brief Defines the cropping area. * The rectangle will be justified to the image borders if the given * rectangle is larger than the video source. If a correct rectangle is * given, the dimensions of the output image will be equal to those of the * rectangle. * * \param regionOfInterest struct defining x and y coordinates of top left and bottom right corner */ void SetRegionOfInterest(USImageRoi regionOfInterest); /** * \brief Defines the cropping area. * The rectangle will be justified to the image borders if the given * rectangle is larger than the video source. 
If a correct rectangle is * given, the dimensions of the output image will be equal to those of the * rectangle. * * \param cropping struct defining distances to the four image borders */ void SetCropping(USImageCropping cropping); /** * /brief Removes the region of interest. * Produced images will be uncropped after call of this method. */ void RemoveRegionOfInterest(); /** * \brief This is a workaround for a problem that happens with some video device drivers. * * If you encounter OpenCV Warnings that buffer sizes do not match while calling getNextFrame, * then do the following: Using the drivers control panel to force a certain resolution, then call * this method with the same Dimensions after opening the device. * Before retrieving images one should call mitk::USImageVideoSource::isReady(). */ void OverrideResolution(int width, int height); // Getter & Setter itkGetMacro(IsVideoReady, bool); itkGetMacro(ResolutionOverride, bool); itkSetMacro(ResolutionOverride, bool); itkGetMacro(IsGreyscale,bool); itkGetMacro(ResolutionOverrideWidth,int); itkGetMacro(ResolutionOverrideHeight,int); int GetImageHeight(); int GetImageWidth(); USImageCropping GetCropping(); USImageRoi GetRegionOfInterest(); /** * \brief Returns true if images can be delivered. * * Only if true is returned one can retrieve images via * mitk::USImageVideoSource::GetNextImage(). * If false is returned, behaviour is undefined. */ bool GetIsReady(); protected: USImageVideoSource(); ~USImageVideoSource() override; /** * \brief Next image is gathered from the image source. * * \param[out] image an OpenCV-Matrix containing this image */ void GetNextRawImage( std::vector& image ) override; /** * \brief Next image is gathered from the image source. * * \param[out] image an mitk::Image containing this image */ void GetNextRawImage( std::vector& image ) override; /** * \brief The source of the video, managed internally */ cv::VideoCapture* m_VideoCapture; /** * \brief If true, a frame can be grabbed anytime. */ bool m_IsVideoReady; /** * \brief If true, image output will be greyscale. */ bool m_IsGreyscale; /** * \brief If true, image will be cropped according to settings of crop filter. */ bool m_IsCropped; /** * These Variables determined whether Resolution Override is on, what dimensions to use. */ int m_ResolutionOverrideWidth; int m_ResolutionOverrideHeight; bool m_ResolutionOverride; ConvertGrayscaleOpenCVImageFilter::Pointer m_GrayscaleFilter; CropOpenCVImageFilter::Pointer m_CropFilter; }; } // namespace mitk #endif /* MITKUSImageVideoSource_H_HEADER_INCLUDED_ */ diff --git a/Plugins/org.mitk.gui.qt.ultrasound/src/internal/QmitkUltrasoundSupport.cpp b/Plugins/org.mitk.gui.qt.ultrasound/src/internal/QmitkUltrasoundSupport.cpp index 76c005d152..51f6110989 100644 --- a/Plugins/org.mitk.gui.qt.ultrasound/src/internal/QmitkUltrasoundSupport.cpp +++ b/Plugins/org.mitk.gui.qt.ultrasound/src/internal/QmitkUltrasoundSupport.cpp @@ -1,533 +1,533 @@ /*=================================================================== The Medical Imaging Interaction Toolkit (MITK) Copyright (c) German Cancer Research Center, Division of Medical and Biological Informatics. All rights reserved. This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See LICENSE.txt or http://www.mitk.org for details. 
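USImageVideoSource drives its input through a cv::VideoCapture, as described above: a device id (or -1 for the first available camera) or a file path, an optional resolution override, and optional grayscale conversion and cropping per frame. A compact sketch of that flow against the public OpenCV API; the resolution and ROI values are hypothetical and this is not the class implementation:

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>

int main()
{
  // -1 lets the backend pick the first camera it can open.
  cv::VideoCapture capture(-1);
  if (!capture.isOpened())
  {
    std::cerr << "No capture device available." << std::endl;
    return 1;
  }

  // Resolution override, useful when a driver reports mismatching buffer sizes.
  capture.set(CV_CAP_PROP_FRAME_WIDTH, 640);
  capture.set(CV_CAP_PROP_FRAME_HEIGHT, 480);

  cv::Mat frame;
  if (capture.read(frame))
  {
    // Grayscale conversion and cropping, the two per-frame filters the
    // ultrasound source can apply.
    cv::Mat gray;
    cv::cvtColor(frame, gray, CV_RGB2GRAY, 1);
    cv::Rect roi(0, 0, gray.cols / 2, gray.rows / 2);
    cv::Mat cropped = gray(roi).clone();
    std::cout << "cropped frame: " << cropped.cols << "x" << cropped.rows << std::endl;
  }
  return 0;
}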
===================================================================*/

// Blueberry
#include
#include

//Mitk
#include
#include
#include
#include
#include

// Qmitk
#include "QmitkUltrasoundSupport.h"

// Qt
#include
#include
#include

// Ultrasound
#include "mitkUSDevice.h"
#include "QmitkUSAbstractCustomWidget.h"

#include
#include
#include "usServiceReference.h"
#include "internal/org_mitk_gui_qt_ultrasound_Activator.h"

const std::string QmitkUltrasoundSupport::VIEW_ID = "org.mitk.views.ultrasoundsupport";

QmitkUltrasoundSupport::QmitkUltrasoundSupport()
  : m_Controls(nullptr), m_ControlCustomWidget(0), m_ControlBModeWidget(0),
  m_ControlProbesWidget(0), m_ImageAlreadySetToNode(false), m_CurrentImageWidth(0), m_CurrentImageHeight(0)
{
  ctkPluginContext* pluginContext = mitk::PluginActivator::GetContext();

  if (pluginContext)
  {
    // to be notified about service events of a USDevice
    pluginContext->connectServiceListener(this, "OnDeviceServiceEvent",
      QString::fromStdString("(" + us::ServiceConstants::OBJECTCLASS() + "=" + us_service_interface_iid<mitk::USDevice>() + ")"));
  }
}

QmitkUltrasoundSupport::~QmitkUltrasoundSupport()
{
  try
  {
    StoreUISettings();
    StopTimers();

    // Get all active devices and deactivate them to prevent freeze
    std::vector<mitk::USDevice::Pointer> devices = this->m_Controls->m_ActiveVideoDevices->GetAllServices<mitk::USDevice>();
    for (size_t i = 0; i < devices.size(); i++)
    {
      mitk::USDevice::Pointer device = devices[i];
      if (device.IsNotNull() && device->GetIsActive())
      {
        device->Deactivate();
        device->Disconnect();
      }
    }
  }
  catch (std::exception &e)
  {
    MITK_ERROR << "Exception during call of destructor! Message: " << e.what();
  }
}

void QmitkUltrasoundSupport::SetFocus()
{
}

void QmitkUltrasoundSupport::CreateQtPartControl(QWidget *parent)
{
  //initialize timers
  m_UpdateTimer = new QTimer(this);
  m_RenderingTimer2d = new QTimer(this);
  m_RenderingTimer3d = new QTimer(this);

  // build up qt view, unless already done
  if (!m_Controls)
  {
    // create GUI widgets from the Qt Designer's .ui file
    m_Controls = new Ui::UltrasoundSupportControls;
    m_Controls->setupUi(parent);

    //load persistence data before connecting slots (so no slots are called in this phase...)
    LoadUISettings();

    //connect signals and slots...
connect(m_Controls->m_DeviceManagerWidget, SIGNAL(NewDeviceButtonClicked()), this, SLOT(OnClickedAddNewDevice())); // Change Widget Visibilities connect(m_Controls->m_DeviceManagerWidget, SIGNAL(NewDeviceButtonClicked()), this->m_Controls->m_NewVideoDeviceWidget, SLOT(CreateNewDevice())); // Init NewDeviceWidget connect(m_Controls->m_ActiveVideoDevices, SIGNAL(ServiceSelectionChanged(us::ServiceReferenceU)), this, SLOT(OnChangedActiveDevice())); connect(m_Controls->m_RunImageTimer, SIGNAL(clicked()), this, SLOT(OnChangedActiveDevice())); connect(m_Controls->m_ShowImageStream, SIGNAL(clicked()), this, SLOT(OnChangedActiveDevice())); connect(m_Controls->m_NewVideoDeviceWidget, SIGNAL(Finished()), this, SLOT(OnNewDeviceWidgetDone())); // After NewDeviceWidget finished editing connect(m_Controls->m_FrameRatePipeline, SIGNAL(valueChanged(int)), this, SLOT(OnChangedFramerateLimit())); connect(m_Controls->m_FrameRate2d, SIGNAL(valueChanged(int)), this, SLOT(OnChangedFramerateLimit())); connect(m_Controls->m_FrameRate3d, SIGNAL(valueChanged(int)), this, SLOT(OnChangedFramerateLimit())); connect(m_Controls->m_FreezeButton, SIGNAL(clicked()), this, SLOT(OnClickedFreezeButton())); connect(m_UpdateTimer, SIGNAL(timeout()), this, SLOT(UpdateImage())); connect(m_RenderingTimer2d, SIGNAL(timeout()), this, SLOT(RenderImage2d())); connect(m_RenderingTimer3d, SIGNAL(timeout()), this, SLOT(RenderImage3d())); connect(m_Controls->m_Update2DView, SIGNAL(clicked()), this, SLOT(StartTimers())); connect(m_Controls->m_Update3DView, SIGNAL(clicked()), this, SLOT(StartTimers())); connect(m_Controls->m_DeviceManagerWidget, SIGNAL(EditDeviceButtonClicked(mitk::USDevice::Pointer)), this, SLOT(OnClickedEditDevice())); //Change Widget Visibilities connect(m_Controls->m_DeviceManagerWidget, SIGNAL(EditDeviceButtonClicked(mitk::USDevice::Pointer)), this->m_Controls->m_NewVideoDeviceWidget, SLOT(EditDevice(mitk::USDevice::Pointer))); // Initializations m_Controls->m_NewVideoDeviceWidget->setVisible(false); std::string filter = "(&(" + us::ServiceConstants::OBJECTCLASS() + "=" + "org.mitk.services.UltrasoundDevice)(" + mitk::USDevice::GetPropertyKeys().US_PROPKEY_ISACTIVE + "=true))"; m_Controls->m_ActiveVideoDevices->Initialize( mitk::USDevice::GetPropertyKeys().US_PROPKEY_LABEL, filter); m_Controls->m_ActiveVideoDevices->SetAutomaticallySelectFirstEntry(true); m_FrameCounterPipeline = 0; m_FrameCounter2d = 0; m_FrameCounter3d = 0; m_Controls->tabWidget->setTabEnabled(1, false); } } void QmitkUltrasoundSupport::OnClickedAddNewDevice() { m_Controls->m_NewVideoDeviceWidget->setVisible(true); m_Controls->m_DeviceManagerWidget->setVisible(false); m_Controls->m_Headline->setText("Add New Video Device:"); m_Controls->m_WidgetActiveDevices->setVisible(false); } void QmitkUltrasoundSupport::OnClickedEditDevice() { m_Controls->m_NewVideoDeviceWidget->setVisible(true); m_Controls->m_DeviceManagerWidget->setVisible(false); m_Controls->m_WidgetActiveDevices->setVisible(false); m_Controls->m_Headline->setText("Edit Video Device:"); } void QmitkUltrasoundSupport::UpdateImage() { //Update device m_Device->Modified(); m_Device->Update(); //Only update the view if the image is shown if (m_Controls->m_ShowImageStream->isChecked()) { //Update data nodes for (size_t i = 0; i < m_AmountOfOutputs; i++) { mitk::Image::Pointer curOutput = m_Device->GetOutput(i); if (curOutput->IsEmpty()) { m_Node.at(i)->SetName("No Data received yet ..."); //create a noise image for correct initialization of level window, etc. 
mitk::Image::Pointer randomImage = mitk::ImageGenerator::GenerateRandomImage(32, 32, 1, 1, 1, 1, 1, 255, 0); m_Node.at(i)->SetData(randomImage); curOutput->SetGeometry(randomImage->GetGeometry()); } else { std::stringstream nodeName; nodeName << "US Viewing Stream - Image " << i; m_Node.at(i)->SetName(nodeName.str()); m_Node.at(i)->SetData(curOutput); m_Node.at(i)->Modified(); } // if the geometry changed: reinitialize the ultrasound image if ((i==0) && (m_OldGeometry.IsNotNull()) && (curOutput->GetGeometry() != NULL) && (!mitk::Equal(*(m_OldGeometry.GetPointer()), *(curOutput->GetGeometry()), 0.0001, false)) ) { mitk::IRenderWindowPart* renderWindow = this->GetRenderWindowPart(); if ((renderWindow != NULL) && (curOutput->GetTimeGeometry()->IsValid()) && (m_Controls->m_ShowImageStream->isChecked())) { renderWindow->GetRenderingManager()->InitializeViews( curOutput->GetGeometry(), mitk::RenderingManager::REQUEST_UPDATE_ALL, true); renderWindow->GetRenderingManager()->RequestUpdateAll(); } m_CurrentImageWidth = curOutput->GetDimension(0); m_CurrentImageHeight = curOutput->GetDimension(1); m_OldGeometry = dynamic_cast(curOutput->GetGeometry()); } } } //Update frame counter m_FrameCounterPipeline++; if (m_FrameCounterPipeline >0) { //compute framerate of pipeline update int nMilliseconds = m_Clock.restart(); int fps = 1000.0 / nMilliseconds; m_FPSPipeline = fps; m_FrameCounterPipeline = 0; //display lowest framerate in UI int lowestFPS = m_FPSPipeline; if (m_Controls->m_Update2DView->isChecked() && (m_FPS2d < lowestFPS)) { lowestFPS = m_FPS2d; } if (m_Controls->m_Update3DView->isChecked() && (m_FPS3d < lowestFPS)) { lowestFPS = m_FPS3d; } m_Controls->m_FramerateLabel->setText("Current Framerate: " + QString::number(lowestFPS) + " FPS"); } } void QmitkUltrasoundSupport::RenderImage2d() { if (!m_Controls->m_Update2DView->isChecked()) return; this->RequestRenderWindowUpdate(mitk::RenderingManager::REQUEST_UPDATE_2DWINDOWS); m_FrameCounter2d++; if (m_FrameCounter2d >0) { //compute framerate of 2d render window update int nMilliseconds = m_Clock2d.restart(); int fps = 1000.0f / (nMilliseconds); m_FPS2d = fps; m_FrameCounter2d = 0; } } void QmitkUltrasoundSupport::RenderImage3d() { if (!m_Controls->m_Update3DView->isChecked()) return; this->RequestRenderWindowUpdate(mitk::RenderingManager::REQUEST_UPDATE_3DWINDOWS); m_FrameCounter3d++; if (m_FrameCounter3d >0) { //compute framerate of 2d render window update int nMilliseconds = m_Clock3d.restart(); int fps = 1000.0f / (nMilliseconds); m_FPS3d = fps; m_FrameCounter3d = 0; } } void QmitkUltrasoundSupport::OnChangedFramerateLimit() { StopTimers(); int intervalPipeline = (1000 / m_Controls->m_FrameRatePipeline->value()); int interval2D = (1000 / m_Controls->m_FrameRate2d->value()); int interval3D = (1000 / m_Controls->m_FrameRate3d->value()); SetTimerIntervals(intervalPipeline, interval2D, interval3D); StartTimers(); } void QmitkUltrasoundSupport::OnClickedFreezeButton() { if (m_Device.IsNull()) { MITK_WARN("UltrasoundSupport") << "Freeze button clicked though no device is selected."; return; } if (m_Device->GetIsFreezed()) { m_Device->SetIsFreezed(false); m_Controls->m_FreezeButton->setText("Freeze"); } else { m_Device->SetIsFreezed(true); m_Controls->m_FreezeButton->setText("Start Viewing Again"); } } void QmitkUltrasoundSupport::OnChangedActiveDevice() { - if (m_Controls->m_RunImageTimer->isChecked() == FALSE) - { - StopTimers(); - return; - } + if (m_Controls->m_RunImageTimer->isChecked() == false) + { + StopTimers(); + return; + } //clean up and 
stop timer StopTimers(); this->RemoveControlWidgets(); for (size_t j = 0; j < m_Node.size(); j++) { this->GetDataStorage()->Remove(m_Node.at(j)); m_Node.at(j)->ReleaseData(); } m_Node.clear(); //get current device, abort if it is invalid m_Device = m_Controls->m_ActiveVideoDevices->GetSelectedService(); if (m_Device.IsNull()) { m_Controls->tabWidget->setTabEnabled(1, false); return; } m_AmountOfOutputs = m_Device->GetNumberOfIndexedOutputs(); // clear data storage, create new nodes and add for (size_t i = 0; i < m_AmountOfOutputs; i++) { mitk::DataNode::Pointer currentNode = mitk::DataNode::New(); std::stringstream nodeName; nodeName << "US Viewing Stream - Image " << i; currentNode->SetName(nodeName.str()); //create a dummy image (gray values 0..255) for correct initialization of level window, etc. mitk::Image::Pointer dummyImage = mitk::ImageGenerator::GenerateRandomImage(100, 100, 1, 1, 1, 1, 1, 255, 0); currentNode->SetData(dummyImage); m_OldGeometry = dynamic_cast(dummyImage->GetGeometry()); m_Node.push_back(currentNode); //show node if the option is enabled if (m_Controls->m_ShowImageStream->isChecked()) { this->GetDataStorage()->Add(m_Node.at(i)); } } //create the widgets for this device and enable the widget tab this->CreateControlWidgets(); m_Controls->tabWidget->setTabEnabled(1, true); //start timer if (m_Controls->m_RunImageTimer->isChecked()) { int intervalPipeline = (1000 / m_Controls->m_FrameRatePipeline->value()); int interval2D = (1000 / m_Controls->m_FrameRate2d->value()); int interval3D = (1000 / m_Controls->m_FrameRate3d->value()); SetTimerIntervals(intervalPipeline, interval2D, interval3D); StartTimers(); m_Controls->m_TimerWidget->setEnabled(true); } else { m_Controls->m_TimerWidget->setEnabled(false); } } void QmitkUltrasoundSupport::OnNewDeviceWidgetDone() { m_Controls->m_NewVideoDeviceWidget->setVisible(false); m_Controls->m_DeviceManagerWidget->setVisible(true); m_Controls->m_Headline->setText("Ultrasound Devices:"); m_Controls->m_WidgetActiveDevices->setVisible(true); } void QmitkUltrasoundSupport::CreateControlWidgets() { m_ControlProbesWidget = new QmitkUSControlsProbesWidget(m_Device->GetControlInterfaceProbes(), m_Controls->m_ToolBoxControlWidgets); m_Controls->probesWidgetContainer->addWidget(m_ControlProbesWidget); // create b mode widget for current device m_ControlBModeWidget = new QmitkUSControlsBModeWidget(m_Device->GetControlInterfaceBMode(), m_Controls->m_ToolBoxControlWidgets); m_Controls->m_ToolBoxControlWidgets->addItem(m_ControlBModeWidget, "B Mode Controls"); if (!m_Device->GetControlInterfaceBMode()) { m_Controls->m_ToolBoxControlWidgets->setItemEnabled(m_Controls->m_ToolBoxControlWidgets->count() - 1, false); } // create doppler widget for current device m_ControlDopplerWidget = new QmitkUSControlsDopplerWidget(m_Device->GetControlInterfaceDoppler(), m_Controls->m_ToolBoxControlWidgets); m_Controls->m_ToolBoxControlWidgets->addItem(m_ControlDopplerWidget, "Doppler Controls"); if (!m_Device->GetControlInterfaceDoppler()) { m_Controls->m_ToolBoxControlWidgets->setItemEnabled(m_Controls->m_ToolBoxControlWidgets->count() - 1, false); } ctkPluginContext* pluginContext = mitk::PluginActivator::GetContext(); if (pluginContext) { std::string filter = "(org.mitk.services.UltrasoundCustomWidget.deviceClass=" + m_Device->GetDeviceClass() + ")"; QString interfaceName = QString::fromStdString(us_service_interface_iid()); m_CustomWidgetServiceReference = pluginContext->getServiceReferences(interfaceName, QString::fromStdString(filter)); if 
(m_CustomWidgetServiceReference.size() > 0) { m_ControlCustomWidget = pluginContext->getService (m_CustomWidgetServiceReference.at(0))->CloneForQt(m_Controls->tab2); m_ControlCustomWidget->SetDevice(m_Device); m_Controls->m_ToolBoxControlWidgets->addItem(m_ControlCustomWidget, "Custom Controls"); } else { m_Controls->m_ToolBoxControlWidgets->addItem(new QWidget(m_Controls->m_ToolBoxControlWidgets), "Custom Controls"); m_Controls->m_ToolBoxControlWidgets->setItemEnabled(m_Controls->m_ToolBoxControlWidgets->count() - 1, false); } } // select first enabled control widget for (int n = 0; n < m_Controls->m_ToolBoxControlWidgets->count(); ++n) { if (m_Controls->m_ToolBoxControlWidgets->isItemEnabled(n)) { m_Controls->m_ToolBoxControlWidgets->setCurrentIndex(n); break; } } } void QmitkUltrasoundSupport::RemoveControlWidgets() { if (!m_ControlProbesWidget) { return; } //widgets do not exist... nothing to do // remove all control widgets from the tool box widget while (m_Controls->m_ToolBoxControlWidgets->count() > 0) { m_Controls->m_ToolBoxControlWidgets->removeItem(0); } // remove probes widget (which is not part of the tool box widget) m_Controls->probesWidgetContainer->removeWidget(m_ControlProbesWidget); delete m_ControlProbesWidget; m_ControlProbesWidget = 0; delete m_ControlBModeWidget; m_ControlBModeWidget = 0; delete m_ControlDopplerWidget; m_ControlDopplerWidget = 0; // delete custom widget if it is present if (m_ControlCustomWidget) { ctkPluginContext* pluginContext = mitk::PluginActivator::GetContext(); delete m_ControlCustomWidget; m_ControlCustomWidget = 0; if (m_CustomWidgetServiceReference.size() > 0) { pluginContext->ungetService(m_CustomWidgetServiceReference.at(0)); } } } void QmitkUltrasoundSupport::OnDeviceServiceEvent(const ctkServiceEvent event) { if (m_Device.IsNull() || event.getType() != ctkServiceEvent::MODIFIED) { return; } ctkServiceReference service = event.getServiceReference(); if (m_Device->GetManufacturer() != service.getProperty(QString::fromStdString(mitk::USDevice::GetPropertyKeys().US_PROPKEY_MANUFACTURER)).toString().toStdString() && m_Device->GetName() != service.getProperty(QString::fromStdString(mitk::USDevice::GetPropertyKeys().US_PROPKEY_NAME)).toString().toStdString()) { return; } if (!m_Device->GetIsActive() && m_UpdateTimer->isActive()) { StopTimers(); } if (m_CurrentDynamicRange != service.getProperty(QString::fromStdString(mitk::USDevice::GetPropertyKeys().US_PROPKEY_BMODE_DYNAMIC_RANGE)).toDouble()) { m_CurrentDynamicRange = service.getProperty(QString::fromStdString(mitk::USDevice::GetPropertyKeys().US_PROPKEY_BMODE_DYNAMIC_RANGE)).toDouble(); // update level window for the current dynamic range mitk::LevelWindow levelWindow; m_Node.at(0)->GetLevelWindow(levelWindow); levelWindow.SetAuto(m_curOutput.at(0), true, true); m_Node.at(0)->SetLevelWindow(levelWindow); } } void QmitkUltrasoundSupport::StoreUISettings() { QSettings settings; settings.beginGroup(QString::fromStdString(VIEW_ID)); settings.setValue("DisplayImage", QVariant(m_Controls->m_ShowImageStream->isChecked())); settings.setValue("RunImageTimer", QVariant(m_Controls->m_RunImageTimer->isChecked())); settings.setValue("Update2DView", QVariant(m_Controls->m_Update2DView->isChecked())); settings.setValue("Update3DView", QVariant(m_Controls->m_Update3DView->isChecked())); settings.setValue("UpdateRatePipeline", QVariant(m_Controls->m_FrameRatePipeline->value())); settings.setValue("UpdateRate2d", QVariant(m_Controls->m_FrameRate2d->value())); settings.setValue("UpdateRate3d", 
QVariant(m_Controls->m_FrameRate3d->value())); settings.endGroup(); } void QmitkUltrasoundSupport::LoadUISettings() { QSettings settings; settings.beginGroup(QString::fromStdString(VIEW_ID)); m_Controls->m_ShowImageStream->setChecked(settings.value("DisplayImage", true).toBool()); m_Controls->m_RunImageTimer->setChecked(settings.value("RunImageTimer", true).toBool()); m_Controls->m_Update2DView->setChecked(settings.value("Update2DView", true).toBool()); m_Controls->m_Update3DView->setChecked(settings.value("Update3DView", true).toBool()); m_Controls->m_FrameRatePipeline->setValue(settings.value("UpdateRatePipeline", 50).toInt()); m_Controls->m_FrameRate2d->setValue(settings.value("UpdateRate2d", 20).toInt()); m_Controls->m_FrameRate3d->setValue(settings.value("UpdateRate3d", 5).toInt()); settings.endGroup(); } void QmitkUltrasoundSupport::StartTimers() { m_Clock.start(); m_UpdateTimer->start(); if (m_Controls->m_Update2DView->isChecked()) { m_RenderingTimer2d->start(); } if (m_Controls->m_Update3DView->isChecked()) { m_RenderingTimer3d->start(); } } void QmitkUltrasoundSupport::StopTimers() { m_UpdateTimer->stop(); m_RenderingTimer2d->stop(); m_RenderingTimer3d->stop(); } void QmitkUltrasoundSupport::SetTimerIntervals(int intervalPipeline, int interval2D, int interval3D) { m_UpdateTimer->setInterval(intervalPipeline); m_RenderingTimer2d->setInterval(interval2D); m_RenderingTimer3d->setInterval(interval3D); }
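
For readers unfamiliar with the ultrasound video source touched by this patch, below is a minimal usage sketch based only on the methods declared in mitkUSImageVideoSource.h above (New(), SetCameraInput(), SetColorOutput(), SetRegionOfInterest(), GetIsReady()). The helper name, the device id -1, and the ROI coordinates are illustrative assumptions, not part of the patch.

#include "mitkUSImageVideoSource.h"

// Sketch (not part of the patch): configure a camera-backed ultrasound image source.
// Returns true if the source is ready to deliver images afterwards.
bool ConfigureVideoSourceSketch()
{
  mitk::USImageVideoSource::Pointer source = mitk::USImageVideoSource::New();

  source->SetCameraInput(-1);    // -1: grab the first capture device that can be opened
  source->SetColorOutput(false); // grayscale output significantly improves performance

  // Crop to a region of interest; the rectangle is clamped to the image
  // borders if it exceeds the video frame (coordinates are illustrative).
  mitk::USImageVideoSource::USImageRoi roi(0, 0, 640, 480);
  source->SetRegionOfInterest(roi);

  // Images may only be retrieved while GetIsReady() returns true.
  return source->GetIsReady();
}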