diff --git a/Modules/Biophotonics/python/iMC/regression/estimation.py b/Modules/Biophotonics/python/iMC/regression/estimation.py
index b4c9dd1c59..7b84e88974 100644
--- a/Modules/Biophotonics/python/iMC/regression/estimation.py
+++ b/Modules/Biophotonics/python/iMC/regression/estimation.py
@@ -1,134 +1,130 @@
"""
-The MultiSpectral Imaging Toolkit (MSITK)
+The Medical Imaging Interaction Toolkit (MITK)

-Copyright (c) German Cancer Research Center,
-Computer Assisted Interventions.
+Copyright (c) German Cancer Research Center (DKFZ)
All rights reserved.

-This software is distributed WITHOUT ANY WARRANTY; without
-even the implied warranty of MERCHANTABILITY or FITNESS FOR
-A PARTICULAR PURPOSE.
-
-See LICENSE for details
+Use of this source code is governed by a 3-clause BSD license that can be
+found in the LICENSE file.
"""

'''
Created on Oct 21, 2015

@author: wirkert
'''

import math
import logging
import time

import tensorflow as tf
import numpy as np
import SimpleITK as sitk

from regression.tensorflow_estimator import multilayer_perceptron, cnn
import msi.imgmani as imgmani


def SAMDistance(x, y):
    return math.acos(np.dot(x, y) / (np.linalg.norm(x) * np.linalg.norm(y)))


def estimate_image(msi, regressor):
    """given an Msi and a regressor, estimate the parameters for this image

    Parameters:
        msi: multi spectral image
        regressor: regressor, must implement the predict method"""
    # estimate parameters
    collapsed_msi = imgmani.collapse_image(msi.get_image())
    # in case of nan values: set to 0
    collapsed_msi[np.isnan(collapsed_msi)] = 0.
    collapsed_msi[np.isinf(collapsed_msi)] = 0.

    start = time.time()
    estimated_parameters = regressor.predict(collapsed_msi)
    end = time.time()
    estimation_time = end - start
    logging.info("time necessary for estimating image parameters: " +
                 str(estimation_time) + "s")
    # restore shape
    feature_dimension = 1
    if len(estimated_parameters.shape) > 1:
        feature_dimension = estimated_parameters.shape[-1]

    estimated_paramters_as_image = np.reshape(
        estimated_parameters,
        (msi.get_image().shape[0], msi.get_image().shape[1], feature_dimension))
    # save as sitk nrrd.
    sitk_img = sitk.GetImageFromArray(estimated_paramters_as_image, isVector=True)
    return sitk_img, estimation_time


def estimate_image_tensorflow(msi, model_checkpoint_dir):
    # estimate parameters
    collapsed_msi = imgmani.collapse_image(msi.get_image())
    # in case of nan values: set to 0
    collapsed_msi[np.isnan(collapsed_msi)] = 0.
    collapsed_msi[np.isinf(collapsed_msi)] = 0.
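    # The checkpointed CNN below expects finite inputs of shape
    # [batch, nr_wavelengths, 1, 1]; the graph is rebuilt to match the trained
    # model before its weights are restored from model_checkpoint_dir.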
    tf.reset_default_graph()

    keep_prob = tf.placeholder("float")
    nr_wavelengths = len(msi.get_wavelengths())
    x = tf.placeholder("float", [None, nr_wavelengths, 1, 1])

    x_test_image = np.reshape(msi.get_image(), [-1, nr_wavelengths, 1, 1])

    # Construct the desired model
    # pred, regularizers = multilayer_perceptron(x, nr_wavelengths, 100, 1,
    #                                            keep_prob)
    pred = cnn(x, 1, keep_prob)

    # Initializing the variables
    init = tf.initialize_all_variables()

    saver = tf.train.Saver()

    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())
        # restore model:
        ckpt = tf.train.get_checkpoint_state(model_checkpoint_dir)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)

        start = time.time()
        estimated_parameters = pred.eval({x: x_test_image, keep_prob: 1.0})
        end = time.time()
        estimation_time = end - start
        logging.info("time necessary for estimating image parameters: " +
                     str(estimation_time) + "s")
        # restore shape
        feature_dimension = 1
        if len(estimated_parameters.shape) > 1:
            feature_dimension = estimated_parameters.shape[-1]

        estimated_paramters_as_image = np.reshape(
            estimated_parameters,
            (msi.get_image().shape[0], msi.get_image().shape[1], feature_dimension))
        # save as sitk nrrd.
        sitk_img = sitk.GetImageFromArray(estimated_paramters_as_image, isVector=True)
        return sitk_img, estimation_time


def standard_score(estimator, X, y):
    """our standard scoring method is the median absolute error"""
    return np.median(np.abs(estimator.predict(X) - y))
diff --git a/Modules/CppMicroServices/README.md b/Modules/CppMicroServices/README.md
index 05b4c242e4..fcac6bd1d5 100644
--- a/Modules/CppMicroServices/README.md
+++ b/Modules/CppMicroServices/README.md
@@ -1,100 +1,100 @@
[![Build Status](https://secure.travis-ci.org/saschazelzer/CppMicroServices.png)](http://travis-ci.org/saschazelzer/CppMicroServices)
[![Coverity Scan Build Status](https://scan.coverity.com/projects/1329/badge.svg)](https://scan.coverity.com/projects/1329)

C++ Micro Services
==================

Introduction
------------

The C++ Micro Services library provides a dynamic service registry and module system, partially based on the OSGi Core Release 5 specifications. It enables developers to create a service oriented and dynamic software stack.

Proper usage of the C++ Micro Services library leads to

- Re-use of software components
- Loose coupling
- Separation of concerns
- Clean APIs based on service interfaces
- Extensible systems

and more.

Requirements
------------

This is a pure C++ implementation of the OSGi service model and does not have any third-party library dependencies.

Supported Platforms
-------------------

The library should compile on many different platforms. Below is a list of tested compiler/OS combinations:

- GCC 4.6 (Ubuntu 12.04)
- GCC 4.8 (Ubuntu 13.10)
- Clang 3.2 (Ubuntu 13.10)
- Clang (MacOS X 10.8 and 10.9)
- Visual Studio 2008 SP1, 2010, 2012, 2013 (Windows 7)

Legal
-----

-Copyright (c) German Cancer Research Center. Licensed under the [Apache License v2.0][apache_license].
+Copyright (c) German Cancer Research Center (DKFZ). Licensed under the [Apache License v2.0][apache_license].

Quick Start
-----------

Essentially, the C++ Micro Services library provides you with a powerful dynamic service registry. Each shared or static library has an associated `ModuleContext` object, through which the service registry is accessed.
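The examples in this Quick Start assume that a `ModuleContext*` is already at hand. If it is not, the context of the calling module can typically be obtained via `GetModuleContext()`; a minimal sketch (the header names follow the library's `us` prefix convention and are an assumption, not part of this diff):

```cpp
#include <usGetModuleContext.h>
#include <usModuleContext.h>

using namespace us;

void Example()
{
  // Obtain the ModuleContext associated with the module this code lives in.
  ModuleContext* context = GetModuleContext();
  // ... use the context for the look-ups and registrations shown below.
}
```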
To query the registry for a service object implementing one or more specific interfaces, the code would look like this:

```cpp
#include <usModuleContext.h>
#include "SomeInterface.h"

using namespace us;

void UseService(ModuleContext* context)
{
  ServiceReference<SomeInterface> serviceRef = context->GetServiceReference<SomeInterface>();
  if (serviceRef)
  {
    SomeInterface* service = context->GetService(serviceRef);
    if (service) { /* do something */ }
  }
}
```

Registering a service object against a certain interface looks like this:

```cpp
#include <usModuleContext.h>
#include "SomeInterface.h"

using namespace us;

void RegisterSomeService(ModuleContext* context, SomeInterface* service)
{
  context->RegisterService<SomeInterface>(service);
}
```

The OSGi service model additionally allows services to be annotated with properties, which can then be used during service look-ups. It also allows the life-cycle of service objects to be tracked.

Please see the [Documentation](http://cppmicroservices.org/doc_latest/index.html) for more examples and tutorials and the API reference. There is also a blog post about [OSGi Lite for C++](http://blog.cppmicroservices.org/2012/04/15/osgi-lite-for-c++).

Build Instructions
------------------

Please visit the [Build Instructions][bi_master] page online.

[bi_master]: http://cppmicroservices.org/doc_latest/BuildInstructions.html
[apache_license]: http://www.apache.org/licenses/LICENSE-2.0
diff --git a/Modules/CppMicroServices/core/README.md b/Modules/CppMicroServices/core/README.md
index 05b4c242e4..fcac6bd1d5 100644
--- a/Modules/CppMicroServices/core/README.md
+++ b/Modules/CppMicroServices/core/README.md
@@ -1,100 +1,100 @@
[![Build Status](https://secure.travis-ci.org/saschazelzer/CppMicroServices.png)](http://travis-ci.org/saschazelzer/CppMicroServices)
[![Coverity Scan Build Status](https://scan.coverity.com/projects/1329/badge.svg)](https://scan.coverity.com/projects/1329)

C++ Micro Services
==================

Introduction
------------

The C++ Micro Services library provides a dynamic service registry and module system, partially based on the OSGi Core Release 5 specifications. It enables developers to create a service oriented and dynamic software stack.

Proper usage of the C++ Micro Services library leads to

- Re-use of software components
- Loose coupling
- Separation of concerns
- Clean APIs based on service interfaces
- Extensible systems

and more.

Requirements
------------

This is a pure C++ implementation of the OSGi service model and does not have any third-party library dependencies.

Supported Platforms
-------------------

The library should compile on many different platforms. Below is a list of tested compiler/OS combinations:

- GCC 4.6 (Ubuntu 12.04)
- GCC 4.8 (Ubuntu 13.10)
- Clang 3.2 (Ubuntu 13.10)
- Clang (MacOS X 10.8 and 10.9)
- Visual Studio 2008 SP1, 2010, 2012, 2013 (Windows 7)

Legal
-----

-Copyright (c) German Cancer Research Center. Licensed under the [Apache License v2.0][apache_license].
+Copyright (c) German Cancer Research Center (DKFZ). Licensed under the [Apache License v2.0][apache_license].

Quick Start
-----------

Essentially, the C++ Micro Services library provides you with a powerful dynamic service registry. Each shared or static library has an associated `ModuleContext` object, through which the service registry is accessed.
To query the registry for a service object implementing one or more specific interfaces, the code would look like this:

```cpp
#include <usModuleContext.h>
#include "SomeInterface.h"

using namespace us;

void UseService(ModuleContext* context)
{
  ServiceReference<SomeInterface> serviceRef = context->GetServiceReference<SomeInterface>();
  if (serviceRef)
  {
    SomeInterface* service = context->GetService(serviceRef);
    if (service) { /* do something */ }
  }
}
```

Registering a service object against a certain interface looks like this:

```cpp
#include <usModuleContext.h>
#include "SomeInterface.h"

using namespace us;

void RegisterSomeService(ModuleContext* context, SomeInterface* service)
{
  context->RegisterService<SomeInterface>(service);
}
```

The OSGi service model additionally allows services to be annotated with properties, which can then be used during service look-ups. It also allows the life-cycle of service objects to be tracked.

Please see the [Documentation](http://cppmicroservices.org/doc_latest/index.html) for more examples and tutorials and the API reference. There is also a blog post about [OSGi Lite for C++](http://blog.cppmicroservices.org/2012/04/15/osgi-lite-for-c++).

Build Instructions
------------------

Please visit the [Build Instructions][bi_master] page online.

[bi_master]: http://cppmicroservices.org/doc_latest/BuildInstructions.html
[apache_license]: http://www.apache.org/licenses/LICENSE-2.0
diff --git a/Modules/ModelFitUI/Qmitk/QmitkParameterFitBackgroundJob.cpp b/Modules/ModelFitUI/Qmitk/QmitkParameterFitBackgroundJob.cpp
index 2f25f4aeef..2664f3f934 100644
--- a/Modules/ModelFitUI/Qmitk/QmitkParameterFitBackgroundJob.cpp
+++ b/Modules/ModelFitUI/Qmitk/QmitkParameterFitBackgroundJob.cpp
@@ -1,121 +1,115 @@
-/*=========================================================================
+/*============================================================================
-Program: Medical Imaging & Interaction Toolkit
-Language: C++
-Date: $Date$
-Version: $Revision$
+The Medical Imaging Interaction Toolkit (MITK)
-Copyright (c) German Cancer Research Center, Software Development for
-Integrated Diagnostic and Therapy. All rights reserved.
-See MITKCopyright.txt or http://www.mitk.org/copyright.html for details.
+Copyright (c) German Cancer Research Center (DKFZ)
+All rights reserved.
-This software is distributed WITHOUT ANY WARRANTY; without even
-the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
-PURPOSE. See the above copyright notices for more information.
-
-=========================================================================*/
+Use of this source code is governed by a 3-clause BSD license that can be
+found in the LICENSE file.
+============================================================================*/

#include "QmitkParameterFitBackgroundJob.h"
#include "mitkModelFitInfo.h"

void ParameterFitBackgroundJob::OnFitEvent(::itk::Object* caller, const itk::EventObject& event)
{
  itk::ProgressEvent progressEvent;
  itk::InitializeEvent initEvent;
  itk::StartEvent startEvent;
  itk::EndEvent endEvent;

  if (progressEvent.CheckEvent(&event))
  {
    mitk::ParameterFitImageGeneratorBase* castedReporter = dynamic_cast<mitk::ParameterFitImageGeneratorBase*>(caller);
    emit JobProgress(castedReporter->GetProgress());
  }
  else if (initEvent.CheckEvent(&event))
  {
    emit JobStatusChanged(QString("Initializing parameter fit generator"));
  }
  else if (startEvent.CheckEvent(&event))
  {
    emit JobStatusChanged(QString("Started fitting process."));
  }
  else if (endEvent.CheckEvent(&event))
  {
    emit JobStatusChanged(QString("Finished fitting process."));
  }
}

ParameterFitBackgroundJob::ParameterFitBackgroundJob(mitk::ParameterFitImageGeneratorBase* generator, const mitk::modelFit::ModelFitInfo* fitInfo, mitk::DataNode* parentNode) : ParameterFitBackgroundJob(generator, fitInfo, parentNode, {})
{
};

ParameterFitBackgroundJob::ParameterFitBackgroundJob(mitk::ParameterFitImageGeneratorBase* generator, const mitk::modelFit::ModelFitInfo* fitInfo, mitk::DataNode* parentNode, mitk::modelFit::ModelFitResultNodeVectorType additionalRelevantNodes)
{
  if (!generator)
  {
    mitkThrow() << "Cannot create parameter fit background job. Passed fit generator is NULL.";
  }

  if (!fitInfo)
  {
    mitkThrow() << "Cannot create parameter fit background job. Passed model traits interface is NULL.";
  }

  m_Generator = generator;
  m_ModelFitInfo = fitInfo;
  m_ParentNode = parentNode;
  m_AdditionalRelevantNodes = additionalRelevantNodes;

  m_spCommand = ::itk::MemberCommand<ParameterFitBackgroundJob>::New();
  m_spCommand->SetCallbackFunction(this, &ParameterFitBackgroundJob::OnFitEvent);
  m_ObserverID = m_Generator->AddObserver(::itk::AnyEvent(), m_spCommand);
};

mitk::DataNode* ParameterFitBackgroundJob::GetParentNode() const
{
  return m_ParentNode;
};

mitk::modelFit::ModelFitResultNodeVectorType ParameterFitBackgroundJob::GetAdditionalRelevantNodes() const
{
  return m_AdditionalRelevantNodes;
};

ParameterFitBackgroundJob::~ParameterFitBackgroundJob()
{
  m_Generator->RemoveObserver(m_ObserverID);
};

void ParameterFitBackgroundJob::run()
{
  try
  {
    emit JobStatusChanged(QString("Started fit session. Generate UID: ") + QString::fromStdString(m_ModelFitInfo->uid));

    m_Generator->Generate();

    emit JobStatusChanged(QString("Generate result nodes."));

    m_Results = mitk::modelFit::CreateResultNodeMap(m_Generator->GetParameterImages(), m_Generator->GetDerivedParameterImages(), m_Generator->GetCriterionImages(), m_Generator->GetEvaluationParameterImages(), m_ModelFitInfo);

    emit ResultsAreAvailable(m_Results, this);
  }
  catch (::std::exception& e)
  {
    emit Error(QString("Error while fitting data. Details: ") + QString::fromLatin1(e.what()));
  }
  catch (...)
{ emit Error(QString("Unkown error when fitting the data.")); } emit Finished(); }; diff --git a/Modules/ModelFitUI/Qmitk/QmitkParameterFitBackgroundJob.h b/Modules/ModelFitUI/Qmitk/QmitkParameterFitBackgroundJob.h index 886a0d4810..9a025dd6a0 100644 --- a/Modules/ModelFitUI/Qmitk/QmitkParameterFitBackgroundJob.h +++ b/Modules/ModelFitUI/Qmitk/QmitkParameterFitBackgroundJob.h @@ -1,84 +1,78 @@ -/*========================================================================= +/*============================================================================ -Program: Medical Imaging & Interaction Toolkit -Language: C++ -Date: $Date$ -Version: $Revision$ +The Medical Imaging Interaction Toolkit (MITK) -Copyright (c) German Cancer Research Center, Software Development for -Integrated Diagnostic and Therapy. All rights reserved. -See MITKCopyright.txt or http://www.mitk.org/copyright.html for details. +Copyright (c) German Cancer Research Center (DKFZ) +All rights reserved. -This software is distributed WITHOUT ANY WARRANTY; without even -the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR -PURPOSE. See the above copyright notices for more information. - -=========================================================================*/ +Use of this source code is governed by a 3-clause BSD license that can be +found in the LICENSE file. +============================================================================*/ #ifndef __QMITK_PARAMETER_FIT_BACKGROUND_JOB_H #define __QMITK_PARAMETER_FIT_BACKGROUND_JOB_H //QT #include #include //MITK #include #include #include #include // ITK #include #include "MitkModelFitUIExports.h" class MITKMODELFITUI_EXPORT ParameterFitBackgroundJob : public QObject, public QRunnable { // this is needed for all Qt objects that should have a Qt meta-object // (everything that derives from QObject and wants to have signal/slots) Q_OBJECT public: ParameterFitBackgroundJob(mitk::ParameterFitImageGeneratorBase* generator, const mitk::modelFit::ModelFitInfo* fitInfo, mitk::DataNode* parentNode = nullptr); /** */ ParameterFitBackgroundJob(mitk::ParameterFitImageGeneratorBase* generator, const mitk::modelFit::ModelFitInfo* fitInfo, mitk::DataNode* parentNode, mitk::modelFit::ModelFitResultNodeVectorType additionalRelevantNodes); ~ParameterFitBackgroundJob() override; void run() override; /**Returns the node (if defined), that is the parent object for the results of the job. 
  May be null.*/
  mitk::DataNode* GetParentNode() const;

  mitk::modelFit::ModelFitResultNodeVectorType GetAdditionalRelevantNodes() const;

signals:
  void Finished();
  void Error(QString err);
  void ResultsAreAvailable(mitk::modelFit::ModelFitResultNodeVectorType resultMap, const ParameterFitBackgroundJob* pJob);
  void JobProgress(double progress);
  void JobStatusChanged(QString info);

protected:
  //Inputs
  mitk::ParameterFitImageGeneratorBase::Pointer m_Generator;
  mitk::modelFit::ModelFitInfo::ConstPointer m_ModelFitInfo;
  mitk::DataNode::Pointer m_ParentNode;
  mitk::modelFit::ModelFitResultNodeVectorType m_AdditionalRelevantNodes;

  // Results
  mitk::modelFit::ModelFitResultNodeVectorType m_Results;

  ::itk::MemberCommand<ParameterFitBackgroundJob>::Pointer m_spCommand;
  unsigned long m_ObserverID;

  void OnFitEvent(::itk::Object*, const itk::EventObject& event);
};

#endif
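The job is a QObject-based QRunnable, so a typical consumer connects to its signals and hands it to a thread pool. A minimal sketch of how a view might start such a job (`MyReceiver` and the generator/fitInfo arguments are illustrative placeholders; only the job API itself comes from the header above):

```cpp
#include <QObject>
#include <QThreadPool>

#include "QmitkParameterFitBackgroundJob.h"

// Hedged sketch: MyReceiver is a hypothetical QObject with matching slots.
void StartFitJob(mitk::ParameterFitImageGeneratorBase* generator,
                 const mitk::modelFit::ModelFitInfo* fitInfo,
                 mitk::DataNode* parentNode,
                 MyReceiver* receiver)
{
  auto* job = new ParameterFitBackgroundJob(generator, fitInfo, parentNode);
  job->setAutoDelete(true); // the thread pool deletes the job after run() returns

  QObject::connect(job, &ParameterFitBackgroundJob::JobProgress,
                   receiver, &MyReceiver::OnJobProgress);
  QObject::connect(job, &ParameterFitBackgroundJob::ResultsAreAvailable,
                   receiver, &MyReceiver::OnResultsAvailable);
  QObject::connect(job, &ParameterFitBackgroundJob::Error,
                   receiver, &MyReceiver::OnJobError);

  QThreadPool::globalInstance()->start(job); // run() executes on a worker thread
}
```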
diff --git a/Modules/PharmacokineticsUI/Qmitk/QmitkDescriptionParameterBackgroundJob.cpp b/Modules/PharmacokineticsUI/Qmitk/QmitkDescriptionParameterBackgroundJob.cpp
index 0040e4d63a..0ab7b626ff 100644
--- a/Modules/PharmacokineticsUI/Qmitk/QmitkDescriptionParameterBackgroundJob.cpp
+++ b/Modules/PharmacokineticsUI/Qmitk/QmitkDescriptionParameterBackgroundJob.cpp
@@ -1,129 +1,123 @@
-/*=========================================================================
+/*============================================================================
-Program: Medical Imaging & Interaction Toolkit
-Language: C++
-Date: $Date$
-Version: $Revision$
+The Medical Imaging Interaction Toolkit (MITK)
-Copyright (c) German Cancer Research Center, Software Development for
-Integrated Diagnostic and Therapy. All rights reserved.
-See MITKCopyright.txt or http://www.mitk.org/copyright.html for details.
+Copyright (c) German Cancer Research Center (DKFZ)
+All rights reserved.
-This software is distributed WITHOUT ANY WARRANTY; without even
-the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
-PURPOSE. See the above copyright notices for more information.
-
-=========================================================================*/
+Use of this source code is governed by a 3-clause BSD license that can be
+found in the LICENSE file.
+============================================================================*/

#include "QmitkDescriptionParameterBackgroundJob.h"
#include "mitkModelFitInfo.h"

void DescriptionParameterBackgroundJob::OnComputeEvent(::itk::Object* caller, const itk::EventObject& event)
{
  itk::ProgressEvent progressEvent;
  itk::InitializeEvent initEvent;
  itk::StartEvent startEvent;
  itk::EndEvent endEvent;

  if (progressEvent.CheckEvent(&event))
  {
    mitk::DescriptionParameterImageGeneratorBase* castedReporter = dynamic_cast<mitk::DescriptionParameterImageGeneratorBase*>(caller);
    emit JobProgress(castedReporter->GetProgress());
  }
  else if (initEvent.CheckEvent(&event))
  {
    emit JobStatusChanged(QString("Initializing description parameter generator"));
  }
  else if (startEvent.CheckEvent(&event))
  {
    emit JobStatusChanged(QString("Started parameter computation process."));
  }
  else if (endEvent.CheckEvent(&event))
  {
    emit JobStatusChanged(QString("Finished parameter computation process."));
  }
}

DescriptionParameterBackgroundJob::DescriptionParameterBackgroundJob(mitk::DescriptionParameterImageGeneratorBase* generator, mitk::DataNode* parentNode)
{
  if (!generator)
  {
    mitkThrow() << "Cannot create description parameter background job. Passed fit generator is NULL.";
  }

  m_Generator = generator;
  m_ParentNode = parentNode;

  m_spCommand = ::itk::MemberCommand<DescriptionParameterBackgroundJob>::New();
  m_spCommand->SetCallbackFunction(this, &DescriptionParameterBackgroundJob::OnComputeEvent);
  m_ObserverID = m_Generator->AddObserver(::itk::AnyEvent(), m_spCommand);
};

mitk::DataNode* DescriptionParameterBackgroundJob::GetParentNode() const
{
  return m_ParentNode;
};

DescriptionParameterBackgroundJob::~DescriptionParameterBackgroundJob()
{
  m_Generator->RemoveObserver(m_ObserverID);
};

mitk::modelFit::ModelFitResultNodeVectorType DescriptionParameterBackgroundJob::CreateResultNodes(const mitk::DescriptionParameterImageGeneratorBase::ParameterImageMapType& paramimages)
{
  mitk::modelFit::ModelFitResultNodeVectorType results;

  for (auto image : paramimages)
  {
    if (image.second.IsNull())
    {
      mitkThrow() << "Cannot generate result node. Passed parameterImage is null. parameter name: " << image.first;
    }

    mitk::DataNode::Pointer result = mitk::DataNode::New();
    result->SetData(image.second);
    result->SetName(image.first);
    result->SetVisibility(true);
    results.push_back(result);
  }

  return results;
};

void DescriptionParameterBackgroundJob::run()
{
  try
  {
    emit JobStatusChanged(QString("Started session..."));

    m_Generator->Generate();

    emit JobStatusChanged(QString("Generate result nodes."));

    m_Results = CreateResultNodes(m_Generator->GetParameterImages());

    emit ResultsAreAvailable(m_Results, this);
  }
  catch (::std::exception& e)
  {
    emit Error(QString("Error while processing data. Details: ") + QString::fromLatin1(e.what()));
  }
  catch (...)
  {
    emit Error(QString("Unknown error when processing the data."));
  }

  emit Finished();
};
diff --git a/Modules/PharmacokineticsUI/Qmitk/QmitkDescriptionParameterBackgroundJob.h b/Modules/PharmacokineticsUI/Qmitk/QmitkDescriptionParameterBackgroundJob.h
index b1e4c03e78..7f3d8c5356 100644
--- a/Modules/PharmacokineticsUI/Qmitk/QmitkDescriptionParameterBackgroundJob.h
+++ b/Modules/PharmacokineticsUI/Qmitk/QmitkDescriptionParameterBackgroundJob.h
@@ -1,79 +1,73 @@
-/*=========================================================================
+/*============================================================================
-Program: Medical Imaging & Interaction Toolkit
-Language: C++
-Date: $Date$
-Version: $Revision$
+The Medical Imaging Interaction Toolkit (MITK)
-Copyright (c) German Cancer Research Center, Software Development for
-Integrated Diagnostic and Therapy. All rights reserved.
-See MITKCopyright.txt or http://www.mitk.org/copyright.html for details.
+Copyright (c) German Cancer Research Center (DKFZ)
+All rights reserved.
-This software is distributed WITHOUT ANY WARRANTY; without even
-the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
-PURPOSE. See the above copyright notices for more information.
-
-=========================================================================*/
+Use of this source code is governed by a 3-clause BSD license that can be
+found in the LICENSE file.
+============================================================================*/

#ifndef __QMITK_DESCRIPTION_PARAMETER_BACKGROUND_JOB_H
#define __QMITK_DESCRIPTION_PARAMETER_BACKGROUND_JOB_H

//QT
#include
#include

//MITK
#include
#include
#include
#include

// ITK
#include

#include "MitkPharmacokineticsUIExports.h"

class MITKPHARMACOKINETICSUI_EXPORT DescriptionParameterBackgroundJob : public QObject, public QRunnable
{
  // this is needed for all Qt objects that should have a Qt meta-object
  // (everything that derives from QObject and wants to have signal/slots)
  Q_OBJECT

public:
  DescriptionParameterBackgroundJob(mitk::DescriptionParameterImageGeneratorBase* generator, mitk::DataNode* parentNode = nullptr);

  ~DescriptionParameterBackgroundJob() override;

  void run() override;

  /**Returns the node (if defined), that is the parent object for the results of the job. May be null.*/
  mitk::DataNode* GetParentNode() const;

signals:
  void Finished();
  void Error(QString err);
  void ResultsAreAvailable(mitk::modelFit::ModelFitResultNodeVectorType resultMap, const DescriptionParameterBackgroundJob* pJob);
  void JobProgress(double progress);
  void JobStatusChanged(QString info);

protected:
  static mitk::modelFit::ModelFitResultNodeVectorType CreateResultNodes(const mitk::DescriptionParameterImageGeneratorBase::ParameterImageMapType& paramimages);

  //Inputs
  mitk::DescriptionParameterImageGeneratorBase::Pointer m_Generator;
  mitk::DataNode::Pointer m_ParentNode;

  // Results
  mitk::modelFit::ModelFitResultNodeVectorType m_Results;

  ::itk::MemberCommand<DescriptionParameterBackgroundJob>::Pointer m_spCommand;
  unsigned long m_ObserverID;

  void OnComputeEvent(::itk::Object*, const itk::EventObject& event);
};

#endif
diff --git a/Plugins/org.mitk.gui.qt.fit.demo/documentation/UserManual/Manual.dox b/Plugins/org.mitk.gui.qt.fit.demo/documentation/UserManual/Manual.dox
index 17a7551190..9460060acb 100644
--- a/Plugins/org.mitk.gui.qt.fit.demo/documentation/UserManual/Manual.dox
+++ b/Plugins/org.mitk.gui.qt.fit.demo/documentation/UserManual/Manual.dox
@@ -1,17 +1,17 @@
/**
\page org_mitk_gui_qt_fit_demo The Model Fit Demo View

\imageMacro{fit_demo_doc.svg,"Icon of the Fit Demo View",3.0}

\tableofcontents

\section FIT_DEMO_Introduction Introduction

This plugin is a very simple demo plugin that allows 1) to generate a demo 3D+t image (with linearly increasing values) and 2) to perform a linear fit on a selected node.
It was/is used to demonstrate basic principles and to generate example fit sessions for demo and testing purposes (e.g. functionality of the fit inspector).

\section FIT_DEMO_Contact Contact information

This plug-in is being developed by the SIDT group (Software development for Integrated Diagnostics
-and Therapy) at the DKFZ (German Cancer Research Center). If you have any questions, need support,
+and Therapy) at the German Cancer Research Center (DKFZ). If you have any questions, need support,
find a bug or have a feature request, feel free to contact us at www.mitk.org.
-*/ \ No newline at end of file +*/ diff --git a/Plugins/org.mitk.gui.qt.fit.genericfitting/documentation/UserManual/Manual.dox b/Plugins/org.mitk.gui.qt.fit.genericfitting/documentation/UserManual/Manual.dox index 22989ba6dd..063fdf0dd9 100644 --- a/Plugins/org.mitk.gui.qt.fit.genericfitting/documentation/UserManual/Manual.dox +++ b/Plugins/org.mitk.gui.qt.fit.genericfitting/documentation/UserManual/Manual.dox @@ -1,16 +1,16 @@ /** \page org_mitk_gui_qt_fit_genericfitting The Model Fit Generic Fitting View \imageMacro{fit_generic_doc.svg,"Icon of the Generic Fitting View",3.0} \tableofcontents \section FIT_GENERIC_Introduction Introduction -This plug-in offers a generic fitting component for time resolved image data. +This plug-in offers a generic fitting component for time resolved image data. \section FIT_GENERIC_Contact Contact information This plug-in is being developed by the SIDT group (Software development for Integrated Diagnostics -and Therapy) at the DKFZ (German Cancer Research Center). If you have any questions, need support, +and Therapy) at the German Cancer Research Center (DKFZ). If you have any questions, need support, find a bug or have a feature request, feel free to contact us at www.mitk.org. -*/ \ No newline at end of file +*/ diff --git a/Plugins/org.mitk.gui.qt.fit.inspector/documentation/UserManual/Manual.dox b/Plugins/org.mitk.gui.qt.fit.inspector/documentation/UserManual/Manual.dox index edd1f3e8a5..4abb6951fd 100644 --- a/Plugins/org.mitk.gui.qt.fit.inspector/documentation/UserManual/Manual.dox +++ b/Plugins/org.mitk.gui.qt.fit.inspector/documentation/UserManual/Manual.dox @@ -1,78 +1,78 @@ /** \page org_mitk_gui_qt_fit_inspector The Model Fit Inspector View \imageMacro{fit_inspector_doc.svg,"Icon of the Model Fit Inspector View",3.0} \tableofcontents \section FIT_INSPECTOR_Introduction Introduction This view (Model Fit Inspector; MFI) offers the possibility to display the time course of the signal within an individual -voxel (with or without fit). +voxel (with or without fit). \section FIT_INSPECTOR_Contact Contact information This plug-in is being developed by the SIDT group (Software development for Integrated Diagnostics -and Therapy) at the DKFZ (German Cancer Research Center). If you have any questions, need support, +and Therapy) at the German Cancer Research Center (DKFZ). If you have any questions, need support, find a bug or have a feature request, feel free to contact us at www.mitk.org. \section FIT_INSPECTOR_Raw Viewing without a model fit \imageMacro{fit_inspect_raw.png, "Example screen shot showing the inspection of raw dynamic data without a fit.", 10} Open the view and select the dynamic image in the data manager. The graph plot will show the time course of image intensities (signal) in the selected voxel (cross hair) as red data points. -The blue point indicates the frame currently displayed in the 4-window view. +The blue point indicates the frame currently displayed in the 4-window view. \section FIT_INSPECTOR_Fit Viewing without a model fit \imageMacro{fit_inspect_fit.png, "Example screen shot showing the inspection of dynamic data and an associated fit.", 10} Selecting a parameter map of the fit of interest in the Data Manager will display the raw data curve in red dots with corresponding fit as black line in the selected cross-hair position. If an AIF-based model was used, the utilized AIF (averaged over AIF mask) is also displayed (default in green). The color of the AIF display can be adjusted. 
For ROI based fits, the MFI will display both the current data curve in the selected voxel (in red) and the ROI-averaged fitted curve (in dark green, can be adjusted).
Scrolling through the individual voxels will change the current data curve, but the ROI-based curve remains the same. If voxels outside the fitted area defined by the mask
-are selected, the raw data voxel values will be displayed, however no black fit line is visualized.
+are selected, the raw data voxel values will be displayed, however no black fit line is visualized.

-Below the data plot, several options for data visualization can be selected:
+Below the data plot, several options for data visualization can be selected:

\subsection FIT_INSPECTOR_Fit_info Fit info tab

\imageMacro{fit_inspect_info.png, "Details of the fit info tab.", 5}

-The Fit info tab displays meta-data for selected fits performed on the displayed data set. If no fit was performed and only raw data is visualized, the fields are empty.
+The Fit info tab displays meta-data for selected fits performed on the displayed data set. If no fit was performed and only raw data is visualized, the fields are empty.

\subsection FIT_INSPECTOR_Fit_Parameter Fit parameter tab

\imageMacro{fit_inspect_results.png, "Details of the fit parameter tab.", 5}

The "fit parameter" tab shows fit-related parameter estimate values, derived parameters, fit criterion values and (optional) debug parameter maps in the selected voxel (and all inspection positions; see also \ref FIT_INSPECTOR_Inspect "inspection positions") listed as a table.
If no fit was performed and only raw data is visualized, the table is empty.
The content of the table may be copied to the clipboard or exported as a csv file, by clicking the respective button below the table.

\subsection FIT_INSPECTOR_Fit_Inspection Inspection positions tab

\imageMacro{fit_inspect_positions.png, "Details of the inspection position tab.", 5}

This tab allows you to manage inspection positions (see here for more about \ref FIT_INSPECTOR_Inspect "inspection positions").
- (1) Shows the coordinates of the currently selected position in the workbench.
- (2) Press to make the current position an inspected position. It will be added at the bottom of the list (3).
- (3) List of all inspection positions
- (4) Toggle adding mode on/off. If on, you can add new positions by clicking into render windows with "SHIFT + left mouse button".
- (5) Manually add inspection positions by entering the coordinates.
- (6) Remove the selected inspection positions. (Hot key: Del)
- (7) Move the selected inspection position up in the list (3).
- (8) Move the selected inspection position down in the list (3).
- (9) Save inspection points to a file.
- (10) Load inspection points from a file.

\subsection FIT_INSPECTOR_Fit_Settings Settings tab

\imageMacro{fit_inspect_settings.png, "Details of the settings tab.", 5}

The View settings tab is used to adjust the plot display, namely, x and y axis scales and colors of displayed data plots (i.e. AIF).

\subsection FIT_INSPECTOR_Fit_Export Plot data export tab

\imageMacro{fit_inspect_export.png, "Details of the plot data export tab.", 5}

Displays voxel data (input image) and corresponding time grid together with model fit values and additional curves (like AIF values) for each time point.
The table will contain the position-dependent values of the currently selected position as well as of all inspection positions (see also \ref FIT_INSPECTOR_Inspect "inspection positions").
The data in the table can also be copied to the clipboard or exported to csv files, by clicking the respective button below the table.

\section FIT_INSPECTOR_Inspect Inspection positions

\imageMacro{fit_inspect_positions_example.png, "Example of the usage of inspection positions.", 5}

The fit inspector allows you to define positions in the world coordinate system that will be constantly displayed in addition to the currently selected position.
These inspected positions will be shown in the following parts of the view:
- The plot windows. See example image above; the plot shows the current position (raw data: red dots, fit: black line) and an additional inspection position (green).
- The fit parameter tab (see example image above)
- The plot data export tab.

It will work with dynamic data with and without a model fit.

See the \ref FIT_INSPECTOR_Fit_Inspection "inspection positions tab section" for more details on how to manage inspection positions.
*/
diff --git a/Plugins/org.mitk.gui.qt.matchpoint.algorithm.control/documentation/UserManual/Manual.dox b/Plugins/org.mitk.gui.qt.matchpoint.algorithm.control/documentation/UserManual/Manual.dox
index 162da76ff6..fcc483e40c 100644
--- a/Plugins/org.mitk.gui.qt.matchpoint.algorithm.control/documentation/UserManual/Manual.dox
+++ b/Plugins/org.mitk.gui.qt.matchpoint.algorithm.control/documentation/UserManual/Manual.dox
@@ -1,39 +1,39 @@
/**
\page org_mitk_gui_qt_matchpoint_algorithm_control The MatchPoint Algorithm Control View

\imageMacro{map_icon_run_doc.svg,"Icon of the MatchPoint Algorithm Control",3.0}

\tableofcontents

\section MAP_RUN_Introduction Introduction

This plugin offers the user a way to use a selected registration algorithm in order to determine a registration for two selected images.
For the selection of an algorithm please see MatchPoint Algorithm Browser (\ref org_mitk_gui_qt_matchpoint_algorithm_browser).

\section MAP_RUN_Contact Contact information

This plug-in is being developed by the SIDT group (Software development for Integrated Diagnostics
-and Therapy) at the DKFZ (German Cancer Research Center). If you have any questions, need support,
+and Therapy) at the German Cancer Research Center (DKFZ). If you have any questions, need support,
find a bug or have a feature request, feel free to contact us at www.mitk.org.

\section MAP_RUN_Usage Usage

\imageMacro{map_control_example.png, "Example screenshot showing the control plugin in use.", 10}

To use the plugin a registration algorithm must be loaded and a moving as well as a target image must be selected.\n
The moving image is registered onto the target image. Thus the result is a mapped input image in the geometry (field of view, orientation, spacing) defined by the target image.\n
All images are selected in the data manager using multi select (press the CTRL-key while selecting the nodes in the data manager). The first selection is the moving image, the second is the target image.\n
If an algorithm is loaded and input images are selected, the plugin will automatically switch to the "Execution" tab.

\subsection MAP_RUN_Usage_selection Selection tab

\imageMacro{map_control_step1_selection.png, "Details of the selection tab.", 5}

In this tab registration algorithms that are selected in the MatchPoint Algorithm Browser can be chosen. In the tab you see the ID of the algorithm selected by the browser and its profile information.\n
If you press "Load selected algorithm", the algorithm will be used by the control plugin.
The name of the algorithm appears in the text field "Loaded algorithm" (at the top of the plugin view).\n
At this point, it has no effect if you change the selection in the browser. The control plugin will keep the loaded algorithm until you choose to load another one.

\subsection MAP_RUN_Usage_exec Execution tab

\imageMacro{map_control_step2_execution.png, "Details of the execution tab.", 5}

In this tab you can specify a name for the registration job (this will determine the names of the result nodes in the data manager).\n
You can also choose to "store registration" (which is normally the goal of the whole process, because this is the very result of the algorithm ;).\n
Additionally you can choose "Generate + store mapped result". This is a convenience feature which often saves you the step of using the mapper plugin afterwards.
It does the same as using the mapper plugin with the moving and target image, setting padding value "0" and using "linear interpolation".
If you need other settings, skip the convenience generation and use the MatchPoint mapper plugin directly.\n
"Start" will trigger the registration process. Some algorithms can be stopped while processing takes place. In those cases, a "Stop" button is enabled as soon as the registration process starts.

\subsection MAP_RUN_Usage_settings Settings tab

\imageMacro{map_control_step3_settings.png, "Details of the settings tab.", 5}

In this tab, you can change the parametrization of the loaded algorithm (before it starts), if it offers any possibility to do so.
*/
diff --git a/Plugins/org.mitk.gui.qt.matchpoint.evaluator/documentation/UserManual/Manual.dox b/Plugins/org.mitk.gui.qt.matchpoint.evaluator/documentation/UserManual/Manual.dox
index 1a7ecf6d90..93f1b219bb 100644
--- a/Plugins/org.mitk.gui.qt.matchpoint.evaluator/documentation/UserManual/Manual.dox
+++ b/Plugins/org.mitk.gui.qt.matchpoint.evaluator/documentation/UserManual/Manual.dox
@@ -1,50 +1,50 @@
/**
\page org_mitk_gui_qt_matchpoint_evaluator The MatchPoint Registration Evaluation View

\imageMacro{map_evaluator_doc.svg, "Icon of the MatchPoint Registration Evaluator", 3}

\tableofcontents

\section MAP_REGEVAL_Introduction Introduction

This view offers the possibility to evaluate the quality of the registration/mapping of two given images by visual inspection.
One may select no registration. Then the images will be displayed in evaluation mode assuming an identity transform (so no mapping).
It is one of several MatchPoint registration plug-ins.\n

\section MAP_REGEVAL_Contact Contact information

This plug-in is being developed by the SIDT group (Software development for Integrated Diagnostics
-and Therapy) at the DKFZ (German Cancer Research Center). If you have any questions, need support,
+and Therapy) at the German Cancer Research Center (DKFZ). If you have any questions, need support,
find a bug or have a feature request, feel free to contact us at www.mitk.org.

\section MAP_REGEVAL_Usage Usage

\imageMacro{map_view_example.png, "Example screenshot showing the plug-in in use.", 14}

To use the evaluation view you must have selected at least the moving and the target image you want to evaluate.
If you select a registration with referenced target and moving image (the normal state if you generate registrations with the MatchPoint plugins) these images will be auto selected by just clicking on the registration.
If you select no registration the view will assume that an identity transform should be used.\n

As long as no valid set of data is selected the "Start evaluation" button will be disabled. If it is enabled, you may start the evaluation mode with it.

\imageMacro{map_no_data_selected.png, "Example screenshot showing the state if no data is selected", 5}

If the evaluation view is active you can choose between different modes of visualization. For more details see \ref MAP_REGEVAL_Styles.\n
To stop the evaluation mode, you may use the "Stop evaluation" button or just close the evaluation view.

\remark The evaluation view will use the level window settings of the used images. So to change the level windowing of the evaluation view, you must change the level windowing of the respective images.

\section MAP_REGEVAL_Styles Visualization styles

You can choose from the following visualization styles to evaluate the registration/mapping quality:\n

\li "Blend": Blends the images with a user-defined weight. Default is 50:50.
\imageMacro{map_style_blend.png, "Example for mode: Blend", 5}
\li "Checkerboard": Checkerboard style that composes both images. You may define the resolution of the checkerboard.
\imageMacro{map_style_checkerboard.png, "Example for mode: Checkerboard", 5}
\li "Color blend": Color blend of the images (blue: target image; yellow: moving). Areas where you see no color imply a good intensity match.
\imageMacro{map_style_color_blend.png, "Example for mode: Color blend", 5}
\li "Contour": Blend mode that displays one image as blue "background" and the other image in yellow contours. You may choose the role of the images.
\imageMacro{map_style_contour.png, "Example for mode: Contour", 5}
\li "Difference": Displays the absolute difference of both images.
\li "Wipe": Blend mode that makes a rectilinear combination of the images. You can choose how the images are split. The split is synchronized with the current selection. So you may interact with the split border to position it on interesting areas.
\imageMacro{map_style_wipe_cross.png, "Example for mode: Wipe cross", 5}
\imageMacro{map_style_wipe_horizontal.png, "Example for mode: Wipe horizontal", 5}
*/
diff --git a/Plugins/org.mitk.gui.qt.matchpoint.framereg/documentation/UserManual/Manual.dox b/Plugins/org.mitk.gui.qt.matchpoint.framereg/documentation/UserManual/Manual.dox
index 5b5a1cb796..ac1d07d340 100644
--- a/Plugins/org.mitk.gui.qt.matchpoint.framereg/documentation/UserManual/Manual.dox
+++ b/Plugins/org.mitk.gui.qt.matchpoint.framereg/documentation/UserManual/Manual.dox
@@ -1,64 +1,64 @@
/**
\page org_mitk_gui_qt_matchpoint_framereg The MatchPoint Motion/Frame Correction View

\imageMacro{"map_framereg_icon_doc.svg", "Icon of the MatchPoint Algorithm Control", 3}

\tableofcontents

\section MAP_FRAME_Introduction Introduction

This plugin offers the user a way to use a selected registration algorithm in order to make a frame/motion correction for a selected 3D+t image.
The plugin is, for example, helpful if you have a dynamic image with motion artifacts at some time points and you want to reduce/remove these motion artifacts.
For the selection of an algorithm please see MatchPoint Algorithm Browser (\ref de_dkfz_matchpoint_mitk_gui_qt_algorithm_browser).

\section MAP_FRAME_Contact Contact information

This plug-in is being developed by the SIDT group (Software development for Integrated Diagnostics
-and Therapy) at the DKFZ (German Cancer Research Center). If you have any questions, need support,
+and Therapy) at the German Cancer Research Center (DKFZ). If you have any questions, need support,
find a bug or have a feature request, feel free to contact us at www.mitk.org.

\section MAP_FRAME_Usage Usage

\imageMacro{"map_framereg_example.png" , "Example screenshot showing the plugin in use.", 15}

To use the plugin, a registration algorithm must be loaded and the image that should be corrected must be selected.\n
The correction is performed such that every frame/timepoint of the image is registered to the first frame, and the corrected frames are mapped into the same geometry as the first frame.\n
If an algorithm is loaded and input images are selected, the plugin will automatically switch to the "Execution" tab.

\subsection MAP_FRAME_Usage_selection Algorithm selection tab

\imageMacro{map_step1_selection.png, "Details of the algorithm selection tab.", 6}

In this tab you can load/"book" the algorithm selected in the MatchPoint Algorithm Browser. In the tab you see the ID of the algorithm selected by the browser and its profile information.\n
If you press "Load selected algorithm", the algorithm will be used by the plugin for the frame correction and the name of the algorithm appears in the text field "Loaded algorithm" (at the top of the plugin view).\n
At this point, it has no effect if you change the selection in the browser. The plugin will keep the loaded algorithm until you choose to load another one.

\subsection MAP_FRAME_Usage_exec Execution tab

\imageMacro{map_step2_execution.png, "Details of the execution tab.", 6}

In this tab you can specify a name for the correction job (this will determine the names of the result nodes in the data manager).\n
"Start" will trigger the correction process.

\subsection MAP_FRAME_Usage_settings Settings tab

\imageMacro{map_step3_settings.png, "Details of the settings tab.", 6}

In this tab, you can (1) define the mapping settings \ref MAP_FRAME_Mapper_Settings "(See details)", used for the corrected frames, or (2) parametrize the loaded algorithm (before it starts), if it offers any possibility to do so.

\subsubsection MAP_FRAME_Mapper_Settings Mapper settings

For the mapping of corrected images, you have several settings available:\n
\li "Allow undefined pixels": Activate to handle pixels of the result image that are not in the field of view of the input image. These pixels will get the "padding value".
\li "Allow error pixels": Activate to handle pixels of the result image that cannot be mapped because the registration does not support this part of the output image. These pixels will get the "error value".
\li "Interpolator": Set to choose the interpolation strategy that should be used for mapping. \ref MAP_FRAME_Interpolation "(see details)"

\subsubsection MAP_FRAME_Interpolation Interpolation

You can choose from the following interpolation strategies:\n
\li "nearest neighbour": Use the value of the nearest pixel. Fastest, but high interpolation errors for gray value images. Right choice for label images or masks.
\li "Linear": Fast linear interpolation with often sufficient quality. Tends to blur edges.
\li "BSpline (3rd order)": Good trade off between time and quality.
\li "Windowed Sinc (Hamming)": Good interpolation quality but very time consuming.
\li "Windowed Sinc (Welch)": Good interpolation quality but very time consuming.
\subsection MAP_FRAME_Usage_frame_selection Frame selection tab

\imageMacro{map_step4_frameselection.png, "Details of the frame selection tab.", 6}

In this tab you can specify the frames of the currently selected image that should be corrected. By default, all frames of an image will be corrected.
If you only select specific frames, these frames will be corrected; all other frames will just be copied unchanged.
*/
diff --git a/Plugins/org.mitk.gui.qt.matchpoint.manipulator/documentation/UserManual/Manual.dox b/Plugins/org.mitk.gui.qt.matchpoint.manipulator/documentation/UserManual/Manual.dox
index d376891913..d4af889568 100644
--- a/Plugins/org.mitk.gui.qt.matchpoint.manipulator/documentation/UserManual/Manual.dox
+++ b/Plugins/org.mitk.gui.qt.matchpoint.manipulator/documentation/UserManual/Manual.dox
@@ -1,69 +1,69 @@
/**
\page org_mitk_gui_qt_matchpoint_manipulator The MatchPoint Registration Manipulator View

\imageMacro{map_manipulator_icon_doc.svg, "Icon of the MatchPoint Registration Manipulator", 3}

\tableofcontents

\section MAP_REGMANIP_Introduction Introduction

This view offers the possibility to manually manipulate a registration to establish a good mapping between data.
The effect of the manipulation is visualized with two user-defined images to allow visual inspection.\n
It is one of several MatchPoint registration plug-ins.\n

\imageMacro{map_view_example.png, "Example screenshot showing the plug-in in use", 10}

\section MAP_REGMANIP_Contact Contact information

This plug-in is being developed by the SIDT group (Software development for Integrated Diagnostics
-and Therapy) at the DKFZ (German Cancer Research Center). If you have any questions, need support,
+and Therapy) at the German Cancer Research Center (DKFZ). If you have any questions, need support,
find a bug or have a feature request, feel free to contact us at www.mitk.org.

\section MAP_REGMANIP_Usage Usage

\imageMacro{map_view_steps.png, "Illustration of the workflow steps.", 7}

The typical workflow with the manipulator has the following steps/sections:

1. Source selection: You can choose between starting a new registration and using a selected registration. For the latter option, the registration must be selected in the data manager.
\remark If you choose a new registration, the manipulator will automatically pre-initialize this new transform to align the centers of the used images and therefore starts with sensible settings.
\remark If you choose an existing registration, the registration will *not* be altered. It serves as template/baseline for the manipulation, which will be "on top" of the existing registration.
2. Image selection: To allow visual inspection of the manipulation, two images are needed. If you have selected a registration (independent of the source selection mode) the manipulator will use the moving and target images used to determine the selected registration as images for the manipulation. You can also explicitly select images in the data manager (press shift while selecting for multi select).
3. Start manual registration: If all settings are valid, you can start the manipulation. The render windows will automatically switch to the visual inspection mode. The views will be reinitialized to the field of view of the target image.
4. Generation settings: You may choose to give the resulting registration a special name. Additionally you can choose the convenience option to map the moving image with the confirmed registration automatically.
5. Settings: You can alter the settings of the transform (\ref MAP_REGMANIP_TransSettings) and the rendering settings (\ref MAP_REGMANIP_EvalSettings) for the visual inspection.
6. Cancel or confirmation: You may cancel the manipulation process (closing the view equals cancellation) or confirm the determined registration and store it in the data storage with the given name.\n

\section MAP_REGMANIP_TransSettings Transformation settings

You can alter the translation and the rotation of the transform. In addition you may choose the center of rotation type. You have the following options:\n
- Moving image center: Rotate around the center of the moving image.
- World origin: Rotate around (0.0,0.0,0.0), the world origin.
- Current navigator position: Rotate around the current navigator position in the render views.

\remark FAQ: Why are the translation values "jumping" when I change the center mode or when I am rotating?\n
The reason is the relation between center, rotation, and translation.\n
A transformation is defined as x' = R (x - C) + C + T\n
where x': transformed point; x: point to transform; R: rotation matrix; C: center point; T: translation vector.\n
The offset of a transform is defined as O = -RC + C + T\n
The offset as well as the rotation matrix stay constant if the center point changes, therefore the translation has to be altered.\n
Keeping O fixed while the center changes from C to C' means -RC + C + T = -RC' + C' + T', so the new translation is T' = T + (R - I)(C' - C); this difference is exactly the "jump" you see in the translation fields.

\note To ease the orientation, the edit fields have background colours which resemble the colours of the plane in which the changes will "happen".\n
For translation, the translation vector will be perpendicular to the indicated plane (the image moves "through" the plane). For rotation, the rotation axis will be perpendicular to the indicated plane.

\section MAP_REGMANIP_EvalSettings Evaluation settings

The settings you can choose are equal to the settings of the evaluation view (\ref org_mitk_gui_qt_matchpoint_evaluator). Please see the documentation of the MatchPoint Registration Evaluator view for more details.
*/
diff --git a/Plugins/org.mitk.gui.qt.matchpoint.mapper/documentation/UserManual/Manual.dox b/Plugins/org.mitk.gui.qt.matchpoint.mapper/documentation/UserManual/Manual.dox
index c02863a2a6..76b20f6419 100644
--- a/Plugins/org.mitk.gui.qt.matchpoint.mapper/documentation/UserManual/Manual.dox
+++ b/Plugins/org.mitk.gui.qt.matchpoint.mapper/documentation/UserManual/Manual.dox
@@ -1,85 +1,85 @@
/**
\page org_mitk_gui_qt_matchpoint_mapper The MatchPoint Image Mapper View

\imageMacro{map_mapper_icon_doc.svg, "Icon of the MatchPoint Image Mapper",3}

\tableofcontents

\section MAP_MAPPER_Introduction Introduction

This view offers the possibility to map any image or point set in the data manager using a user-selected registration object.
Using the Mapper to map images, the user can control the field of view (image geometry) the image should be mapped into, as well as the interpolation strategy that should be used.\n
It is one of several MatchPoint registration plugins.\n

Typical usage scenarios\n
\li You have registered image I1 onto image I2. Now you want to transfer the segmentation of I1 to I2 in order to evaluate I2 within this mapped segmentation using \ref org_mitk_views_imagestatistics .
\li You have registered image I1 onto image I2. Now you want to map I3 (e.g. another MRI sequence of the same session) also onto I2 with the same registration.
\li You have registered image I1 onto image I2. Now you want to map a segmentation done on I1 also onto I2 with the same registration.
\li You have registered image I1 onto image I2.
Now you want to map a point set of image I1 also onto I2 with the same registration.

\section MAP_MAPPER_Contact Contact information

This plug-in is being developed by the SIDT group (Software development for Integrated Diagnostics
-and Therapy) at the DKFZ (German Cancer Research Center). If you have any questions, need support,
+and Therapy) at the German Cancer Research Center (DKFZ). If you have any questions, need support,
find a bug or have a feature request, feel free to contact us at www.mitk.org.

\section MAP_MAPPER_Usage Usage

\imageMacro{map_mapper-examplescreen.png, "Example screenshot showing the Mapper plugin in use.", 14}

To use the mapper, at least one input data set (image or point set) must be selected. Additionally you may select a registration object and a reference image.
Registration objects are marked with a small blue icon (e.g. the data "Registration" in the data manager of the screen shot above).
The reference image defines the geometry (field of view, orientation, spacing) that should be used for the result image.
By default the view will try to automatically determine the reference image (by default it is the target image of the selected registration).
If auto selection cannot determine the reference it will choose the input image as reference.
The reference image can also be defined explicitly by the user by activating manual selection.\n
REMARK: If you map point sets you can ignore the reference image slot. It has no effect.\n
You can multi select registration and data (press the CTRL-key while selecting the nodes in the data manager). The Mapper will automatically sort the selections into the correct "slots" of the view.\n
REMARK: The mapping results will be added as child nodes to the used input node.\n
REMARK: If you do not select a registration, the view will assume an identity transform. This is a convenient way if you just want to resample an image into the geometry of another image (when no registration is needed). Also in this use case you can take advantage of the different interpolation and sub/super sampling strategies.

\imageMacro{map_mapper.png, "Details of the mapper view.", 8}

(1) The currently selected registration, that will be used for mapping.\n
(2) The currently selected input data, that will be mapped.\n
(3) The currently (automatically or by user) selected reference image, that defines the geometry of the result.\n
(4) The name of the result data in the data manager.\n
(5) The start button(s) to commence the mapping process. For details regarding the two options see \ref MAP_MAPPER_Refine.\n
(6) Log windows with messages regarding the mapping process.\n\n

Every "slot" has the ability to be locked. If locked, the last selection will be kept, regardless of the current selection in the data manager.
You can use this for example to lock the registration, if you want to map multiple images. That way it is enough to just select the next image in the data manager.
To lock a slot, click the "lock" button on the right side (see example images below).

\imageMacro{map_node-unlocked.png, "Unlocked slot/node (default state). Changes with the selections in the data manager.",6}
\imageMacro{map_node-locked.png, "Locked slot/node.
Stays, regardless of the selections in the data manager.",6}

\section MAP_MAPPER_Refine Mapping or geometry refinement

The mapper view offers two options to map images:\n
\li "Map" (default)
\li "Refine geometry"

For images, "Map" fills the pixels of the output image by interpolating input image pixels using the registration object. This option always works, but it may take longer and introduces interpolation errors, because a new image is resampled.\n
The second option, "Refine geometry", is only offered if the registration (more precisely, its inverse kernel) is matrix-based and the selected data is an image. In this case it just clones the image and refines its image geometry (origin and orientation) to project it to the position indicated by the registration; thus no interpolation artefacts are introduced (an illustrative sketch of the difference is given at the end of this page).

\remark If you want to use a mapped image in conjunction with the statistics plugin and a mask of the reference image (or you want to perform any other computation that expects the voxels to be on the same grid for direct numeric comparison), you must use "Map" to ensure the same geometry (including the same image grid, spacing and resolution). Otherwise operations like the statistics plugin will fail.

\section MAP_MAPPER_Settings Settings

If you map the image (and do not just refine the geometry), you have several settings available:\n
\li "Allow undefined pixels": Activate to handle pixels of the result image that are not in the field of view of the input image. These pixels will get the "padding value".
\li "Allow error pixels": Activate to handle pixels of the result image that cannot be mapped because the registration does not support this part of the output image. These pixels will get the "error value".
\li "Interpolator": Set to choose the interpolation strategy that should be used for mapping.
\li "Activate super/sub sampling": Activate if you want to use the origin and orientation of the reference image but want to alter the spacing.

\section MAP_MAPPER_Interpolation Interpolation

You can choose from the following interpolation strategies:\n
\li "nearest neighbor": Use the value of the nearest pixel. Fastest, but high interpolation errors for gray value images. The right choice for label images or masks.
\li "Linear": Fast linear interpolation with often sufficient quality. Tends to blur edges.
\li "BSpline (3rd order)": Good trade-off between time and quality.
\li "Windowed Sinc (Hamming)": Good interpolation quality but very time consuming.
\li "Windowed Sinc (Welch)": Good interpolation quality but very time consuming.

\section MAP_MAPPER_Masks Handling of masks/segmentations

If you select a mask as the input image, the plugin will automatically be reconfigured to settings that are suitable for the task of mapping masks. Most importantly, the interpolator will be set to "nearest neighbor".
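
\remark For readers who prefer code, the following Python sketch (using SimpleITK and NumPy) contrasts the two options described in \ref MAP_MAPPER_Refine. It is an illustration only, not the code executed by this plugin; the function names and the affine_matrix / affine_offset parameters are hypothetical and assume a rigid, matrix-based mapping x' = M x + o from input space to result space.

\code{.py}
# Illustrative sketch only - not the plugin's implementation.
import numpy as np
import SimpleITK as sitk

def map_image(input_img, reference_img, transform,
              interpolator=sitk.sitkLinear, padding_value=0.0):
    # "Map": resample the input pixels onto the grid of the reference image.
    # As usual for resampling, the transform is expected to map points from
    # the reference (output) space into the input space. Works for any
    # registration, but interpolates and therefore introduces interpolation
    # errors.
    return sitk.Resample(input_img, reference_img, transform,
                         interpolator, padding_value)

def refine_geometry(input_img, affine_matrix, affine_offset):
    # "Refine geometry": keep the pixel data untouched and only move the image
    # by updating its origin and orientation (assumes a rigid mapping so that
    # the resulting direction matrix stays orthonormal).
    refined = sitk.Image(input_img)  # copy; pixel values stay identical
    dim = input_img.GetDimension()
    m = np.asarray(affine_matrix, dtype=float).reshape(dim, dim)
    direction = np.asarray(input_img.GetDirection()).reshape(dim, dim)
    origin = np.asarray(input_img.GetOrigin(), dtype=float)
    offset = np.asarray(affine_offset, dtype=float)
    refined.SetDirection(tuple(float(v) for v in (m @ direction).flatten()))
    refined.SetOrigin(tuple(float(v) for v in (m @ origin + offset)))
    return refined
\endcode

Note that only the "Map" variant produces an image that lies on the reference grid; the refined image keeps its own pixel grid, which is why computations that require a common grid must use "Map".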
*/
diff --git a/Plugins/org.mitk.gui.qt.matchpoint.visualizer/documentation/UserManual/Manual.dox b/Plugins/org.mitk.gui.qt.matchpoint.visualizer/documentation/UserManual/Manual.dox
index 46cdb16a37..1a23e3e98a 100644
--- a/Plugins/org.mitk.gui.qt.matchpoint.visualizer/documentation/UserManual/Manual.dox
+++ b/Plugins/org.mitk.gui.qt.matchpoint.visualizer/documentation/UserManual/Manual.dox
@@ -1,21 +1,21 @@
/**
\page org_mitk_gui_qt_matchpoint_visualizer The MatchPoint Registration Visualizer View

\imageMacro{map_vis_icon_doc.svg, "Icon of the Registration Visualizer",3}

\tableofcontents

\section MAP_VIS_Introduction Introduction
This view is in development to offer the user a way to visualize MatchPoint registrations in a MITK scene. Currently, only a simple grid visualization and a glyph visualization are implemented.\n
\remark This is an experimental version and work in progress, so please excuse errors or usage issues and report them. This view will be improved and polished with the next releases.

\section MAP_VIS_Contact Contact information

This plug-in is being developed by the SIDT group (Software development for Integrated Diagnostics
-and Therapy) at the DKFZ (German Cancer Research Center). If you have any questions, need support,
+and Therapy) at the German Cancer Research Center (DKFZ). If you have any questions, need support,
find a bug or have a feature request, feel free to contact us at www.mitk.org.

\section MAP_VIS_Usage Usage
Documentation is missing and still to be done.
*/
diff --git a/Plugins/org.mitk.gui.qt.pharmacokinetics.concentration.mri/documentation/UserManual/Manual.dox b/Plugins/org.mitk.gui.qt.pharmacokinetics.concentration.mri/documentation/UserManual/Manual.dox
index 97e5f59a25..79c3c04ad5 100644
--- a/Plugins/org.mitk.gui.qt.pharmacokinetics.concentration.mri/documentation/UserManual/Manual.dox
+++ b/Plugins/org.mitk.gui.qt.pharmacokinetics.concentration.mri/documentation/UserManual/Manual.dox
@@ -1,16 +1,16 @@
/**
\page org_mitk_gui_qt_pharmacokinetics_concentration_mri The Concentration Curve Converter View

\imageMacro{pharmacokinetics_concentration_doc.svg,"Icon of the Concentration Curve Converter View",3.0}

\tableofcontents

\section org_mitk_gui_qt_pharmacokinetics_concentration_mri_overview Overview
Stand-alone conversion of image signal intensities to contrast agent concentration units can be performed with a dedicated plugin. The plugin distinguishes between T1-weighted and T2-weighted sequences.
T1 conversion can be performed in terms of absolute and relative signal enhancement, as well as for turbo flash sequences, for both 3D images (a pre-contrast baseline image S0 without contrast enhancement is required as input) and 4D sequences (the baseline is selected as the first frame of the time series).

\section org_mitk_gui_qt_pharmacokinetics_concentration_mri_Contact Contact information

This plug-in is being developed by Charlotte Debus and the SIDT group (Software development for Integrated Diagnostics
-and Therapy) at the DKFZ (German Cancer Research Center).
+and Therapy) at the German Cancer Research Center (DKFZ).
If you have any questions, need support, find a bug or have a feature request, feel free to contact us at www.mitk.org.
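
\remark As a rough illustration of the enhancement-based T1 conversion mentioned in the overview, the following Python sketch computes absolute and relative signal enhancement from a pre-contrast baseline image S0 and a dynamic frame S. It is a sketch only: the scaling factor k that turns relative enhancement into concentration units is a placeholder, not the exact conversion implemented by the plugin.

\code{.py}
# Illustrative sketch only - not the plugin's implementation.
import numpy as np

def absolute_enhancement(S, S0):
    # Signal difference to the pre-contrast baseline S0.
    return S - S0

def relative_enhancement(S, S0, eps=1e-6):
    # Signal change relative to the pre-contrast baseline S0.
    return (S - S0) / (S0 + eps)

def concentration_from_relative_enhancement(S, S0, k=1.0):
    # Placeholder linear scaling; the true conversion factor depends on the
    # sequence and is configured in the plugin.
    return k * relative_enhancement(S, S0)
\endcode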
*/
diff --git a/Plugins/org.mitk.gui.qt.pharmacokinetics.curvedescriptor/documentation/UserManual/Manual.dox b/Plugins/org.mitk.gui.qt.pharmacokinetics.curvedescriptor/documentation/UserManual/Manual.dox
index b1e55f62a7..c97c25295f 100644
--- a/Plugins/org.mitk.gui.qt.pharmacokinetics.curvedescriptor/documentation/UserManual/Manual.dox
+++ b/Plugins/org.mitk.gui.qt.pharmacokinetics.curvedescriptor/documentation/UserManual/Manual.dox
@@ -1,26 +1,26 @@
/**
\page org_mitk_gui_qt_pharmacokinetics_curvedescriptor The Perfusion Curve Description Parameters View

\imageMacro{pharmacokinetics_curve_desc_doc.svg,"Icon of the Perfusion Curve Description Parameters View",3.0}

\tableofcontents

\section org_mitk_gui_qt_pharmacokinetics_concentration_mri_overview Overview
In cases where data quality is not sufficient for dedicated pharmacokinetic analysis, or if global scouting of the overall image should be performed to identify regions of interest, it is often advisable to use semi-quantitative measures that describe the general shape and type of the curve.
The Perfusion Curve Description Parameters plugin can be used to calculate these parameters voxel-wise.
Currently, the following parameters are offered by the tool:
- area under the curve (AUC)
- area under the first moment curve (AUMC)
- mean residence time (MRT; AUMC/AUC)
- time to peak and maximum signal

These parameters are calculated directly from the sampled data.
-AUC and AUMC are calculated by step-wise integration with linear interpolation between sampling points. Maximum and time to peak are derived from the highest intensity value (overall maximum) of all data points.
+AUC and AUMC are calculated by step-wise integration with linear interpolation between sampling points. Maximum and time to peak are derived from the highest intensity value (overall maximum) of all data points.
Note: If semi-quantitative parameters should be calculated from concentration-time curves rather than raw signal intensities, use the Concentration Curve Converter view (\ref org_mitk_gui_qt_pharmacokinetics_concentration_mri) beforehand.
Parameters of interest can be selected from the list. Selecting a 4D image in the Data manager enables the Calculate Parameters button.
Resulting parameter maps will afterwards be added to the data manager as subnodes to the analyzed 4D image.

\section org_mitk_gui_qt_pharmacokinetics_concentration_mri_Contact Contact information

This plug-in is being developed by Charlotte Debus and the SIDT group (Software development for Integrated Diagnostics
-and Therapy) at the DKFZ (German Cancer Research Center).
+and Therapy) at the German Cancer Research Center (DKFZ).
If you have any questions, need support, find a bug or have a feature request, feel free to contact us at www.mitk.org.
*/
diff --git a/Plugins/org.mitk.gui.qt.pharmacokinetics.mri/documentation/UserManual/Manual.dox b/Plugins/org.mitk.gui.qt.pharmacokinetics.mri/documentation/UserManual/Manual.dox
index ab56d736e1..13e39a1f86 100644
--- a/Plugins/org.mitk.gui.qt.pharmacokinetics.mri/documentation/UserManual/Manual.dox
+++ b/Plugins/org.mitk.gui.qt.pharmacokinetics.mri/documentation/UserManual/Manual.dox
@@ -1,88 +1,88 @@
/**
\page org_mitk_gui_qt_pharmacokinetics_mri The DCE MR Perfusion Datafit View

\imageMacro{pharmacokinetics_mri_doc.svg,"Icon of the DCE MR Perfusion View",3.0}

\tableofcontents

\section FIT_DCE_Introduction Introduction
-For pharmacokinetic analysis of DCE MRI/CT data using compartment models in non-linear least square fitting the DCE MR Perfusion Datafit plugin can be used.
+For pharmacokinetic analysis of DCE MRI/CT data using compartment models in non-linear least-squares fitting, the DCE MR Perfusion Datafit plugin can be used.

\section FIT_DCE_Contact Contact information

This plug-in is being developed by Charlotte Debus and the SIDT group (Software development for Integrated Diagnostics
-and Therapy) at the DKFZ (German Cancer Research Center).
+and Therapy) at the German Cancer Research Center (DKFZ).
If you have any questions, need support, find a bug or have a feature request, feel free to contact us at www.mitk.org.

\subsection FIT_DCE_Cite Citation information
If you use the view for your research, please cite our work as reference:\n\n
Debus C and Floca R, Ingrisch M, Kompan I, Maier-Hein K, Abdollahi A, Nolden M, MITK-ModelFit: generic open-source framework for model fits and their exploration in medical imaging – design, implementation and application on the example of DCE-MRI (arXiv:1807.07353)

\section FIT_DCE_General General information

\imageMacro{dce_mri_init.png, "Example screen shot showing the view at first start.", 10}

-For pharmacokinetic analysis of DCE MRI/CT data using compartment models in non-linear least square fitting the DCE MR Perfusion Datafit view can be used.
+For pharmacokinetic analysis of DCE MRI/CT data using compartment models in non-linear least-squares fitting, the DCE MR Perfusion Datafit view can be used.
In principle, every model can be fitted on the entire image. However, for model configuration reasons (e.g. an AIF is required) and because of the computational time cost, this is often not advisable.
Therefore, apart from the image to be fitted (Selected Time Series), a ROI segmentation can be defined (Selected Mask), within which model fitting is performed.
The view currently offers voxel-wise and/or ROI-averaged fits of intensity-time curves with different quantitative and semi-quantitative models.
If a mask is selected, ROI-based fitting (fit of the average curve within the ROI) is enabled (radio button Fitting Strategy – Pixel based / ROI based).

\subsection FIT_DCE_General_models Supported models
Currently, the following pharmacokinetic models for gadolinium-based contrast agents are available:
- The descriptive Brix model \ref FIT_DCE_lit_ref1 "[1]"
- A semi-quantitative three segment linear model (3SL)
- The standard Tofts model \ref FIT_DCE_lit_ref2 "[2]"
- The extended Tofts model \ref FIT_DCE_lit_ref3 "[3]"
- The two compartment exchange model (2CXM) \ref FIT_DCE_lit_ref4 "[4, 5]"

\section FIT_DCE_Settings Model Settings

\imageMacro{dce_mri_config.png, "Example screenshot showing the config settings of the view.", 10}

\subsection FIT_DCE_Settings_model Model specific settings
Selecting one of the \ref FIT_DCE_General_models "supported models" will open tabs below for further configuration of the model.
- The descriptive Brix model requires only the definition of the duration of the bolus, i.e. the overall time of the injection (Injection Time [min]).
- The 3SL is a semi-quantitative descriptive model that distinguishes three different segments of the signal: a constant baseline, the initial fast rise (wash-in) and the final slow rise / signal decrease (wash-out). Each of these segments is approximated by a linear curve, with change points in-between. It requires no further configuration.
- The standard Tofts model, the extended Tofts model and the 2CXM are all compartment models that require as input the concentration-time curve in the tissue-feeding artery, the AIF (a brief illustrative sketch is given below).
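
To give an idea of what such a compartment model looks like, the standard Tofts model \ref FIT_DCE_lit_ref2 "[2]" describes the tissue concentration as C_t(t) = Ktrans * integral from 0 to t of Cp(tau) * exp(-kep * (t - tau)) dtau, with kep = Ktrans / ve, where Cp is the AIF. The following Python sketch evaluates this with a simple rectangle-rule convolution; it is an illustration only and not the fitting code used by this view.

\code{.py}
# Illustrative sketch only - not the fitting code used by this view.
import numpy as np

def standard_tofts(t, aif, ktrans, ve):
    # Tissue concentration C_t(t) for the standard Tofts model:
    # C_t(t) = Ktrans * integral_0^t Cp(tau) * exp(-kep * (t - tau)) dtau,
    # with kep = Ktrans / ve, evaluated with a simple rectangle rule.
    t = np.asarray(t, dtype=float)
    aif = np.asarray(aif, dtype=float)
    kep = ktrans / ve
    dt = np.gradient(t)
    ct = np.zeros_like(aif)
    for i, ti in enumerate(t):
        kernel = np.exp(-kep * (ti - t[:i + 1]))
        ct[i] = ktrans * np.sum(aif[:i + 1] * kernel * dt[:i + 1])
    return ct
\endcode

All of these compartment models need the sampled AIF Cp(t) as an input, so its definition is an important part of the configuration.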
In the DCE MRI Model fitting plugin, the arterial input function can be defined in several ways.
For patient-individual, image-derived AIFs, select the radio button "Select AIF from Image". In that case, a segmentation ROI for the artery has to be given to the tool (drop-down menu AIF Mask from Data Manager).
In cases where the respective artery does not lie in the same image as the investigated tissue (e.g. in animal experiments, where a slice through the heart is used for AIF extraction), a dedicated AIF image can be selected from the Data Manager.
The other option is to define the AIF via an external file (e.g. for population-derived AIFs or AIFs from blood sampling). By clicking the Browse button, one can select a CSV file that holds the arterial intensity values and the corresponding time points (in tuple format (Time, Value)).
Caution: the file must not contain a header line; the first line must already start with time and intensity values.
Furthermore, the hematocrit level has to be set (from 0 to 1) for conversion from whole blood to plasma concentration. It is set to the literature default value of 0.45.

\subsection FIT_DCE_Settings_start Start parameter

\imageMacro{dce_mri_start.png, "Example screen shot for start parameter settings.", 10}
In cases of noisy data it can be useful to define the initial starting values of the parameter estimates, at which optimization starts, in order to prevent the optimization from ending up in local optima. Each model has default scalar values (applied to every voxel) for the initial value of each parameter; however, these can be adjusted. Moreover, initial values can also be defined locally for each individual voxel via starting value images.

\subsection FIT_DCE_Settings_constraint Constraint settings

\imageMacro{dce_mri_constraint.png, "Example screen shot for constraint settings.", 10}
To limit the fitting search space and to exclude unphysical/illogical results for model parameter estimates, constraints on individual parameters as well as on combinations can be imposed. Each model has default constraints; however, new ones can be defined or removed via the + and – buttons in the table.
The first column specifies the parameter(s) involved in the constraint (if multiple are selected, their sum will be used) by selection in the drop-down menu. The second column defines whether the constraint defines an upper or lower boundary. Value and Width define the actual constraint value that should not be crossed and a certain tolerance width.

\subsection FIT_DCE_Settings_concentration Signal to concentration conversion settings

\imageMacro{dce_mri_concentration.png, "Example screen shot for concentration conversion settings.", 10}
Most models (i.e. all compartment models) require concentration values as input rather than raw signal intensities. The DCE MR Perfusion view offers conversion to concentration by means of relative and absolute signal enhancement as well as a special conversion for turbo flash sequences.

\section FIT_DCE_Fitting Executing a fit

After configuration of the entire fit routine, the respective time series to be fitted and, if applicable, the ROI mask have to be selected.
If only an image is needed, selecting the respective time series in the data manager is sufficient. If a mask is to be selected as well, image and mask have to be selected by holding the shift key and selecting them in this order from the Data manager.\n\n
In order to distinguish results from different model fits to the data, a Fitting name can be defined in the bottom field.
By default, the name of the model and the fitting strategy (pixel/ROI) are used. This name will then be extended by the respective parameter name.\n\n
For development purposes and evaluation of the fits, the option "Generate debug parameter images" is available. Enabling this option will result in additional parameter maps displaying the status of the optimizer at fit termination, such as the required optimization time, the number of iterations, constraint violations and the reason for fit termination (criterion reached, maximum number of iterations, etc.).\n\n
-After all necessary configurations are set, the button "Start Modelling" is enabled, which starts the fitting routine. Progress can be seen in the message box on the bottom. Resulting parameter maps will afterwards be added to the data manager as sub-nodes to the analyzed 4D image.
+After all necessary configurations are set, the button "Start Modelling" is enabled, which starts the fitting routine. Progress can be seen in the message box at the bottom. Resulting parameter maps will afterwards be added to the data manager as sub-nodes to the analyzed 4D image.

\section FIT_DCE_lit References/Literature
- \anchor FIT_DCE_lit_ref1 [1] Brix G, Semmler W, Port R, Schad LR, Layer G, Lorenz WJ. Pharmacokinetic parameters in CNS Gd-DTPA enhanced MR imaging. J Comput Assist Tomogr. 1991;15:621–8.
- \anchor FIT_DCE_lit_ref2 [2] Tofts PS, Kermode AG. Measurement of the blood-brain barrier permeability and leakage space using dynamic MR imaging. 1. Fundamental concepts. Magn Reson Med. 1991;17:357–67.
- \anchor FIT_DCE_lit_ref3 [3] Sourbron SP, Buckley DL. On the scope and interpretation of the Tofts models for DCE-MRI. Magn Reson Med. 2011;66:735–45.
- \anchor FIT_DCE_lit_ref4 [4] Brix G, Kiessling F, Lucht R, Darai S, Wasser K, Delorme S, et al. Microcirculation and microvasculature in breast tumors: Pharmacokinetic analysis of dynamic MR image series. Magn Reson Med. 2004;52:420–9.
- \anchor FIT_DCE_lit_ref5 [5] Sourbron SP, Buckley DL. Tracer kinetic modelling in MRI: estimating perfusion and capillary permeability. Phys Med Biol. 2012. http://iopscience.iop.org/article/10.1088/0031-9155/57/2/R1/pdf. Accessed 1 May 2016.
*/
diff --git a/Plugins/org.mitk.gui.qt.pharmacokinetics.pet/documentation/UserManual/Manual.dox b/Plugins/org.mitk.gui.qt.pharmacokinetics.pet/documentation/UserManual/Manual.dox
index 6b0075415c..e36b99fd06 100644
--- a/Plugins/org.mitk.gui.qt.pharmacokinetics.pet/documentation/UserManual/Manual.dox
+++ b/Plugins/org.mitk.gui.qt.pharmacokinetics.pet/documentation/UserManual/Manual.dox
@@ -1,46 +1,46 @@
/**
\page org_mitk_gui_qt_pharmacokinetics_pet The Dynamic PET DataFit View

\imageMacro{pharmacokinetics_pet_doc.svg,"Icon of the Dynamic PET DataFit View",3.0}

\tableofcontents

\section FIT_PET_Overview Overview
Pharmacokinetic analysis of concentration-time curves is also of interest in the context of dynamic PET acquisitions, which follow the accumulation of a radioactive tracer in tissue over time.

\section FIT_PET_Contact Contact information

This plug-in is being developed by Charlotte Debus and the SIDT group (Software development for Integrated Diagnostics
-and Therapy) at the DKFZ (German Cancer Research Center).
+and Therapy) at the German Cancer Research Center (DKFZ).
If you have any questions, need support, find a bug or have a feature request, feel free to contact us at www.mitk.org.
\subsection FIT_DCE_Cite Citation information
If you use the view for your research, please cite our work as reference:\n\n
Debus C and Floca R, Ingrisch M, Kompan I, Maier-Hein K, Abdollahi A, Nolden M, MITK-ModelFit: generic open-source framework for model fits and their exploration in medical imaging – design, implementation and application on the example of DCE-MRI (arXiv:1807.07353)

\section FIT_PET_General General information
All models require definition of the arterial tracer concentration, i.e. the AIF. For the AIF definition, see the corresponding description in the DCE MR Perfusion Datafit view (\ref org_mitk_gui_qt_pharmacokinetics_mri). Instead of the hematocrit level, the whole blood to plasma correction value needs to be specified. The commonly used literature value is 0.1.
Since PET images are already in concentration units of activity per volume ([Bq/ml], which is proportional to the number of radioactive nuclei per volume), no conversion of signal intensities to concentration is offered in the plugin. If, however, conversion of the 4D images to standard uptake values (SUV) is desired, this can be performed with the separate PET SUV calculation plugin.
Start parameters and parameter constraints can be defined in the same manner as for the DCE tool.

\subsection FIT_PET_General_models Supported models
The PET dynamic plugin works in analogy to the DCE MRI perfusion plugin. It currently supports the following compartmental models:
- One tissue compartment model (without blood volume VB)
- Extended one tissue compartment model (with blood volume VB)
- Two tissue compartment model (with blood volume)
- Two tissue compartment model for FDG (without back exchange k4)

\section FIT_PET_Settings Model Settings

\subsection FIT_PET_Settings_start Start parameter
In cases of noisy data it can be useful to define the initial starting values of the parameter estimates, at which optimization starts, in order to prevent the optimization from ending up in local optima. Each model has default scalar values (applied to every voxel) for the initial value of each parameter; however, these can be adjusted. Moreover, initial values can also be defined locally for each individual voxel via starting value images.

\subsection FIT_PET_Settings_constraint Constraint settings
To limit the fitting search space and to exclude unphysical/illogical results for model parameter estimates, constraints on individual parameters as well as on combinations can be imposed. Each model has default constraints; however, new ones can be defined or removed via the + and – buttons in the table.
The first column specifies the parameter(s) involved in the constraint (if multiple are selected, their sum will be used) by selection in the drop-down menu. The second column defines whether the constraint defines an upper or lower boundary. Value and Width define the actual constraint value that should not be crossed and a certain tolerance width.
*/