diff --git a/Modules/ToFHardware/KinectV2/mitkKinectV2Controller.cpp b/Modules/ToFHardware/KinectV2/mitkKinectV2Controller.cpp
index 02eb5942b4..b8cea6e4d9 100644
--- a/Modules/ToFHardware/KinectV2/mitkKinectV2Controller.cpp
+++ b/Modules/ToFHardware/KinectV2/mitkKinectV2Controller.cpp
@@ -1,433 +1,432 @@
/*===================================================================

The Medical Imaging Interaction Toolkit (MITK)

Copyright (c) German Cancer Research Center,
Division of Medical and Biological Informatics.
All rights reserved.

This software is distributed WITHOUT ANY WARRANTY; without
even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE.

See LICENSE.txt or http://www.mitk.org for details.

===================================================================*/
#include "mitkKinectV2Controller.h"

//Kinect V2 SDK
#include <Kinect.h>

//VTK
#include <vtkSmartPointer.h>
#include <vtkPolyData.h>
#include <vtkPoints.h>
#include <vtkCellArray.h>
#include <vtkFloatArray.h>
#include <vtkPointData.h>

#include <mitkSurface.h>

//Taken from official Microsoft SDK samples. Should never be public or part of the class,
//because it is just for cleaning up.
// Safe release for interfaces
template<class Interface>
inline void SafeRelease(Interface *& pInterfaceToRelease)
{
  if (pInterfaceToRelease != NULL)
  {
    pInterfaceToRelease->Release();
    pInterfaceToRelease = NULL;
  }
}

namespace mitk
{
  class KinectV2Controller::KinectV2ControllerPrivate
  {
  public:
    KinectV2ControllerPrivate();
    ~KinectV2ControllerPrivate();

    bool ErrorText(unsigned int error);

    IKinectSensor* m_pKinectSensor;
    IMultiSourceFrameReader* m_pMultiSourceFrameReader;
    ICoordinateMapper* m_pCoordinateMapper;
    RGBQUAD* m_pColorRGBX;

    bool m_ConnectionCheck; ///< check if camera is connected or not

    int m_DepthCaptureWidth;
    int m_DepthCaptureHeight;

    int m_RGBCaptureWidth;
    int m_RGBCaptureHeight;

    float* m_Distances;
    float* m_Amplitudes;
    unsigned char* m_Colors;

    size_t m_RGBBufferSize;
    size_t m_DepthBufferSize;

    mitk::Surface::Pointer m_Surface;
    CameraSpacePoint* m_CameraCoordinates;
-    vtkSmartPointer<vtkPolyData> m_PolyData;
+    //vtkSmartPointer<vtkPolyData> m_PolyData;
    ColorSpacePoint* m_ColorPoints;
  };

  KinectV2Controller::KinectV2ControllerPrivate::KinectV2ControllerPrivate():
    m_pKinectSensor(NULL),
    m_pMultiSourceFrameReader(NULL),
    m_pCoordinateMapper(NULL),
    m_pColorRGBX(NULL),
    m_ConnectionCheck(false),
    m_DepthCaptureWidth(512),
    m_DepthCaptureHeight(424),
    m_RGBCaptureWidth(1920),
    m_RGBCaptureHeight(1080),
    m_Distances(NULL),
    m_Amplitudes(NULL),
    m_Colors(NULL),
    m_RGBBufferSize(1920*1080*3),
    m_DepthBufferSize(sizeof(float)*512*424),
    m_Surface(NULL),
    m_CameraCoordinates(NULL),
-    m_PolyData(NULL),
+    //m_PolyData(NULL),
    m_ColorPoints(NULL)
  {
    // create heap storage for color pixel data in RGBX format
    m_pColorRGBX = new RGBQUAD[m_RGBCaptureWidth * m_RGBCaptureHeight];
    m_Distances = new float[m_DepthCaptureWidth * m_DepthCaptureHeight];
    m_Amplitudes = new float[m_DepthCaptureWidth * m_DepthCaptureHeight];
    m_Colors = new unsigned char[m_RGBBufferSize];
    m_Surface = mitk::Surface::New();
    m_CameraCoordinates = new CameraSpacePoint[m_DepthCaptureWidth * m_DepthCaptureHeight];
-    m_PolyData = vtkSmartPointer<vtkPolyData>::New();
+    //m_PolyData = vtkSmartPointer<vtkPolyData>::New();
    m_ColorPoints = new ColorSpacePoint[m_DepthCaptureWidth * m_DepthCaptureHeight];
  }

  KinectV2Controller::KinectV2ControllerPrivate::~KinectV2ControllerPrivate()
  {
    MITK_INFO << "~KinectV2ControllerPrivate";
  }

  bool KinectV2Controller::KinectV2ControllerPrivate::ErrorText(unsigned int error)
  {
    return true;
  }

  KinectV2Controller::KinectV2Controller(): d(new KinectV2ControllerPrivate)
  {
  }

  KinectV2Controller::~KinectV2Controller()
  {
    MITK_INFO << "~KinectV2Controller";
    delete d;
  }

  bool
KinectV2Controller::OpenCameraConnection()
  {
    if (!d->m_ConnectionCheck)
    {
      HRESULT hr;
      d->m_ConnectionCheck = true;

      hr = GetDefaultKinectSensor(&d->m_pKinectSensor);

      if (FAILED(hr))
      {
        d->m_ConnectionCheck = false;
      }
      else
      {
        hr = d->m_pKinectSensor->get_CoordinateMapper(&d->m_pCoordinateMapper);
        if (FAILED(hr))
        {
          d->m_ConnectionCheck = false;
        }
        hr = d->m_pKinectSensor->Open();
      }

      if (!d->m_pKinectSensor || FAILED(hr))
      {
        d->m_ConnectionCheck = false;
        MITK_WARN << "No Kinect 2 ready!";
      }
      else
      {
        MITK_INFO << "Kinect 2 successfully connected";
      }
    }
    return d->m_ConnectionCheck;
  }

  bool KinectV2Controller::InitializeMultiFrameReader()
  {
    //check if it is already initialized
    if((d->m_ConnectionCheck) && (d->m_pMultiSourceFrameReader))
    {
      return true;
    }
    else //initialize the frame reader
    {
      HRESULT hr = d->m_pKinectSensor->OpenMultiSourceFrameReader(
        FrameSourceTypes::FrameSourceTypes_Depth | FrameSourceTypes::FrameSourceTypes_Color | FrameSourceTypes::FrameSourceTypes_Infrared,
        &d->m_pMultiSourceFrameReader);

      if (SUCCEEDED(hr) && (d->m_pMultiSourceFrameReader))
      {
        MITK_INFO << "KinectV2 MultiFrameReader initialized";
        return true;
      }
    }
    return false;
  }

  bool KinectV2Controller::CloseCameraConnection()
  {
    // done with depth frame reader
    MITK_INFO << "CloseConnection";
    SafeRelease(d->m_pMultiSourceFrameReader);

    // close the Kinect Sensor
    if(d->m_pKinectSensor)
    {
      d->m_pKinectSensor->Close();
    }

    SafeRelease(d->m_pKinectSensor);
    d->m_ConnectionCheck = false;
    return true;
  }

  bool KinectV2Controller::UpdateCamera()
  {
    if(InitializeMultiFrameReader())
    {
      IMultiSourceFrame* pMultiSourceFrame = NULL;
      IDepthFrame* pDepthFrame = NULL;
      IColorFrame* pColorFrame = NULL;
      IInfraredFrame* pInfraRedFrame = NULL;

      HRESULT hr = -1;

      static DWORD lastTime = 0;
      DWORD currentTime = GetTickCount();

      //Check if we do not request data faster than 30 FPS. Kinect V2 can only deliver 30 FPS.
      if( static_cast<unsigned int>(currentTime - lastTime) > 33 )
      {
        hr = d->m_pMultiSourceFrameReader->AcquireLatestFrame(&pMultiSourceFrame);
        lastTime = currentTime;
      }

      if (SUCCEEDED(hr))
      {
        IDepthFrameReference* pDepthFrameReference = NULL;

        hr = pMultiSourceFrame->get_DepthFrameReference(&pDepthFrameReference);
        if (SUCCEEDED(hr))
        {
          hr = pDepthFrameReference->AcquireFrame(&pDepthFrame);
        }

        SafeRelease(pDepthFrameReference);
      }

      if (SUCCEEDED(hr))
      {
        IColorFrameReference* pColorFrameReference = NULL;

        hr = pMultiSourceFrame->get_ColorFrameReference(&pColorFrameReference);
        if (SUCCEEDED(hr))
        {
          hr = pColorFrameReference->AcquireFrame(&pColorFrame);
        }

        SafeRelease(pColorFrameReference);
      }

      if (SUCCEEDED(hr))
      {
        IInfraredFrameReference* pInfraredFrameReference = NULL;

        hr = pMultiSourceFrame->get_InfraredFrameReference(&pInfraredFrameReference);
        if (SUCCEEDED(hr))
        {
          hr = pInfraredFrameReference->AcquireFrame(&pInfraRedFrame);
        }

        SafeRelease(pInfraredFrameReference);
      }

      if (SUCCEEDED(hr))
      {
        UINT nDepthBufferSize = 0;
        UINT16 *pDepthBuffer = NULL;
        UINT16 *pIntraRedBuffer = NULL;

        ColorImageFormat imageFormat = ColorImageFormat_None;
        UINT nColorBufferSize = 0;
        RGBQUAD *pColorBuffer = NULL;

        if (SUCCEEDED(hr))
        {
          hr = pDepthFrame->AccessUnderlyingBuffer(&nDepthBufferSize, &pDepthBuffer);
        }
        if (SUCCEEDED(hr))
        {
          hr = pInfraRedFrame->AccessUnderlyingBuffer(&nDepthBufferSize, &pIntraRedBuffer);
        }
        if (SUCCEEDED(hr))
        {
          UINT pointCount = d->m_DepthCaptureWidth * d->m_DepthCaptureHeight;
          d->m_pCoordinateMapper->MapDepthFrameToCameraSpace(pointCount, pDepthBuffer, pointCount, d->m_CameraCoordinates);

          vtkSmartPointer<vtkPoints> points = vtkSmartPointer<vtkPoints>::New();
          vtkSmartPointer<vtkCellArray> vertices = vtkSmartPointer<vtkCellArray>::New();
          vtkSmartPointer<vtkFloatArray> textureCoordinates = vtkSmartPointer<vtkFloatArray>::New();
          textureCoordinates->SetNumberOfComponents(2);
          textureCoordinates->Allocate(pointCount);

          d->m_pCoordinateMapper->MapDepthFrameToColorSpace(pointCount, pDepthBuffer, pointCount, d->m_ColorPoints);

          for(int i = 0; i < d->m_DepthCaptureHeight*d->m_DepthCaptureWidth; ++i)
          {
            vtkIdType id = points->InsertNextPoint(d->m_CameraCoordinates[i].X, d->m_CameraCoordinates[i].Y, d->m_CameraCoordinates[i].Z);
            vertices->InsertNextCell(1);
            vertices->InsertCellPoint(id);
            d->m_Distances[i] = static_cast<float>(*pDepthBuffer);
            d->m_Amplitudes[i] = static_cast<float>(*pIntraRedBuffer);
            ++pDepthBuffer;
            ++pIntraRedBuffer;

            ColorSpacePoint colorPoint = d->m_ColorPoints[i];
            // retrieve the depth to color mapping for the current depth pixel
            int colorInDepthX = (int)(floor(colorPoint.X + 0.5));
            int colorInDepthY = (int)(floor(colorPoint.Y + 0.5));

            float xNorm = static_cast<float>(colorInDepthX)/d->m_RGBCaptureWidth;
            float yNorm = static_cast<float>(colorInDepthY)/d->m_RGBCaptureHeight;

            // make sure the depth pixel maps to a valid point in color space
            if ( colorInDepthX >= 0 && colorInDepthX < d->m_RGBCaptureWidth && colorInDepthY >= 0 && colorInDepthY < d->m_RGBCaptureHeight )
            {
              textureCoordinates->InsertTuple2(id, xNorm, yNorm);
            }
          }
-
-          d->m_PolyData->SetPoints(points);
-          d->m_PolyData->SetVerts(vertices);
-          d->m_PolyData->GetPointData()->SetTCoords(textureCoordinates);
-          vtkSmartPointer<vtkPolyData> copy = vtkSmartPointer<vtkPolyData>::New();
-          copy->DeepCopy(d->m_PolyData);
-          d->m_Surface->SetVtkPolyData(copy);
+          vtkSmartPointer<vtkPolyData> polyData = vtkSmartPointer<vtkPolyData>::New();
+          polyData->SetPoints(points);
+          polyData->SetVerts(vertices);
+          polyData->GetPointData()->SetTCoords(textureCoordinates);
+          d->m_Surface->SetVtkPolyData(polyData);
+          //d->m_Surface->SetVtkPolyData(d->m_PolyData);
          d->m_Surface->Modified();
        }
        else
        {
          MITK_ERROR << "AccessUnderlyingBuffer";
        }

        // get color frame data
        if (SUCCEEDED(hr))
        {
          hr = pColorFrame->get_RawColorImageFormat(&imageFormat);
        }

        if (SUCCEEDED(hr))
        {
          if (imageFormat == ColorImageFormat_Bgra)
          {
            hr = pColorFrame->AccessRawUnderlyingBuffer(&nColorBufferSize, reinterpret_cast<BYTE**>(&pColorBuffer));
          }
          else if (d->m_pColorRGBX)
          {
            pColorBuffer = d->m_pColorRGBX;
            nColorBufferSize = d->m_RGBCaptureWidth * d->m_RGBCaptureHeight * sizeof(RGBQUAD);
            hr = pColorFrame->CopyConvertedFrameDataToArray(nColorBufferSize, reinterpret_cast<BYTE*>(pColorBuffer), ColorImageFormat_Bgra);
          }
          else
          {
            hr = E_FAIL;
          }
          if (SUCCEEDED(hr))
          {
            for(int i = 0; i < d->m_RGBBufferSize; i+=3)
            {
              //convert from BGR to RGB
              d->m_Colors[i+0] = pColorBuffer->rgbRed;
              d->m_Colors[i+1] = pColorBuffer->rgbGreen;
              d->m_Colors[i+2] = pColorBuffer->rgbBlue;
              ++pColorBuffer;
            }
          }
        }
      }

      SafeRelease(pDepthFrame);
      SafeRelease(pColorFrame);
      SafeRelease(pInfraRedFrame);
      SafeRelease(pMultiSourceFrame);

      if( hr != -1 && !SUCCEEDED(hr) )
      {
        //The thread gets here, if the data is requested faster than the device can deliver it.
        //This may happen from time to time.
        return false;
      }
      return true;
    }
    MITK_ERROR << "Unable to initialize MultiFrameReader";
    return false;
  }

  void KinectV2Controller::GetDistances(float* distances)
  {
    memcpy(distances, d->m_Distances, d->m_DepthBufferSize);
  }

  void KinectV2Controller::GetRgb(unsigned char* rgb)
  {
    memcpy(rgb, d->m_Colors, d->m_RGBBufferSize);
  }

  void KinectV2Controller::GetAllData(float* distances, float* amplitudes, unsigned char* rgb)
  {
    this->GetDistances(distances);
    this->GetRgb(rgb);
    this->GetAmplitudes(amplitudes);
  }

  void KinectV2Controller::GetAmplitudes( float* amplitudes )
  {
    memcpy( amplitudes, d->m_Amplitudes, d->m_DepthBufferSize);
  }

  int KinectV2Controller::GetRGBCaptureWidth() const
  {
    return d->m_RGBCaptureWidth;
  }

  int KinectV2Controller::GetRGBCaptureHeight() const
  {
    return d->m_RGBCaptureHeight;
  }

  int KinectV2Controller::GetDepthCaptureWidth() const
  {
    return d->m_DepthCaptureWidth;
  }

  int KinectV2Controller::GetDepthCaptureHeight() const
  {
    return d->m_DepthCaptureHeight;
  }

  mitk::Surface::Pointer KinectV2Controller::GetSurface()
  {
    return d->m_Surface;
  }
}
diff --git a/Modules/ToFHardware/KinectV2/mitkKinectV2Device.cpp b/Modules/ToFHardware/KinectV2/mitkKinectV2Device.cpp
index 52b9fd7f00..5f0aded3f5 100644
--- a/Modules/ToFHardware/KinectV2/mitkKinectV2Device.cpp
+++ b/Modules/ToFHardware/KinectV2/mitkKinectV2Device.cpp
@@ -1,332 +1,341 @@
/*===================================================================

The Medical Imaging Interaction Toolkit (MITK)

Copyright (c) German Cancer Research Center,
Division of Medical and Biological Informatics.
All rights reserved.

This software is distributed WITHOUT ANY WARRANTY; without
even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE.

See LICENSE.txt or http://www.mitk.org for details.
===================================================================*/
#include "mitkKinectV2Device.h"

#include "mitkRealTimeClock.h"
#include "itkMultiThreader.h"
#include <itksys/SystemTools.hxx>
#include <mitkSmartPointerProperty.h>
+#include <vtkSmartPointer.h>
+#include <vtkPolyData.h>
+
namespace mitk
{
  KinectV2Device::KinectV2Device():
    m_DistanceDataBuffer(NULL),
    m_AmplitudeDataBuffer(NULL),
    m_RGBDataBuffer(NULL),
    m_DepthBufferSize(sizeof(float)*512*424),
    m_RGBBufferSize(3*1920*1080)
  {
    m_Controller = mitk::KinectV2Controller::New();
+    m_Surface = mitk::Surface::New();
  }

  KinectV2Device::~KinectV2Device()
  {
  }

  std::string GetDeviceName()
  {
    return "Microsoft Kinect 2 Device ";
  }

  bool KinectV2Device::OnConnectCamera()
  {
    bool ok = false;
    if (m_Controller)
    {
      ok = m_Controller->OpenCameraConnection();
      if (ok)
      {
        this->m_CaptureWidth = m_Controller->GetDepthCaptureWidth();
        this->m_CaptureHeight = m_Controller->GetDepthCaptureHeight();
        this->m_PixelNumber = this->m_CaptureWidth * this->m_CaptureHeight;

        this->m_RGBImageWidth = m_Controller->GetRGBCaptureWidth();
        this->m_RGBImageHeight = m_Controller->GetRGBCaptureHeight();
        this->m_RGBPixelNumber = this->m_RGBImageWidth * this->m_RGBImageHeight;

        // allocate buffer
        this->m_DistanceArray = new float[this->m_PixelNumber];
        for(int i=0; i<this->m_PixelNumber; i++) {this->m_DistanceArray[i]=0.0;}
        this->m_AmplitudeArray = new float[this->m_PixelNumber];
        for(int i=0; i<this->m_PixelNumber; i++) {this->m_AmplitudeArray[i]=0.0;}

        this->m_DistanceDataBuffer = new float*[this->m_MaxBufferSize];
        for(int i=0; i<this->m_MaxBufferSize; i++)
        {
          this->m_DistanceDataBuffer[i] = new float[this->m_PixelNumber];
        }
        this->m_AmplitudeDataBuffer = new float*[this->m_MaxBufferSize];
        for(int i=0; i<this->m_MaxBufferSize; i++)
        {
          this->m_AmplitudeDataBuffer[i] = new float[this->m_PixelNumber];
        }
        this->m_RGBDataBuffer = new unsigned char*[this->m_MaxBufferSize];
        for (int i=0; i<this->m_MaxBufferSize; i++)
        {
          this->m_RGBDataBuffer[i] = new unsigned char[this->m_RGBPixelNumber*3];
        }

        m_CameraConnected = true;
      }
    }
    return ok;
  }

  bool KinectV2Device::DisconnectCamera()
  {
    bool ok = false;
    if (m_Controller)
    {
      ok = m_Controller->CloseCameraConnection();

      // clean-up only if camera was connected
      if (m_CameraConnected)
      {
        delete [] m_DistanceArray;
        delete [] m_AmplitudeArray;

        for(int i=0; i<this->m_MaxBufferSize; i++)
        {
          delete[] this->m_DistanceDataBuffer[i];
          delete[] this->m_AmplitudeDataBuffer[i];
          delete[] this->m_RGBDataBuffer[i];
        }
        delete[] this->m_DistanceDataBuffer;
        delete[] this->m_AmplitudeDataBuffer;
        delete[] this->m_RGBDataBuffer;

        m_CameraConnected = false;
      }
    }
    return ok;
  }

  void KinectV2Device::StartCamera()
  {
    if (m_CameraConnected)
    {
      // get the first image
      this->m_Controller->UpdateCamera();
      this->m_ImageMutex->Lock();
      this->m_Controller->GetAllData(this->m_DistanceDataBuffer[this->m_FreePos],this->m_AmplitudeDataBuffer[this->m_FreePos],this->m_RGBDataBuffer[this->m_FreePos]);
      this->m_FreePos = (this->m_FreePos+1) % this->m_BufferSize;
      this->m_CurrentPos = (this->m_CurrentPos+1) % this->m_BufferSize;
      this->m_ImageSequence++;
      this->m_ImageMutex->Unlock();

      this->m_CameraActiveMutex->Lock();
      this->m_CameraActive = true;
      this->m_CameraActiveMutex->Unlock();
      this->m_ThreadID = this->m_MultiThreader->SpawnThread(this->Acquire, this);
      // wait a little to make sure that the thread is started
      itksys::SystemTools::Delay(10);
    }
    else
    {
      MITK_WARN << "Camera not connected";
    }
  }

  void KinectV2Device::StopCamera()
  {
    m_CameraActiveMutex->Lock();
    m_CameraActive = false;
    m_CameraActiveMutex->Unlock();
    itksys::SystemTools::Delay(100);
    if (m_MultiThreader.IsNotNull())
    {
      m_MultiThreader->TerminateThread(m_ThreadID);
    }
    // wait a little to make sure that the thread is terminated
    itksys::SystemTools::Delay(10);
  }

  bool KinectV2Device::IsCameraActive()
  {
    m_CameraActiveMutex->Lock();
    bool ok = m_CameraActive;
    m_CameraActiveMutex->Unlock();
    return ok;
  }

  void KinectV2Device::UpdateCamera()
  {
    if (m_Controller)
    {
      m_Controller->UpdateCamera();
    }
  }

  ITK_THREAD_RETURN_TYPE KinectV2Device::Acquire(void* pInfoStruct)
  {
    /* extract this pointer from Thread Info structure */
    struct itk::MultiThreader::ThreadInfoStruct * pInfo = (struct itk::MultiThreader::ThreadInfoStruct*)pInfoStruct;
    if (pInfo == NULL)
    {
      return ITK_THREAD_RETURN_VALUE;
    }
    if (pInfo->UserData == NULL)
    {
      return ITK_THREAD_RETURN_VALUE;
    }
    KinectV2Device* toFCameraDevice = (KinectV2Device*)pInfo->UserData;
    if (toFCameraDevice!=NULL)
    {
      mitk::RealTimeClock::Pointer realTimeClock;
      realTimeClock = mitk::RealTimeClock::New();
      double t1, t2;
      t1 = realTimeClock->GetCurrentStamp();
      int n = 100;
      bool overflow = false;
      bool printStatus = false;
      while (toFCameraDevice->IsCameraActive())
      {
        // update the ToF camera
-        toFCameraDevice->UpdateCamera();
        // get the image data from the camera and write it at the next free position in the buffer
        toFCameraDevice->m_ImageMutex->Lock();
+        toFCameraDevice->UpdateCamera();
        toFCameraDevice->m_Controller->GetAllData(toFCameraDevice->m_DistanceDataBuffer[toFCameraDevice->m_FreePos],toFCameraDevice->m_AmplitudeDataBuffer[toFCameraDevice->m_FreePos],toFCameraDevice->m_RGBDataBuffer[toFCameraDevice->m_FreePos]);
+        vtkSmartPointer<vtkPolyData> poly = vtkSmartPointer<vtkPolyData>::New();
+        if( toFCameraDevice->m_Controller->GetSurface()->GetVtkPolyData() != NULL )
+          poly->DeepCopy( toFCameraDevice->m_Controller->GetSurface()->GetVtkPolyData() );
+        toFCameraDevice->m_Surface->SetVtkPolyData( poly );
        toFCameraDevice->m_ImageMutex->Unlock();

        // call modified to indicate that cameraDevice was modified
        toFCameraDevice->Modified();
        toFCameraDevice->m_FreePos = (toFCameraDevice->m_FreePos+1) % toFCameraDevice->m_BufferSize;
        toFCameraDevice->m_CurrentPos = (toFCameraDevice->m_CurrentPos+1) % toFCameraDevice->m_BufferSize;
        toFCameraDevice->m_ImageSequence++;
        if (toFCameraDevice->m_FreePos == toFCameraDevice->m_CurrentPos)
        {
          overflow = true;
        }
        if (toFCameraDevice->m_ImageSequence % n == 0)
        {
          printStatus = true;
        }
        if (overflow)
        {
          overflow = false;
        }

        // print current framerate
        if (printStatus)
        {
          t2 = realTimeClock->GetCurrentStamp() - t1;
          MITK_INFO << " Framerate (fps): " << n / (t2/1000) << " Sequence: " << toFCameraDevice->m_ImageSequence;
          t1 = realTimeClock->GetCurrentStamp();
          printStatus = false;
        }
      } // end of while loop
    }
    return ITK_THREAD_RETURN_VALUE;
  }

  void KinectV2Device::GetAmplitudes(float* amplitudeArray, int& imageSequence)
  {
    m_ImageMutex->Lock();
    if (m_CameraActive)
    {
      memcpy(amplitudeArray, this->m_AmplitudeDataBuffer[this->m_CurrentPos], this->m_DepthBufferSize);
      imageSequence = this->m_ImageSequence;
    }
    else
    {
      MITK_WARN("ToF") << "Warning: Data can only be acquired if camera is active.";
    }
    m_ImageMutex->Unlock();
  }

  void KinectV2Device::GetIntensities(float*, int&)
  {
  }

  void KinectV2Device::GetDistances(float* distanceArray, int& imageSequence)
  {
    m_ImageMutex->Lock();
    if (m_CameraActive)
    {
      memcpy(distanceArray, this->m_DistanceDataBuffer[this->m_CurrentPos], this->m_DepthBufferSize);
      imageSequence = this->m_ImageSequence;
    }
    else
    {
      MITK_WARN("ToF") << "Warning: Data can only be acquired if camera is active.";
    }
    m_ImageMutex->Unlock();
  }

  void KinectV2Device::GetAllImages(float* distanceArray, float* amplitudeArray, float* intensityArray, char* sourceDataArray,
                                    int requiredImageSequence, int& capturedImageSequence, unsigned char* rgbDataArray)
  {
    if (m_CameraActive)
    {
      // check for empty buffer
      if (this->m_ImageSequence < 0)
      {
        // buffer empty
        MITK_WARN << "Buffer empty!! ";
        capturedImageSequence = this->m_ImageSequence;
        return;
      }

      // determine position of image in buffer
      int pos = 0;
      if ((requiredImageSequence < 0) || (requiredImageSequence > this->m_ImageSequence))
      {
        capturedImageSequence = this->m_ImageSequence;
        pos = this->m_CurrentPos;
      }
      else if (requiredImageSequence <= this->m_ImageSequence - this->m_BufferSize)
      {
        capturedImageSequence = (this->m_ImageSequence - this->m_BufferSize) + 1;
        pos = (this->m_CurrentPos + 1) % this->m_BufferSize;
      }
      else // (requiredImageSequence > this->m_ImageSequence - this->m_BufferSize) && (requiredImageSequence <= this->m_ImageSequence)
      {
        capturedImageSequence = requiredImageSequence;
        pos = (this->m_CurrentPos + (10-(this->m_ImageSequence - requiredImageSequence))) % this->m_BufferSize;
      }

      //// write image data to arrays
      m_ImageMutex->Lock();
      memcpy(distanceArray, this->m_DistanceDataBuffer[pos], this->m_DepthBufferSize);
      memcpy(amplitudeArray, this->m_AmplitudeDataBuffer[pos], this->m_DepthBufferSize);
      memcpy(rgbDataArray, this->m_RGBDataBuffer[pos], this->m_RGBBufferSize);
-      this->SetProperty("ToFSurface", mitk::SmartPointerProperty::New( m_Controller->GetSurface() ));
+      //mitk::Surface::Pointer surfaceClone = this->m_Surface->Clone();
+      this->SetProperty("ToFSurface", mitk::SmartPointerProperty::New( this->m_Surface ));
      m_ImageMutex->Unlock();
      this->Modified();
    }
    else
    {
      MITK_WARN("ToF") << "Warning: Data can only be acquired if camera is active.";
    }
  }

  KinectV2Controller::Pointer KinectV2Device::GetController()
  {
    return this->m_Controller;
  }

  int KinectV2Device::GetRGBCaptureWidth()
  {
    return this->m_RGBImageWidth;
  }

  int KinectV2Device::GetRGBCaptureHeight()
  {
    return this->m_RGBImageHeight;
  }
}
diff --git a/Modules/ToFHardware/KinectV2/mitkKinectV2Device.h b/Modules/ToFHardware/KinectV2/mitkKinectV2Device.h
index a463397d89..22e3d5a2f2 100644
--- a/Modules/ToFHardware/KinectV2/mitkKinectV2Device.h
+++ b/Modules/ToFHardware/KinectV2/mitkKinectV2Device.h
@@ -1,142 +1,144 @@
/*===================================================================

The Medical Imaging Interaction Toolkit (MITK)

Copyright (c) German Cancer Research Center,
Division of Medical and Biological Informatics.
All rights reserved.

This software is distributed WITHOUT ANY WARRANTY; without
even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE.

See LICENSE.txt or http://www.mitk.org for details.

===================================================================*/
#ifndef __mitkKinectV2Device_h
#define __mitkKinectV2Device_h

#include <mitkCommon.h>
#include "mitkKinectV2ModuleExports.h"
#include "mitkToFCameraDevice.h"
#include "mitkKinectV2Controller.h"

#include <mitkSurface.h>

#include <itkObject.h>
#include <itkObjectFactory.h>
#include <itkMultiThreader.h>

namespace mitk
{
  /**
  * @brief Interface for all representations of Microsoft Kinect V2 devices.
  * Kinect2Device internally holds an instance of Kinect2Controller and starts a thread
  * that continuously grabs images from the controller. A buffer structure buffers the last acquired images
  * to provide the image data loss-less.
  *
  * \throws mitkException In case of no connection, an exception is thrown!
  *
  * @ingroup ToFHardware
  */
  class MITK_KINECTV2MODULE_EXPORT KinectV2Device : public ToFCameraDevice
  {
  public:

    mitkClassMacro( KinectV2Device , ToFCameraDevice );
    itkNewMacro( Self );

    /*!
    \brief opens a connection to the ToF camera
    \throws mitkException In case of no connection, an exception is thrown!
    */
    virtual bool OnConnectCamera();
    /*!
    \brief closes the connection to the camera
    */
    virtual bool DisconnectCamera();
    /*!
    \brief starts the continuous updating of the camera.
    A separate thread updates the source data, the main thread processes the source data and creates images and coordinates
    \throws mitkException In case of no connection, an exception is thrown!
    */
    virtual void StartCamera();
    /*!
    \brief stops the continuous updating of the camera
    */
    virtual void StopCamera();
    /*!
    \brief updates the camera for image acquisition
    \throws mitkException In case of no connection, an exception is thrown!
    */
    virtual void UpdateCamera();
    /*!
    \brief returns whether the camera is currently active or not
    */
    virtual bool IsCameraActive();
    /*!
    \brief gets the amplitude data from the ToF camera as the strength of the active illumination of every pixel.
    Caution! The user is responsible for allocating and deleting the images.
    These values can be used to determine the quality of the distance values. The higher the amplitude value, the higher the accuracy of the according distance value
    \param imageSequence the actually captured image sequence number
    \param amplitudeArray contains the returned amplitude data as an array.
    */
    virtual void GetAmplitudes(float* amplitudeArray, int& imageSequence);
    /*!
    \brief Does nothing for Kinect V2 as there is no intensity data provided by the device.
    *
    * The method is an empty implementation, because the interface (ToFCameraDevice) requires it.
    */
    virtual void GetIntensities(float* intensityArray, int& imageSequence);
    /*!
    \brief gets the distance data from the ToF camera measuring the distance between the camera and the different object points in millimeters.
    Caution! The user is responsible for allocating and deleting the images.
    \param distanceArray contains the returned distances data as an array.
    \param imageSequence the actually captured image sequence number
    */
    virtual void GetDistances(float* distanceArray, int& imageSequence);
    /*!
    \brief gets the 3 images (distance, amplitude, intensity) from the ToF camera. Caution! The user is responsible for allocating and deleting the images.
    \param distanceArray Contains the distance data as an array.
    \param amplitudeArray Contains the infrared image.
    \param intensityArray Does nothing for Kinect V2.
    \param sourceDataArray Does nothing for Kinect V2.
    \param requiredImageSequence The required image sequence number.
    \param capturedImageSequence Does nothing for Kinect V2.
    */
    virtual void GetAllImages(float* distanceArray, float* amplitudeArray, float* intensityArray, char* sourceDataArray,
                              int requiredImageSequence, int& capturedImageSequence, unsigned char* rgbDataArray=NULL);
    /*!
    \brief returns the corresponding camera controller
    */
    KinectV2Controller::Pointer GetController();
    /*!
    \brief returns the width of the RGB image
    */
    int GetRGBCaptureWidth();
    /*!
    \brief returns the height of the RGB image
    */
    int GetRGBCaptureHeight();

  protected:

    KinectV2Device();
    ~KinectV2Device();

    /*!
    \brief Thread method continuously acquiring images from the ToF hardware
    */
    static ITK_THREAD_RETURN_TYPE Acquire(void* pInfoStruct);

    KinectV2Controller::Pointer m_Controller; ///< corresponding CameraController

    float** m_DistanceDataBuffer; ///< buffer holding the last distance images
    float** m_AmplitudeDataBuffer; ///< buffer holding the last amplitude images
    unsigned char** m_RGBDataBuffer; ///< buffer holding the last RGB image

    size_t m_DepthBufferSize; ///< Size of depth buffer (i.e. memory size of depth and infrared image)
    size_t m_RGBBufferSize; ///< Size of RGB buffer (i.e. memory size of RGB image)
+
+    mitk::Surface::Pointer m_Surface; ///< device-owned surface holding a deep copy of the controller's point cloud, updated by the acquisition thread
  };
} //END mitk namespace
#endif
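
Usage sketch (illustrative only, not part of the patch): after this change the acquisition thread deep-copies the controller's point cloud into the device-owned m_Surface, and GetAllImages() re-publishes that copy under the "ToFSurface" property instead of exposing the controller's live poly data. The snippet below shows how a client might pull one frame plus the buffered surface. It assumes caller-allocated buffers sized as in OnConnectCamera() (512x424 depth/infrared, 1920x1080x3 RGB) and that ToFCameraDevice offers a GetProperty() counterpart to the SetProperty() call used above; the function and variable names are hypothetical.

#include <mitkKinectV2Device.h>
#include <mitkSmartPointerProperty.h>
#include <mitkSurface.h>

#include <vector>

void GrabOneFrame(mitk::KinectV2Device::Pointer device)
{
  // Buffer sizes must match what GetAllImages() memcpy's out of the ring buffer.
  std::vector<float> distances(512 * 424);
  std::vector<float> amplitudes(512 * 424);
  std::vector<float> intensities(512 * 424); // ignored by Kinect V2, required by the interface
  std::vector<unsigned char> rgb(1920 * 1080 * 3);

  int requiredImageSequence = 0;
  int capturedImageSequence = 0;

  // Copies the most recent images out of the buffer and re-publishes the
  // device-owned surface under the "ToFSurface" property.
  device->GetAllImages(&distances[0], &amplitudes[0], &intensities[0], NULL,
                       requiredImageSequence, capturedImageSequence, &rgb[0]);

  mitk::SmartPointerProperty* surfaceProperty =
    dynamic_cast<mitk::SmartPointerProperty*>(device->GetProperty("ToFSurface"));
  if (surfaceProperty != NULL)
  {
    mitk::Surface::Pointer surface =
      dynamic_cast<mitk::Surface*>(surfaceProperty->GetSmartPointer().GetPointer());
    // 'surface' holds the deep copy written by the acquisition thread, so it stays
    // valid even while the controller rebuilds its own poly data for the next frame.
  }
}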