Kinect V2_Background Removal_Doraemon "Anywhere Door" Space Transformation Effect




This time we are going to learn how to write a Kinect V2 background removal effect.


First, our initial goal is the result shown in the image below.




It looks like a lot of fun: you can place yourself in any country, or even in a rainforest.


Let's first observe what Kinect V2 provides: color data, body index data, and so on.


This is what I observed at the start.

There may be a few more kinds of data as well.

There is also a very intuitive foreground: a person's body silhouette (the body data).
With it we can composite the person onto an image for a fun little interaction that blends into the scene.




The official Kinect V2 site says the color resolution is 1920*1080, but let's first verify that ourselves.




Stage 1-1 code

Try printing the resolution of the color data
#include<iostream>
#include<stdio.h>
//opencv Library
#include"opencv\cv.h"
#include"opencv\highgui.h"
#include"opencv2\highgui\highgui.hpp"
#include"opencv2\opencv.hpp"
//Kinect SDK
#include "Kinect.h"
using namespace std;
using namespace cv;
int main()
{
    //open Kinect V2 sensor
    IKinectSensor   * mySensor = nullptr;
    GetDefaultKinectSensor(&mySensor);
    mySensor->Open();
    //obtain Kinect V2 color source
    IColorFrameSource *myColorSource = nullptr;
    mySensor->get_ColorFrameSource(&myColorSource);
    //obtain Kinect V2 color frame description
    IFrameDescription *myColorDescription = nullptr;
    //we want to know the width and height of color source
    int width, height;
    //we use "IColorFrameSource" to help us get frame description
    myColorSource->get_FrameDescription(&myColorDescription);
    //we use "IFrameDescription" to help us get width and height
    myColorDescription->get_Width(&width);
    myColorDescription->get_Height(&height);    
    cout << "Width:" << width << "\n" << "Height:" << height << endl;
    system("PAUSE"); //卡住用的
    return  0;
}





A key Kinect V2 concept:
Each kind of data (IBody, IDepth, IColor, etc.) comes with two companion objects:

    an xxxFrameSource (the source)
    an xxxFrameReader (the reader)

If you need the description of a source,
   IFrameDescription * is the structure used to obtain the description of any source!


Right now we want to obtain the width, height, and other description of the color data source.

The processing takes three steps:
        Get the "source" from the "sensor"
mySensor->get_ColorFrameSource(&myColorSource);
        Get the "frame description" from the "source"
myColorSource->get_FrameDescription(&myColorDescription);
        Get the "width" and "height" from the "frame description"
myColorDescription->get_Width(&width);
myColorDescription->get_Height(&height);
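
The same three-step pattern works for any other source too. A minimal sketch, assuming mySensor has already been opened as above (the final program at the end of this post uses exactly this pattern for the depth source):

//sensor -> source
IDepthFrameSource *myDepthSource = nullptr;
mySensor->get_DepthFrameSource(&myDepthSource);
//source -> frame description
IFrameDescription *myDepthDescription = nullptr;
myDepthSource->get_FrameDescription(&myDepthDescription);
//frame description -> width and height
int depthWidth = 0, depthHeight = 0;
myDepthDescription->get_Width(&depthWidth);
myDepthDescription->get_Height(&depthHeight);
cout << "Depth Width:" << depthWidth << "\n" << "Depth Height:" << depthHeight << endl;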




Let's pull this together.



So, in fact, two of these steps can be done side by side,

or rather, you pick whichever you need depending on what you want to do:

If you want to display the image, you must open an IxxxFrameReader (IColorFrameReader, IDepthFrameReader, etc.)

If you want the frame description, you must obtain an IFrameDescription.


Now I want to display the image,

so I need to use a Reader,

and a Reader is opened from its Source.
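
A minimal sketch of that Source -> Reader pattern, assuming myColorSource was obtained as in Stage 1-1 (the full Stage 1-2 program below adds the OpenCV display around it):

//open a Reader from the Source
IColorFrameReader *myColorReader = nullptr;
myColorSource->OpenReader(&myColorReader);
//poll the Reader; AcquireLatestFrame returns S_OK only when a new frame is ready
IColorFrame *myColorFrame = nullptr;
if (myColorReader->AcquireLatestFrame(&myColorFrame) == S_OK)
{
    //copy the pixel data out here (see the full code below)
    myColorFrame->Release(); //release the frame, or the reader stops delivering new frames
}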





And the color image will be displayed.

Stage 1-2 code
Show the color image

#include<iostream>
#include<stdio.h>
//opencv Library
#include"opencv\cv.h"
#include"opencv\highgui.h"
#include"opencv2\highgui\highgui.hpp"
#include"opencv2\opencv.hpp"
//Kinect SDK
#include "Kinect.h"
using namespace std;
using namespace cv;
int main()
{
    //open Kinect V2 sensor
    IKinectSensor   * mySensor = nullptr;
    GetDefaultKinectSensor(&mySensor);
    mySensor->Open();
    //obtain Kinect V2 color source
    IColorFrameSource *myColorSource = nullptr;
    mySensor->get_ColorFrameSource(&myColorSource);
    //obtain Kinect V2 color frame description
    IFrameDescription *myColorDescription = nullptr;
    //we want to know the width and height of color source
    int width, height;
    //we use "IColorFrameSource" to help us get frame description
    myColorSource->get_FrameDescription(&myColorDescription);
    //we use "IFrameDescription" to help us get width and height
    myColorDescription->get_Width(&width);
    myColorDescription->get_Height(&height);

    //cout << "Width:" << width << "\n" << "Height:" << height << endl;

    IColorFrameReader *myColorReader = nullptr;
    myColorSource->OpenReader(&myColorReader);

    Mat img(height, width, CV_8UC4); // 4 channels: B, G, R, A
    IColorFrame *myColorFrame = nullptr;

    for (;;)
    {
        if (myColorReader->AcquireLatestFrame(&myColorFrame) == S_OK)
        {
            UINT size = 0;
            myColorFrame->CopyConvertedFrameDataToArray(width*height*4,(BYTE*)img.data,ColorImageFormat_Bgra);
            namedWindow("Color",0);
            imshow("Color",img);
            myColorFrame->Release(); // without this the image stops updating
        }
            if (waitKey(30) == VK_ESCAPE) // escape 
                break;        
    }
    
    myColorReader->Release();
    myColorDescription->Release();
    myColorSource->Release();
    mySensor->Close();
    mySensor->Release();
    
    system("PAUSE"); //卡住用的
    return  0;
}





Right after that, I got curious again:

how does our Kinect V2 obtain

the body index data?


We follow the same recipe.


Stage 2-1 code

Try printing the resolution of the body index data

#include<iostream>
#include<stdio.h>
//opencv Library
#include"opencv\cv.h"
#include"opencv\highgui.h"
#include"opencv2\highgui\highgui.hpp"
#include"opencv2\opencv.hpp"
//Kinect SDK
#include "Kinect.h"
using namespace std;
using namespace cv;
int main()
{
    //open Kinect V2 sensor
    IKinectSensor   * mySensor = nullptr;               
    GetDefaultKinectSensor(&mySensor);
    mySensor->Open();
    //obtain Kinect V2 BodyIndex source
    IBodyIndexFrameSource  *myBodyIndexSource = nullptr;       
    mySensor->get_BodyIndexFrameSource(&myBodyIndexSource);
    
    //obtain Kinect V2 BodyIndex frame description
    IFrameDescription   * myBodyIndexDescription = nullptr;
    int height, width;
    myBodyIndexSource->get_FrameDescription(&myBodyIndexDescription);
    myBodyIndexDescription->get_Height(&height);
    myBodyIndexDescription->get_Width(&width);

    cout << "Width:" << width << "\n" << "Height:" << height << endl;

    system("PAUSE");
    return  0;
}

Stage 2-2 code
Show the Body Index
Values greater than 5 represent the background
#include<iostream>
#include<stdio.h>
//opencv Library
#include"opencv\cv.h"
#include"opencv\highgui.h"
#include"opencv2\highgui\highgui.hpp"
#include"opencv2\opencv.hpp"
//Kinect SDK
#include "Kinect.h"
using namespace std;
using namespace cv;
int main()
{
    //open Kinect V2 sensor
    IKinectSensor  *mySensor = nullptr;               
    GetDefaultKinectSensor(&mySensor);
    mySensor->Open();
    //obtain Kinect V2 BodyIndex source
    IBodyIndexFrameSource *myBodyIndexSource = nullptr;       
    mySensor->get_BodyIndexFrameSource(&myBodyIndexSource);
    
    //obtain Kinect V2 BodyIndex frame description
    IFrameDescription   * myBodyIndexDescription = nullptr;
    int height, width;
    myBodyIndexSource->get_FrameDescription(&myBodyIndexDescription);
    myBodyIndexDescription->get_Height(&height);
    myBodyIndexDescription->get_Width(&width);

    //cout << "Width:" << width << "\n" << "Height:" << height << endl;

    IBodyIndexFrameReader *myBodyIndexReader = nullptr;       
    myBodyIndexSource->OpenReader(&myBodyIndexReader);

    Mat img(height, width, CV_8UC3);
    IBodyIndexFrame *myBodyIndexFrame = nullptr;
    //use opencv to draw different color
    Vec3b   color[7] = { Vec3b(0, 0, 255),  //R
                         Vec3b(0, 255, 255), //yellow
                         Vec3b(255, 255, 255), //white
                         Vec3b(0, 255, 0), //G
                         Vec3b(255, 0, 0), //B
                         Vec3b(255, 0, 255), //purple
                         Vec3b(0, 0, 0) }; //black

    for (;;)
    {
        if (myBodyIndexReader->AcquireLatestFrame(&myBodyIndexFrame) == S_OK)
        {
            UINT    size = 0;
            BYTE    * buffer = nullptr;
            myBodyIndexFrame->AccessUnderlyingBuffer(&size, &buffer);
            for (int i = 0; i < height; i++)
                for (int j = 0; j < width; j++)
                {
                    int index = buffer[i * width + j];      
                    //0-5 means a tracked body, other values mean background; use colors to tell bodies (foreground) apart
                    if (index <= 5)
                        img.at<Vec3b>(i, j) = color[index];
                    else //values greater than 5 mean background
                        img.at<Vec3b>(i, j) = color[6];
                }
            imshow("Body Index", img);
            myBodyIndexFrame->Release();
        }
        if (waitKey(30) == VK_ESCAPE)
            break;
    }
    myBodyIndexReader->Release();
    myBodyIndexDescription->Release();
    myBodyIndexSource->Release();
    mySensor->Close();
    mySensor->Release();

    system("PAUSE");
    return  0;
}



The Kinect V2 SDK provides a class called

ICoordinateMapper.


It is mainly used to convert between coordinate systems, and it solves the problem that the different

resolutions of the various sources keep their points from lining up with one another.
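
Getting the mapper itself is a one-liner from the sensor. A minimal sketch, assuming mySensor has already been opened (the final program below does the same thing):

//obtain the coordinate mapper from the opened sensor
ICoordinateMapper *myMapper = nullptr;
mySensor->get_CoordinateMapper(&myMapper);
//...use myMapper->MapColorFrameToDepthSpace(...) as described below...
//myMapper->Release(); //when finished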


We need to map every point in the Color image to its corresponding point in the Depth image,


which we can do through the MapColorFrameToDepthSpace() function.

To start, we need to prepare three sources:
(1) Color
(2) BodyIndex
(3) Depth


The first two are needed by the feature itself.

The third is needed for the coordinate conversion:
the Color coordinate system cannot be mapped directly to BodyIndex, only to Depth.


Next we need to load the Background image,

and after loading it, resize it to the dimensions of the Color image,

so that when we paste points from the Color image over it, the coordinates need no further conversion:

we can simply replace the pixels, as sketched below.
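
A minimal sketch of that step with OpenCV, assuming colorWidth and colorHeight were read from the color frame description (the file path is just the example used in the final code):

//load the background and scale it to the color frame's resolution
Mat temp = imread("C:\\img_res\\outer_space.jpg");
Mat background;
resize(temp, background, Size(colorWidth, colorHeight)); //now 1920 x 1080, same as the color image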



Next we also need to read all three kinds of Frames. To keep the code clearly structured and easy to read,
we set up all of the initial configuration first.

We then use MapColorFrameToDepthSpace()

Official documentation link:
https://msdn.microsoft.com/en-us/library/windowspreview.kinect.coordinatemapper.mapcolorframetodepthspace.aspx

to map the Color frame into the Depth coordinate system.
It takes 4 parameters (a sketch of the call follows this list):



The 1st is the size of the depth frame,
the 2nd is the depth data,
the 3rd is the size of the color frame,
the 4th is an array of DepthSpacePoint: it stores, for every point in color space, the corresponding coordinate in depth space.
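
A minimal sketch of the call, using the buffer names from the final program below (depthData holds one depth frame, output has one DepthSpacePoint per color pixel):

//depthDataSize = depthWidth * depthHeight , colorDataSize = colorWidth * colorHeight
//depthData : UINT16[depthDataSize] filled from the depth frame
//output    : DepthSpacePoint[colorDataSize] that receives the mapping result
if (myMapper->MapColorFrameToDepthSpace(depthDataSize, depthData, colorDataSize, output) == S_OK)
{
    //output[row * colorWidth + col] now holds the depth-space (X, Y) for that color pixel
}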




Note that this function only performs the coordinate conversion: for every point in the color coordinate system,

it gives the coordinate that point maps to in the depth coordinate system; it does not touch the actual ColorFrame data.



Finally, iterate over the whole Color image.
For every point, look up its corresponding depth-space coordinate, use that coordinate to index into the BodyIndex data,
and check whether the point belongs to a body.

If it does, take that pixel from the color image and replace the pixel at the same coordinate in the background image.

【Hint: the X and Y values in DepthSpacePoint are floats.

When using them to compute the index into the BodyIndex data, you must cast them to int;

otherwise the result looks terrible:

areas that are not part of a body also get marked as body and replaced. A sketch of the correct indexing follows below.】
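
A minimal sketch of that indexing, as an excerpt of the inner loop in the final program (tPoint is the DepthSpacePoint mapped for color pixel (i, j), bodyData is the BodyIndex buffer):

DepthSpacePoint tPoint = output[i * colorWidth + j]; //mapping result for color pixel (i, j)
if (tPoint.X >= 0 && tPoint.X < depthWidth && tPoint.Y >= 0 && tPoint.Y < depthHeight)
{
    //cast X and Y to int BEFORE building the index;
    //leaving them as float mixes the fractional part of Y into the row offset and garbles the mask
    int index = (int)tPoint.Y * depthWidth + (int)tPoint.X;
    if (bodyData[index] <= 5) //0-5 = tracked body, anything else = background
    {
        //this color pixel belongs to a person: copy it over the background here
    }
}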

The final result



The final code


#include<iostream>
#include<stdio.h>
//opencv Library
#include"opencv\cv.h"
#include"opencv\highgui.h"
#include"opencv2\highgui\highgui.hpp"
#include"opencv2\opencv.hpp"
//Kinect SDK
#include "Kinect.h"
using namespace std;
using namespace cv;
int main()
{
    //open Kinect V2 sensor
    IKinectSensor  *mySensor = nullptr;
    GetDefaultKinectSensor(&mySensor);
    mySensor->Open();

    IFrameDescription   * myDescription = nullptr; 
    // shared by BodyIndex, Depth, and Color: each source can fill in its width/height description here

    /*Color---------------------------------------------------------------*/

    //obtain Kinect V2 Color source
    IColorFrameSource   * myColorSource = nullptr;
    mySensor->get_ColorFrameSource(&myColorSource);
    //open Color Reader from ColorFrameSource
    IColorFrameReader   * myColorReader = nullptr;
    myColorSource->OpenReader(&myColorReader);

    int colorHeight = 0, colorWidth = 0;
    myColorSource->get_FrameDescription(&myDescription);
    myDescription->get_Height(&colorHeight);
    myDescription->get_Width(&colorWidth);

    myDescription->Release();
    myColorSource->Release();

    /*Depth---------------------------------------------------------------*/

    //obtain Kinect V2 Depth source
    IDepthFrameSource   * myDepthSource = nullptr;
    mySensor->get_DepthFrameSource(&myDepthSource);
    //open Depth Reader from DepthFrameSource
    IDepthFrameReader   * myDepthReader = nullptr;
    myDepthSource->OpenReader(&myDepthReader);

    int depthHeight = 0, depthWidth = 0;
    myDepthSource->get_FrameDescription(&myDescription);
    myDescription->get_Height(&depthHeight);
    myDescription->get_Width(&depthWidth);

    myDescription->Release();
    myDepthSource->Release();

    /*BodyIndex---------------------------------------------------------------*/

    //obtain Kinect V2 BodyIndex source
    IBodyIndexFrameSource   * myBodyIndexSource = nullptr;
    mySensor->get_BodyIndexFrameSource(&myBodyIndexSource);
    //open BodyIndex Reader from BodyIndexFrameSource
    IBodyIndexFrameReader   * myBodyIndexReader = nullptr;
    myBodyIndexSource->OpenReader(&myBodyIndexReader);

    int bodyHeight = 0, bodyWidth = 0;
    myBodyIndexSource->get_FrameDescription(&myDescription);
    myDescription->get_Height(&bodyHeight);
    myDescription->get_Width(&bodyWidth);

    myDescription->Release();
    myBodyIndexSource->Release();
    
    // prepare buffer for each of kinect sources
    UINT    colorDataSize = colorHeight * colorWidth;
    UINT    depthDataSize = depthHeight * depthWidth;
    UINT    bodyDataSize = bodyHeight * bodyWidth;
    
    //load background
    Mat temp = imread("C:\\img_res\\outer_space.jpg"), background;                   
    //resize the background img to the Size of Color source  
    resize(temp, background, Size(colorWidth, colorHeight));   

    ICoordinateMapper   * myMapper = nullptr;               //obtain the coordinate mapper
    mySensor->get_CoordinateMapper(&myMapper);

    Mat colorData(colorHeight, colorWidth, CV_8UC4);        //prepare buffer
    UINT16  * depthData = new UINT16[depthDataSize];
    BYTE    * bodyData = new BYTE[bodyDataSize];
    DepthSpacePoint * output = new DepthSpacePoint[colorDataSize];

    //put each of  different frame (IColor , IDepth , IBodyIndex )
    while (1)
    {
        IColorFrame * myColorFrame = nullptr;
        while (myColorReader->AcquireLatestFrame(&myColorFrame) != S_OK);   //read colorFrame
        myColorFrame->CopyConvertedFrameDataToArray(colorDataSize * 4, colorData.data, ColorImageFormat_Bgra);
        myColorFrame->Release();

        IDepthFrame * myDepthframe = nullptr;
        while (myDepthReader->AcquireLatestFrame(&myDepthframe) != S_OK);   //read depthFrame
        myDepthframe->CopyFrameDataToArray(depthDataSize, depthData);
        myDepthframe->Release();

        IBodyIndexFrame * myBodyIndexFrame = nullptr;                       //read BodyIndexFrame
        while (myBodyIndexReader->AcquireLatestFrame(&myBodyIndexFrame) != S_OK);
        myBodyIndexFrame->CopyFrameDataToArray(bodyDataSize, bodyData);
        myBodyIndexFrame->Release();

        Mat copy = background.clone();        //clone the background image so we can draw the person onto it
        if (myMapper->MapColorFrameToDepthSpace(depthDataSize, depthData, colorDataSize, output) == S_OK)
        {
            for (int i = 0; i < colorHeight; ++i)
            for (int j = 0; j < colorWidth; ++j)
            {
                //for each color pixel, get the depth-space coordinate it maps to
                DepthSpacePoint tPoint = output[i * colorWidth + j];    

                if (tPoint.X >= 0 && tPoint.X < depthWidth && tPoint.Y >= 0 && tPoint.Y < depthHeight)  
                {
                    //compute the index into the BodyIndex buffer (X and Y must be cast to int)
                    int index = (int)tPoint.Y * depthWidth + (int)tPoint.X;
                    
                    //if this point on the color image belongs to a body,
                    //replace the corresponding point in the background image with it
                    if (bodyData[index] <= 5)
                    {
                        Vec4b   color = colorData.at<Vec4b>(i, j);
                        copy.at<Vec3b>(i, j) = Vec3b(color[0], color[1], color[2]);
                    }
                }
            }
            imshow("Back ground remove", copy);
        }
        if (waitKey(30) == VK_ESCAPE)
            break;
    }
    delete[] depthData;        
    delete[] bodyData;
    delete[] output;


    myMapper->Release();
    myColorReader->Release();
    myDepthReader->Release();
    myBodyIndexReader->Release();
    mySensor->Close();
    mySensor->Release();

    return  0;
}



If the window feels too big, just do a resize before the imshow.

Here I scaled it down to 600 * 400 for display, as sketched below.
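
A minimal sketch of that display-only resize (copy is the composited frame from the loop above; 600 * 400 is just the size I chose):

//shrink the composited frame for display only
Mat display;
resize(copy, display, Size(600, 400));
imshow("Background removal", display);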



It even treated the chair as part of a person = =|||


That's all for this learning share.


