Quantcast
Channel: beginners - openFrameworks
Viewing all articles
Browse latest Browse all 4929

ofxKinectForWindows2 background subtraction

$
0
0

@ire.vetro wrote:

Good morning @elliotwoods,
I'm totally new to this topic (OOP, openFrameworks, computer vision, etc.);
I tried to write code for background subtraction using Kinect v2 and OpenCV in OF.
I used some code sample like "opencvExample" and "exampleBodyIndexColor".
My code works but sometimes I get this error

my code for ofApp.h:

	// Kinect v2 device wrapper (ofxKinectForWindows2).
	ofxKFW2::Device kinect;
	// Raw Kinect SDK mapper used to translate depth-frame coordinates into
	// color-frame coordinates. Obtained from the sensor in setup(); not owned
	// via RAII here — NOTE(review): consider releasing it on exit.
	ICoordinateMapper* coordinateMapper;

	// One color-space coordinate per depth pixel, filled each frame by
	// MapDepthFrameToColorSpace (resized to DEPTH_WIDTH * DEPTH_HEIGHT in setup()).
	vector<ofVec2f>			colorCoords;
	// Depth-registered color image rebuilt every frame in update().
	ofxCvColorImage			colorImg;
	// Grayscale copy of colorImg — the "incoming" frame for differencing.
	ofxCvGrayscaleImage 	grayImage;
	// Captured background reference (snapshot of grayImage when learning).
	ofxCvGrayscaleImage 	grayBg;
	// abs(grayImage - grayBg), then thresholded; input to the contour finder.
	ofxCvGrayscaleImage 	grayDiff;

	// Finds blobs (and holes) in grayDiff each frame.
	ofxCvContourFinder 	contourFinder;

	// Binarization threshold for grayDiff, adjusted with +/- keys (0..255).
	int 				threshold;
	// When true, the next frame's grayImage is captured as the new background.
	bool				bLearnBakground;

ofApp.cpp:

void ofApp::setup() {

	// Bring up the Kinect v2 and enable every stream this app reads from.
	kinect.open();
	kinect.initDepthSource();
	kinect.initColorSource();
	kinect.initInfraredSource();
	kinect.initBodySource();
	kinect.initBodyIndexSource();

	// Acquire the SDK's depth<->color coordinate mapper. A negative HRESULT
	// means failure (equivalent to FAILED(hr)).
	if (kinect.getSensor()->get_CoordinateMapper(&coordinateMapper) < 0) {
		ofLogError() << "Could not acquire CoordinateMapper!";
	}

	// One color-space coordinate per depth pixel.
	colorCoords.resize(DEPTH_WIDTH * DEPTH_HEIGHT);

	// BUGFIX: these images must match the depth frame's dimensions.
	// They were previously allocated 320x240, but update() writes a pixel at
	// every (x, y) in [0, DEPTH_WIDTH) x [0, DEPTH_HEIGHT) — larger than
	// 320x240 on Kinect v2 — so every frame wrote past the end of the pixel
	// buffers, causing the intermittent crash.
	colorImg.allocate(DEPTH_WIDTH, DEPTH_HEIGHT);
	grayImage.allocate(DEPTH_WIDTH, DEPTH_HEIGHT);
	grayBg.allocate(DEPTH_WIDTH, DEPTH_HEIGHT);
	grayDiff.allocate(DEPTH_WIDTH, DEPTH_HEIGHT);

	// Capture the background from the first frame, then difference against it.
	bLearnBakground = true;
	threshold = 80;

}
void ofApp::update() {
	ofBackground(100, 100, 100);

	kinect.update();
	// Nothing to do until the Kinect delivers a fresh frame.
	if (!kinect.isFrameNew()) {
		return;
	}

	auto& colorPix = kinect.getColorSource()->getPixels();
	auto& depthPix = kinect.getDepthSource()->getPixels();

	// Ask the SDK where each depth pixel lands in the color image.
	coordinateMapper->MapDepthFrameToColorSpace(DEPTH_SIZE, (UINT16*)depthPix.getPixels(), DEPTH_SIZE, (ColorSpacePoint*)colorCoords.data());

	// BUGFIX: never write outside colorImg's allocated pixel buffer. If the
	// image is smaller than the depth frame (e.g. allocated 320x240), the
	// unguarded setColor(x, y, ...) calls below previously wrote out of
	// bounds every frame — the source of the intermittent crash.
	const int imgW = (int)colorImg.getPixels().getWidth();
	const int imgH = (int)colorImg.getPixels().getHeight();

	// Loop through the depth image, building a depth-registered color image.
	for (int y = 0; y < DEPTH_HEIGHT; y++) {
		for (int x = 0; x < DEPTH_WIDTH; x++) {
			if (x >= imgW || y >= imgH) {
				continue;
			}
			int index = (y * DEPTH_WIDTH) + x;
			// Default to white; overwritten below when the mapping is valid.
			colorImg.getPixels().setColor(x, y, ofColor::white);

			// For a given (x,y) in the depth image, look up where that point
			// would be in the color image.
			ofVec2f mappedCoord = colorCoords[index];

			// Mapped x/y coordinates come out as floats since depth <-> color
			// is not a 1:1 mapping — e.g. (100, 100) in depth space could map
			// to (405.84637, 238.13828) in color space. Round down so we can
			// look up the nearest pixel.
			mappedCoord.x = floor(mappedCoord.x);
			mappedCoord.y = floor(mappedCoord.y);

			// Skip coordinates outside the color frame (the mapper returns
			// out-of-range values for unmappable depth pixels).
			if (mappedCoord.x < 0 || mappedCoord.y < 0 || mappedCoord.x >= COLOR_WIDTH || mappedCoord.y >= COLOR_HEIGHT) {
				continue;
			}

			// Finally, pull the color from the color image based on its
			// coords in the depth image.
			colorImg.getPixels().setColor(x, y, colorPix.getColor(mappedCoord.x, mappedCoord.y));
		}
	}

	grayImage = colorImg;
	if (bLearnBakground == true) {
		// The = sign copies the pixels from grayImage into grayBg
		// (operator overloading) — this snapshot becomes the background.
		grayBg = grayImage;
		bLearnBakground = false;
	}

	// Take the abs value of the difference between background and incoming,
	// then threshold to a binary foreground mask.
	grayDiff.absDiff(grayBg, grayImage);
	grayDiff.threshold(threshold);

	// Find contours between 20 px and 1/3 of the image area.
	// BUGFIX: the upper bound was hard-coded as (340 * 240) / 3 — "340" was a
	// typo and in any case didn't track the image size. Derive it from
	// grayDiff's actual dimensions instead.
	const int maxArea = (imgW * imgH) / 3;
	// Find holes is set to true so we get interior contours as well.
	contourFinder.findContours(grayDiff, 20, maxArea, 10, true);
}

void ofApp::draw() {

	// Lay out the four stages of the pipeline in a 2x2 grid:
	// incoming color, grayscale, captured background, thresholded difference.
	ofSetHexColor(0xffffff);
	colorImg.draw(20, 20);
	grayImage.draw(360, 20);
	grayBg.draw(20, 280);
	grayDiff.draw(360, 280);

	// Dark panel behind the contour display.
	ofFill();
	ofSetHexColor(0x333333);
	ofDrawRectangle(360, 540, 320, 240);
	ofSetHexColor(0xffffff);

	// We could draw the whole contour finder at once:
	//contourFinder.draw(360,540);
	// ...but drawing each blob individually from the blobs vector shows how
	// to get access to them:
	for (int blobIndex = 0; blobIndex < contourFinder.nBlobs; blobIndex++) {
		auto& blob = contourFinder.blobs[blobIndex];
		blob.draw(360, 540);

		// Label the centroid when the blob is an interior contour (a hole).
		ofSetColor(255);
		if (blob.hole) {
			float labelX = blob.boundingRect.getCenter().x + 360;
			float labelY = blob.boundingRect.getCenter().y + 540;
			ofDrawBitmapString("hole", labelX, labelY);
		}
	}

	// Finally, an on-screen status report.
	ofSetHexColor(0xffffff);
	stringstream reportStr;
	reportStr << "bg subtraction and blob detection" << endl;
	reportStr << "press ' ' to capture bg" << endl;
	reportStr << "threshold " << threshold << " (press: +/-)" << endl;
	reportStr << "num blobs found " << contourFinder.nBlobs << ", fps: " << ofGetFrameRate();
	ofDrawBitmapString(reportStr.str(), 20, 600);

}

void ofApp::keyPressed(int key) {

	switch (key) {
	case ' ':
		// Re-capture the background from the next fresh frame.
		bLearnBakground = true;
		break;
	case '+':
		// Raise the binarization threshold, clamped to 255.
		threshold++;
		if (threshold > 255) threshold = 255;
		break;
	case '-':
		// Lower the binarization threshold, clamped to 0.
		threshold--;
		if (threshold < 0) threshold = 0;
		break;
	}
	// BUGFIX: the function's closing brace was missing in the pasted code,
	// which makes the file fail to compile as shown.
}

Thanks in advance
forgive me for the bad English.

Posts: 2

Participants: 2

Read full topic


Viewing all articles
Browse latest Browse all 4929

Trending Articles