Hello Processing Kinectors!
I've just gotten to the point where I've cobbled together this sketch: it creates a point cloud, I can navigate around it with the arrow keys or PeasyCam, and hitting TAB toggles recording of the output.
But if you run it, you'll see the whole background behind you as well. I'm trying to figure out a way to limit the depth of the output so I can cull those points: I want to 'delete' the background points and have some way to control how 'deep' the visible range goes... I'm just not sure how to begin... I've sketched the kind of check I imagine right after the code below.
import SimpleOpenNI.*;
import peasy.*;
PeasyCam cam;
SimpleOpenNI context;
float zoomF =0.3f;
float rotX = radians(180); // by default rotate the whole scene 180 deg around the x-axis,
// because the data from OpenNI comes in upside down
float rotY = radians(0);
PShape pointCloud;
int steps = 2;
//int frames = 240, num=500;
boolean record = false;
void setup()
{
size(1024, 768, OPENGL);
//context = new SimpleOpenNI(this,SimpleOpenNI.RUN_MODE_MULTI_THREADED);
context = new SimpleOpenNI(this);
if (context.isInit() == false)
{
println("Can't init SimpleOpenNI, maybe the camera is not connected!");
exit();
return;
}
// disable mirror
context.setMirror(false);
// enable depthMap generation
context.enableDepth();
// enable RGBMap generation
context.enableRGB();
// align depth data to image data
context.alternativeViewPointDepthToImage();
context.setDepthColorSyncEnabled(true);
//peasycam
cam = new PeasyCam(this, 500, 400, 0, 400);
}
void draw()
{
//save to file
if (record) saveFrame("output.###.jpg");
// update the cam
context.update();
background(0);
translate(width/2, height/2, 0);
rotateX(rotX);
rotateY(rotY);
scale(zoomF);
PImage rgbImage = context.rgbImage();
int[] depthMap = context.depthMap();
int steps = 4; // to speed up the drawing, draw only every fourth point (note: this shadows the global 'steps' above)
int index;
PVector realWorldPoint;
color pixelColor;
strokeWeight((float)steps/2);
translate(0, 0, -1000); // set the rotation center of the scene 1000 units in front of the camera
PVector[] realWorldMap = context.depthMapRealWorld();
beginShape(POINTS);
for (int y=0; y < context.depthHeight (); y+=steps)
{
for (int x=0; x < context.depthWidth (); x+=steps)
{
index = x + y * context.depthWidth();
if (depthMap[index] > 0)
{
// get the color of the point
pixelColor = rgbImage.pixels[index];
stroke(pixelColor);
// draw the projected point
realWorldPoint = realWorldMap[index];
vertex(realWorldPoint.x, realWorldPoint.y, realWorldPoint.z); // the 180 deg rotateX above already flips y and z into Processing's coordinate system, so the real-world values can be used as-is
}
}
}
endShape();
}
void keyPressed() {
switch(keyCode)
{
case TAB: // TAB toggles recording on and off
record = !record;
break;
case LEFT:
rotY += 0.1f;
break;
case RIGHT:
rotY -= 0.1f;
break;
case UP:
if (keyEvent.isShiftDown())
zoomF += 0.02f;
else
rotX += 0.1f;
break;
case DOWN:
if (keyEvent.isShiftDown())
{
zoomF -= 0.02f;
if (zoomF < 0.01)
zoomF = 0.01;
} else
rotX -= 0.1f;
break;
}
}
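The kind of thing I picture is a pair of near/far limits checked before each point gets drawn, roughly like this (minDepth and maxDepth are just names I made up, and I'm not sure this is the right way to use the depthMap() values):
// rough idea only -- skip any point outside a chosen depth range
// (depthMap() values should be distances in mm, if I understand correctly)
int minDepth = 500; // made-up near limit
int maxDepth = 1500; // made-up far limit
if (depthMap[index] > minDepth && depthMap[index] < maxDepth)
{
// ...only then stroke() and vertex() the point
}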
Answers
It might not be exactly what you are looking for, but this comes to mind:
import SimpleOpenNI.*;

SimpleOpenNI kinect;
PImage depthImage;

float closestValue = 610;
float farthestValue = 1525;

void setup() {
size(640, 480);
kinect = new SimpleOpenNI(this);
kinect.enableDepth();
}

void draw() {
kinect.update();
int[] depthValues = kinect.depthMap();
depthImage = kinect.depthImage();
for (int x = 0; x < 640; x++) {
for (int y = 0; y < 480; y++) {
int i = x + y * 640;
int currentDepthValue = depthValues[i];
// black out every pixel that is closer than closestValue or farther than farthestValue
if (currentDepthValue < closestValue || currentDepthValue > farthestValue) {
depthImage.pixels[i] = 0;
}
}
}
depthImage.updatePixels(); // write the modified pixels back before drawing
image(depthImage, 0, 0);
}
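The same min/max check drops straight into the point-cloud loop from your sketch: instead of blacking out pixels in the depth image, you just skip the vertex() call for anything outside the range, so the background points are never drawn. A rough, untested adaptation of your inner loop (it reuses the depthMap, rgbImage, realWorldMap and steps variables from your draw(); the 610/1525 values are millimetre limits you'd tune for your room):
// replacement for the nested loop inside draw() in your sketch
float closestValue = 610; // near limit in mm, just an example value
float farthestValue = 1525; // far limit in mm, just an example value
for (int y = 0; y < context.depthHeight(); y += steps)
{
for (int x = 0; x < context.depthWidth(); x += steps)
{
int index = x + y * context.depthWidth();
int d = depthMap[index];
// keep only points inside the chosen range; 0 means "no reading", and anything beyond farthestValue is background
if (d > closestValue && d < farthestValue)
{
stroke(rgbImage.pixels[index]);
PVector p = realWorldMap[index];
vertex(p.x, p.y, p.z);
}
}
}
If you want to control how deep the visible range is while the sketch is running, declare closestValue and farthestValue at the top of the sketch and bind a couple of extra keys in keyPressed() to nudge them up and down.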