KINECT/PROCESSING: How to change radius of Blob by looking at rawdepth
in
Programming Questions
•
2 years ago
I need to make the radius of my blob bigger when a person is close to the Kinect, and smaller when the person is farther away. I know that I must look at the raw depth values that are smaller than the threshold and average them (keep a running sum and a count, then divide the sum by the count), and then use that average depth to decide the radius — just as is already done for the x,y values.
I have no idea where or how to write this code, please help.
Nodes are the blob
Here is the code.
// Daniel Shiffman
// Tracking the average location beyond a given depth threshold
// Thanks to Dan O'Sullivan
import org.openkinect.*;
import org.openkinect.processing.*;
// Showing how we can farm all the kinect stuff out to a separate class
KinectTracker tracker;
// Kinect Library object
Kinect kinect;
// Center point of the blob; moveShape() springs this toward the tracked person
float centerX = 0, centerY = 0;
// Blob radius in pixels, and the running angle (degrees) used to place nodes
float radius = 65, rotAngle = -90;
// Spring velocity of the center point
float accelX, accelY;
// Spring stiffness and per-frame energy loss for the center motion
float springing = .0009, damping = 0.99;
// Corner nodes of the soft-body blob
int nodes = 10;
// Resting (anchor) position of each node on the circle
float nodeStartX[] = new float[nodes];
float nodeStartY[] = new float[nodes];
// Displaced (drawn) position of each node
float[]nodeX = new float[nodes];
float[]nodeY = new float[nodes];
// Per-node oscillation phase and speed
float[]angle = new float[nodes];
float[]frequency = new float[nodes];
// Curve tightness; driven by how fast the blob is moving
float organicConstant = 1;
// Sketch initialization: open the Kinect, build the tracker, and
// give every blob node its own oscillation frequency.
void setup() {
  size(640, 520);
  kinect = new Kinect(this);
  tracker = new KinectTracker();
  // Random per-node frequencies make the blob wobble organically.
  for (int n = 0; n < nodes; n++) {
    frequency[n] = random(5, 12);
  }
}
// Per-frame loop: analyze the depth frame, draw it, then update and
// render the blob at the tracked person's smoothed location.
void draw() {
  background(255);

  // Run the tracking analysis, then show the depth image.
  tracker.track();
  tracker.display();

  changeRadius();

  // Smoothed ("lerped") centroid of the person drives the blob.
  PVector target = tracker.getLerpedPos();

  // Threshold is fetched for the (currently disabled) on-screen info text.
  int thresh = tracker.getThreshold();
  fill(100);

  drawShape();
  moveShape(target.x, target.y);
}
// Renders the blob: anchors the nodes evenly around a circle of the
// current radius, then draws a closed Catmull-Rom curve through the
// displaced node positions.
void drawShape() {
  // Removed: an unused `PVector v1 = tracker.getPos();` that fetched
  // the raw location every frame and never used it.
  smooth();
  // Place node anchor points around the center. rotAngle is a global
  // that accumulates across frames, but the loop adds exactly 360
  // degrees per call, so the node angles are stable modulo 360.
  for (int i = 0; i < nodes; i++) {
    nodeStartX[i] = centerX + cos(radians(rotAngle)) * radius;
    nodeStartY[i] = centerY + sin(radians(rotAngle)) * radius;
    rotAngle += 360.0 / nodes;
  }
  // Draw the polygon. Vertices are deliberately passed a second time
  // (all but the last) so the curve wraps smoothly on itself.
  curveTightness(organicConstant);
  fill(0);
  beginShape();
  for (int i = 0; i < nodes; i++) {
    curveVertex(nodeX[i], nodeY[i]);
  }
  for (int i = 0; i < nodes - 1; i++) {
    curveVertex(nodeX[i], nodeY[i]);
  }
  endShape(CLOSE);
}
// Springs the blob's center toward (newX, newY) and wobbles each node
// around its anchor point; wobble amplitude scales with the spring
// velocity so a fast-moving blob deforms more.
void moveShape(float newX, float newY) {
  // Spring force toward the target, scaled by stiffness.
  float dx = (newX - centerX) * springing;
  float dy = (newY - centerY) * springing;
  accelX += dx;
  accelY += dy;

  // Advance the center by the accumulated velocity.
  centerX += accelX;
  centerY += accelY;

  // Bleed off energy so the spring settles instead of oscillating forever.
  accelX *= damping;
  accelY *= damping;

  // Faster motion -> lower tightness -> a looser, more "organic" curve.
  organicConstant = 1 - ((abs(accelX) + abs(accelY)) * .1);

  // Displace each node from its anchor by a sine wobble whose phase
  // advances at that node's own frequency.
  for (int n = 0; n < nodes; n++) {
    nodeX[n] = nodeStartX[n] + sin(radians(angle[n])) * (accelX * 2);
    nodeY[n] = nodeStartY[n] + sin(radians(angle[n])) * (accelY * 2);
    angle[n] += frequency[n];
  }
}
// Adjusts the blob radius based on how close the tracked person is.
// BUG (this is the poster's actual question): getThreshold() returns
// the constant depth *threshold* (750 here), not the person's
// distance, so `t <= 300` never fires and the radius never changes —
// and when it did fire, the radius grew without bound. The radius
// should be driven by the average raw depth of the pixels under the
// threshold (e.g. a KinectTracker.getAverageDepth() accessor:
// smaller average depth = closer person = bigger radius, mapped with
// map(avg, 0, threshold, maxRadius, minRadius)). Until that accessor
// is wired in, this at least clamps the radius to a sane range.
void changeRadius() {
  int t = tracker.getThreshold();
  if (t <= 300) {
    radius ++;
  }
  // Keep the blob at a sensible on-screen size in all cases.
  radius = constrain(radius, 10, 200);
}
//if (keyCode == UP) {
//t+=5;
// tracker.setThreshold(t);
// }
// else if (keyCode == DOWN) {
// t-=5;
// tracker.setThreshold(t);
// }
// }
//}
// Sketch shutdown hook: release the Kinect device first, then let
// PApplet run its own teardown.
void stop() {
tracker.quit();
super.stop();
}
Here is the second class, which goes with the sketch above:
// Tracks the centroid of all depth pixels nearer than a threshold and
// (new) their average raw depth, which is exactly what is needed to
// scale the blob radius by how close the person is.
class KinectTracker {
  // Size of the kinect depth image
  int kw = 640;
  int kh = 480;
  // Raw depth values below this count as "the person"
  int threshold = 750;
  // Raw centroid of the thresholded pixels
  PVector loc;
  // Interpolated (smoothed) centroid
  PVector lerpedLoc;
  // Latest raw depth frame
  int[] depth;
  // Average raw depth of the thresholded pixels; starts at the
  // threshold (i.e. "as far as possible") until someone is seen.
  float avgDepth = threshold;
  PImage display;

  KinectTracker() {
    kinect.start();
    kinect.enableDepth(true);
    // We could skip processing the grayscale image for efficiency
    // but this example is just demonstrating everything
    kinect.processDepthImage(true);
    display = createImage(kw, kh, PConstants.RGB);
    loc = new PVector(0, 0);
    lerpedLoc = new PVector(0, 0);
  }

  void track() {
    // Get the raw depth as an array of integers
    depth = kinect.getRawDepth();
    // Being overly cautious here
    if (depth == null) return;

    float sumX = 0;
    float sumY = 0;
    float sumDepth = 0;
    float count = 0;
    for (int x = 0; x < kw; x++) {
      for (int y = 0; y < kh; y++) {
        // Mirroring the image horizontally
        int offset = kw - x - 1 + y * kw;
        int rawDepth = depth[offset];
        // Only pixels nearer than the threshold contribute
        if (rawDepth < threshold) {
          sumX += x;
          sumY += y;
          sumDepth += rawDepth;
          count++;
        }
      }
    }
    // As long as we found something, update centroid and average depth
    if (count != 0) {
      loc = new PVector(sumX / count, sumY / count);
      avgDepth = sumDepth / count;
    }
    // Interpolating the location, doing it arbitrarily for now
    lerpedLoc.x = PApplet.lerp(lerpedLoc.x, loc.x, 0.3f);
    lerpedLoc.y = PApplet.lerp(lerpedLoc.y, loc.y, 0.3f);
  }

  PVector getLerpedPos() {
    return lerpedLoc;
  }

  PVector getPos() {
    return loc;
  }

  // Average raw depth of the pixels under the threshold from the last
  // track() call: smaller = person is closer. Use this to drive the
  // blob radius, e.g. map(getAverageDepth(), 0, threshold, 200, 10).
  float getAverageDepth() {
    return avgDepth;
  }

  void display() {
    PImage img = kinect.getDepthImage();
    // Being overly cautious here
    if (depth == null || img == null) return;

    // Rewrite the depth image so pixels under the threshold show red.
    // A lot of this is redundant, but it is just for demonstration.
    display.loadPixels();
    for (int x = 0; x < kw; x++) {
      for (int y = 0; y < kh; y++) {
        // Mirroring the image horizontally
        int offset = kw - x - 1 + y * kw;
        int rawDepth = depth[offset];
        int pix = x + y * display.width;
        if (rawDepth < threshold) {
          // Shade of red proportional to depth
          float c = map(rawDepth, 0, threshold, 0, 255);
          display.pixels[pix] = color(c, 50, 50);
        }
        else {
          display.pixels[pix] = img.pixels[offset];
        }
      }
    }
    display.updatePixels();
    // Draw the image
    image(display, 0, 0);
  }

  void quit() {
    kinect.quit();
  }

  int getThreshold() {
    return threshold;
  }

  void setThreshold(int t) {
    threshold = t;
  }
}
1