Hello, I was able to make my sound play and stop with the Kinect, but it doesn't loop or fade out. I want the sound to keep playing with the video, fading in when the interaction is activated and fading out when the interaction is no longer happening. I also want the sound to loop.
import processing.sound.*;
import org.openkinect.processing.*;
import processing.video.*;
Movie vid;
Movie vid1;
SoundFile sound1;
SoundFile sound2;
Kinect2 kinect2;
//PImage depthImg;
//PImage img1;
//pixel
int minDepth=0;
int maxDepth=4500; //4.5m
boolean off = false;
void setup() {
size(1920,1080);
//fullScreen();
vid = new Movie(this, "test_1.1.mp4");
vid1 = new Movie(this, "test_1.1.mp4");
sound1 = new SoundFile(this, "cosmos.mp3");
sound2 = new SoundFile(this, "NosajThing_Distance.mp3");
//MOVIE FILES
//01.MOV
//03.MOV
//02.mov (File's too big)
//Urban Streams.mp4
//HiddenNumbers_KarinaLopez.mov
//test_w-sound.mp4
//test_1.1.mp4
//test005.mov
//SOUND FILES
//cosmos.mp3
//NosajThing_Distance.mp3
vid.loop();
vid1.loop();
kinect2 = new Kinect2(this);
kinect2.initDepth();
kinect2.initDevice();
//depthImg = new PImage(kinect2.depthWidth, kinect2.depthHeight);
//img1 = createImage(kinect2.depthWidth, kinect2.depthHeight, RGB);
}
void movieEvent(Movie m){
// read the new frame from whichever movie triggered the event
m.read();
}
void draw() {
vid.loadPixels();
vid1.loadPixels();
//image(kinect2.getDepthImage(), 0, 0);
int[] depth = kinect2.getRawDepth();
float sumX=0;
float sumY=0;
float totalPixels=0;
for (int x = 0; x < kinect2.depthWidth; x++){
for (int y = 0; y < kinect2.depthHeight; y++){
int offset = x + y * kinect2.depthWidth;
int d = depth[offset];
if ( d > 0 && d < 1000){
// //video.pixels[offset] = color(255, 100, 15);
sumX +=x;
sumY+=y;
totalPixels++;
brightness(0);
} else {
// //video.pixels[offset] = color(150, 250, 180);
brightness(255);
}
}
}
vid.updatePixels();
vid1.updatePixels();
float avgX = sumX/totalPixels;
float avgY=sumY/totalPixels;
//VID 01 - Screen 01
if (avgX>300 && avgX<500){
tint(255, (avgX)/2);
image(vid1, 1920/2, 0);
if(sound2.isPlaying()==0){
sound2.play(0.5);
sound2.amp(0.5);
}
}else{
tint(0, (avgX)/2);
image(vid1, 1920/2, 0);
if(sound2.isPlaying()==1){
delay(1);
//IT DIMS THE VOLUME TO 0 BUT IT DOESN'T GO BACK TO VOLUME 0.5 [sound2.amp(0.5);]
sound2.amp(0);
}
}
//VID 02 - Screen 01
if (avgX>50 && avgX<200){
tint(255, (avgX)/3);
image(vid, 0-(1920/2), 0);
}else{
tint(0, (avgX)/3);
image(vid, 0-(1920/2), 0);
}
}
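One way to get the fade behaviour described above is to start the file looping once in setup() and then, instead of calling play() and amp() inside the if/else, ease the volume toward a target value a little every frame. This is only a minimal sketch of that idea, assuming the Processing Sound library and the same cosmos.mp3 file; currentAmp and fadeTarget are made-up names, and the mouse test stands in for the Kinect avgX check.
import processing.sound.*;

SoundFile sound;
float currentAmp = 0;   // the volume we are actually applying
float fadeTarget = 0;   // the volume we want to reach (0 = silent, 0.5 = audible)

void setup() {
  size(400, 400);
  sound = new SoundFile(this, "cosmos.mp3");
  sound.loop();   // start looping once and never call play() again
  sound.amp(0);   // begin silent
}

void draw() {
  background(0);
  // stand-in for the Kinect test; in the sketch above this would be the avgX range check
  boolean interacting = (mouseX > 100 && mouseX < 300);
  fadeTarget = interacting ? 0.5 : 0.0;

  // move the current volume a small step toward the target each frame = fade in/out
  currentAmp = lerp(currentAmp, fadeTarget, 0.05);
  sound.amp(currentAmp);
}
Because the file is looping the whole time and only its volume changes, it stays in sync with the video and never has to restart.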
I'm attempting to build a system for hand tracking using the depth camera and color tracking, and to output the values to Max/MSP where I can use them for audio mappings for a college project. The sketch runs fine when I comment out my OSC send code, but when sending OSC the RGB and depth streams stall, only changing frame every thirty seconds or so.
//kinect
import org.openkinect.freenect.*;
import org.openkinect.freenect2.*;
import org.openkinect.processing.*;
import org.openkinect.tests.*;
//osc
import oscP5.*;
import netP5.*;
Kinect kinect;
OscP5 oscP5;
NetAddress myRemoteLocation;
PImage cam;
//Color
color trackColor;
color trackColor2;
float threshold = 150;
void setup() {
size(640, 480, P2D);
kinect = new Kinect(this);
kinect.initVideo();
kinect.initDepth();
background(255);
//Color
trackColor = color(255, 0, 0);
trackColor2 = color(0, 255, 0);
//osc
myRemoteLocation = new NetAddress("127.0.0.1", 8000);
oscP5 = new OscP5(this, 8000);
}
void draw() {
image(kinect.getVideoImage(), 0, 0);
PImage cam = kinect.getVideoImage();
int[] depth = kinect.getRawDepth();
float avgX1 = 0;
float avgY1 = 0;
float avgX2 = 0;
float avgY2 = 0;
int count1 = 0;
int count2 = 0;
for (int x = 0; x < kinect.width; x++ ) {
for (int y = 0; y < kinect.height; y++ ) {
int loc = x + y * kinect.width;
// What is current color
color currentColor = cam.pixels[loc];
float rc = red(currentColor);
float gc = green(currentColor);
float bc = blue(currentColor);
float r2 = red(trackColor);
float g2 = green(trackColor);
float b2 = blue(trackColor);
float r3 = red(trackColor2);
float g3 = green(trackColor2);
float b3 = blue(trackColor2);
float d = distSq(rc, gc, bc, r2, g2, b2);
float e = distSq(rc, gc, bc, r3, g3, b3);
if (d < threshold*threshold) {
stroke(255);
strokeWeight(1);
point(x, y);
avgX1 += x;
avgY1 += y;
count1++;
}
else if (e < threshold*threshold) {
stroke(255);
strokeWeight(1);
point(x, y);
avgX2 += x;
avgY2 += y;
count2++;
}
}
}
if (count1 > 0) {
avgX1 = avgX1 / count1;
avgY1 = avgY1 / count1;
fill(255, 0, 0);
strokeWeight(4.0);
stroke(0);
ellipse(avgX1, avgY1, 24, 24);
}
if (count2 > 0) {
avgX2 = avgX2 / count2;
avgY2 = avgY2 / count2;
//green
fill(0, 255, 0);
strokeWeight(4.0);
stroke(0);
ellipse(avgX2, avgY2, 24, 24);
}
//DEPTH
for (int x = 0; x < 640; x++) {
for (int y = 0; y < 480; y++) {
int offset = x + y * kinect.width;
int dpth = depth[offset];
if (!(dpth > 300 && dpth < 700)) {
cam.pixels[offset] = color(0);
} else if (dpth > 300 && dpth < 700) {
/*
//OSC LEFT
OscMessage leftXpos = new OscMessage("/leftXpos");
OscMessage leftYpos = new OscMessage("/leftYpos");
leftXpos.add(avgX1);
leftYpos.add(avgY1);
//OSC RIGHT
OscMessage rightXpos = new OscMessage("/rightXpos");
OscMessage rightYpos = new OscMessage("/rightYpos");
rightXpos.add(avgX2);
rightYpos.add(avgY2);
oscP5.send(leftXpos, myRemoteLocation);
oscP5.send(leftYpos, myRemoteLocation);
oscP5.send(rightXpos, myRemoteLocation);
oscP5.send(rightYpos, myRemoteLocation);
*/
}
}
}
}
float distSq(float x1, float y1, float z1, float x2, float y2, float z2) {
float d = (x2-x1)*(x2-x1) + (y2-y1)*(y2-y1) +(z2-z1)*(z2-z1);
return d;
}
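The stall is very likely caused by where the OSC code sits: the commented-out block is inside the per-pixel depth loop, so enabling it builds and sends four messages for every single pixel that falls inside the 300-700 mm band, which is tens of thousands of packets per frame. A common fix is to send the averaged hand positions once per frame, after both loops have finished. A minimal sketch of that idea, assuming the oscP5 and myRemoteLocation objects declared above; the /leftHand and /rightHand address patterns are made up.
// Send the averaged positions once per frame; call this at the very end of draw().
void sendHandPositions(float x1, float y1, float x2, float y2) {
  OscMessage left = new OscMessage("/leftHand");
  left.add(x1);
  left.add(y1);
  OscMessage right = new OscMessage("/rightHand");
  right.add(x2);
  right.add(y2);
  oscP5.send(left, myRemoteLocation);
  oscP5.send(right, myRemoteLocation);
}
It is also worth checking the ports: the sketch listens on 8000 and sends to 127.0.0.1:8000, so if Max/MSP is meant to receive on 8000 on the same machine, the two programs are competing for the same port; listening on a different port (for example 8001) in the sketch avoids that.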
I'm having trouble controlling the .mov file's tint with the raw depth from the Kinect.
import org.openkinect.processing.*;
import processing.video.*;
Movie video;
Kinect2 kinect2;
int minDepth=0;
int maxDepth=4500; //4.5m
void setup() {
size(1920,1080);
video = new Movie(this, "final-02.mov");
video.loop();
kinect2 = new Kinect2(this);
kinect2.initDepth();
kinect2.initDevice();
}
void movieEvent(Movie video){
video.read();
}
void draw() {
image(video, 0, 0);
video.loadPixels();
int[] depth = kinect2.getRawDepth();
for (int x = 0; x < kinect2.depthWidth; x++){
for (int y = 0; y < kinect2.depthHeight; y++){
int offset = x + y * kinect2.depthWidth;
int d = depth[offset];
if (d > 10 && d < 400){
//video.pixels[offset] = color(255, 100, 15);
tint(10,255);
} else {
//video.pixels[offset] = color(150, 250, 180);
tint(250,10);
}
}
println(x);
}
video.updatePixels();
image(video,0,0);
}
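A note on the tint problem above: tint() sets a global drawing state rather than coloring individual pixels, so calling it inside the depth loop just leaves behind whatever the last pixel decided, and by that point the movie has already been drawn at the top of draw(); the println(x) inside the loop also slows every frame down. One approach is to summarize the depth first (for example, count how many pixels are close to the sensor) and set a single tint before drawing the movie once. A minimal sketch of that idea, assuming the same kinect2 and video objects as above, with movieEvent() unchanged.
void draw() {
  int[] depth = kinect2.getRawDepth();

  // count how many depth pixels are "close" to the sensor
  int close = 0;
  for (int i = 0; i < depth.length; i++) {
    if (depth[i] > 10 && depth[i] < 400) {
      close++;
    }
  }

  // map the amount of close pixels to a brightness, set one tint, then draw once
  float amount = constrain(map(close, 0, depth.length / 4, 50, 255), 50, 255);
  tint(amount, 255);
  image(video, 0, 0);
}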
Hello all, I'm very new to all this and I thought I'd ask some people in the know first before I spend all day bashing my head against a wall. I'm trying to make a function that returns the depths of pixels from a Kinect camera so I can use them to control a separate object. I have this so far...
int getDepth() {
int myDepth;
int skip = 10;
int[] depth = kinect.getRawDepth();
PImage img = kinect.getDepthImage();
for (int x = 0; x < kinect.width; x+=skip) {
for (int y = 0; y < kinect.height; y+=skip) {
int index = x + y * img.width;
int offset = x + y * kinect.width;
int d = depth[offset];
float b = brightness(img.pixels[index]);
myDepth = d;
return myDepth;
}
}
}
"on y+=skip" I'm getting the error "dead code" and overall I'm getting "This method must return a result of type int" Any help would be greatly appreciated, thank you.
Thank you guys for being so helpful. I decided to try using my laptop camera as a second camera and so I can install these separately. Then I played with the filter() to get some cool effects. What do you guys think? The whole idea is to create a place where people walk by and see others also through the mixing of their silhouettes.
import org.openkinect.processing.*;
import processing.video.*;
Kinect2 kinect2;
Capture video;
PImage img;
void setup() {
size(512, 424, P3D);
kinect2 = new Kinect2(this);
kinect2.initDepth();
kinect2.initDevice();
img = createImage(kinect2.depthWidth, kinect2.depthHeight, RGB);
video = new Capture(this, 512, 424);
video.start();
}
void captureEvent(Capture video) {
// Step 4. Read the image from the camera.
video.read();
}
void draw() {
background(0);
img.loadPixels();
noTint();
int[] depth = kinect2.getRawDepth();
for (int x = 0; x < kinect2.depthWidth; x++) {
for (int y = 0; y < kinect2.depthHeight; y++) {
int offset = x + y * kinect2.depthWidth; //offset is translating grid location to array
int d = depth[offset];
if (d < 500) {
img.pixels[offset] = color(255, 0, 0);
} else if (d > 500 && d<1000) {
img.pixels[offset] = color(0, 255, 0);
} else if (d >1000 && d<1500) {
img.pixels[offset] = color(0, 0, 255);
} else {
img.pixels[offset] = color(0);
}
}
// if (d < 500) {
// tint(255, 0, 0, 63);
// img.pixels[offset] = color(255, 0, 0);
// } else if (d > 500 && d<1000) {
// img.pixels[offset] = color(0, 255, 0);
// } else if (d >1000 && d<1500) {
// img.pixels[offset] = color(0, 0, 255);
// } else {
// img.pixels[offset] = color(0);
// }
//}
}
img.updatePixels();
image(img, 0, 0);
tint(255, 0, 0, 127);
image(video, 0, 0);
//filter(POSTERIZE, 4);
filter(INVERT);
}
@jeremydouglass I added the tint and blend and tried changing the alpha attribute, and none of it worked.
import org.openkinect.processing.*;
Kinect2 kinect2;
PImage img;
PImage img2;
void setup() {
size(512, 424, P3D);
kinect2 = new Kinect2(this);
kinect2.initDepth();
kinect2.initDevice();
img = createImage(kinect2.depthWidth, kinect2.depthHeight, RGB);
img2 = createImage(kinect2.depthWidth, kinect2.depthHeight, RGB);
}
void draw() {
background(0);
img.loadPixels();
img2.loadPixels();
int[] depth = kinect2.getRawDepth();
for (int x = 0; x < kinect2.depthWidth; x++) {
for (int y = 0; y < kinect2.depthHeight; y++) {
int offset = x + y * kinect2.depthWidth; //offset is translating grid location to array
int d = depth[offset];
if (d < 500) {
tint(255, 0, 0, 63);
img.pixels[offset] = color(255, 0, 0);
img2.pixels[offset] = color(255, 0, 0);
} else if (d > 500 && d<1000) {
img.pixels[offset] = color(0, 255, 0);
img2.pixels[offset] = color(0, 255, 0);
} else if (d >1000 && d<1500) {
img.pixels[offset] = color(0, 0, 255);
img2.pixels[offset] = color(0, 0, 255);
} else {
img.pixels[offset] = color(0);
img2.pixels[offset] = color(0);
}
}
}
img.updatePixels();
img2.updatePixels();
image(img, 0, 0);
image(img2,0,0);
blend(img, 0, 0, 514, 424, 0, 0, 514, 424, MULTIPLY);
}
@kfrajer I tried before with PGraphics to blend a moving ellipse at mouseX/mouseY with another ellipse placed randomly, and that worked well, so I tried to use the same method here and now it's not working; perhaps it's because the Kinect can only act as one input.
So here I'm showing the Kinect image twice, as img and img2, but it fails to display the two images over each other like it did with PGraphics.
import org.openkinect.processing.*;
Kinect2 kinect2;
PImage img;
PImage img2;
void setup() {
size(512, 424, P3D);
kinect2 = new Kinect2(this);
kinect2.initDepth();
kinect2.initDevice();
img = createImage(kinect2.depthWidth, kinect2.depthHeight, RGB);
img2 = createImage(kinect2.depthWidth, kinect2.depthHeight, RGB);
}
void draw() {
background(0);
img.loadPixels();
img2.loadPixels();
int[] depth = kinect2.getRawDepth();
for (int x = 0; x < kinect2.depthWidth; x++) {
for (int y = 0; y < kinect2.depthHeight; y++) {
int offset = x + y * kinect2.depthWidth; //offset is translating grid location to array
int d = depth[offset];
if (d < 500) {
img.pixels[offset] = color(255, 0, 0);
img2.pixels[offset] = color(255, 0, 0);
} else if (d > 500 && d<1000) {
img.pixels[offset] = color(0, 255, 0);
img2.pixels[offset] = color(0, 255, 0);
} else if (d >1000 && d<1500) {
img.pixels[offset] = color(0, 0, 255);
img2.pixels[offset] = color(0, 0, 255);
} else {
img.pixels[offset] = color(0);
img2.pixels[offset] = color(0);
}
}
}
img.updatePixels();
img2.updatePixels();
image(img, 0, 0);
image(img2,0,0);
}
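One thing to note about the sketch above: img and img2 are filled with exactly the same colors from the same depth frame, and image(img2, 0, 0) is drawn fully opaque directly on top of image(img, 0, 0), so the second image simply covers the first and there is nothing visible to blend. To actually see two layers you need either two different sources or a translucent upper layer. A minimal sketch of the translucent approach, assuming the same img and img2 as above; the 20-pixel offset is only there so the overlap becomes visible while both images come from the same frame.
  img.updatePixels();
  img2.updatePixels();

  noTint();
  image(img, 0, 0);     // bottom layer, fully opaque
  tint(255, 127);       // draw the top layer at 50% opacity so both show through
  image(img2, 20, 0);   // hypothetical offset, just to make the overlap visible
  noTint();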
// Daniel Shiffman
// Kinect Point Cloud example
// https://github.com/shiffman/OpenKinect-for-Processing
// http://shiffman.net/p5/kinect/
import org.openkinect.freenect.*;
import org.openkinect.processing.*;

// Kinect Library object
Kinect kinect;

// Angle for rotation
float a = 0;

// We'll use a lookup table so that we don't have to repeat the math over and over
float[] depthLookUp = new float[2048];

void setup() {
  // Rendering in P3D
  size(800, 600, P3D);
  kinect = new Kinect(this);
  kinect.initDepth();

  // Lookup table for all possible depth values (0 - 2047)
  for (int i = 0; i < depthLookUp.length; i++) {
    depthLookUp[i] = rawDepthToMeters(i);
  }
}

void draw() {
  background(0);

  // Get the raw depth as array of integers
  int[] depth = kinect.getRawDepth();

  // We're just going to calculate and draw every 4th pixel (equivalent of 160x120)
  int skip = 4;

  // Translate and rotate
  translate(width/2, height/2, -50);
  rotateY(a);

  for (int x = 0; x < kinect.width; x += skip) {
    for (int y = 0; y < kinect.height; y += skip) {
      int offset = x + y*kinect.width;

      // Convert kinect data to world xyz coordinate
      int rawDepth = depth[offset];
      PVector v = depthToWorld(x, y, rawDepth);

      stroke(255);
      pushMatrix();
      // Scale up by 200
      float factor = 200;
      translate(v.x*factor, v.y*factor, factor-v.z*factor);
      // Draw a point
      point(0, 0);
      popMatrix();
    }
  }

  // Rotate
  a += 0.015f;
}

// These functions come from: http://graphics.stanford.edu/~mdfisher/Kinect.html
float rawDepthToMeters(int depthValue) {
  if (depthValue < 2047) {
    return (float)(1.0 / ((double)(depthValue) * -0.0030711016 + 3.3309495161));
  }
  return 0.0f;
}

PVector depthToWorld(int x, int y, int depthValue) {
  final double fx_d = 1.0 / 5.9421434211923247e+02;
  final double fy_d = 1.0 / 5.9104053696870778e+02;
  final double cx_d = 3.3930780975300314e+02;
  final double cy_d = 2.4273913761751615e+02;

  PVector result = new PVector();
  double depth = depthLookUp[depthValue]; //rawDepthToMeters(depthValue);
  result.x = (float)((x - cx_d) * depth * fx_d);
  result.y = (float)((y - cy_d) * depth * fy_d);
  result.z = (float)(depth);
  return result;
}
Hi, using Daniel Shiffman's MinMaxThreshold tutorial, I was able to change the color from red to blue to green based on a person's distance to the Kinect. I would like to make a wall where, when two people walk past each other, their silhouette colors mix. I tried to play with opacity over a background image, but it wouldn't mix two different silhouettes detected by the Kinect. Should I use blob detection to get the Kinect to detect multiple people, and how would I do this? I am using a Kinect v2 with Processing 3, and it seems SimpleOpenNI doesn't work with the Kinect v2? Thanks!
Here's the code:
import org.openkinect.processing.*;
// Kinect Library object
Kinect2 kinect2;
//float minThresh = 480;
//float maxThresh = 830;
PImage kin;
PImage bg;
void setup() {
size(512, 424, P3D);
kinect2 = new Kinect2(this);
kinect2.initDepth();
kinect2.initDevice();
kin = createImage(kinect2.depthWidth, kinect2.depthHeight, RGB);
bg = loadImage("1219690.jpg");
}
void draw() {
background(0);
//loadPixels();
tint(255,254);
image(bg,0,0);
kin.loadPixels();
//minThresh = map(mouseX, 0, width, 0, 4500);
//maxThresh = map(mouseY, 0, height, 0, 4500);
// Get the raw depth as array of integers
int[] depth = kinect2.getRawDepth();
//float sumX = 0;
//float sumY = 0;
//float totalPixels = 0;
for (int x = 0; x < kinect2.depthWidth; x++) {
for (int y = 0; y < kinect2.depthHeight; y++) {
int offset = x + y * kinect2.depthWidth;
int d = depth[offset];
//println(d);
//delay (10);
tint(255,127);
if (d < 500) {
kin.pixels[offset] = color(255, 0, 0);
//sumX += x;
//sumY += y;
//totalPixels++;
} else if (d > 500 && d<1000){
kin.pixels[offset] = color(0,255,0);
} else if (d >1000 && d<1500){
kin.pixels[offset] = color(0,0,255);
} else {
kin.pixels[offset] = color(0);
}
}
}
kin.updatePixels();
image(kin, 0, 0);
//float avgX = sumX / totalPixels;
//float avgY = sumY / totalPixels;
//fill(150,0,255);
//ellipse(avgX, avgY, 64, 64);
//fill(255);
//textSize(32);
//text(minThresh + " " + maxThresh, 10, 64);
}
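On the mixing question: a single Kinect gives exactly one depth value per pixel, so within one frame two silhouettes never share a pixel (the nearer person simply wins), and the tint() calls inside the pixel loop only change the drawing state, they never touch kin.pixels[]. One way to get colors to blend where people cross is to draw the current silhouette together with a slightly older copy using additive blending, so overlapping areas sum. A minimal sketch of that idea, assuming the same kin image as above; prev and drawSilhouettes() are made-up names, and drawSilhouettes() would replace the single image(kin, 0, 0) call at the end of draw().
PImage prev;   // copy of the previous frame's silhouette

void drawSilhouettes() {
  if (prev == null) {
    prev = kin.copy();
  }
  blendMode(ADD);     // overlapping colors add together instead of covering each other
  image(prev, 0, 0);
  image(kin, 0, 0);
  blendMode(BLEND);   // restore the default blend mode for the rest of the sketch
  prev = kin.copy();  // remember this frame for the next one
}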
import org.openkinect.processing.*;
//kinect library object
Kinect2 kinect;
// Angle for rotation
float a = 0;
void setup() {
  size(800, 600, P3D);
  kinect = new Kinect2(this);
  kinect.initDepth();
  kinect.initDevice();
}
void draw() {
background(0);
//Translate and rotatate
pushMatrix();
translate(width/2 , height/2, -2250);
rotateY(a);
//we're just going to calculate and draw every 2nd pixel
int skip = 4;
// Get the raw depth as array of integers
int[] depth = kinect.getRawDepth();
stroke(255);
strokeWeight(2);
beginShape(POINTS);
for (int x = 0; x < kinect.depthWidth; x += skip){
for (int y = 0; y < kinect.depthHeight; y += skip) {
int offset = x + y * kinect.depthWidth;
int d = depth[offset];
//calculte the x,y,z camera position based on depth information
PVector point = depthToPointCloudPos(x , y, d);
// Draw a point
vertex(point.x, point.y, point.z);
}
}
endShape();
popMatrix();
fill(255);
text(frameRate, 50, 50);
//Rotate
a += 0.0015;
}
//calculte the x,y,z camera position based on depth information
PVector depthToPointCloudPos( int x , int y, float depthValue){
PVector point = new PVector();
point.z = depthValue; // / (1.0f); // Convert from mm to meters
point.x = ( x - CameraParams.cx) * point.z / CameraParams.fx;
point.y = ( y - CameraParams.cy) * point.z / CameraParams.fy;
return point;
}
CLASS:
//Camera information based on the kinect hardware
static class CameraParams {
static float cx = 254.878f;
static float cy = 205.395f;
static float fx = 365.456f;
static float fy = 365.456f;
static float k1 = 0.0905474;
static float k2 = -0.26819;
static float k3 = 0.0950862;
static float p1 = 0.0;
static float p2 =0.0;
}
Hi, I am quite new to the forum.
I get a lot of help from this forum, and I hope to be able to give back when I get better with programming. :)
I am making a kinect game for my university project, and now I am testing some basic features.
What I want to do is display only a user's silhouette over a background (game stage, etc.).
So I tried to color the pixels of the depth image within a threshold with one color, and all other pixels with a transparent color:
color(0, 0, 0, 0);
But when I run the code, transparent pixels are displayed at first, but as the person moves around, the silhouette color remains and overlaps, creating a "painting" effect.
Demo video:
https://youtu.be/KXebF0i0FKg
It would be great if you can tell me what I am doing wrong, or just guide me to the right direction?
Thanks in advance.
Here's the code:
import org.openkinect.freenect.*;
import org.openkinect.processing.*;
Kinect kinect;
// Depth image
PImage depthImg;
// Threshold
int minDepth = 450;
int maxDepth = 890;
void setup() {
size(1280, 720);
background(145, 170, 180);
kinect = new Kinect(this);
kinect.initDepth();
kinect.enableMirror(true);
// Silhouette image
depthImg = new PImage(kinect.width, kinect.height, ARGB);
}
void draw() {
// Draw the raw image
//image(kinect.getDepthImage(), 0, 0);
depthImg.loadPixels();
// Threshold the depth image
int[] rawDepth = kinect.getRawDepth();
for (int x = 0; x < kinect.width; x++) {
for (int y = 0; y < kinect.height; y++) {
int index = x + y * kinect.width;
int p = rawDepth[index];
if (p > minDepth && p < maxDepth) {
depthImg.pixels[index] = color(62, 96, 111); //Silhouette color
} else {
depthImg.pixels[index] = color(0, 0, 0, 0); // background color
}
}
}
// Draw the thresholded image
depthImg.updatePixels();
imageMode(CENTER);
image(depthImg, width/2, height - kinect.height / 2);
}
OS: Mac OS Sierra v10.12.6
Processing: v3.3.6
Library: Open Kinect
Kinect Hardware: Microsoft Kinect v1, 1414!
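The "painting" effect above usually means the frame is never cleared: background() is only called once in setup(), so each new ARGB depth image is composited on top of the previous frame, and its transparent pixels let the old silhouette show through. A minimal sketch of the fix, assuming the same variables as above, is simply to clear the frame at the top of draw().
void draw() {
  // clear the previous frame first, otherwise old silhouettes accumulate
  background(145, 170, 180);

  depthImg.loadPixels();
  // ... the threshold loop exactly as above ...
  depthImg.updatePixels();

  imageMode(CENTER);
  image(depthImg, width/2, height - kinect.height / 2);
}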
I am trying to send Kinect depth data from one computer to create a point cloud on another. I have tried sending the data through OSC, but I'm not sure how to declare it on the receiving end, since the depth data is sent as an int[].
This is not my entire code for each side, just what matters to my question, to avoid confusion ("hello" is what I am trying to send). This is the sender code:
int[] hello;
void draw() {
int[] depth = kinect.getRawDepth();
hello = depth;
}
void OscEvent(OscMessage theOscMessage) {
//create a message with a unique address pattern
OscMessage myOutGoingMessage = new OscMessage( playerAddressPattern );
myOutGoingMessage.add(hello); //send the depth data (as an int string)
osc.send( myOutGoingMessage, destination );
}
This is the applicable code from the receiver
int[] hello;//Declare the depth that is coming in from the kinect
int[] depth = hello; // the actual depth data coming from the remote kinect as the variable "hello"
void OscEvent (OscMessage theOscMessage) {
hello=theOscMessage.get(0).intvalue(); //the value being received is an int[], not an int as i have typed- how do i declare this?
}
So what might help me here is how I would declare "hello = theOscMessage.get(0).intvalue();" as an int[].
(MacOS High Sierra, Processing 3.0.1)
@kfrajer I can send data between two computers (I have a PONG game OSC example where there's a controller on one computer and the game on another, which is also what the setup of my current code is based on).
My apologies: by not being able to send depth data, I meant I'm not entirely sure how to send and declare the data. It is an int[], which I'm able to send, but I'm not sure how to declare the data on the receiver.
Below is my attempt of doing so
I'm so sorry if this is messy or not properly formatted. If this is too much to interpret, that's understandable.
Here is the source code, "hello" is the test variable I'm trying to send
import netP5.*;
import oscP5.*;
import org.openkinect.freenect.*;
import org.openkinect.processing.*;
OscP5 osc;
int myListeningPort = 12001;
// Someone to talk to
NetAddress destination;
String destinationIP = "127.0.0.1"; // pong IP
int destinationPort = 12000; // pong port
String playerAddressPattern = "/";
// Kinect Library object
Kinect kinect;
float[] depthLookUp = new float[750];
int[] hello; //Declaring the variable to send
void setup() {
// Rendering in P3D
size(200, 200, P3D);
kinect = new Kinect(this);
kinect.initDepth();
// Lookup table for all possible depth values (0 - 2047)
osc = new OscP5(this, myListeningPort);
destination = new NetAddress(destinationIP, destinationPort);
}
void draw() {
int[] depth = kinect.getRawDepth();
hello = depth;
}
void OscEvent(OscMessage theOscMessage) {
//create a message with a unique address pattern
OscMessage myOutGoingMessage = new OscMessage( playerAddressPattern );
//myOutGoingMessage.add("hi");
myOutGoingMessage.add(hello); //send the depth data (as an int string)
osc.send( myOutGoingMessage, destination ); // actually do the sending
}
This is the receiver, receiving the depth data. The second-to-last block of code is where I assume the issue is: I'm not properly declaring the incoming int[] as an int[]. I know I am currently trying to declare it as a regular int.
import oscP5.*;
import netP5.*;
import org.openkinect.freenect.*;
import org.openkinect.processing.*;
// Kinect Library object
Kinect kinect;
OscP5 oscP5; // osc object
OscMessage currentMessage;
int iNet_myListeningPort = 12000; // port I am listening on
float a = 0;
// We'll use a lookup table so that we don't have to repeat the math over and over
float[] depthLookUp = new float[750];
int[] hello;//Declare the depth that is coming in from the kinect.
void setup() {
size(800, 600, P3D);
background(0);
// This needs to be sent from pointcloud
//kinect = new Kinect(this);
kinect.initDepth();
//kinect.update();
for (int i = 0; i < depthLookUp.length; i++) {
depthLookUp[i] = rawDepthToMeters(i);
}
oscP5 = new OscP5(this, iNet_myListeningPort);
}
void draw() {
background(0);
int[] depth = hello; // the actual depth values from the remote kinect.
// We're just going to calculate and draw every 4th pixel (equivalent of 160x120)
int skip = 4; //
translate(width/2, height/2, 300); //dot distance
for (int x = 0; x < kinect.width; x += skip) {
for (int y = 0; y < kinect.height; y += skip) {
int offset = x + y*kinect.width;
// Convert kinect data to world xyz coordinate
int rawDepth = depth[offset];
PVector v = depthToWorld(x, y, rawDepth);
stroke(255, 0, 0);
pushMatrix();
float factor = 400; //overall Scale
translate(v.x*factor, v.y*factor, factor-v.z*factor);
// Draw a point
point(0, 0);
popMatrix();
}
}
}
float rawDepthToMeters(int depthValue) {
if (depthValue < 750) {
return (float)(1.0 / ((double)(depthValue) * -0.0030711016 + 3.3309495161));
}
return 0.0f;
}
PVector depthToWorld(int x, int y, int depthValue) {
final double fx_d = 1.0 / 5.9421434211923247e+02;
final double fy_d = 1.0 / 5.9104053696870778e+02;
final double cx_d = 3.3930780975300314e+02;
final double cy_d = 2.4273913761751615e+02;
PVector result = new PVector();
double depth = rawDepthToMeters(depthValue);
result.x = (float)((x - cx_d) * depth * fx_d);
result.y = (float)((y - cy_d) * depth * fy_d);
result.z = (float)(depth);
return result;
}
void OscEvent (OscMessage theOscMessage) {
hello=theOscMessage.get(0).intvalue(); //the value being received is an int[], not an int - how do i declare this?
}
This is my first time using Kinect and only my second time using oscP5 (with my first oscP5 work only requiring me to change the remote) so I really put myself in an awkward position here lol.
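Two things stand out in the sender/receiver pair above. First, oscP5 finds its callback by name, and the name has to be spelled exactly oscEvent (lower-case o); with OscEvent it is never called on either side, so the sender never actually sends anything. Second, a full 640x480 raw depth frame is 307,200 ints, far more than fits in a single OSC/UDP packet, so the frame has to be downsampled and packed as a list of plain int arguments that the receiver reads back one by one. A minimal sketch of both sides, assuming the same ports and objects as above; /depth and SKIP are made-up names, and on a real network a larger SKIP may be needed.
// Sender side: call sendDepth(kinect.getRawDepth()) at the end of draw(), not from an OSC callback.
int SKIP = 16;   // only send every 16th pixel of every 16th row (40 x 30 samples)

void sendDepth(int[] depth) {
  OscMessage msg = new OscMessage("/depth");
  for (int y = 0; y < 480; y += SKIP) {
    for (int x = 0; x < 640; x += SKIP) {
      msg.add(depth[x + y * 640]);   // each sample becomes one int argument
    }
  }
  osc.send(msg, destination);
}
and on the receiving sketch:
// Receiver side: note the lower-case method name; oscP5 only calls a method spelled oscEvent().
int[] hello = new int[(640 / 16) * (480 / 16)];   // same downsampled size as the sender

void oscEvent(OscMessage m) {
  if (m.checkAddrPattern("/depth")) {
    for (int i = 0; i < hello.length; i++) {
      hello[i] = m.get(i).intValue();   // read the int arguments back one by one
    }
  }
}
The receiver's draw() then has to index the downsampled 40 x 30 grid rather than the full 640 x 480 frame when it rebuilds the point cloud.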
Hi all, this is my first post here
I am trying to feed my slightly tweaked PointCloud example from the Open Kinect v1 library from one computer to another (for example's sake, from one Processing sketch to be displayed in another) through the OSC library.
I have attempted it myself, but was unable to send the depth data properly (at all), and I am just lost as to where to start and how to even send the data so it can be displayed.
Here is my Kinect code (without any OSC), can anyone help or guide me through what to send/receive? (I'm on Mac OS 10.13 and Processing 3.0.1)
import org.openkinect.freenect.*;
import org.openkinect.processing.*;
// Kinect Library object
Kinect kinect;
// Angle for rotation
float a = 0;
// We'll use a lookup table so that we don't have to repeat the math over and over
float[] depthLookUp = new float[750];
void setup() {
// Rendering in P3D
size(800, 600, P3D);
kinect = new Kinect(this);
kinect.initDepth();
// Lookup table for all possible depth values (0 - 2047)
for (int i = 0; i < depthLookUp.length; i++) {
depthLookUp[i] = rawDepthToMeters(i);
}
}
void draw() {
background(0);
// Get the raw depth as array of integers
int[] depth = kinect.getRawDepth();
// We're just going to calculate and draw every 4th pixel (equivalent of 160x120)
int skip = 4; //
// Translate and rotate
translate(width/2, height/2, 300); //dot distance
//rotateY(a);
for (int x = 0; x < kinect.width; x += skip) {
for (int y = 0; y < kinect.height; y += skip) {
int offset = x + y*kinect.width;
// Convert kinect data to world xyz coordinate
int rawDepth = depth[offset];
PVector v = depthToWorld(x, y, rawDepth);
stroke(255, 0, 0);
pushMatrix();
float factor = 400; //overall Scale
translate(v.x*factor, v.y*factor, factor-v.z*factor);
// Draw a point
point(0, 0);
//line(0,0,2,2);
popMatrix();
}
}
// Rotate
//a += 0.015f;
}
// These functions come from: http://graphics.stanford.edu/~mdfisher/Kinect.html
float rawDepthToMeters(int depthValue) {
if (depthValue < 750) {
return (float)(1.0 / ((double)(depthValue) * -0.0030711016 + 3.3309495161));
}
return 0.0f;
}
PVector depthToWorld(int x, int y, int depthValue) {
final double fx_d = 1.0 / 5.9421434211923247e+02;
final double fy_d = 1.0 / 5.9104053696870778e+02;
final double cx_d = 3.3930780975300314e+02;
final double cy_d = 2.4273913761751615e+02;
PVector result = new PVector();
double depth = rawDepthToMeters(depthValue);
result.x = (float)((x - cx_d) * depth * fx_d);
result.y = (float)((y - cy_d) * depth * fy_d);
result.z = (float)(depth);
return result;
}
I think I've cracked it, and the solution involved your suggestion @kfrajer, so thank you.
import org.openkinect.freenect.*;
import org.openkinect.freenect2.*;
import org.openkinect.processing.*;
import org.openkinect.tests.*;
Kinect kinect;
int kinectWidth = 640;
int kinectHeight = 480;
PImage cam;
int minThresh = 300;
int maxThresh = 700;
float reScale;
void setup() {
size(640, 480, P2D);
kinect = new Kinect(this);
kinect.enableMirror(true);
kinect.initDepth();
kinect.initVideo();
reScale = (float) width / kinectWidth;
background(255);
}
void draw() {
image(kinect.getVideoImage(),0,0);
PImage cam = kinect.getVideoImage();
int[] depth = kinect.getRawDepth();
for (int x = 0; x < 640; x++) {
for (int y = 0; y < 480; y++) {
int offset = x + y * kinect.width;
int d = depth[offset];
if (!(d > minThresh && d < maxThresh)) {
cam.pixels[offset] = color(0);
}
}
}
}
Basically I think I just had to draw the image in draw() every frame. I'm not 100% sure why, but thank goodness it works; it was kind of a happy accident, to be honest.
However, there is still the problem of the offset between the RGB camera and the depth camera.
I've tried moving the RGB image left a bit, but the depth seems to move with it.
Does the current code work? Can you map the depth array onto the cam.pixels array? In other words, are they the same size and do they represent the same field of view?
What if you make this small change, from:
if (d > minThresh && d < maxThresh) {
cam.pixels[offset] = color(255,0,200);
} else{
cam.pixels[offset] = color(0);
}
to
if ( ! (d > minThresh && d < maxThresh) ) {
cam.pixels[offset] = color(0);
}
This resets all those pixels outside the threshold. The pixels within the Threshold are not modified and they will contain the original RGB color.
Another suggestion is to explore previous posts if you haven't done so: https://forum.processing.org/two/search?Search=getRawDepth and, in your case, make sure you are working with posts relevant to Kinect v1.
Kf
Processing 3, Kinect v1
Basically, I've stripped my code down so it's just this; at the moment it only shows the user when you're in the correct threshold. You turn pink, as I have made it that color. I want to know how to show only the RGB pixels for the bits that are pink.
Could I also just lay a black shape over everywhere the threshold is not met?
Many thanks for reading; any feedback is much appreciated.
import org.openkinect.freenect.*;
import org.openkinect.freenect2.*;
import org.openkinect.processing.*;
import org.openkinect.tests.*;
Kinect kinect;
int kinectWidth = 640;
int kinectHeight = 480;
PImage cam = createImage(640, 480, RGB);
int minThresh = 300;
int maxThresh = 700;
float reScale;
void setup() {
size(640, 480, P3D);
kinect = new Kinect(this);
kinect.enableMirror(true);
kinect.initDepth();
reScale = (float) width / kinectWidth;
}
void draw() {
cam.loadPixels();
int[] depth = kinect.getRawDepth();
for (int x = 0; x < kinect.width; x++) {
for (int y = 0; y < kinect.height; y++) {
int offset = x + y * kinect.width;
int d = depth[offset];
if (d > minThresh && d < maxThresh) {
cam.pixels[offset] = color(255,0,200);
} else{
cam.pixels[offset] = color(0);
}
}
}
cam.updatePixels();
background(255);
image(cam,0,0);
}
I worked on this code using other libraries, but it cannot open or detect the Kinect.
import org.openkinect.freenect.*;
import org.openkinect.processing.*;
static class CameraParams {
static float cx = 254.878f;
static float cy = 205.395f;
static float fx = 365.456f;
static float fy = 365.456f;
static float k1 = 0.0905474;
static float k2 = -0.26819;
static float k3 = 0.0950862;
static float p1 = 0.0;
static float p2 = 0.0;
}
Kinect2 kin;
float a=0;
float[] dlu=new float[2048];
ArrayList <ArrayList> clouds;
int count=120;
public void setup ()
{
size ( 1280, 720, P3D);
kin = new Kinect2(this);
kin.initDevice();
kin.initDepth();
for (int i=0; i<dlu.length; i++)
{
dlu[i]=rawDepthToMeters(i);
}
clouds=new ArrayList<ArrayList>();
}
public void draw ()
{
background(0);
ArrayList cloud=new ArrayList();
if (clouds.size()>count)
{
clouds.remove(0);
}
clouds.add(cloud);
int[] d=kin.getRawDepth();
for (int i=0; i<kin.depthWidth; i+=2)
{
for (int j=0; j<kin.depthHeight; j+=2)
{
int offset=i+j*kin.depthWidth;
int dep=d[offset];
PVector p=depthToPointCloudPos(i,j,dep);
cloud.add(p);
}
}
pushMatrix();
stroke(255);
beginShape(POINTS);
if (clouds.size()>count)
{
ArrayList currCloud=clouds.get(0);
for (int i=0; i<currCloud.size();i++)
{
PVector vec=(PVector)currCloud.get(i);
vertex(-vec.x, vec.y, vec.z);
}
}
endShape();
popMatrix();
}
PVector depthToPointCloudPos(int x, int y, float depVal)
{
PVector p=new PVector();
p.z=(depVal);
p.x=(x - CameraParams.cx)*p.z/CameraParams.fx;
p.y=(y - CameraParams.cy)*p.z/CameraParams.fy;
return p;
}
// These functions come from: http://graphics.stanford.edu/~mdfisher/Kinect.html
float rawDepthToMeters(int depthValue) {
if (depthValue < 2047) {
return (float)(1.0 / ((double)(depthValue) * -0.0030711016 + 3.3309495161));
}
return 0.0f;
}
// Only needed to make sense of the output depth values from the kinect
PVector depthToWorld(int x, int y, int depthValue) {
final double fx_d = 1.0 / 5.9421434211923247e+02;
final double fy_d = 1.0 / 5.9104053696870778e+02;
final double cx_d = 3.3930780975300314e+02;
final double cy_d = 2.4273913761751615e+02;
// Drawing the result vector to give each point its three-dimensional space
PVector result = new PVector();
double depth = dlu[depthValue];//rawDepthToMeters(depthValue);
result.x = (float)((x - cx_d) * depth * fx_d);
result.y = (float)((y - cy_d) * depth * fy_d);
result.z = (float)(depth);
return result;
}
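One thing worth checking on the "cannot open or detect the Kinect" problem: every other Kinect2 sketch in this thread calls initDepth() before initDevice(), whereas this one opens the device first and then enables the depth stream. A minimal sketch of setup() with the order swapped (the rest of the sketch unchanged); this is only a suggestion based on the other working examples above, not a confirmed fix.
public void setup() {
  size(1280, 720, P3D);
  kin = new Kinect2(this);
  kin.initDepth();    // enable the depth stream first...
  kin.initDevice();   // ...then open the device, as in the other Kinect2 sketches above
  for (int i = 0; i < dlu.length; i++) {
    dlu[i] = rawDepthToMeters(i);
  }
  clouds = new ArrayList<ArrayList>();
}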
Hi!
My sketch works but when I try to add the Kinect I can't get it to work. Ideally I'd like to project the piece and have people be able to play with it. Right now it's not possible. Not sure what I'm doing wrong. Please help! Thanks!
import org.openkinect.freenect.*;
import org.openkinect.processing.*;
Kinect kinect;
int f, num = 50, vari = 25;
float sz;
int col[] = new int[num];
boolean save;
// Depth image
PImage depthImg;
// Which pixels do we care about?
// These thresholds can also be found with a variety of methods
float minDepth = 996;
float maxDepth = 2493;
// What is the kinect's angle
float angle;
void setup() {
size(640, 480);
colorMode(HSB, 255, 50, 50);
frameRate(2.5);
for (int i=0; i<num; i++) {
col[i]= (int) random(255);
}
kinect = new Kinect(this);
kinect.initDepth();
angle = kinect.getTilt();
// Blank image
depthImg = new PImage(kinect.width, kinect.height);
}
void draw() {
background(#FFFFFF);
for (int i=0; i<num; i++) {
float x = width/2 + random(-vari, vari);
float y = height/2 + random(-vari, vari);
pushMatrix();
translate(x, y);
stroke(col[i], 100, 100, 50);
strokeWeight(width/5);
noFill();
sz = width/5;
ellipse(x, y, sz, sz);
popMatrix();
}
image(kinect.getDepthImage(), 0, 0);
// Calibration
//minDepth = map(mouseX,0,width, 0, 4500);
//maxDepth = map(mouseY,0,height, 0, 4500);
// Threshold the depth image
int[] rawDepth = kinect.getRawDepth();
for (int i=0; i < rawDepth.length; i++) {
if (rawDepth[i] >= minDepth && rawDepth[i] <= maxDepth) {
depthImg.pixels[i] = color(255);
} else {
depthImg.pixels[i] = color(0);
}
}
// Draw the thresholded image
depthImg.updatePixels();
image(depthImg, kinect.width, 0);
//Comment for Calibration
fill(0);
text("TILT: " + angle, 10, 20);
text("THRESHOLD: [" + minDepth + ", " + maxDepth + "]", 10, 36);
//Calibration Text
//fill(255);
//textSize(32);
//text(minDepth + " " + maxDepth, 10, 64);
}
// Adjust the angle and the depth threshold min and max
void keyPressed() {
if (key == CODED) {
if (keyCode == UP) {
angle++;
} else if (keyCode == DOWN) {
angle--;
}
angle = constrain(angle, 0, 30);
kinect.setTilt(angle);
} else if (key == 'a') {
minDepth = constrain(minDepth+10, 0, maxDepth);
} else if (key == 's') {
minDepth = constrain(minDepth-10, 0, maxDepth);
} else if (key == 'z') {
maxDepth = constrain(maxDepth+10, minDepth, 2047);
} else if (key =='x') {
maxDepth = constrain(maxDepth-10, minDepth, 2047);
}
}
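It is hard to tell exactly what "can't get it to work" means here, but two things stand out: the window is 640x480 while image(depthImg, kinect.width, 0) draws at x = 640, which is entirely off-screen, and image(kinect.getDepthImage(), 0, 0) fills the whole window, covering the circles drawn just before it. A minimal change that at least makes both images visible side by side, assuming the rest of the sketch stays the same, is to widen the window.
void setup() {
  size(1280, 480);   // room for the raw depth image at x = 0 and the thresholded one at x = 640
  colorMode(HSB, 255, 50, 50);
  frameRate(2.5);
  for (int i = 0; i < num; i++) {
    col[i] = (int) random(255);
  }
  kinect = new Kinect(this);
  kinect.initDepth();
  angle = kinect.getTilt();
  depthImg = new PImage(kinect.width, kinect.height);
}
If the goal is for people to interact with the circle piece, another option is to drop the two image() calls entirely and use the thresholded pixel count to drive the circle parameters instead.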
Hi!
Trying to run a Processing and Kinect version 1 interactive sketch. Running into errors. One error: cannot find anything named "num". This is in reference to my Processing sketch. Please help! Thank you!
import org.openkinect.freenect.*;
import org.openkinect.processing.*;
Kinect kinect;
// Depth image
PImage depthImg;
// Which pixels do we care about?
// These thresholds can also be found with a variety of methods
float minDepth = 996;
float maxDepth = 2493;
// What is the kinect's angle
float angle;
void setup() {
size(640, 480);
colorMode(HSB, 255, 50, 50);
frameRate(2.5);
for (int i=0; i<num; i++) {
col[i]= (int) random(255);
}
kinect = new Kinect(this);
kinect.initDepth();
angle = kinect.getTilt();
// Blank image
depthImg = new PImage(kinect.width, kinect.height);
}
void draw() {
background(#FFFFFF);
for (int i=0; i<num; i++) {
float x = width/2 + random(-vari, vari);
float y = height/2 + random(-vari, vari);
stroke(col[i], 100, 100, 50);
strokeWeight(width/5);
noFill();
sz = width/5;
ellipse(x, y, sz, sz);
}
image(kinect.getDepthImage(), 0, 0);
// Calibration
//minDepth = map(mouseX,0,width, 0, 4500);
//maxDepth = map(mouseY,0,height, 0, 4500);
// Threshold the depth image
int[] rawDepth = kinect.getRawDepth();
for (int i=0; i < rawDepth.length; i++) {
if (rawDepth[i] >= minDepth && rawDepth[i] <= maxDepth) {
depthImg.pixels[i] = color(255);
} else {
depthImg.pixels[i] = color(0);
}
}
// Draw the thresholded image
depthImg.updatePixels();
image(depthImg, kinect.width, 0);
//Comment for Calibration
fill(0);
text("TILT: " + angle, 10, 20);
text("THRESHOLD: [" + minDepth + ", " + maxDepth + "]", 10, 36);
//Calibration Text
//fill(255);
//textSize(32);
//text(minDepth + " " + maxDepth, 10, 64);
}
// Adjust the angle and the depth threshold min and max
void keyPressed() {
if (key == CODED) {
if (keyCode == UP) {
angle++;
} else if (keyCode == DOWN) {
angle--;
}
angle = constrain(angle, 0, 30);
kinect.setTilt(angle);
} else if (key == 'a') {
minDepth = constrain(minDepth+10, 0, maxDepth);
} else if (key == 's') {
minDepth = constrain(minDepth-10, 0, maxDepth);
} else if (key == 'z') {
maxDepth = constrain(maxDepth+10, minDepth, 2047);
} else if (key =='x') {
maxDepth = constrain(maxDepth-10, minDepth, 2047);
}
}
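"Cannot find anything named num" just means the globals from the earlier version of this sketch were dropped when the code was trimmed: setup() and draw() still use num, vari, col[] and sz, but they are never declared. Restoring the declarations from the earlier post above fixes that error; a minimal sketch of what goes back at the top of the file:
// Globals used by the circle drawing (from the earlier version of the sketch)
int num = 50;               // number of circles
int vari = 25;              // how far each circle wanders from the center
float sz;                   // circle size
int[] col = new int[num];   // one hue per circle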