
  • Problem with Screen for a Kinect User Depth Map Drawn with a Class

    Hello,

    I created a Kinect user depth map and drew it with a Point class (for manipulating the particles), which will be going into Resolume through Spout. The problem arose when I previewed the result: there seems to be another copy of the scene in the upper right of the screen. I am not quite sure how to work around it. The following is part of the code:

    void draw(){
      bodyJam.update();
      background(0,0,0);
      translate(width/2,height/2,0);
      rotateX(rotX);
      rotateY(rotY);
      scale(zoomF);
      strokeWeight(3);
      int[] userMap=bodyJam.userMap();
      int[] depthMap=bodyJam.depthMap();
      int steps=5;
      int index;
      PVector realWorldBlurp;
      translate(0,0,-1000);
      PVector[] realWorldMap=bodyJam.depthMapRealWorld();
      beginShape(TRIANGLES);

      // refresh every node's neighbor count
      for(int i=0; i<nodes.size(); i++){
        MovingNode currentNode = nodes.get(i);
        currentNode.setNumNeighbors( countNumNeighbors(currentNode,maxDistance) );
      }

      // remove nodes that have left the canvas (iterating backwards so a
      // removal does not skip the element that follows it)
      for(int i=nodes.size()-1; i>=0; i--){
        MovingNode currentNode = nodes.get(i);
        if(currentNode.x > width || currentNode.x < 0 || currentNode.y > height || currentNode.y < 0){
          nodes.remove(currentNode);
        }
      }

      // draw the surviving nodes
      for(int i = 0; i < nodes.size(); i++){
        MovingNode currentNode = nodes.get(i);
        currentNode.display();
      }

      // sample the depth map and spawn new nodes on user pixels
      for(int y=0;y<bodyJam.depthHeight();y+=steps){
        for(int x=0;x<bodyJam.depthWidth();x+=steps){
          index=x+y*bodyJam.depthWidth();
          if(depthMap[index]>0){
            // note: this translate() runs once per sampled depth pixel and
            // accumulates, shifting everything drawn after it
            translate(width/2,height/2,0);
            realWorldBlurp=realWorldMap[index];
            if(userMap[index]==0)
              noStroke();
            else
              addNewNode(realWorldBlurp.x,realWorldBlurp.y,realWorldBlurp.z,random(-dx,dx),random(-dx,dx));
          }
        }
      }
      endShape();

      spout.sendTexture();
    }
    
    void addNewNode(float xPos, float yPos, float zPos, float dx, float dy){
      MovingNode node = new MovingNode(xPos+dx,yPos+dy,zPos);
      node.setNumNeighbors( countNumNeighbors(node,maxDistance) );
      if(node.numNeighbors < maxNeighbors){
        nodes.add(node);
      }
    }
    
    int countNumNeighbors(MovingNode nodeA, float maxNeighborDistance){
      int numNeighbors = 0;
      nodeA.clearNeighbors();
      for(int i = 0; i < nodes.size(); i++){
        MovingNode nodeB = nodes.get(i);
        float distance = sqrt((nodeA.x-nodeB.x)*(nodeA.x-nodeB.x) + (nodeA.y-nodeB.y)*(nodeA.y-nodeB.y) + (nodeA.z-nodeB.z)*(nodeA.z-nodeB.z));
        if(distance < maxNeighborDistance){
          numNeighbors++;
          nodeA.addNeighbor(nodeB);
        }
      }
      return numNeighbors;
    }
    

    Some lines from the Point class are as follows:

      MovingNode(float xPos, float yPos, float zPos){
        x = xPos;
        y = yPos;
        z = zPos;
        numNeighbors = 0;
        neighbors = new ArrayList<MovingNode>();
      }

      void display(){
        move();
        strokeWeight(3);
        stroke(200);
        point(x,y,z);
      }
    
      void move(){
        xAccel = random(-accelValue,accelValue);
        yAccel = random(-accelValue,accelValue);
        zAccel = random(-accelValue,accelValue);
        xVel += xAccel;
        yVel += yAccel;
        zVel += zAccel;
        x += xVel;
        y += yVel;
        z += zVel;
      }
    
      void addNeighbor(MovingNode node){
        neighbors.add(node);
      }
    
      void setNumNeighbors(int num){
        numNeighbors = num;
      }
    
      void clearNeighbors(){
        neighbors = new ArrayList<MovingNode>();
      }
    

    Any help is much appreciated. Thank you

  • Move a servo motor (Ax-12) with arduino, processing and kinect

    I also ran your Processing code; it did not run properly.

    Try this (just change "COM3" to your Arduino's port):

    import SimpleOpenNI.*;
    import processing.serial.*;
    
    int pos=0;
    Serial myPort;
    
    SimpleOpenNI context;
    color[] userClr = new color[] { 
      color(255, 0, 0), 
      color(0, 255, 0), 
      color(0, 0, 255), 
      color(255, 255, 0), 
      color(255, 0, 255), 
      color(0, 255, 255)
    };
    PVector com = new PVector(); 
    PVector com2d = new PVector();
    
    void setup() {
      size(640, 480);
      println(Serial.list());
    
      // rename COM3 to the arduino port
      myPort = new Serial(this,"COM3", 9600);
    
      context = new SimpleOpenNI(this);
      if (context.isInit() == false) {
        println("Can't init SimpleOpenNI, maybe the camera is not connected!"); 
        exit();
        return;
      }
      // enable depthMap generation 
      context.enableDepth();
      // enable skeleton generation for all joints
      context.enableUser();
      context.enableRGB();
      background(200, 0, 0);
      stroke(0, 0, 255);
      strokeWeight(3);
      smooth();
    }
    
    void draw() {
      // update the cam
      context.update();
      // draw depthImageMap
      //image(context.depthImage(),0,0);
      image(context.userImage(), 0, 0);
      // draw the skeleton if it's available
      IntVector userList = new IntVector();
      context.getUsers(userList);
    
        if (userList.size() > 0) {
          int userId = userList.get(0);
          drawSkeleton(userId);
        }
    
    }
    
    
    
    // draw the skeleton with the selected joints
    void drawSkeleton(int userId) {
    
    
      // here you define which body part to track
      PVector jointPos = new PVector();
      context.getJointPositionSkeleton(userId, SimpleOpenNI.SKEL_HEAD, jointPos);
      PVector convertedHead = new PVector();
      context.convertRealWorldToProjective(jointPos, convertedHead);
      // draw an ellipse over the tracked body part
      fill(255, 0, 0);
      ellipse(convertedHead.x, convertedHead.y, 20, 20);
    
      //draw YOUR Right Shoulder
      PVector jointPosLS = new PVector();
      context.getJointPositionSkeleton(userId, SimpleOpenNI.SKEL_LEFT_SHOULDER, jointPosLS);
      PVector convertedLS = new PVector();
      context.convertRealWorldToProjective(jointPosLS, convertedLS);
      //int LS = convertedLS.x, convertedLS.y
    
      //draw YOUR Right Elbow
      PVector jointPosLE = new PVector();
      context.getJointPositionSkeleton(userId, SimpleOpenNI.SKEL_LEFT_ELBOW, jointPosLE);
      PVector convertedLE = new PVector();
      context.convertRealWorldToProjective(jointPosLE, convertedLE);
      fill(200, 200, 200);
      ellipse(convertedLE.x, convertedLE.y, 20, 20);
    
    
      // angle between shoulder and elbow
      int anguloLSE =int(degrees(atan2(convertedLS.x - convertedLE.x, convertedLS.y - convertedLE.y)));
      println(anguloLSE); 
      myPort.write(anguloLSE);
    
    
    
      // if you want to draw the whole skeleton, keep the lines below
      context.drawLimb(userId, SimpleOpenNI.SKEL_HEAD, SimpleOpenNI.SKEL_NECK);
    
      context.drawLimb(userId, SimpleOpenNI.SKEL_NECK, SimpleOpenNI.SKEL_LEFT_SHOULDER);
      context.drawLimb(userId, SimpleOpenNI.SKEL_LEFT_SHOULDER, SimpleOpenNI.SKEL_LEFT_ELBOW);
      context.drawLimb(userId, SimpleOpenNI.SKEL_LEFT_ELBOW, SimpleOpenNI.SKEL_LEFT_HAND);
    
      context.drawLimb(userId, SimpleOpenNI.SKEL_NECK, SimpleOpenNI.SKEL_RIGHT_SHOULDER);
      context.drawLimb(userId, SimpleOpenNI.SKEL_RIGHT_SHOULDER, SimpleOpenNI.SKEL_RIGHT_ELBOW);
      context.drawLimb(userId, SimpleOpenNI.SKEL_RIGHT_ELBOW, SimpleOpenNI.SKEL_RIGHT_HAND);
    
      context.drawLimb(userId, SimpleOpenNI.SKEL_LEFT_SHOULDER, SimpleOpenNI.SKEL_TORSO);
      context.drawLimb(userId, SimpleOpenNI.SKEL_RIGHT_SHOULDER, SimpleOpenNI.SKEL_TORSO);
    
      context.drawLimb(userId, SimpleOpenNI.SKEL_TORSO, SimpleOpenNI.SKEL_LEFT_HIP);
      context.drawLimb(userId, SimpleOpenNI.SKEL_LEFT_HIP, SimpleOpenNI.SKEL_LEFT_KNEE);
      context.drawLimb(userId, SimpleOpenNI.SKEL_LEFT_KNEE, SimpleOpenNI.SKEL_LEFT_FOOT);
    
      context.drawLimb(userId, SimpleOpenNI.SKEL_TORSO, SimpleOpenNI.SKEL_RIGHT_HIP);
      context.drawLimb(userId, SimpleOpenNI.SKEL_RIGHT_HIP, SimpleOpenNI.SKEL_RIGHT_KNEE);
      context.drawLimb(userId, SimpleOpenNI.SKEL_RIGHT_KNEE, SimpleOpenNI.SKEL_RIGHT_FOOT);
    }
    
    // -----------------------------------------------------------------
    // SimpleOpenNI events
    
    
    void onNewUser(SimpleOpenNI curContext, int userId)
    {
      println("onNewUser - userId: " + userId);
      println("\tstart tracking skeleton");
    
      curContext.startTrackingSkeleton(userId);
    }
    
    void onLostUser(SimpleOpenNI curContext, int userId)
    {
      println("onLostUser - userId: " + userId);
    }
    
    void onVisibleUser(SimpleOpenNI curContext, int userId)
    {
      //println("onVisibleUser - userId: " + userId);
    }
    void keyPressed()
    {
      switch(key)
      {
      case ' ':
        context.setMirror(!context.mirror());
        break;
      }
    }
    
  • Why am I getting errors on my server/client data parsing? Why does my data seem to stop sending?

    I am currently trying to set up a LAN between two computers, to send motion tracking data from one location to another. My code appears to work for a random length of time (anything from 2 seconds up to a minute), then I get one of two errors on different lines of code: StringIndexOutOfBoundsException: String index out of range: -1, or ArrayIndexOutOfBoundsException: 1. I have commented in the code which lines I am getting the errors on.

    I really have no idea why this is happening - could it be something to do with the rate at which the data is being sent i.e. frameRate? Or am I doing something wrong when parsing the strings into arrays?

    There doesn't seem to be any correlation in the data it crashes on; all I can think of at the moment is that the data isn't being communicated quickly enough. I tried a frameRate of 10 and it seemed to work for longer, but I'm not sure if that was just a coincidence. The data continues to be generated and sent from the server, but the client stops responding.

    Any help would be greatly appreciated.

    This is the client code (the one it crashes on that needs work):

    import processing.net.*;
    
    Client c;
    String input;
    String inputB;
    String data[];
    float xArr[];
    float yArr[];
    
    void setup() 
    {
      size(displayWidth, displayHeight);
      background(204);
      stroke(0);
      frameRate(10); // Slow it down a little
      // Connect to the server's IP address and port
      c = new Client(this, "192.168.0.2", 61514); // Replace with your server's IP and port
    }
    
    void draw() 
    {
      // Receive data from server
      if (c.available() > 0) {
        input = c.readString();
    
        println("Input");
        println(input);
    
        inputB = input.substring(0, input.indexOf("\n")); // Only up to the newline - this is where the first error happens 
        data = split(inputB, '#'); // Split values into an array
    
        println("Data");
        printArray(data);
    
        xArr = float(split(data[0], ":"));
        yArr = float(split(data[1], ":")); //this is where the second error happens (if it gets past the first one)
    
        for(int i = 0; i < yArr.length ; i++){
        strokeWeight(4);
        point(xArr[i],yArr[i]);
      }
      println("Arrays");
      println(xArr);
      println(yArr);
    
      }
    
      //println(input);
      //println(data);
    
    
    
    }
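
    A likely culprit (an assumption, not tested): readString() returns whatever bytes have arrived so far, which can be a partial message with no newline in it yet; indexOf("\n") then returns -1, and the later split() produces fewer than two fields, which matches both exceptions. A minimal sketch of a guarded read loop using readStringUntil(), so only complete, newline-terminated messages get parsed:

    // sketch of a guarded client read (untested); assumes the server
    // terminates every message with '\n', as the server code below does
    void draw()
    {
      String line = c.readStringUntil('\n'); // null until a full line has arrived
      if (line == null) return;
      String[] data = split(trim(line), '#');
      if (data.length < 2) return;           // guard against malformed packets
      float[] xArr = float(split(data[0], ":"));
      float[] yArr = float(split(data[1], ":"));
      int n = min(xArr.length, yArr.length);
      strokeWeight(4);
      for (int i = 0; i < n; i++) {
        point(xArr[i], yArr[i]);
      }
    }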
    

    This is the server code:

    //setting up the server 
    import processing.net.*;
    Server s;
    Client c;
    String input;
    int data[];
    
    //imports Kinect lib
    import SimpleOpenNI.*;
    
    //defines variable for kinect object
    SimpleOpenNI kinect;
    
    //declare variables for mapped x y values
    float x, y;
    
    //declare variable for number of people 
    float peopleNum = 0;
    
    //initialise other variables that we might need
    int userId;
    float inches;
    int count = 0;
    
    //set up arrays for all values - current x/y, previous x/y, depth 
    float[] oldXVals;
    float[] newXVals;
    float[] oldYVals;
    float[] newYVals;
    float[] storeDepth;
    
    void setup() {
    
      //set the display size to full screen
      size(displayWidth, displayHeight);
    
      //declares new kinect object
      kinect = new SimpleOpenNI(this);
    
      //enable depth image
      kinect.enableDepth();
    
      //enable user detection
      kinect.enableUser();
    
      frameRate(10);
    
      //set background to white 
      background(255);
    
      //initialise starting array lengths
      oldXVals = new float[20];
      newXVals = new float[20];
      oldYVals = new float[20];
      newYVals = new float[20];
      storeDepth = new float[20];
    
      //start a simple server on a port
      s = new Server(this, 61514); // Start a simple server on a port
    
    }
    
    void draw() {
    
      //updates depth image
      kinect.update();
    
      //draws depth image - if we don't need this comment out 
      //image(kinect.depthImage(),0,0,displayWidth,displayHeight);
    
      //array to store depth values
      int[] depthValues = kinect.depthMap();
    
      //access all users currently available to us 
      IntVector userList = new IntVector();
      kinect.getUsers(userList);
    
      //sets variable peopleNum to the number of people currently detected by the kinect
      peopleNum = userList.size();
    
      //change the length of the array to the number of people currently present
      newXVals = new float[int (peopleNum)];
      newYVals = new float[int (peopleNum)];
      storeDepth = new float[int (peopleNum)];
    
      //for every user detected, do this 
      for (int i = 0; i<userList.size (); i++) {
    
        //get the userId
        userId = userList.get(i);
    
        //declare PVector position to store x/y position 
        PVector position = new PVector();
    
        //get the position
        kinect.getCoM(userId, position);
        kinect.convertRealWorldToProjective(position, position);
    
        //calculate depth
        //find the position of the center of mass *640 to give us a number to find in depth array
        int comPosition = int(position.x) + (int(position.y) * 640);    
        //locate it in the depth array
        int comDepth = depthValues[comPosition];
        //calculate distance in inches
        inches = comDepth / 25.4;
    
        //map x and y coordinates to full screen
        x = map(position.x, 0, 640, 0, displayWidth);
        y = map(position.y, 0, 480, 0, displayHeight);
    
        //store current values for each user in arrays
        newXVals[i] = x;
        newYVals[i] = y;
    
        //store depth values in array for each user (just the current values)
        storeDepth[i] = inches;  
    
        //DRAW THINGS HERE
        //To draw with only the current values, use newXVals and newYVals
        //i corresponds to position in array
    
        //setting these parameters means there is no "drop off" at the edges
        if (newXVals[i] >= 50 && newXVals[i] <= width - 50) {
          if (newYVals[i] >= 50 && newYVals[i] <= height - 50)
          //setting this parameter means that if a person is too close (less than 35 inches), they are ignored
            if (storeDepth[i] >= 35) {
              strokeWeight(4);
              point(newXVals[i], newYVals[i]);
            }
        }
    
        //prints each userId with its corresponding x/y values
        //println("User: " + userId + " xPos: " + x + " yPos: " + y + " depth: " + inches); 
        //prints the number of people present continuously 
        //println("There are " + numPeople + " people present");
    
      }
    
     if(newXVals.length >= 1 && newYVals.length >= 1){
     String[] newXValsString = nfc(newXVals, 1);
     //println(newXValsString);  
     String joinedXVals = join(newXValsString, ":");
    
      String[] newYValsString = nfc(newYVals, 1);
     //println(newYValsString);  
     String joinedYVals = join(newYValsString, ":");
    
     s.write(joinedXVals + "#" + joinedYVals + "\n");
     println(joinedXVals + "#" + joinedYVals + "\n");
    
    
     }
    
      //copy new value arrays to old value arrays
      arrayCopy(newXVals, oldXVals);
      arrayCopy(newYVals, oldYVals);
    
    
    }
    
  • userMap questions

    I have a question about how to access userMap() data, how to manipulate it, and how to convert it to a PGraphics for further manipulation. I wrote the following code; it works nicely, differentiating between users (user 1, user 2) and filling each silhouette with a different PGraphics. bgG is a PGraphics that I used to fill the background.

    void draw() {
      image(bgG, 0, 0);
    
      kinect.update();
    
      if (kinect.getNumberOfUsers() > 0) {
        depthValues=kinect.depthMap();
        userMap=kinect.userMap();
    
        g1.loadPixels();
        g2.loadPixels();
        loadPixels();
    
        for (int i = 0; i < userMap.length; i++) { 
          switch(userMap[i]) {
          case 1:        
            pixels[i]=g1.pixels[i];
            break;
          case 2:        
            pixels[i]=g2.pixels[i];
            break;
          }
        }
        updatePixels();
      }
    }
    

    My questions are: 1. What pixels am I loading with loadPixels()? 2. How can I access each user's pixels and use them as a mask, or how can I convert them to a PGraphics or PImage?

    My goal is to isolate each user and manipulate their pixel data separately.
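
    For the mask part of that, a minimal untested sketch: since userMap() labels each depth pixel with a user id, a per-user PImage mask can be built by setting every pixel that matches one id to white. (loadPixels() with no object in front of it loads the sketch window's own pixels[] array.)

    // build an ALPHA mask for one user id out of the userMap (untested)
    PImage userMask(int[] userMap, int userId, int w, int h) {
      PImage m = createImage(w, h, ALPHA);
      m.loadPixels();
      for (int i = 0; i < userMap.length; i++) {
        m.pixels[i] = (userMap[i] == userId) ? color(255) : color(0);
      }
      m.updatePixels();
      return m;
    }

    Something like `g1.mask(userMask(kinect.userMap(), 1, kinect.depthWidth(), kinect.depthHeight()))` would then isolate user 1's silhouette in g1.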

    thank you

  • Move a servo motor (Ax-12) with arduino, processing and kinect

    Hey guys,

    I'm having a problem with my code. I want to move a Dynamixel servo according to the movement of a person, using the Kinect and Processing. When I tried just the Arduino moving the servo randomly it worked perfectly, but when I try with Processing + Kinect it doesn't move at all. I'll appreciate any help :) Oh, and I'm new at this... so please correct me if y'all find dumb mistakes :P

    Arduino code:

    #include <DynamixelSerial.h>
    int angulo=0;
    
    void setup() {
      Dynamixel.begin(1000000, 2); // initialize the servo at 1 Mbps and control pin 2
      Serial.begin(9600);
    }
    
    void loop() {
    
      if (Serial.available() > 0) {
        // get incoming byte:
        angulo = Serial.read();
      }
    
      Dynamixel.ledStatus(1, ON);
      Dynamixel.move(1,angulo);
    
      delay(1000);
    
    
    }
    

    Processing code:

    import SimpleOpenNI.*;
    import processing.serial.*;
    import cc.arduino.*;
    Arduino arduino;
    int pos=0;
    Serial myPort;
    
    SimpleOpenNI context;
    color[] userClr = new color[] { 
      color(255, 0, 0), 
      color(0, 255, 0), 
      color(0, 0, 255), 
      color(255, 255, 0), 
      color(255, 0, 255), 
      color(0, 255, 255)
    };
    PVector com = new PVector(); 
    PVector com2d = new PVector();
    
    void setup() {
      size(640, 480);
      println(Serial.list());
      String portName = Serial.list()[1];
      myPort = new Serial(this, portName, 9600);
      //arduino = new Arduino(this, Arduino.list()[1], 9600); //your offset may vary
      //arduino.pinMode(2);
      context = new SimpleOpenNI(this);
      if (context.isInit() == false) {
        println("Can't init SimpleOpenNI, maybe the camera is not connected!"); 
        exit();
        return;
      }
      // enable depthMap generation 
      context.enableDepth();
      // enable skeleton generation for all joints
      context.enableUser();
      context.enableRGB();
      background(200, 0, 0);
      stroke(0, 0, 255);
      strokeWeight(3);
      smooth();
    }
    
    void draw() {
      // update the cam
      context.update();
      // draw depthImageMap
      //image(context.depthImage(),0,0);
      image(context.userImage(), 0, 0);
      // draw the skeleton if it's available
      int[] userList = context.getUsers();
      for (int i=0; i<userList.length; i++) {
        if (context.isTrackingSkeleton(userList[i])) {
          stroke(userClr[ (userList[i] - 1) % userClr.length ] );
          drawSkeleton(userList[i]);
        }
      }
    }
    
    
    
    // draw the skeleton with the selected joints
    void drawSkeleton(int userId) {
    
    
      // here you define which body part to track
      PVector jointPos = new PVector();
      context.getJointPositionSkeleton(userId, SimpleOpenNI.SKEL_HEAD, jointPos);
      PVector convertedHead = new PVector();
      context.convertRealWorldToProjective(jointPos, convertedHead);
      // draw an ellipse over the tracked body part
      fill(255, 0, 0);
      ellipse(convertedHead.x, convertedHead.y, 20, 20);
    
      //draw YOUR Right Shoulder
      PVector jointPosLS = new PVector();
      context.getJointPositionSkeleton(userId, SimpleOpenNI.SKEL_LEFT_SHOULDER, jointPosLS);
      PVector convertedLS = new PVector();
      context.convertRealWorldToProjective(jointPosLS, convertedLS);
      //int LS = convertedLS.x, convertedLS.y
    
      //draw YOUR Right Elbow
      PVector jointPosLE = new PVector();
      context.getJointPositionSkeleton(userId, SimpleOpenNI.SKEL_LEFT_ELBOW, jointPosLE);
      PVector convertedLE = new PVector();
      context.convertRealWorldToProjective(jointPosLE, convertedLE);
      fill(200, 200, 200);
      ellipse(convertedLE.x, convertedLE.y, 20, 20);
    
    
      // angle between shoulder and elbow
      int anguloLSE =int(degrees(atan2(convertedLS.x - convertedLE.x, convertedLS.y - convertedLE.y)));
      println(anguloLSE); 
      myPort.write(anguloLSE);
    
    
    
      // if you want to draw the whole skeleton, keep the lines below
      context.drawLimb(userId, SimpleOpenNI.SKEL_HEAD, SimpleOpenNI.SKEL_NECK);
    
      context.drawLimb(userId, SimpleOpenNI.SKEL_NECK, SimpleOpenNI.SKEL_LEFT_SHOULDER);
      context.drawLimb(userId, SimpleOpenNI.SKEL_LEFT_SHOULDER, SimpleOpenNI.SKEL_LEFT_ELBOW);
      context.drawLimb(userId, SimpleOpenNI.SKEL_LEFT_ELBOW, SimpleOpenNI.SKEL_LEFT_HAND);
    
      context.drawLimb(userId, SimpleOpenNI.SKEL_NECK, SimpleOpenNI.SKEL_RIGHT_SHOULDER);
      context.drawLimb(userId, SimpleOpenNI.SKEL_RIGHT_SHOULDER, SimpleOpenNI.SKEL_RIGHT_ELBOW);
      context.drawLimb(userId, SimpleOpenNI.SKEL_RIGHT_ELBOW, SimpleOpenNI.SKEL_RIGHT_HAND);
    
      context.drawLimb(userId, SimpleOpenNI.SKEL_LEFT_SHOULDER, SimpleOpenNI.SKEL_TORSO);
      context.drawLimb(userId, SimpleOpenNI.SKEL_RIGHT_SHOULDER, SimpleOpenNI.SKEL_TORSO);
    
      context.drawLimb(userId, SimpleOpenNI.SKEL_TORSO, SimpleOpenNI.SKEL_LEFT_HIP);
      context.drawLimb(userId, SimpleOpenNI.SKEL_LEFT_HIP, SimpleOpenNI.SKEL_LEFT_KNEE);
      context.drawLimb(userId, SimpleOpenNI.SKEL_LEFT_KNEE, SimpleOpenNI.SKEL_LEFT_FOOT);
    
      context.drawLimb(userId, SimpleOpenNI.SKEL_TORSO, SimpleOpenNI.SKEL_RIGHT_HIP);
      context.drawLimb(userId, SimpleOpenNI.SKEL_RIGHT_HIP, SimpleOpenNI.SKEL_RIGHT_KNEE);
      context.drawLimb(userId, SimpleOpenNI.SKEL_RIGHT_KNEE, SimpleOpenNI.SKEL_RIGHT_FOOT);
    }
    
    // -----------------------------------------------------------------
    // SimpleOpenNI events
    
    
    void onNewUser(SimpleOpenNI curContext, int userId)
    {
      println("onNewUser - userId: " + userId);
      println("\tstart tracking skeleton");
    
      curContext.startTrackingSkeleton(userId);
    }
    
    void onLostUser(SimpleOpenNI curContext, int userId)
    {
      println("onLostUser - userId: " + userId);
    }
    
    void onVisibleUser(SimpleOpenNI curContext, int userId)
    {
      //println("onVisibleUser - userId: " + userId);
    }
    void keyPressed()
    {
      switch(key)
      {
      case ' ':
        context.setMirror(!context.mirror());
        break;
      }
    }
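
    One more thing worth checking (a guess, untested): Processing's Serial.write(int) sends a single byte, so values outside 0-255 get truncated, and the atan2() result here can be negative. Constraining the angle before sending keeps the byte well defined:

      // clamp to the servo's usable range before writing a single byte
      int anguloOut = constrain(anguloLSE, 0, 180);
      myPort.write(anguloOut);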
    
  • Chaining Shaders and Feedback

    The idea is that the camera feed will be replaced with a looped video.

    The video will go through a vertex-displacement shader, and be rendered as a point-cloud. Hence my interest in Geometry Shaders. I had thoughts of rendering each point as a screen-facing quad/billboard, so I could add a nice depth of field shader to render more/less soft-edged circles in each billboard according to Z-position. But I digress..

    The point-cloud will then go through the Conway shader and Feedback passes.

    The 'runFX' switch is supposed to be triggered when the brightest pixel in a live Kinect (v.1) depthmap image exceeds a brightness threshold, so the output will switch between the point-cloud image and the Conway shader.

    At the moment, runFX is passed into the Conway shader, and determines if the shader simply passes through the input texture, or starts the Game of Life and displays that instead.

    I'm thinking, rather than doing this in the fragment shader, though, it would probably be better to create two different render functions in the sketch itself, one that simply renders the point cloud straight to the canvas, and the other that grabs the latest frame from the above, and feeds it into the Conway shader. That way, the capture will never be happening at the same time as the Conway shader, which should save resources.
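
    A minimal sketch of that two-path idea (untested; runFX comes from the description above, while drawPointCloud, conwayShader, fxBuffer and lastFrame are made-up names):

    void draw() {
      if (!runFX) {
        drawPointCloud();              // hypothetical point-cloud render pass
        lastFrame = get();             // capture a frame to seed the effect
      } else {
        fxBuffer.beginDraw();
        fxBuffer.shader(conwayShader); // hypothetical Game of Life shader
        fxBuffer.image(lastFrame, 0, 0);
        fxBuffer.endDraw();
        image(fxBuffer, 0, 0);
        lastFrame = fxBuffer.get();    // feedback: this result is the next input
      }
    }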

    Wild speculation, though..

    a|x

  • Kinect Depth Threshold

    Thanks for the advice, Kf!

    Basically what I did is use the existing DepthMap3D example from OpenNI. The example gives me a visualization of a 3D point cloud. I then modified it with the method I learned from Shiffman's tutorial on how to create a depth threshold.

    Shiffman's tutorial uses a different library (OpenKinect), which couldn't work with my Kinect. So what I tried to do was copy Shiffman's code into the OpenNI example, then change some of the code to match the OpenNI library. I'm guessing the changes I made are what created the errors.


    Below is my code:

    
    import SimpleOpenNI.*;
    
    SimpleOpenNI context;
    
    PImage img;
    
    // SIZES
    int canvasWidth  = 512;
    int canvasHeight = 424;
    
    int kinectWidth  = 512;
    int kinectHeight = 424;
    
    
    
    void setup()
    {
      size(512,424);
    
      context = new SimpleOpenNI(this);
      if(context.isInit() == false)
      {
         println("Can't init SimpleOpenNI, maybe the camera is not connected!"); 
         exit();
         return;  
      }
      
      // disable mirror
      context.setMirror(false);
    
      // enable depthMap generation 
      context.enableDepth();
    
    img  = createImage(kinectWidth, kinectHeight, RGB); 
      
    }
    
    void draw()
    {
      background(0);

      img.loadPixels();

      int[] depthMap = context.depthMap();

      // threshold the depth image: pixels inside the band get colored,
      // everything else goes black
      // (note: with a Kinect v1 the depth map is 640x480, so a 512x424 img
      // is too small for these offsets and indexing it throws an
      // ArrayIndexOutOfBoundsException; sizing img with context.depthWidth()
      // and context.depthHeight() avoids that)
      for (int y = 0; y < context.depthHeight(); y++)
      {
        for (int x = 0; x < context.depthWidth(); x++)
        {
          int offset = x + y * context.depthWidth();
          int d = depthMap[offset];

          if (d > 300 && d < 1500) {
            img.pixels[offset] = color(255, 0, 150);
          } else {
            img.pixels[offset] = color(0);
          }
        }
      }

      img.updatePixels();
      image(img, 0, 0);
    }
    
    
  • Make the animation to move with kinect

    Hello, thank you very much for your help, I really appreciate it. As you can tell I am not that familiar with Processing, and this might be very easy to do if you are familiar with the code.

    I am getting two errors, on lines 27 and 28. Probably I need to change kinect to kinect2. However, when I change it to kinect2 I get another error on line 27 ("the function update() does not exist"), and when I change kinect.depthMap to kinect2.depthMap it says "the function depthMap() does not exist".

    I am writing kinect2 because I am using the new Kinect. I will also check the links you sent me.

    Thank you so much !!!

  • Make the animation to move with kinect

    This is untested code below. What I do is grab the depth value right at the center of the depth data (yes, only one pixel) and use that to replace the effect of mouseX in your code.

    Also search for other examples using https://forum.processing.org/two/search?Search=depthMap

    Lastly, check Shiffman's videos on Kinect: https://www.youtube.com/playlist?list=PLRqwX-V7Uu6ZMlWHdcy8hAGDy6IaoxUKf

    Kf

    import org.openkinect.processing.*;
    
    // Kinect Library object
    Kinect2 kinect2;
    PImage img;
    
    //distance in cm depth, adapt to room 
    int distance = 1500;
    int distance2 = 3000;
    
    void setup () {
      size (500, 500);
      kinect2 = new Kinect2(this);
      kinect2.initDepth();
      kinect2.initDevice();
      noFill();
      stroke(255);
      strokeWeight(2);
    }
    
    void draw() {
      background(0);
    
      PImage img = kinect2.getDepthImage();
      image (img, 0, 0);
    
      // Kinect2 (OpenKinect library) has no update()/depthMap();
      // getRawDepth() returns the raw distance array instead
      int[] depthValues = kinect2.getRawDepth(); //array, distances
    
      translate(width /2, height/2);
      beginShape();
    
      int centerOfScreen = kinect2.depthWidth/2 + (kinect2.depthHeight/2)*kinect2.depthWidth; // index of the center pixel
      int currentDepthValue = depthValues[centerOfScreen];
      float mappedVal=map(currentDepthValue,distance,distance2,0,width/100.0);
    
      // add some vertices
      for (float theta = 0; theta <= 2 * PI; theta += 0.01) {
    
        float rad = r(theta, 
          mappedVal, // a
          mouseY / 100.0, // b
          70, // m
          1, // n1
          2, // n2
          2 // n3
          ); 
        float x = rad * cos (theta) * 50;
        float y = rad * sin (theta) * 50;
        vertex (x, y);
      }
    
      endShape();
    }
    
    
    float r(float theta, float a, float b, float m, float n1, float n2, float n3) {
      return pow(pow (abs(cos(m * theta / 4.0)/a), n2 ) + 
        pow (abs(sin(m * theta / 4.0) /b), n3), -1.0 / n1) ;
    }
    
  • Kinect Projection Masking - Array out of bounds error

    This is a possible way to do things; keep in mind the code is not tested. You need to call updatePixels() since you are changing the image's pixels.

    In your case, when changing dimensions and position, keep those operations as two different transactions. This is the reason why I removed your copy() call and used an alternative way to copy the image, for the sake of clarity.

    Besides other minor changes, the big one was initializing newPosX/Y to the center of the sketch; I also set imageMode to CENTER, as this makes positioning the image easier: https://processing.org/reference/imageMode_.html

    Lastly, I ensured the resizing of the image (and its position) stays within certain boundaries. It is important to avoid ending up with an image of zero size (in my opinion).

    Notice that because I am changing several parameters in your original sketch, and since I didn't have a chance to test it, it is very likely something will not work right away. If you cannot figure out how to fix it, post below.

    Kf

    import SimpleOpenNI.*;
    SimpleOpenNI kinect;
    
    //distance in cm depth, adapt to room 
    int distance = 1500;
    int distance2 = 3000;
    
    int depthMapWidth = 640;
    int depthMapHeight = 480;
    
    int newWidth=1470;
    int newHeight=890;
    int newPosX = 0;
    int newPosY = 0;
    
    PImage liveMap;
    PImage liveMapCopy;
    
    void setup() {
      size(1470, 890);
      imageMode(CENTER);
      newPosX=width/2;
      newPosY=height/2;
    
      kinect = new SimpleOpenNI(this);
      if (kinect.isInit() == false) {
        println("Camera not connected!");
        exit();
        return;
      }
    
      kinect.enableDepth(); //enables depth image 
      liveMap = createImage(depthMapWidth, depthMapHeight, RGB); //creates empty image that will be the mask 
      //liveMapCopy = createImage(1470, 890, RGB);
    }
    
    void draw() {
      //background(color(100,100,100)); //set background colour to black 
      fill(100, 100, 100);
      rect(0, 0, width, height);
      kinect.update();
      int[] depthValues = kinect.depthMap(); //array, distances 
    
      liveMap.loadPixels(); //overwrites pixels 
    
      for (int y=0; y<depthMapHeight; y++) {
        for (int x=0; x<depthMapWidth; x++) {
          int i= x+(y*depthMapWidth);
          int currentDepthValue = depthValues[i]; //calculates the numnber of pixels in the array and gets the distance value 
          if (currentDepthValue>distance&&currentDepthValue<distance2) {
            liveMap.pixels[i] = color(230, 255, 0);  //if the distance lies within limits
            //change mask image to white
          } else {
            liveMap.pixels[i] = color(0, 0, 0);  //if no change to black (creating mask)
          }
        }
      }
      liveMap.updatePixels();
    
      liveMapCopy=liveMap.get();
      liveMapCopy.resize(newWidth, newHeight);
    
      image(liveMapCopy, newPosX, newPosY);
    }
    
    void keyReleased() {
    
      updateImgDimensions();
      updateImgPosition();
    }
    
    void  updateImgDimensions() {
    
      if (newWidth>100 && newWidth<2*width) {
        if (keyCode == RIGHT) 
          newWidth = newWidth + 10;
    
        if (keyCode == LEFT) 
          newWidth = newWidth - 10;
      }
    
      if (newHeight>100 && newHeight<2*height) {
    
        if (keyCode == DOWN) 
          newHeight = newHeight + 10;
    
        if (keyCode == UP) 
          newHeight = newHeight - 10;
      }
    }
    
    void  updateImgPosition() {
    
      // letter keys arrive in `key` (keyCode is for special keys like the arrows);
      // the bounds keep the image center inside the window (it starts at width/2, height/2)
      if (newPosX > 0 && newPosX < width) {
        if (key == 'a')
          newPosX -= 10;
        if (key == 'd')
          newPosX += 10;
      }

      if (newPosY > 0 && newPosY < height) {
        if (key == 's')
          newPosY -= 10;
        if (key == 'w')
          newPosY += 10;
      }
    }
    
  • Kinect Projection Masking - Array out of bounds error

    Hi guys, I wondered if you could give me a hand here; my previous discussion has been closed.

    I've been working on my code and I've managed to solve some errors. I'm now wanting to assign key controls in a keyPressed section. I've managed to assign my size values to UP, DOWN, LEFT and RIGHT. I'm wanting to change the position of my copied image by pressing W, A, S and D to move the image. When trying to use letter keys nothing happens, for example:

    if (keyCode == 'd') { newPosX = newPosX + 10;

    I know I'm probably going about this the wrong way, but I can't find anything online about how to assign a keyPressed function to a letter key.
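
    For what it's worth, the standard Processing behavior is that letter keys arrive in the `key` variable, while `keyCode` is meant for special keys such as the arrow keys, so comparing keyCode against 'd' never matches. A minimal sketch:

    void keyPressed() {
      if (keyCode == RIGHT) newWidth += 10;         // special keys: use keyCode
      if (key == 'd' || key == 'D') newPosX += 10;  // letter keys: use key
    }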

    Any help would be appreciated, thanks. My updated code is below:

    import SimpleOpenNI.*;
    SimpleOpenNI kinect;
    
    //distance in cm depth, adapt to room 
    int distance = 1500;
    int distance2 = 3000;
    
    int depthMapWidth = 640;
    int depthMapHeight = 480;
    
    int newWidth=1470;
    int newHeight=890;
    int newPosX = 0;
    int newPosY = 0;
    
    PImage liveMap;
    PImage liveMapCopy;
    
    void setup(){
      size(1470,890);
      kinect = new SimpleOpenNI(this);
      if (kinect.isInit() == false){
        println("Camera not connected!");
        exit();
        return;
      }
    
      kinect.enableDepth(); //enables depth image 
      liveMap = createImage(640,480,RGB); //creates empty image that will be the mask 
      liveMapCopy = createImage(1470,890,RGB);
    }
    
    void draw(){
      //background(color(100,100,100)); //set background colour to black 
      fill(100,100,100);
      rect(0,0,width,height);
      kinect.update();
      int[] depthValues = kinect.depthMap(); //array, distances 
    
      //liveMap.width = width;
      //liveMap.height = height;
      liveMap.loadPixels(); //overwrites pixels 
    
    for (int y=0; y<depthMapHeight; y++){
      for(int x=0; x<depthMapWidth; x++){
        int i= x+(y*depthMapWidth);
        int currentDepthValue = depthValues[i]; //calculates the numnber of pixels in the array and gets the distance value 
        if (currentDepthValue>distance&&currentDepthValue<distance2) {
            liveMap.pixels[i] = color(230,255,0);  //if the distance lies within limits
            //change mask image to white 
          } else {
            liveMap.pixels[i] = color(0,0,0);  //if no change to black (creating mask)          
          }
        }  
      }
    //mask image updated for use 
    
    liveMapCopy.copy(liveMap,0,0,640,480,newPosX,newPosY,newWidth,newHeight);
    //liveMapCopy.resize(mouseX+50,0);
    //liveMapCopy.resize(640,480);
    //liveMapCopy.updatePixels();
    image(liveMapCopy,100,0); //change position here
    }
    
    void keyPressed(){
      if (keyCode == RIGHT) {
        newWidth = newWidth + 10;
      }
      if (keyCode == LEFT) {
        newWidth = newWidth - 10;
      }
      if (keyCode == DOWN) {
        newHeight = newHeight + 10;
      }
      if (keyCode == UP) {
        newHeight = newHeight - 10;
      }
    }
    
  • Kinect Projection Masking - Array out of bounds error

    Hi,

    I'm currently working on an end of year project for university called 'motion tracking projection mapping'.

    I'm currently looking into 'projection masking' using the Kinect depth image. Basically I'm wanting to project some imagery through a projector onto a moving person. I'm using the resize() function to enlarge my mask image without exceeding the limitations of the Kinect; I then want to project this enlarged image to fit a person/area. I've managed to change the size of the window; however, when running my code I keep getting 'ArrayIndexOutOfBoundsException: 7500' whenever I try to resize my image. The number in the error increases when I increase the values in my resize() call. The error occurs on line 41.

    Any help anyone could give me would be greatly appreciated. The code i'm using is below:

    import SimpleOpenNI.*;
    SimpleOpenNI kinect;
    
    //distance in cm depth, adapt to room 
    int distance = 1500;
    int distance2 = 3000;
    
    int depthMapWidth = 640;
    int depthMapHeight = 480;
    
    PImage liveMap;
    
    void setup(){
      size(1024,768);
      kinect = new SimpleOpenNI(this);
      if (kinect.isInit() == false){
        println("Camera not connected!");
        exit();
        return;
      }
    
      kinect.setMirror(true);
      kinect.enableDepth(); //enables depth image 
      liveMap = createImage(640,480,RGB); //creates empty image that will be the mask 
    }
    
    void draw(){
      background(color(0,0,0)); //set background colour to black 
      kinect.update();
      int[] depthValues = kinect.depthMap(); //array, distances 
    
      //liveMap.width = width;
      //liveMap.height = height;
      liveMap.loadPixels(); //overwrites pixels 
    
    for (int y=0; y<depthMapHeight; y++){
      for(int x=0; x<depthMapWidth; x++){
        int i= x+(y*depthMapWidth);
        int currentDepthValue = depthValues[i]; //calculates the numnber of pixels in the array and gets the distance value 
        if (currentDepthValue>distance&&currentDepthValue<distance2) {
            liveMap.pixels[i] = color(255,255,255);  //if the distance lies within limits
            //change mask image to white 
          } else {
            liveMap.pixels[i] = color(0,0,0);  //if no change to black (creating mask)          
          }
        }  
      }
    //mask image updated for use 
    liveMap.resize(100,0);
    liveMap.updatePixels();
    image(liveMap,100,0); //change position here
    }
    

    Thank you!

  • Kinect Projection Masking - How to enlarge image to fit a person?

    Hi,

    I'm currently working on an end of year project for university called 'motion tracking projection mapping'.

    I'm currently looking into 'projection masking' using the Kinect depth image. Basically I'm wanting to project some imagery through a projector onto a moving person. I have some example code from another site which creates a depth-image mask; the author later adds the video function to have a video loop onto the person's silhouette. Basically all I'm wanting to do is make the size() of the window bigger so I can project an enlarged image. However, when I try to change the size of the window it doesn't let me, as the values end up out of bounds of the array. The code I'm trying to use is in a screenshot attached. Am I able to enlarge the size of the window, or is it not possible with the Kinect 1's 640x480 limitation?

    Any help anyone could give me would be greatly appreciated. The code i'm using is below:

    Thank you!

    import SimpleOpenNI.*; 
    SimpleOpenNI  kinect;
    
    int distance = 1500;
    int distance2 = 3000;
    
    PImage liveMap;
    
    void setup()
    {  
      size(640, 480);
      kinect = new SimpleOpenNI(this);
      kinect.setMirror(false);
      kinect.enableDepth();
      liveMap = createImage(640, 480, RGB);
    }
    
    void draw()
    {
      background(color(0,0,0));  
      kinect.update();
      int[] depthValues = kinect.depthMap();
      liveMap.width = 640;
      liveMap.height = 480;
      liveMap.loadPixels();
      for (int y=0; y<480; y++) {
        for (int x=0; x<640; x++) {
          int i= x+(y*640);
          int currentDepthValue = depthValues[i];
          if (currentDepthValue>distance&&currentDepthValue<distance2) {
            liveMap.pixels[i] = color(255,255,255);  
          } else {
            liveMap.pixels[i] = color(0,0,0);            
          }
        }  
      }
      liveMap.updatePixels();
      image(liveMap,0,0);
    }
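
    One way around the 640x480 limit (an untested sketch, not part of the original example): keep the mask at the Kinect's native resolution and let image() scale it at draw time, since image() also accepts a destination width and height. The window set by size() can then be as large as the projector needs; only the mask data itself stays 640x480.

      // in draw(), instead of image(liveMap, 0, 0):
      image(liveMap, 0, 0, width, height); // stretch the 640x480 mask to the window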
    
  • How to implement spout 2.05 in 'processing ' windowsX?

    Forgive the very basic nature of this query, but I am very new to Processing (and indeed programming). I am trying to use a Kinect 1414 with Processing 2.2.1 and Isadora according to the tutorial http://troikatronix.com/support/kb/kinect-tutorial-part2/. Since the tutorial, Spout has been upgraded, and I am trying without success to change the code according to the recommendations for 2.05: https://github.com/leadedge/SpoutProcessing/releases. I have imported the Spout library. The original code for the Processing sketch is below.

    /* --------------------------------------------------------------------------
     * SimpleOpenNI User Test
     * --------------------------------------------------------------------------
     * Processing Wrapper for the OpenNI/Kinect 2 library
     * http://code.google.com/p/simple-openni
     * --------------------------------------------------------------------------
     * prog: Max Rheiner / Interaction Design / Zhdk / http://iad.zhdk.ch/
     * date: 12/12/2012 (m/d/y)
     * ----------------------------------------------------------------------------
     */

    import SimpleOpenNI.*;

    PGraphics canvas;
    color[] userClr = new color[] {
      color(255, 0, 0),
      color(0, 255, 0),
      color(0, 0, 255),
      color(255, 255, 0),
      color(255, 0, 255),
      color(0, 255, 255)
    };

    PVector com = new PVector();
    PVector com2d = new PVector();

    // --------------------------------------------------------------------------------
    // CAMERA IMAGE SENT VIA SPOUT
    // --------------------------------------------------------------------------------
    int kCameraImage_RGB = 1;    // rgb camera image
    int kCameraImage_IR = 2;     // infra red camera image
    int kCameraImage_Depth = 3;  // depth without colored bodies of tracked bodies
    int kCameraImage_User = 4;   // depth image with colored bodies of tracked bodies

    int kCameraImageMode = kCameraImage_User; // << set this value to one of the kCameraImage constants above

    // --------------------------------------------------------------------------------
    // SKELETON DRAWING
    // --------------------------------------------------------------------------------
    boolean kDrawSkeleton = true; // << set to true to draw the skeleton, false to not draw it

    // --------------------------------------------------------------------------------
    // OPENNI (KINECT) SUPPORT
    // --------------------------------------------------------------------------------

    SimpleOpenNI context;

    private void setupOpenNI() {
      context = new SimpleOpenNI(this);
      if (context.isInit() == false) {
        println("Can't init SimpleOpenNI, maybe the camera is not connected!");
        exit();
        return;
      }

      // enable depthMap generation
      context.enableDepth();
      context.enableUser();

      // disable mirror
      context.setMirror(false);
    }

    private void setupOpenNI_CameraImageMode() {
      println("kCameraImageMode " + kCameraImageMode);

    switch (kCameraImageMode) {
    case 1: // kCameraImage_RGB:
        context.enableRGB();
        println("enable RGB");
        break;
    case 2: // kCameraImage_IR:
        context.enableIR();
        println("enable IR");
        break;
    case 3: // kCameraImage_Depth:
        context.enableDepth();
        println("enable Depth");
        break;
    case 4: // kCameraImage_User:
        context.enableUser();
        println("enable User");
        break;
    }
    

    }

    private void OpenNI_DrawCameraImage() {
      switch (kCameraImageMode) {
      case 1: // kCameraImage_RGB:
        canvas.image(context.rgbImage(), 0, 0);
        break;
      case 2: // kCameraImage_IR:
        canvas.image(context.irImage(), 0, 0);
        break;
      case 3: // kCameraImage_Depth:
        canvas.image(context.depthImage(), 0, 0);
        break;
      case 4: // kCameraImage_User:
        canvas.image(context.userImage(), 0, 0);
        break;
      }
    }

    // --------------------------------------------------------------------------------
    // OSC SUPPORT
    // --------------------------------------------------------------------------------

    import oscP5.*; // import OSC library
    import netP5.*; // import net library for OSC

    OscP5 oscP5;                      // OSC input/output object
    NetAddress oscDestinationAddress; // the destination IP address - 127.0.0.1 to send locally
    int oscTransmitPort = 1234;       // OSC send target port; 1234 is default for Isadora
    int oscListenPort = 9000;         // OSC receive port number

    private void setupOSC() {
      // init OSC support, listening on port oscListenPort
      oscP5 = new OscP5(this, oscListenPort);
      oscDestinationAddress = new NetAddress("127.0.0.1", oscTransmitPort);
    }

    private void sendOSCSkeletonPosition(String inAddress, int inUserID, int inJointType) {
      // create the OSC message with target address
      OscMessage msg = new OscMessage(inAddress);

    PVector p = new PVector();
    float confidence = context.getJointPositionSkeleton(inUserID, inJointType, p);
    
    // add the three vector coordinates to the message
    msg.add(p.x);
    msg.add(p.y);
    msg.add(p.z);
    
    // send the message
    oscP5.send(msg, oscDestinationAddress);
    

    }

    private void sendOSCSkeleton(int inUserID) {
      sendOSCSkeletonPosition("/head", inUserID, SimpleOpenNI.SKEL_HEAD);
      sendOSCSkeletonPosition("/neck", inUserID, SimpleOpenNI.SKEL_NECK);
      sendOSCSkeletonPosition("/torso", inUserID, SimpleOpenNI.SKEL_TORSO);

    sendOSCSkeletonPosition("/left_shoulder", inUserID, SimpleOpenNI.SKEL_LEFT_SHOULDER);
    sendOSCSkeletonPosition("/left_elbow", inUserID, SimpleOpenNI.SKEL_LEFT_ELBOW);
    sendOSCSkeletonPosition("/left_hand", inUserID, SimpleOpenNI.SKEL_LEFT_HAND);
    
    sendOSCSkeletonPosition("/right_shoulder", inUserID, SimpleOpenNI.SKEL_RIGHT_SHOULDER);
    sendOSCSkeletonPosition("/right_elbow", inUserID, SimpleOpenNI.SKEL_RIGHT_ELBOW);
    sendOSCSkeletonPosition("/right_hand", inUserID, SimpleOpenNI.SKEL_RIGHT_HAND);
    
    sendOSCSkeletonPosition("/left_hip", inUserID, SimpleOpenNI.SKEL_LEFT_HIP);
    sendOSCSkeletonPosition("/left_knee", inUserID, SimpleOpenNI.SKEL_LEFT_KNEE);
    sendOSCSkeletonPosition("/left_foot", inUserID, SimpleOpenNI.SKEL_LEFT_FOOT);
    
    sendOSCSkeletonPosition("/right_hip", inUserID, SimpleOpenNI.SKEL_RIGHT_HIP);
    sendOSCSkeletonPosition("/right_knee", inUserID, SimpleOpenNI.SKEL_RIGHT_KNEE);
    sendOSCSkeletonPosition("/right_foot", inUserID, SimpleOpenNI.SKEL_RIGHT_FOOT);
    

    }

    // --------------------------------------------------------------------------------
    // SPOUT SUPPORT
    // --------------------------------------------------------------------------------

    Spout server;

    private void setupSpoutServer(String inServerName, int inWidth, int inHeight) {
      // create a Spout server to send frames out
      server = new Spout();

    server.initSender(inServerName, inWidth, inHeight);
    

    }

    // --------------------------------------------------------------------------------
    // EXIT HANDLER
    // --------------------------------------------------------------------------------
    // called on exit to gracefully shut down the Spout server
    private void prepareExitHandler() {
      Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {
        public void run() {
          try {
            // if (server.hasClients()) {
            server.closeSender();
            // }
          } catch (Exception ex) {
            ex.printStackTrace(); // not much else to do at this point
          }
        }
      }));
    }

    // --------------------------------------------------------------------------------
    // MAIN PROGRAM
    // --------------------------------------------------------------------------------
    void setup() {
      int canvasWidth = 640;
      int canvasHeight = 480;

    size(canvasWidth, canvasHeight, P3D);
    canvas = createGraphics(canvasWidth, canvasHeight, P3D);
    

    textureMode(NORMAL);

    println("Setup Canvas");
    
    // canvas.background(200, 0, 0);
    canvas.stroke(0, 0, 255);
    canvas.strokeWeight(3);
    canvas.smooth();
    println("-- Canvas Setup Complete");
    
    // setup Syphon server
    println("Setup Spout");
    setupSpoutServer("Depth", canvasWidth, canvasHeight);
    
    // setup Kinect tracking
    println("Setup OpenNI");
    setupOpenNI();
    setupOpenNI_CameraImageMode();
    
    // setup OSC
    println("Setup OSC");
    setupOSC();
    
    // setup the exit handler
    println("Setup Exit Handerl");
    prepareExitHandler();
    

    }

    void draw() {
      // update the cam
      context.update();

    canvas.beginDraw();
    
    // draw image
    OpenNI_DrawCameraImage();
    
    // draw the skeleton if it's available
    if (kDrawSkeleton) {
    
        int[] userList = context.getUsers();
        for (int i=0; i<userList.length; i++)
        {
            if (context.isTrackingSkeleton(userList[i]))
            {
                canvas.stroke(userClr[ (userList[i] - 1) % userClr.length ] );
    
                drawSkeleton(userList[i]);
    
                if (userList.length == 1) {
                    sendOSCSkeleton(userList[i]);
                }
            }      
    
            // draw the center of mass
            if (context.getCoM(userList[i], com))
            {
                context.convertRealWorldToProjective(com, com2d);
    
                canvas.stroke(100, 255, 0);
                canvas.strokeWeight(1);
                canvas.beginShape(LINES);
                canvas.vertex(com2d.x, com2d.y - 5);
                canvas.vertex(com2d.x, com2d.y + 5);
                canvas.vertex(com2d.x - 5, com2d.y);
                canvas.vertex(com2d.x + 5, com2d.y);
                canvas.endShape();
    
                canvas.fill(0, 255, 100);
                canvas.text(Integer.toString(userList[i]), com2d.x, com2d.y);
            }
        }
    }
    
    canvas.endDraw();
    
    image(canvas, 0, 0);
    
    // send image to spout
    server.sendTexture();
    

    }

    void drawLimb(int userId, int inJoint1) {
    }

    // draw the skeleton with the selected joints
    void drawSkeleton(int userId) {
      canvas.stroke(255, 255, 255, 255);
      canvas.strokeWeight(3);

    drawLimb(userId, SimpleOpenNI.SKEL_HEAD, SimpleOpenNI.SKEL_NECK);
    
    drawLimb(userId, SimpleOpenNI.SKEL_NECK, SimpleOpenNI.SKEL_LEFT_SHOULDER);
    drawLimb(userId, SimpleOpenNI.SKEL_LEFT_SHOULDER, SimpleOpenNI.SKEL_LEFT_ELBOW);
    drawLimb(userId, SimpleOpenNI.SKEL_LEFT_ELBOW, SimpleOpenNI.SKEL_LEFT_HAND);
    
    drawLimb(userId, SimpleOpenNI.SKEL_NECK, SimpleOpenNI.SKEL_RIGHT_SHOULDER);
    drawLimb(userId, SimpleOpenNI.SKEL_RIGHT_SHOULDER, SimpleOpenNI.SKEL_RIGHT_ELBOW);
    drawLimb(userId, SimpleOpenNI.SKEL_RIGHT_ELBOW, SimpleOpenNI.SKEL_RIGHT_HAND);
    
    drawLimb(userId, SimpleOpenNI.SKEL_LEFT_SHOULDER, SimpleOpenNI.SKEL_TORSO);
    drawLimb(userId, SimpleOpenNI.SKEL_RIGHT_SHOULDER, SimpleOpenNI.SKEL_TORSO);
    
    drawLimb(userId, SimpleOpenNI.SKEL_TORSO, SimpleOpenNI.SKEL_LEFT_HIP);
    drawLimb(userId, SimpleOpenNI.SKEL_LEFT_HIP, SimpleOpenNI.SKEL_LEFT_KNEE);
    drawLimb(userId, SimpleOpenNI.SKEL_LEFT_KNEE, SimpleOpenNI.SKEL_LEFT_FOOT);
    
    drawLimb(userId, SimpleOpenNI.SKEL_TORSO, SimpleOpenNI.SKEL_RIGHT_HIP);
    drawLimb(userId, SimpleOpenNI.SKEL_RIGHT_HIP, SimpleOpenNI.SKEL_RIGHT_KNEE);
    drawLimb(userId, SimpleOpenNI.SKEL_RIGHT_KNEE, SimpleOpenNI.SKEL_RIGHT_FOOT);
    

    }

    void drawLimb(int userId, int jointType1, int jointType2) {
      float confidence;

    // draw the joint position
    PVector a_3d = new PVector();
    confidence = context.getJointPositionSkeleton(userId, jointType1, a_3d);
    PVector b_3d = new PVector();
    confidence = context.getJointPositionSkeleton(userId, jointType2, b_3d);
    
    PVector a_2d = new PVector();
    context.convertRealWorldToProjective(a_3d, a_2d);
    PVector b_2d = new PVector();
    context.convertRealWorldToProjective(b_3d, b_2d);
    
    canvas.line(a_2d.x, a_2d.y, b_2d.x, b_2d.y);
    

    }

    // -----------------------------------------------------------------
    // SimpleOpenNI events

    void onNewUser(SimpleOpenNI curContext, int userId) {
      println("onNewUser - userId: " + userId);
      println("\tstart tracking skeleton");

    curContext.startTrackingSkeleton(userId);
    

    }

    void onLostUser(SimpleOpenNI curContext, int userId) {
      println("onLostUser - userId: " + userId);
    }

    void onVisibleUser(SimpleOpenNI curContext, int userId) {
      //println("onVisibleUser - userId: " + userId);
    }

    void keyPressed() {
      switch(key) {
      case ' ':
        context.setMirror(!context.mirror());
        println("Switch Mirroring");
        break;
      }
    }

  • [Resolved] Hand tracking with Kinect/Processing

    Hi,

    I am trying to make a project with Processing and the Kinect. I already installed the right libraries (I use OpenNI and FingerTracker), and everything seems to work. I followed a tutorial which showed how to make the Kinect detect our hands, especially our fingers. It's this one:

    import fingertracker.*;
    import SimpleOpenNI.*;
    
    FingerTracker fingers;
    SimpleOpenNI kinect;
    int threshold = 625;
    
    void setup() {
      size(640, 480);
    
    
      kinect = new SimpleOpenNI(this);
      kinect.enableDepth();
      kinect.setMirror(true);
    
      fingers = new FingerTracker(this, 640, 480);
      fingers.setMeltFactor(100);
    }
    
    void draw() {
    
      kinect.update();
      PImage depthImage = kinect.depthImage();
      image(depthImage, 0, 0);
    
    
      fingers.setThreshold(threshold);
    
    
      int[] depthMap = kinect.depthMap();
      fingers.update(depthMap);
    
    
      stroke(0,255,0);
      for (int k = 0; k < fingers.getNumContours(); k++) {
        fingers.drawContour(k);
      }
    
      // iterate over all the fingers found
      // and draw them as a red circle
      noStroke();
      fill(255,0,0);
      for (int i = 0; i < fingers.getNumFingers(); i++) {
        PVector position = fingers.getFinger(i);
        ellipse(position.x - 5, position.y -5, 10, 10);
      }
    
    
      fill(255,0,0);
      text(threshold, 10, 20);
    }
    
    
    void keyPressed(){
      if(key == '-'){
        threshold -= 10;
      }
    
      if(key == '='){
        threshold += 10;
      }
    }
    

    Everything works great, but now I'm trying to detect when my fingers are on a certain location of the window. I am creating a picture in Photoshop which will be displayed on the screen in Processing, and I want the JPG to have locations where several things happen when my fingers touch those spaces (for example, objects that appear suddenly, or other windows opening...). Is it possible? How can I do it?
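
    A minimal sketch of the hit-testing part (untested; the rectangle coordinates are made up): define a hot-spot region in window coordinates and check every detected finger against it each frame.

    // hypothetical hot-spot rectangle, in window coordinates
    float rx = 100, ry = 100, rw = 200, rh = 150;

    // true if any tracked finger is currently inside the rectangle
    boolean fingerInHotspot() {
      for (int i = 0; i < fingers.getNumFingers(); i++) {
        PVector p = fingers.getFinger(i);
        if (p.x >= rx && p.x <= rx + rw && p.y >= ry && p.y <= ry + rh) {
          return true;
        }
      }
      return false;
    }

    Calling fingerInHotspot() at the end of draw() could then trigger whatever should happen there, e.g. drawing an overlay image; opening other windows would take more machinery.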

    Thank you for your future answers.
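
    For what it's worth, the usual approach is plain hit-testing: check each finger position against the screen region(s) of interest every frame. Below is a minimal sketch of that idea; the hotspot rectangle (hx, hy, hw, hh) and the triggerAction() helper are made up for illustration, and only fingers.getNumFingers() and fingers.getFinger() come from the code above.

    // a hypothetical rectangular hotspot in window coordinates
    float hx = 100, hy = 100, hw = 200, hh = 150;

    void checkFingers() {
      for (int i = 0; i < fingers.getNumFingers(); i++) {
        PVector p = fingers.getFinger(i);
        // is this fingertip inside the hotspot?
        if (p.x >= hx && p.x <= hx + hw && p.y >= hy && p.y <= hy + hh) {
          triggerAction();  // placeholder: show an image, open a window, ...
        }
      }
    }

    void triggerAction() {
      // whatever should happen when a finger touches the area
    }

    Calling checkFingers() at the end of draw() would fire the action on every frame a finger stays inside the rectangle; a boolean latch can turn that into a one-shot trigger.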

  • SimpleOpenNI Library error

    /* --------------------------------------------------------------------------
     * SimpleOpenNI UserCoordsys Test
     * --------------------------------------------------------------------------
     * Processing Wrapper for the OpenNI/Kinect library
     * http://code.google.com/p/simple-openni
     * --------------------------------------------------------------------------
     * prog: Max Rheiner / Interaction Design / zhdk / http://iad.zhdk.ch/
     * date: 05/06/2012 (m/d/y)
     * ----------------------------------------------------------------------------
     * This example shows how to set up a user-defined coordinate system.
     * You have to define the new null point + the x/z axis.
     * This can also be useful if you work with two independent cameras.
     * ---------------------------------------------------------------------------- */

    import SimpleOpenNI.*;

    final static int CALIB_START     = 0;
    final static int CALIB_NULLPOINT = 1;
    final static int CALIB_X_POINT   = 2;
    final static int CALIB_Z_POINT   = 3;
    final static int CALIB_DONE      = 4;

    SimpleOpenNI context;
    boolean screenFlag = true;
    int calibMode = CALIB_START;

    PVector nullPoint3d = new PVector();
    PVector xDirPoint3d = new PVector();
    PVector zDirPoint3d = new PVector();
    PVector tempVec1 = new PVector();
    PVector tempVec2 = new PVector();
    PVector tempVec3 = new PVector();

    PMatrix3D userCoordsysMat = new PMatrix3D();

    void setup() {
    size(640, 480);
    smooth();

    context = new SimpleOpenNI(this);

    context.setMirror(false);

    // enable depthMap generation
    if (context.enableDepth() == false) {
      println("Can't open the depthMap, maybe the camera is not connected!");
      exit();
      return;
    }

    if (context.enableRGB() == false) {
      println("Can't open the rgbMap, maybe the camera is not connected or there is no rgbSensor!");
      exit();
      return;
    }

    // align depth data to image data
    context.alternativeViewPointDepthToImage();

    // Create the font
    textFont(createFont("Georgia", 16));
    }

    void draw() {
    // update the cam
    context.update();

    if (screenFlag)
      image(context.rgbImage(), 0, 0);
    else
      image(context.depthImage(), 0, 0);

    // draw text background
    pushStyle();
    noStroke();
    fill(0, 200, 0, 100);
    rect(0, 0, width, 40);
    popStyle();

    switch(calibMode) {
    case CALIB_START:
      text("To start the calibration press SPACE!", 5, 30);
      break;
    case CALIB_NULLPOINT:
      text("Set the nullpoint with the left mousebutton", 5, 30);
      break;
    case CALIB_X_POINT:
      text("Set the x-axis with the left mousebutton", 5, 30);
      break;
    case CALIB_Z_POINT:
      text("Set the z-axis with the left mousebutton", 5, 30);
      break;
    case CALIB_DONE:
      text("New nullpoint is defined!", 5, 30);
      break;
    }

    // draw
    drawCalibPoint();

    // draw the user defined coordinate system
    // with the size of 500mm
    if (context.hasUserCoordsys()) {
    PVector temp = new PVector();
    PVector nullPoint = new PVector();

    pushStyle();
    
    strokeWeight(3);
    noFill();        
    
    context.convertRealWorldToProjective(new PVector(0, 0, 0), tempVec1);  
    stroke(255, 255, 255, 150);
    ellipse(tempVec1.x, tempVec1.y, 10, 10); 
    
    context.convertRealWorldToProjective(new PVector(500, 0, 0), tempVec2);        
    stroke(255, 0, 0, 150);
    line(tempVec1.x, tempVec1.y, 
    tempVec2.x, tempVec2.y); 
    
    context.convertRealWorldToProjective(new PVector(0, 500, 0), tempVec2);        
    stroke(0, 255, 0, 150);
    line(tempVec1.x, tempVec1.y, 
    tempVec2.x, tempVec2.y); 
    
    context.convertRealWorldToProjective(new PVector(0, 0, 500), tempVec2);        
    stroke(0, 0, 255, 150);
    line(tempVec1.x, tempVec1.y, 
    tempVec2.x, tempVec2.y); 
    
    popStyle();
    

    }
    }

    void drawCalibPoint() {
    pushStyle();

    strokeWeight(3);
    noFill();

    switch(calibMode) {
    case CALIB_START:
    break;

    case CALIB_NULLPOINT:
    context.convertRealWorldToProjective(nullPoint3d, tempVec1);

    stroke(255, 255, 255, 150);
    ellipse(tempVec1.x, tempVec1.y, 10, 10);  
    break;
    

    case CALIB_X_POINT:
    // draw the null point
    context.convertRealWorldToProjective(nullPoint3d, tempVec1);
    context.convertRealWorldToProjective(xDirPoint3d, tempVec2);

    stroke(255, 255, 255, 150);
    ellipse(tempVec1.x, tempVec1.y, 10, 10);  
    
    stroke(255, 0, 0, 150);
    ellipse(tempVec2.x, tempVec2.y, 10, 10);  
    line(tempVec1.x, tempVec1.y, tempVec2.x, tempVec2.y);
    
    break;
    

    case CALIB_Z_POINT:

    context.convertRealWorldToProjective(nullPoint3d, tempVec1);
    context.convertRealWorldToProjective(xDirPoint3d, tempVec2);
    context.convertRealWorldToProjective(zDirPoint3d, tempVec3);
    
    stroke(255, 255, 255, 150);
    ellipse(tempVec1.x, tempVec1.y, 10, 10);  
    
    stroke(255, 0, 0, 150);
    ellipse(tempVec2.x, tempVec2.y, 10, 10);  
    line(tempVec1.x, tempVec1.y, tempVec2.x, tempVec2.y);
    
    stroke(0, 0, 255, 150);
    ellipse(tempVec3.x, tempVec3.y, 10, 10);  
    line(tempVec1.x, tempVec1.y, tempVec3.x, tempVec3.y);
    
    break;
    

    case CALIB_DONE:

    break;
    

    }

    popStyle();
    }

    void keyPressed() {
    switch(key) {
    case '1':
      screenFlag = !screenFlag;
      break;
    case ' ':
      calibMode++;
      if (calibMode > CALIB_DONE) {
        calibMode = CALIB_START;
        context.resetUserCoordsys();
      }
      else if (calibMode == CALIB_DONE) {
      // set the calibration
      context.setUserCoordsys(nullPoint3d.x, nullPoint3d.y, nullPoint3d.z,
                              xDirPoint3d.x, xDirPoint3d.y, xDirPoint3d.z,
                              zDirPoint3d.x, zDirPoint3d.y, zDirPoint3d.z);

      println("Set the user define coordinatesystem");
      println("nullPoint3d: " + nullPoint3d);
      println("xDirPoint3d: " + xDirPoint3d);
      println("zDirPoint3d: " + zDirPoint3d);
    
      /*
      // test
      context.getUserCoordsysTransMat(userCoordsysMat);
      PVector temp = new PVector();
    
      userCoordsysMat.mult(new PVector(0, 0, 0), temp);         
      println("PVector(0,0,0): " + temp);
    
      userCoordsysMat.mult(new PVector(500, 0, 0), temp);        
      println("PVector(500,0,0): " + temp);
    
      userCoordsysMat.mult(new PVector(0, 500, 0), temp);        
      println("PVector(0,500,0): " + temp);
    
      userCoordsysMat.mult(new PVector(0, 0, 500), temp);
      println("PVector(0,0,500): " + temp);
      */
    }
    
    break;
    

    }
    }

    void mousePressed() {
    if (mouseButton == LEFT) {
    PVector[] realWorldMap = context.depthMapRealWorld();
    int index = mouseX + mouseY * context.depthWidth();

    switch(calibMode)
    {
    case CALIB_NULLPOINT:
      nullPoint3d.set(realWorldMap[index]);
      break;
    case CALIB_X_POINT:
      xDirPoint3d.set(realWorldMap[index]);
      break;
    case CALIB_Z_POINT:
      zDirPoint3d.set(realWorldMap[index]);
      break;
    }
    

    }
    else {
    PVector[] realWorldMap = context.depthMapRealWorld();
    int index = mouseX + mouseY * context.depthWidth();

    println("Point3d: " + realWorldMap[index].x + "," + realWorldMap[index].y + "," + realWorldMap[index].z);
    

    }
    }

    void mouseDragged() {
    if (mouseButton == LEFT) {
    PVector[] realWorldMap = context.depthMapRealWorld();
    int index = mouseX + mouseY * context.depthWidth();

    switch(calibMode)
    {
    case CALIB_NULLPOINT:
      nullPoint3d.set(realWorldMap[index]);
      break;
    case CALIB_X_POINT:
      xDirPoint3d.set(realWorldMap[index]);
      break;
    case CALIB_Z_POINT:
      zDirPoint3d.set(realWorldMap[index]);
      break;
    }
    

    }

    }

    Each time I try to run this code I get this error:

        Can't load SimpleOpenNI library (SimpleOpenNI64) : java.lang.UnsatisfiedLinkError:
        C:\Users\maryl\OneDrive\Documents\Processing\libraries\SimpleOpenNI\library\SimpleOpenNI64.dll:
        Can't find dependent libraries
        Verify if you installed SimpleOpenNI correctly.
        http://code.google.com/p/simple-openni/wiki/Installation
        A library relies on native code that's not available.
        Or only works properly when the sketch is run as a 32-bit application.

  • How to record Kinect Depth Data and how to work with this offline data without Kinect ????

    Both 'OpenKinect for Processing' and 'KinectPV2' (and I guess SimpleOpenNI too, but you'd have to manually track that one down) can output the depth map as an array; then just check the examples/references/tutorials. EDIT: some might output it as a PImage, but I'm pretty sure you can still access the int[] containing the depth values.

    Also, at least with the Kinect v2, Kinect Studio v2 can record and play back the output as if a Kinect were connected. I don't know if there's something like that for the v1, though.
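
    If the library exposes the depth map as an int[], one low-tech way to record and replay it with no Kinect attached is to dump each frame to disk and read it back later. A minimal sketch of that idea, assuming a 640x480 sensor; saveDepthFrame() and loadDepthFrame() are hypothetical helpers, not library calls:

    import java.io.*;

    // write one depth frame as raw ints into the sketch folder
    void saveDepthFrame(int[] depthMap, String filename) {
      try {
        DataOutputStream out = new DataOutputStream(
          new BufferedOutputStream(new FileOutputStream(sketchPath(filename))));
        for (int v : depthMap) out.writeInt(v);
        out.close();
      } catch (IOException e) {
        println("Could not write " + filename + ": " + e);
      }
    }

    // read one depth frame back; w*h must match the recorded size
    int[] loadDepthFrame(String filename, int w, int h) {
      int[] depthMap = new int[w * h];
      try {
        DataInputStream in = new DataInputStream(
          new BufferedInputStream(new FileInputStream(sketchPath(filename))));
        for (int i = 0; i < depthMap.length; i++) depthMap[i] = in.readInt();
        in.close();
      } catch (IOException e) {
        println("Could not read " + filename + ": " + e);
      }
      return depthMap;
    }

    Record by calling saveDepthFrame(kinect.depthMap(), "frame" + frameCount + ".bin") once per draw(); offline, load the files back in order and feed them to the same code that would normally consume the live depth map.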

  • Animation with kinect and processing

    Hi! I'm doing a school project and need to make an interactive animation using Processing and the Kinect. The drawings should change according to the position of the user. I already have some code, but it's failing the tests and I don't know how to proceed. Here it is, if someone could give me any help (please!!):

    import SimpleOpenNI.*;

    SimpleOpenNI context;
    color[] userClr = new color[] {
      color(255, 0, 0),
      color(0, 255, 0),
      color(0, 0, 255),
      color(255, 255, 0),
      color(255, 0, 255),
      color(0, 255, 255)
    };
    PVector com = new PVector();
    PVector com2d = new PVector();

    PImage[] animac = new PImage[48];
    int frame = 0;

    void setup() {
    size(1920, 1080);

    context = new SimpleOpenNI(this);
    if (context.isInit() == false) {
      println("Can't init SimpleOpenNI, maybe the camera is not connected!");
      exit();
      return;
    }

    // enable depthMap generation
    context.enableDepth();

    // enable skeleton generation for all joints
    context.enableUser();

    // load the animation frames
    //animac[0] = loadImage("c1l1.png");  // left commented out in the original post,
                                          // yet drawSkeleton() below draws animac[0]
    animac[1] = loadImage("c1l2.png");
    animac[2] = loadImage("c1l3.png");
    animac[3] = loadImage("c1l4.png");
    animac[4] = loadImage("c1l5.png");
    animac[5] = loadImage("c1l6.png");
    animac[6] = loadImage("c1l7.png");

    animac[7] = loadImage("c2l1.png");
    animac[8] = loadImage("c2l2.png");
    animac[9] = loadImage("c2l3.png");
    animac[10] = loadImage("c2l4.png");
    animac[11] = loadImage("c2l5.png");
    animac[12] = loadImage("c2l6.png");
    animac[13] = loadImage("c2l7.png");

    animac[14] = loadImage("c3l1.png");
    animac[15] = loadImage("c3l2.png");
    animac[16] = loadImage("c3l3.png");
    animac[17] = loadImage("c3l4.png");
    animac[18] = loadImage("c3l5.png");
    animac[19] = loadImage("c3l6.png");
    animac[20] = loadImage("c3l7.png");

    animac[21] = loadImage("c4l1.png");
    animac[22] = loadImage("c4l2.png");
    animac[23] = loadImage("c4l3.png");
    //animac[24] = loadImage("c4l4.png");
    animac[25] = loadImage("c4l5.png");
    animac[26] = loadImage("c4l6.png");
    animac[27] = loadImage("c4l7.png");

    animac[28] = loadImage("c5l1.png");
    animac[29] = loadImage("c5l2.png");
    animac[30] = loadImage("c5l3.png");
    animac[31] = loadImage("c5l4.png");
    animac[32] = loadImage("c5l5.png");
    animac[33] = loadImage("c5l6.png");
    animac[34] = loadImage("c5l7.png");

    animac[35] = loadImage("c6l1.png");
    animac[36] = loadImage("c6l2.png");
    animac[37] = loadImage("c6l3.png");
    animac[38] = loadImage("c6l4.png");
    animac[39] = loadImage("c6l5.png");
    animac[40] = loadImage("c6l6.png");
    animac[41] = loadImage("c6l7.png");

    //animac[42] = loadImage("c7l1.png");
    animac[43] = loadImage("c7l2.png");
    animac[44] = loadImage("c7l3.png");
    animac[45] = loadImage("c7l4.png");
    animac[46] = loadImage("c7l5.png");
    animac[47] = loadImage("c7l6.png");
    //animac[48] = loadImage("c7l7.png");  // index 48 would be out of bounds for a 48-element array

    //background(200,0,0);

    //stroke(0, 0, 255);
    //strokeWeight(3);
    //smooth();
    }

    void draw() {
    // update the cam
    context.update();

    // draw depthImageMap
    //image(context.depthImage(), 0, 0);
    image(context.userImage(), 0, 0);  // load the image from the Kinect

    // draw the skeleton if it's available
    int[] userList = context.getUsers();
    for (int i = 0; i < userList.length; i++) {
      if (context.isTrackingSkeleton(userList[i])) {
        stroke(userClr[(userList[i] - 1) % userClr.length]);
        drawSkeleton(userList[i]);
      }

    }
    }

    // draw the skeleton with the selected joints
    void drawSkeleton(int userId) {
    // to get the 3d joint data
    // here you define which part of the body to track

    PVector jointPos = new PVector();
    context.getJointPositionSkeleton(userId, SimpleOpenNI.SKEL_HEAD, jointPos);

    PVector convertedHead = new PVector();
    context.convertRealWorldToProjective(jointPos, convertedHead);
    // draw an ellipse over the tracked body part
    fill(255, 0, 0);
    ellipse(convertedHead.x, convertedHead.y, 20, 20);

    // first range reconstructed (the original line was garbled); by the
    // pattern of the following ranges it was presumably 0..5
    if (convertedHead.x >= 0 && convertedHead.x <= 5) { image(animac[0], 0, 0); }
    if (convertedHead.x >= 6 && convertedHead.x <= 10) { image(animac[1], 0, 0); }
    if (convertedHead.x >= 11 && convertedHead.x <= 15) { image(animac[2], 0, 0); }
    if (convertedHead.x >= 16 && convertedHead.x <= 20) { image(animac[3], 0, 0); }
    if (convertedHead.x >= 21 && convertedHead.x <= 25) { image(animac[4], 0, 0); }
    if (convertedHead.x >= 26 && convertedHead.x <= 30) { image(animac[5], 0, 0); }
    if (convertedHead.x >= 31 && convertedHead.x <= 35) { image(animac[6], 0, 0); }
    if (convertedHead.x >= 36 && convertedHead.x <= 40) { image(animac[7], 0, 0); }
    if (convertedHead.x >= 41 && convertedHead.x <= 45) { image(animac[8], 0, 0); }
    }

    // -----------------------------------------------------------------
    // SimpleOpenNI events

    void onNewUser(SimpleOpenNI curContext, int userId) {
    println("onNewUser - userId: " + userId);
    println("\tstart tracking skeleton");

    curContext.startTrackingSkeleton(userId);
    }

    void onLostUser(SimpleOpenNI curContext, int userId) {
    println("onLostUser - userId: " + userId);
    }

    void onVisibleUser(SimpleOpenNI curContext, int userId) {
    //println("onVisibleUser - userId: " + userId);
    }

    void keyPressed() {
    switch(key) {
    case ' ':
      context.setMirror(!context.mirror());
      break;
    }
    }

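    Incidentally, the nine if blocks in drawSkeleton() all slice convertedHead.x into equal 5-unit bands, so the same mapping can be collapsed into one expression. A sketch of that idea, keeping the band width of 5 and the 9 frames used above (both carried over from the posted ranges):

    // map the head's x position to an animation frame:
    // one frame per 5-unit band, clamped to the frames actually used
    int band = constrain((int)(convertedHead.x / 5), 0, 8);
    image(animac[band], 0, 0);

    This also makes the bands easy to widen later: scale the divisor to the actual coordinate range of convertedHead.x instead of hard-coding 5.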

  • How to average values of more frames in Processing

    I'm working on this code to manage and save data coming from the Microsoft Kinect. The data are stored in the int array int[] depthValues; what I'd like to do is store and save an average over several frames (say 10) in order to get smoother data, leaving the remaining part of the code as it is (one possible approach is sketched after the code below).

    Here's the code:

    import java.io.File;
    import SimpleOpenNI.*;
    import java.util.*;
    SimpleOpenNI kinect;
    void setup()
    {
      size(640, 480);
      kinect = new SimpleOpenNI(this);
      kinect.enableDepth();
    }
    int precedente = millis();
    void draw()
    {
      kinect.update();
      PImage depthImage = kinect.depthImage();
      image(depthImage, 0, 0);
      int[] depthValues = kinect.depthMap();
      //depthValues = reverse(depthValues);
      StringBuilder sb = new StringBuilder();
      Deque<Integer> row = new LinkedList<Integer>();
      int kinectheight = 770; // kinect distance from the baselevel [mm]
      int scaleFactor = 1;
      int pixelsPerRow = 640;
      int pixelsToSkip = 40;
      int rowNum = 0;
      for (int i = 0; i < depthValues.length; i++) {
        if (i > 0 && i == (rowNum + 1) * pixelsPerRow) {
          fillStringBuilder(sb, row);
          rowNum++;
          sb.append("\n");
          row = new LinkedList<Integer>();
        }
        if (i >= (rowNum * pixelsPerRow) + pixelsToSkip) {
          row.addFirst((kinectheight - depthValues[i]) * scaleFactor);
        }
      }
      fillStringBuilder(sb, row);
      String kinectDEM = sb.toString();
      final String[] txt = new String[1]; // a string array of 1 element
      int savingtimestep = 15000;  // time step in millisec between each saving
      if (millis() > precedente + savingtimestep) {
        txt[0] = "ncols         600\nnrows         480\nxllcorner     0\nyllcorner     0\ncellsize      91.6667\nNODATA_value  10\n" +kinectDEM;
        saveStrings("kinectDEM0.tmp", txt);
        precedente = millis();
        //  delete the old .txt file, from kinectDEM1 to kinectDEMtrash
        File f = new File (sketchPath("kinectDEM1.txt"));
        boolean success = f.delete();
    
        //  rename the old .txt file, from kinectDEM0 to kinectDEM1
        File oldName1 = new File(sketchPath("kinectDEM0.txt"));
        File newName1 = new File(sketchPath("kinectDEM1.txt"));
        oldName1.renameTo(newName1);
        //  rename kinectDEM0.tmp file to kinectDEM0.txt
        File oldName2 = new File(sketchPath("kinectDEM0.tmp"));
        File newName2 = new File(sketchPath("kinectDEM0.txt"));
        oldName2.renameTo(newName2);
    
      }
    }
    void fillStringBuilder(StringBuilder sb, Deque<Integer> row) {
      boolean emptyRow = false;
      while (!emptyRow) {
        Integer val = row.pollFirst();
        if (val == null) {
          emptyRow = true;
        } else {
          sb.append(val);
          val = row.peekFirst();
          if (val != null) {
            sb.append(" ");
          }
        }
      }
    }
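
    A minimal sketch of one way to get that average: keep a ring buffer of the last N depth maps and average per pixel. The frameBuffer and averageDepth() names are mine, not from SimpleOpenNI; the frame size is whatever kinect.depthMap() returns.

    int N = 10;                          // number of frames to average
    int[][] frameBuffer = new int[N][];  // ring buffer of recent depth maps
    int bufIndex = 0;
    int framesSeen = 0;

    // call once per draw() with the current depth map;
    // returns the per-pixel average over the frames buffered so far
    int[] averageDepth(int[] depthValues) {
      frameBuffer[bufIndex] = depthValues.clone();
      bufIndex = (bufIndex + 1) % N;
      if (framesSeen < N) framesSeen++;

      int[] avg = new int[depthValues.length];
      for (int i = 0; i < avg.length; i++) {
        long sum = 0;
        for (int f = 0; f < framesSeen; f++) {
          sum += frameBuffer[f][i];
        }
        avg[i] = (int)(sum / framesSeen);
      }
      return avg;
    }

    With that in place, draw() only changes in one line, int[] depthValues = averageDepth(kinect.depthMap()); the StringBuilder and file-saving code can stay exactly as they are.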