Processing Forum
mbvcloud's Profile
11 Posts
8 Responses
0 Followers

    Can I have two layers of video, one that plays as the background image and one that plays in the OpenCV blob shapes? This is what I have so far. 

    Any suggestions are greatly appreciated! Thank you in advance.


    import hypermedia.video.*;
    import processing.video.*;

    OpenCV opencv;
    Movie mov;
    Movie movtwo;

    PImage movie1;
    PImage movie2;

    void setup() {

        size( 640, 480, P3D );
        mov = new Movie(this, "homepage.mov");
        mov.loop();
        
        movtwo = new Movie(this, "changehomepage.mov");
        movtwo.loop();

        // open video stream
        opencv = new OpenCV( this );
        opencv.capture( 640, 480 );

    }

    void movieEvent(Movie m) {
      m.read();
    }

    void draw() {

      
      background(255);
      image(mov, 0, 0);
     
        opencv.read();           // grab frame from camera
        opencv.threshold(40);    // set black & white threshold 

        // find blobs
        Blob[] blobs = opencv.blobs( 20, width*height/2, 200, true);

        // draw blob results
        for( int i=0; i<blobs.length; i++ ) {

            // stroke(255,255,255,10); // this works to fill the blob shapes with a color,
            // but I want to replace that fill with the second video

            beginShape();
            for( int j=0; j<blobs[i].points.length; j++ ) {
                vertex( blobs[i].points[j].x, blobs[i].points[j].y );
                vertex( 320, 160, 0 ); // extra fixed vertex pulls every edge back to one point
            }
            endShape(CLOSE);
        }

    }
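
    Edit: something like this is what I'm imagining for the blob fill (just an untested sketch on my part; it assumes the default textureMode(IMAGE), so texture coordinates are in pixels, and that "changehomepage.mov" is roughly the sketch's 640x480 size):

        // replace the blob-drawing loop with a textured version
        for( int i=0; i<blobs.length; i++ ) {
            noStroke();
            beginShape();
            texture(movtwo); // the second movie fills the shape
            for( int j=0; j<blobs[i].points.length; j++ ) {
                float x = blobs[i].points[j].x;
                float y = blobs[i].points[j].y;
                vertex( x, y, x, y ); // (x, y, u, v): sample the movie at the same spot
            }
            endShape(CLOSE);
        }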

    I need some help aligning the image from a video capture onto the screen correctly. I'm not sure whether it has anything to do with using P3D, as this is my first time trying it. I've changed every variable I can think of, but I can't get the field captured by my camera to fill the window; the image always sits in the bottom right corner. I started from an example that drew an image in each corner, commented out everything except the bottom right one, and now I can't figure out how to make the image fill the screen. I'm sure it's something simple that I'm missing. Changing the integers in translate() moves the image around, but I can't make it larger, and when I changed the initial w and h the program stopped displaying any image at all. If anyone could take a quick look I would appreciate it very much. I just want the image picked up by the camera to match the size and location of the program window. In the long run I want the detected blobs to draw a line from their position to a chosen point, which seems to be working, it just isn't centered correctly.

    thanks very much in advance if you can help !!!

    import hypermedia.video.*;
    import java.awt.*; // for the Rectangle and Point classes used with blobs
    OpenCV opencv;

    int w = 320;
    int h = 240;
    int threshold = 40;

    boolean find = true;

    void setup() {
      
      size( 600, 600, P3D ); // size of the program window; P3D enables 3D drawing
      //perspective(); I don't know if I need this
      opencv = new OpenCV (this);
      opencv.capture (w,h);
    }

    void draw() {
      background(255);
      
      opencv.read(); //read latest image from camera, save in memory
      //image (opencv.image(), 10, 10); //draw RGB image
      //image(opencv.image(OpenCV.GRAY), 20+w, 10); //draw grey image
      
      //image (opencv.image(OpenCV.MEMORY), 10, 20+h); // draw the image stored in memory
      
      opencv.absDiff(); // difference between the current image and the image in memory
      opencv.threshold(threshold); // binarize the difference
     // image (opencv.image(OpenCV.GRAY), 20+w, 20+h); // draw the absolute diff image
      
      Blob[] blobs = opencv.blobs(100, w*h/3, 200, true);//minarea/maxarea/max#/holes?
      noFill();
     pushMatrix(); // save the current coordinate system (restored by popMatrix below)
      translate(20+w,20+h); // shift the drawing origin; this is what pushes everything to the bottom right
      
      for( int i=0; i<blobs.length; i++) {//for all the blobs
      
    // Rectangle bounding = blobs[i].rectangle;
    // noFill();
     //rect( bounding.x, bounding.y, bounding.width, bounding.height);//draw the box around blobs
      
      float area = blobs[i].area;
      float circumference = blobs[i].length;
      Point centroid = blobs[i].centroid;
      Point[]points = blobs[i].points;
      stroke(0,0,100,50);//color of line to chosen point and transparency
     
     if (points.length>0) {
     beginShape();
     for(int j=0; j<points.length; j++) {
       vertex(points[j].x, points[j].y);// vertices of blobs
       vertex (160,0);//position of chosen point
     }
     endShape(CLOSE);
     }
      }
      popMatrix(); // restore the coordinate system saved by pushMatrix()
    }
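
    Edit: one thing I sketched out but haven't verified: since the capture is 320x240 and the window is 600x600, translate(20+w, 20+h) moves the origin to (340, 260), which would explain everything sitting in the bottom right. Scaling instead of translating should stretch the blob coordinates to fill the window:

        pushMatrix();
        scale( (float)width/w, (float)height/h ); // map 320x240 capture coords onto the 600x600 window
        // ...draw the blob shapes here using the raw blob coordinates...
        popMatrix();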
       
    Hi, I would like to ask if anyone has tips on how I could improve the speed of this program. The program drives a projection where viewers draw in front of the camera with colored objects and the colors are projected onto a wall. If there is no movement or color drawn by the viewers, the image fades to white. It works, but it's a little slow, so I was wondering if anyone had suggestions for improving the speed. Also, does playing the audio through the program slow it down a lot, or not really?

    thanks very much.


    import ddf.minim.*;
    import ddf.minim.signals.*;
    import ddf.minim.analysis.*;
    import ddf.minim.effects.*;

    import hypermedia.video.*;

    OpenCV opencv;
    PImage trailsImg;
    float threshold = 80f;
    Minim minim;
    AudioPlayer player;




    void setup() {
      
      // size(512, 200, P3D);
     // minim = new Minim(this);
      // load a file, give the AudioPlayer buffers that are 2048 samples long
     // player = minim.loadFile("soudofpaint.mp3", 2048);
      // play the file
    // player.shiftGain(0, 20, 2000);
    // player.loop();



        size( 800, 600 );
       

        // open video stream
        opencv = new OpenCV( this );
        opencv.capture( 800, 600 );
        trailsImg = new PImage (800, 600);
        background(255);
    }

    void draw() {

        opencv.read();                               // grab frame from camera
        //image( opencv.image(), 0, 0);                // show the original image
    PImage camImage;

        opencv.absDiff();   
        
        
        
        
        filter(INVERT);
    camImage = opencv.image();
     
        opencv.blur (OpenCV.BLUR, 3 );
        
      //  colorMode(HSB);
        trailsImg.blend( opencv.image(), 0, 0, 800, 600, 0, 0, 800, 600, SCREEN);
        
        image( trailsImg, 0,0 );             // display the result 
        
         
        
        filter(INVERT);
        
        opencv.copy (trailsImg);
        opencv.blur (OpenCV.BLUR, 4 );
        opencv.contrast (0);
    opencv.brightness (-10); // how fast the image fades; a larger negative value fades faster
        trailsImg = opencv.image();
        
         opencv.remember();  // store the actual image in memory
         
    }

    void keyPressed() {
        
        
    }
    void stop()
    {
      // always close Minim audio classes when you are done with them
     // player.close();
      // always stop Minim before exiting
     // minim.stop();
      
     // super.stop();
    }
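
    Edit: one idea I'm considering (untested): grab frames at a lower resolution and let image() scale the result up to the window, since most of the per-frame cost is per-pixel work. I gather Minim plays audio on its own thread, so it shouldn't slow the drawing much. A sketch with a hypothetical 400x300 grab:

        // in setup(): capture at quarter resolution
        opencv.capture( 400, 300 );
        trailsImg = new PImage( 400, 300 );

        // in draw(): blend at 400x300, upscale only when displaying
        trailsImg.blend( opencv.image(), 0, 0, 400, 300, 0, 0, 400, 300, SCREEN );
        image( trailsImg, 0, 0, width, height ); // image() does the upscaling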
    Hi, I'm working on this project for school. The sketch is projected onto a blank canvas on one side of the room, and the camera points at a white wall on the other side. In the room there are all sorts of colored objects for the user, such as paint brushes spray painted various colors. When someone moves a colored object in front of the camera, the sketch draws the line of their movement in that color onto the canvas, as if they were painting. If there is no movement, the image slowly fades back to white.

    It's basically working, but there are some things I would like to improve. The colors the sketch draws are mostly accurate to the color being moved in front of the camera, but sometimes they are not, and I'm not sure why. I would like to improve on that, and also, if the user draws a line with a yellow brush and then a blue brush on top of the yellow line, I would like the line to change to green where they overlap (as it would with real paint). So I think I need to blend the image of the movement with the image already on the screen, right? Any suggestions would be greatly appreciated. I have more details and pictures of the project here: http://www.rebeccavickers.com/pages/English/dejuredefacto.html

    The code so far:

    import processing.opengl.*;

    import ddf.minim.*;
    import ddf.minim.signals.*;
    import ddf.minim.analysis.*;
    import ddf.minim.effects.*;

    import hypermedia.video.*;

    OpenCV opencv;
    PImage trailsImg;
    float threshold = 80f;
    Minim minim;
    AudioPlayer player;




    void setup() {
      
      // size(512, 200, P3D);
      minim = new Minim(this);
      // load a file, give the AudioPlayer buffers that are 2048 samples long
      player = minim.loadFile("soudofpaint.mp3", 2048);
      // play the file
    // player.shiftGain(0, 20, 2000);
     player.loop();



        size( 800, 600 );
       

        // open video stream
        opencv = new OpenCV( this );
        opencv.capture( 800, 600 );
        trailsImg = new PImage (800, 600);
        background(255);
    }

    void draw() {

        opencv.read();                               // grab frame from camera
        //image( opencv.image(), 0, 0);                // show the original image
    PImage camImage;

        opencv.absDiff();   
        
        
        
        
        filter(INVERT);
    camImage = opencv.image();
     
        opencv.blur (OpenCV.BLUR, 3 );
        
      //  colorMode(HSB);
        trailsImg.blend( opencv.image(), 0, 0, 800, 600, 0, 0, 800, 600, SCREEN);
        
        image( trailsImg, 0,0 );             // display the result 
        
         
        
        filter(INVERT);
        
        opencv.copy (trailsImg);
        opencv.blur (OpenCV.BLUR, 4 );
        opencv.contrast (0);
        opencv.brightness (-2);
        trailsImg = opencv.image();
        
         opencv.remember();  // store the actual image in memory
         
    }

    void keyPressed() {
        
        
    }
    void stop()
    {
      // always close Minim audio classes when you are done with them
      player.close();
      // always stop Minim before exiting
      minim.stop();
      
      super.stop();
    }


    thank you!!!!!
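
    Edit: on the yellow+blue question, I've read that SCREEN blending is additive, so overlaps drift toward white instead of mixing like pigment, and that MULTIPLY is the usual cheap approximation of subtractive mixing (it darkens overlaps like layered ink). It isn't a real paint model though: pure yellow times pure blue multiplies to black, not green. The swap would just be:

        trailsImg.blend( opencv.image(), 0, 0, 800, 600, 0, 0, 800, 600, MULTIPLY );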
    Hi,

    How do I increase the Java heap space? I searched around and found some other questions about this in the forum, but unfortunately I didn't understand the responses. Do I need to put some code directly in my Processing sketch to set the minimum/maximum?
    One post said something about adding this: -Xms528m -Xmx1000m, but I don't know where I should add it.
    I'm using Mac OS X 10.6.6.

    Thanks very much. 
    appreciate it. 
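
    Edit: from what I've gathered since posting, -Xms and -Xmx set the JVM's initial and maximum heap, so they belong on the java launch command rather than in the sketch itself. Inside the Processing IDE the equivalent seems to be the "Increase maximum available memory" field in Preferences. For an exported sketch the command would look something like this (names hypothetical):

        java -Xms528m -Xmx1000m -cp MySketch.jar MySketch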
    Hi,
    I was wondering if someone could tell me why, in this sketch, the colors from my camera are sometimes inverted in the program and other times true to the color I am holding in front of the camera? I tried turning the invert filter on and off, but it didn't seem to make a difference.

    Thanks
      import processing.opengl.*;

      import ddf.minim.*;
      import ddf.minim.signals.*;
      import ddf.minim.analysis.*;
      import ddf.minim.effects.*;

      import hypermedia.video.*;

      OpenCV opencv;
      PImage trailsImg;
      float threshold = 80f;
      Minim minim;
      AudioPlayer player;




      void setup() {
       
        // size(512, 200, P3D);
        minim = new Minim(this);
        // load a file, give the AudioPlayer buffers that are 2048 samples long
        player = minim.loadFile("soudofpaint.mp3", 2048);
        // play the file
      // player.shiftGain(0, 20, 2000);
       player.loop();



          size( 1280, 1024 );
        

          // open video stream
          opencv = new OpenCV( this );
          opencv.capture( 1280, 1024 );
          trailsImg = new PImage (1280, 1024);
          background(255);
      }

      void draw() {

          opencv.read();                               // grab frame from camera
          //image( opencv.image(), 0, 0);                // show the original image
      PImage camImage;

          opencv.absDiff();  
         
         
         
         
          filter(INVERT);
      camImage = opencv.image();
       
          opencv.blur (OpenCV.BLUR, 3 );
         
        //  colorMode(HSB);
          trailsImg.blend( opencv.image(), 0, 0, 1280, 1024, 0, 0, 1280, 1024, SCREEN);
         
          image( trailsImg, 0,0 );             // display the result
         
          
         
          filter(INVERT);
         
          opencv.copy (trailsImg);
          opencv.blur (OpenCV.BLUR, 4 );
          opencv.contrast (0);
          opencv.brightness (-4);
          trailsImg = opencv.image();
         
           opencv.remember();  // store the actual image in memory
          
      }

      void keyPressed() {
         
         
      }
      void stop()
      {
        // always close Minim audio classes when you are done with them
        player.close();
        // always stop Minim before exiting
        minim.stop();
       
        super.stop();
      }
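
    Edit: a debugging idea I might try (untested): draw the remembered frame and the current processed frame side by side, shrunk, before the INVERT filters run, to see at which step the colors flip.

          // temporary, near the top of draw() after opencv.read():
          image( opencv.image(OpenCV.MEMORY), 0, 0, 320, 256 ); // remembered frame
          image( opencv.image(), 320, 0, 320, 256 );            // current frame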
    I just want to make the sound fade out as the pixels get closer to white, and fade back in as there is more color (movement).


    THANKS.
      import processing.opengl.*;

      import ddf.minim.*;
      import ddf.minim.signals.*;
      import ddf.minim.analysis.*;
      import ddf.minim.effects.*;

      import hypermedia.video.*;

      OpenCV opencv;
      PImage trailsImg;
      float threshold = 80f;
      Minim minim;
      AudioPlayer player;



      void setup() {
       
        // size(512, 200, P3D);
        minim = new Minim(this);
        // load a file, give the AudioPlayer buffers that are 2048 samples long
        player = minim.loadFile("01 Space Maker 1.mp3", 2048);
        // play the file
        player.loop();



          size( 800, 600 );
        

          // open video stream
          opencv = new OpenCV( this );
          opencv.capture( 800, 600 );
          trailsImg = new PImage (800, 600);
      }

      void draw() {

          opencv.read();                               // grab frame from camera
          //image( opencv.image(), 0, 0);                // show the original image
      PImage camImage;

          opencv.absDiff();  
         
         
         
         
          filter(INVERT);
      camImage = opencv.image();
       
          opencv.blur (OpenCV.BLUR, 3 );
         
         
          trailsImg.blend( opencv.image(), 0, 0, 800, 600, 0, 0, 800, 600, SCREEN);
         
          image( trailsImg, 0,0 );             // display the result
         
         // opencv.read(); // not needed; read() already runs at the top of draw()
          // estimate how much color/motion is on screen by averaging
          // the per-pixel brightness of the difference image
          camImage.loadPixels();
          float colorSum = 0;
          int imgSize = opencv.height * opencv.width;
          for (int i = 0; i < imgSize; i++) {
            color currColor = camImage.pixels[i];
            // extract the red, green, and blue components (like red(), but faster)
            int currR = (currColor >> 16) & 0xFF;
            int currG = (currColor >> 8) & 0xFF;
            int currB = currColor & 0xFF;
            colorSum += currR + currG + currB;
          }
          colorSum /= imgSize; // average per pixel: 0 (black) .. 765 (white)

          // note: calling shiftGain() every frame restarts the ramp; gating it on
          // a state change would make the fade smoother (threshold may need tuning)
          if (colorSum > 200) player.shiftGain(0, -40, 5000); // mostly white: fade out
          else player.shiftGain(-40, 0, 5000); // color present: fade back in
         
         
          filter(INVERT);
         
          opencv.copy (trailsImg);
          opencv.blur (OpenCV.BLUR, 4 );
          opencv.contrast (0);
          opencv.brightness (-2);
          trailsImg = opencv.image();
         
           opencv.remember();  // store the actual image in memory
      }

      void keyPressed() {
         
         
      }
      void stop()
      {
        // always close Minim audio classes when you are done with them
        player.close();
        // always stop Minim before exiting
        minim.stop();
       
        super.stop();
      }
    Hi,
    I am working on this project for school. It is pretty much working and doing what I want it to do, but I thought I would post it here to ask for suggestions on how I could improve it. This is pretty much my first project, so I'm sure there are plenty of things I could have written differently that might work better. I am in a painting class, and this program will use a camera to capture my classmates' motions and colors and project them onto a blank canvas, so they will be creating their own painting with their movements (and creating my painting assignment for me! haha). I plan to have paintbrushes spray painted in different colors available so they can draw in the color they want. The image gradually resets itself to white when there is no motion. Any suggestions at all are very much appreciated! Also, I am wondering if anyone has suggestions for how I could add sound? I want there to be sound only when there is motion.
    Thanks very much.
    Oh, and also: how do I flip the final image so it is like a mirror? Thanks!!!!
      import hypermedia.video.*;

      OpenCV opencv;
      PImage trailsImg;
      float threshold = 80f;

      void setup() {

          size( 800, 600 );
        

          // open video stream
          opencv = new OpenCV( this );
          opencv.capture( 800, 600 );
          trailsImg = new PImage (800, 600);
      }

      void draw() {

          opencv.read();                               // grab frame from camera
          //image( opencv.image(), 0, 0);                // show the original image
      PImage camImage;

          opencv.absDiff();   // take the difference between the current image and the image in memory
          filter(INVERT);
      camImage = opencv.image();
       
         
          opencv.blur (OpenCV.BLUR, 3 );
          //opencv.threshold (20);
         
          trailsImg.blend( opencv.image(), 0, 0, 800, 600, 0, 0, 800, 600, SCREEN);
         
          image( trailsImg, 0,0 );             // display the result
          filter(INVERT);
         
          opencv.copy (trailsImg);
          opencv.blur (OpenCV.BLUR, 4 );
          opencv.contrast (0);
          opencv.brightness (-2);
          trailsImg = opencv.image();
         
           opencv.remember();  // store the actual image in memory
      }

      void keyPressed() {
         
         
      }
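
    Edit: for the mirror, I think flipping the x axis before drawing the final image would work (untested):

          // draw trailsImg mirrored: flip x, then shift back into view
          pushMatrix();
          translate(width, 0);
          scale(-1, 1);
          image( trailsImg, 0, 0 );
          popMatrix();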

    Hi,

    I'm trying to create a project where the user's motion is detected and drawn on the screen. I have found many very helpful resources on this forum and at openProcessing.org, specifically the optical flow project by Hidetoshi Shimodaira. Thanks very much for such a valuable resource. Below is the code I have so far. There are two things I would like to change, if possible, and I'm looking for suggestions. I like how the vectors show the direction of movement, but ideally I would like them displayed in the color of the corresponding pixel in the camera feed, so that if, for example, someone in a red shirt moves in front of the camera, the vectors show up red where they moved. If that makes sense? And finally, I would like to add a blur to the final image. Can I do that? Please excuse the excessive amount of //comments in my code; I'm really new to this and it helps me keep track of what I'm looking at. :)




      int wscreen=640; // app window width
      int hscreen=480;//app window height
      int gs=5; //number of pixels per grid step
      float predsec=1.0; //larger number makes longer vector line

      import processing.opengl.*;
      import processing.video.*;//initialize camera
      Capture video;//capture video image
      int fps = 30; //frames per second
      color[] vline;


      //parameters for the grid
      int as=gs*2; //area of window for averaging pixels
      int gw=wscreen/gs;//width of screen/number of grid
      int gh=hscreen/gs;//height of screen/number of grid
      int gs2=gs/2;//grid/2
      float df=predsec*fps;//predsec*frames per second

      //regression vectors
      float[] fx, fy, ft;
      int fm=3*9; // length of the vectors

      // regularization term for regression
      float fc=pow(10,8); // larger values for noisy video

      // smoothing parameters
      float wflow=0.04; // smaller value for longer lasting vectors


      boolean flagmirror=true; // mirroring image
      boolean flagflow=true; // draw opticalflow vectors
      boolean flagimage=false; // show video image
      boolean flagseg=false; // segmentation of moving objects

      // internally used variables
      float ar,ag,ab; // used as return value of pixave
      float[] dtr, dtg, dtb; // differentiation by t (red, green, blue)
      float[] dxr, dxg, dxb; // differentiation by x (red, green, blue)
      float[] dyr, dyg, dyb; // differentiation by y (red, green, blue)
      float[] par, pag, pab; // averaged grid values (red, green, blue)
      float[] flowx, flowy; // computed optical flow
      float[] sflowx, sflowy; // slowly changing version of the flow
      int clockNow,clockPrev, clockDiff; // for timing check

      void setup() {
        // screen and video
        size(wscreen, hscreen, P2D);
        video = new Capture(this, wscreen, hscreen, fps);

        rectMode(CENTER); // rect() coordinates are interpreted as center point plus size

        // arrays
        par = new float[gw*gh];
        pag = new float[gw*gh];
        pab = new float[gw*gh];
        dtr = new float[gw*gh];
        dtg = new float[gw*gh];
        dtb = new float[gw*gh];
        dxr = new float[gw*gh];
        dxg = new float[gw*gh];
        dxb = new float[gw*gh];
        dyr = new float[gw*gh];
        dyg = new float[gw*gh];
        dyb = new float[gw*gh];
        flowx = new float[gw*gh];
        flowy = new float[gw*gh];
        sflowx = new float[gw*gh];
        sflowy = new float[gw*gh];
        fx = new float[fm];
        fy = new float[fm];
        ft = new float[fm];
        vline = new color[wscreen];
      }
      // calculate average pixel value (r,g,b) for rectangle region
      void pixave(int x1, int y1, int x2, int y2) {
        float sumr,sumg,sumb;
        color pix;
        int r,g,b;
        int n;

        if(x1<0) x1=0;
        if(x2>=wscreen) x2=wscreen-1;
        if(y1<0) y1=0;
        if(y2>=hscreen) y2=hscreen-1;

        sumr=sumg=sumb=0.0;
        for(int y=y1; y<=y2; y++) {
          for(int i=wscreen*y+x1; i<=wscreen*y+x2; i++) {
            pix=video.pixels[i];
            b=pix & 0xFF; // blue
            pix = pix >> 8;
            g=pix & 0xFF; // green
            pix = pix >> 8;
            r=pix & 0xFF; // red
            // averaging the values
            sumr += r;
            sumg += g;
            sumb += b;
          }
        }
        n = (x2-x1+1)*(y2-y1+1); // number of pixels
        // the results are stored in static variables
        ar = sumr/n;
        ag=sumg/n;
        ab=sumb/n;
      }

      // extract values from 9 neighbour grids
      void getnext9(float x[], float y[], int i, int j) {
        y[j+0] = x[i+0];
        y[j+1] = x[i-1];
        y[j+2] = x[i+1];
        y[j+3] = x[i-gw];
        y[j+4] = x[i+gw];
        y[j+5] = x[i-gw-1];
        y[j+6] = x[i-gw+1];
        y[j+7] = x[i+gw-1];
        y[j+8] = x[i+gw+1];
      }

      // solve optical flow by least squares (regression analysis)
      void solveflow(int ig) {
        float xx, xy, yy, xt, yt;
        float a,u,v,w;

        // prepare covariances
        xx=xy=yy=xt=yt=0.0;
        for(int i=0;i<fm;i++) {
          xx += fx[i]*fx[i];
          xy += fx[i]*fy[i];
          yy += fy[i]*fy[i];
          xt += fx[i]*ft[i];
          yt += fy[i]*ft[i];
        }

        // least squares computation
        a = xx*yy - xy*xy + fc; // fc is for stable computation
        u = yy*xt - xy*yt; // x direction
        v = xx*yt - xy*xt; // y direction

        // write back
        flowx[ig] = -2*gs*u/a; // optical flow x (pixel per frame)
        flowy[ig] = -2*gs*v/a; // optical flow y (pixel per frame)
      }

      void draw() {
        if(video.available()) {
          // video capture
          video.read();
      video.loadPixels();
          // mirror
          if(flagmirror) {
            for(int y=0;y<hscreen;y++) {
              int ig=y*wscreen;
              for(int x=0; x<wscreen; x++)
                vline[x] = video.pixels[ig+x];
              for(int x=0; x<wscreen; x++)
                video.pixels[ig+x]=vline[wscreen-1-x];
            }
          }

          // draw image
          if(flagimage) set(0,0,video);
          else background(255);

          // 1st sweep : differentiation by time
          for(int ix=0;ix<gw;ix++) {
            int x0=ix*gs+gs2;
            for(int iy=0;iy<gh;iy++) {
              int y0=iy*gs+gs2;
              int ig=iy*gw+ix;
              // compute average pixel at (x0,y0)
              pixave(x0-as,y0-as,x0+as,y0+as);
              // compute time difference
              dtr[ig] = ar-par[ig]; // red
              dtg[ig] = ag-pag[ig]; // green
              dtb[ig] = ab-pab[ig]; // blue
              // save the pixel
              par[ig]=ar;
              pag[ig]=ag;
              pab[ig]=ab;
            }
          }

          // 2nd sweep : differentiations by x and y
          for(int ix=1;ix<gw-1;ix++) {
            for(int iy=1;iy<gh-1;iy++) {
              int ig=iy*gw+ix;
              // compute x difference
              dxr[ig] = par[ig+1]-par[ig-1]; // red
              dxg[ig] = pag[ig+1]-pag[ig-1]; // green
              dxb[ig] = pab[ig+1]-pab[ig-1]; // blue
              // compute y difference
              dyr[ig] = par[ig+gw]-par[ig-gw]; // red
              dyg[ig] = pag[ig+gw]-pag[ig-gw]; // green
              dyb[ig] = pab[ig+gw]-pab[ig-gw]; // blue
            }
          }

          // 3rd sweep : solving optical flow
          for(int ix=1;ix<gw-1;ix++) {
            int x0=ix*gs+gs2;
            for(int iy=1;iy<gh-1;iy++) {
              int y0=iy*gs+gs2;
              int ig=iy*gw+ix;

              // prepare vectors fx, fy, ft
              getnext9(dxr,fx,ig,0); // dx red
              getnext9(dxg,fx,ig,9); // dx green
              getnext9(dxb,fx,ig,18);// dx blue
              getnext9(dyr,fy,ig,0); // dy red
              getnext9(dyg,fy,ig,9); // dy green
              getnext9(dyb,fy,ig,18);// dy blue
              getnext9(dtr,ft,ig,0); // dt red
              getnext9(dtg,ft,ig,9); // dt green
              getnext9(dtb,ft,ig,18);// dt blue

              // solve for (flowx, flowy) such that
              // fx flowx + fy flowy + ft = 0
              solveflow(ig);

              // smoothing
              sflowx[ig]+=(flowx[ig]-sflowx[ig])*wflow;
              sflowy[ig]+=(flowy[ig]-sflowy[ig])*wflow;
            }
          }


          // 4th sweep : draw the flow
          if(flagseg) {
            noStroke();
            fill(0);
            for(int ix=0;ix<gw;ix++) {
              int x0=ix*gs+gs2;
              for(int iy=0;iy<gh;iy++) {
                int y0=iy*gs+gs2;
                int ig=iy*gw+ix;

                float u=df*sflowx[ig];
                float v=df*sflowy[ig];


                float a=sqrt(u*u+v*v);
                if(a<2.0) rect(x0,y0,gs,gs);
              }
            }
          }

          // 5th sweep : draw the flow
          if(flagflow) {
            for(int ix=0;ix<gw;ix++) {
              int x0=ix*gs+gs2;
              for(int iy=0;iy<gh;iy++) {
                int y0=iy*gs+gs2;
                int ig=iy*gw+ix;

                float u=df*sflowx[ig];
                float v=df*sflowy[ig];

                // draw the line segments for optical flow
                float a=sqrt(u*u+v*v);
                if(a>=2.0) { // draw only if the length >=2.0
                  float r=0.5*(1.0+u/(a+0.1));
                  float g=0.5*(1.0+v/(a+0.1));
                  float b=0.5*(2.0-(r+g));

                  //  stroke(r,g,b);
                  stroke(255*r, 255*b, 255*g); // color of line: this is what I want to change,
                  // but I don't know what I should write to get the pixels of the camera input
                  line(x0,y0,x0+u,y0+v);//location of line beginning and end
                 
           
                }
              }
            }
          }
        }
      }


    Any assistance or tips towards potentially helpful tutorials would be greatly appreciated.

    THANKS!!
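
    Edit: two things I'm imagining for the color and the blur (untested guesses on my part). The averaged grid colors are already stored in par/pag/pab, so maybe the vector at grid ig could be stroked with those instead of the direction-based color; and filter(BLUR, 1) at the end of draw() should blur whatever is on screen:

          // in the 5th sweep, instead of the direction-based stroke:
          stroke( par[ig], pag[ig], pab[ig] ); // averaged camera color at this grid cell
          line( x0, y0, x0+u, y0+v );

          // and at the very end of draw(), after all the sweeps:
          filter(BLUR, 1); // soften the final image (a bigger radius is slower)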
    Hi,

    I'm working on my first Processing project and would like to ask for some advice.
    Thanks very much for all the great and helpful information posted in this forum.
    I am using the flob library and one of its examples to detect blobs and movement. So far so good, but instead of having the detected movement drive the moving white squares, I would ideally like to have a background image and have the detected movement push the pixels of the image around. The changed pixels should then stay where they were moved until moved again. Sort of like finger painting, I guess.

    Here's what I am using right now. Some things from the example file I started with weren't needed, so I just commented them out for now; sorry for the messiness. Like I said, this is my first try, so any advice on the best way to approach my idea would be greatly appreciated.

    Thanks in advance,
    Rebecca


     

    import processing.opengl.*;
    import processing.video.*;
    import s373.flob.*;


    Capture video;  
    Flob flob;      
    ArrayList blobs;

    PSys psys;

    int tresh = 20;   // adjust threshold value here, or with keys t/T
    int fade = 25;
    int om = 1;
    int videores=128;
    String info="";
    PFont font;
    float fps = 60;
    // videotex selects which image flob draws:
    // 0 = videoimg, 1 = videotexbin, 2 = videotexmotion, 3 = videoteximgmotion
    int videotex = 2;



    void setup() {
      // osx quicktime bug 882 processing 1.0.1
      try {
        quicktime.QTSession.open();
      }
      catch (quicktime.QTException qte) {
        qte.printStackTrace();
      }

      size(1024,512,OPENGL);
      frameRate(fps);
      rectMode(CENTER);
      // init video data and stream
      video = new Capture(this, videores, videores, (int)fps); 
      flob = new Flob(videores, videores, width, height);

      flob.setThresh(tresh).setSrcImage(videotex)
      .setBackground(video).setBlur(0).setOm(1).setFade(fade)
      .setMirror(true,false);

      font = createFont("monaco",10);
      textFont(font);

      psys = new PSys(1000); // number of particles
      stroke(200,200); // stroke color and alpha for the particles
      strokeWeight(25); // stroke thickness (this is what makes the fat "squares")
     
    }



    void draw() {

      if(video.available()) {
         video.read();
         blobs = flob.calc(flob.binarize(video));
      }

      image(flob.getSrcImage(), 0, 0, width, height);

      rectMode(CENTER);

      int numblobs = blobs.size();
      for(int i = 0; i < numblobs; i++) {
        ABlob ab = (ABlob)flob.getABlob(i);
        psys.touch(ab);

        //box
       // fill(0,0,255,100);
       // rect(ab.cx,ab.cy,ab.dimx,ab.dimy);
        //centroid
       // fill(0,255,0,200);
       // rect(ab.cx,ab.cy, 5, 5);
       // info = ""+ab.id+" "+ab.cx+" "+ab.cy;
       // text(info,ab.cx,ab.cy+20);
      }

      psys.go();
      psys.draw();

      //report presence graphically
     // fill(255,152,255);
     // rectMode(CORNER);
     // rect(5,5,flob.getPresencef()*width,10);
      //String stats = ""+frameRate+" flob.numblobs: "+numblobs+" flob.thresh:"+tresh+
       // " <t/T>"+" flob.fade:"+fade+"   <f/F>"+" flob.om:"+flob.getOm()+
      //  " flob.image:"+videotex+" flob.presence:"+flob.getPresencef();
      //fill(0,255,0);
      //text(stats,5,25);
    }


    void keyPressed() {

      if (key=='S')
        video.settings();
      if (key=='i') { 
        videotex = (videotex+1)%4;
        flob.setImage(videotex);
      }
      if(key=='t') {
        flob.setTresh(tresh--);
      }
      if(key=='T') {
        flob.setTresh(tresh++);
      }  
      if(key=='f') {
        flob.setFade(fade--);
      }
      if(key=='F') {
        flob.setFade(fade++);
      }  
      if(key=='o') {
        om=(om +1) % 3;
        flob.setOm(om);
      }  
      if(key==' ') //space clear flob.background
        flob.setBackground(video);
    }



    and the Psys:

    float drag = 0.957;
    class Part {
      float x, y, vx, vy, ax, ay;
      float px,py,force;

      Part() {
        x = random(width);
        y = random(height);
        vx = random(-2,2);
        vy = random(-2,2);
        force = random(-2,2);
        px = x;
        py = y;
      }
      void go() {
        vx += ax;
        vy += ay;
        vx *= drag;
        vy *= drag;
        px = x;
        py = y;
        x+=vx;
        y+=vy;
        ax = 0;
        ay = 0;
        bounds();
      }
     
      void bounds(){
        boolean c = false;
        if(x>width){
          x-=width;
          c = true;
        }
        if(x<0){
          c = true;
          x+=width;
        }
        if(y>height){
          c = true;
          y-=height;
        }
        if(y<0){
          y+=height;
          c = true;
        }
        if(c){
          px = x;
          py = y;
        }
      }

      void draw() {
        line(px,py,x,y);
      }
      void touch(ABlob ab) {
        float dx = ab.cx - x;
        float dy = ab.cy - y;
        float d = sqrt(dx*dx+dy*dy);
        if(d > 0 && d < 200) {
          d = 1.0f/d * force;
          dx *= d;
          dy *= d;
          ax += dx;
          ay += dy;
        }
      }
    }


    class PSys {
      Part p[];

      PSys (int num) {
        p = new Part[num];
        for(int i=0;i<p.length;i++)
          p[i] = new Part();
      }

      void go() {   
        for(int i=0; i<p.length;i++)
          p[i].go();
      }
      void draw() {
        for(int i=0; i<p.length;i++)
          p[i].draw();
      }
      void touch(ABlob ab) {
        for(int i=0; i<p.length;i++)
          p[i].touch(ab);
      }
    }
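
    Edit: a rough idea I'm toying with for the finger-painting effect (untested; the image name is made up): load a background image in setup() and have each particle stroke itself with the image pixel under its current position, so the motion appears to smear the picture. For the smears to persist I'd also have to stop repainting flob's source image every frame, or draw into an offscreen buffer.

        // at the top of the sketch: PImage src;
        // in setup(): src = loadImage("background.jpg"); // hypothetical file

        // replacement for Part.draw(), sampling the image color at the particle:
        void draw() {
          int ix = constrain( (int)x, 0, src.width-1 );
          int iy = constrain( (int)y, 0, src.height-1 );
          stroke( src.get(ix, iy) ); // drag this pixel's color along the motion path
          line( px, py, x, y );
        }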

    HI,

    I am completely new to Processing. I am working on an interactive projection, and I need a live video camera feed to capture stills of the viewers participating in the projection. A really generous individual was kind enough to send me the code below, which accomplishes this. It works perfectly, but it requires pressing the 's' key to capture the image. I am wondering if there is any way to have the image capture happen at a set time interval instead, so the viewer wouldn't have to press any keys. If anyone could take a look and give me any suggestions or help me out with this, I would really appreciate it.

    Thanks much,
    mbvcloud

    the code:

    import processing.video.*;
    import javax.swing.*;

    //--change these settings if you wish
    int w = 640;
    int h = 480;

    String folder = "/Users/" + System.getProperty("user.name") + "/Desktop/timeLapse4Md8";


    //--do not touch after that
    Capture cam;
    NumberFormat nf = NumberFormat.getInstance();
    JFileChooser fc;

    int count = 0;
    String num;

    PImage overlay;
    boolean showOverlay = false;
    boolean overlayState = false;
    boolean saveProcess = false;

    //--setup
    void setup() {
      size(w, h);

      cam = new Capture(this, w, h);

      //--manage saved files
      fc = new JFileChooser();
      fc.setFileSelectionMode(JFileChooser.DIRECTORIES_ONLY);
      int returnVal = fc.showOpenDialog(this);
      if (returnVal == JFileChooser.APPROVE_OPTION) {
        File file = fc.getSelectedFile();
        folder = file.getAbsolutePath();
      }
     
      File dir = new File(folder);

      if (dir.exists()) {
        String[] allFiles = dir.list();
        count = allFiles.length;
      }
      else {
        if (dir.mkdir()) println("Directory created");
      }

      num = formatCount(count-1);
    }

    //--draw
    void draw() {
      if (cam.available()) {
        cam.read();
        image(cam, 0, 0);
      }

      if (showOverlay) {
        blend(overlay, 0, 0, w, h, 0, 0, w, h, ADD);
      }

      if (saveProcess) saveImage();
    }

    //--key events
    void keyPressed() {
      if (key == 's') saveProcess();
      else if (key == 'l') toggleOverlayImage();
      else if (key == 'c') camSettings();
    }

    //--other functions
    //--run saving process and check if overlay has to be hidden
    void saveProcess() {
      overlayState = showOverlay;
      showOverlay = false;
      saveProcess = true;
    }

    //--save frame
    void saveImage() {
      num = formatCount(count);
      saveFrame(folder + "/timeLapse-" + num + ".jpg");
      //println("Frame # " + num + " saved"); 
      count++;

      saveProcess = false;
      if (overlayState) toggleOverlayImage();
    }

    //--format the file number with 5 digits
    String formatCount(int num) {
      nf.setMinimumIntegerDigits(5);
      nf.setMaximumIntegerDigits(5);
      nf.setGroupingUsed(false);
      return nf.format(num);
    }

    //--reload and overlay the last image to be captured
    void toggleOverlayImage() {
      if (count > 0) {
        if (!showOverlay) overlay = loadImage(folder + "/timeLapse-" + num + ".jpg");
        if (showOverlay) showOverlay = false;
        else showOverlay = true;
      }
    }

    //--display camera preferences
    void camSettings() {
      cam.settings();
    }
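
    Edit: I'm wondering if something like this would work for the timer (untested): track millis() and call the existing saveProcess() from draw(), say every 10 seconds.

        // near the top of the sketch:
        int captureInterval = 10000; // hypothetical: 10 seconds between stills
        int lastCapture = 0;

        // at the end of draw():
        if (millis() - lastCapture >= captureInterval) {
          saveProcess(); // reuse the existing save routine
          lastCapture = millis();
        }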