How can I use webcam input for OpenGL fish-eye output?
in
Contributed Library Questions
•
15 days ago
Hi. This is my first question on this forum! I'm working on a project that creates a
fish-eye effect on screen, which changes depending on
input from webcam blob detection.
I'm working with Processing 1.5.1 and the GLGraphics library's fish-eye output example ( http://glgraphics.sourceforge.net/examples/Output/FishEye/FishEye.pde ). It is hard to use both the fish-eye rendering and webcam video capture together, because the webcam capture needs the canvas for its input and the fish-eye rendering needs it as well. I'm using blob detection from v3ga ( http://www.v3ga.net/processing/BlobDetection/ ).
The two sketches work well separately, but I don't know how to combine them for my purpose. Can anyone help with this? Also, if any exist, I would like to know more about fish-eye effects in Processing — are there other libraries?
1) Fish-eye
It's at this link: http://glgraphics.sourceforge.net/examples/Output/FishEye/FishEye.pde
2) Video capture with blob detection
- // - Super Fast Blur v1.1 by Mario Klingemann <http://incubator.quasimondo.com>
- // - BlobDetection library
- import processing.video.*;
- import blobDetection.*;
- Capture cam;
- BlobDetection theBlobDetection;
- PImage img;
- boolean newFrame=false;
- // ==================================================
- // setup()
- // ==================================================
- void setup()
- {
- // Size of applet
- size(640, 480);
- // Capture
- cam = new Capture(this, 40*4, 30*4, 15);
- // Comment the following line if you use Processing 1.5
- // BlobDetection
- // img which will be sent to detection (a smaller copy of the cam frame);
- img = new PImage(80,60);
- theBlobDetection = new BlobDetection(img.width, img.height);
- theBlobDetection.setPosDiscrimination(true);
- theBlobDetection.setThreshold(0.2f); // will detect bright areas whose luminosity > 0.2f;
- }
- // ==================================================
- // captureEvent()
- // ==================================================
/**
 * Processing video-library callback: fired whenever the webcam has a
 * new frame available. Pulls the frame into cam and raises a flag so
 * draw() knows to process it.
 */
void captureEvent(Capture cam)
{
  cam.read();       // copy the fresh frame out of the capture device
  newFrame = true;  // consumed (and cleared) by draw()
}
- // ==================================================
- // draw()
- // ==================================================
- void draw()
- {
- if (newFrame)
- {
- newFrame=false;
- image(cam,0,0,width,height);
- img.copy(cam, 0, 0, cam.width, cam.height,
- 0, 0, img.width, img.height);
- fastblur(img, 2);
- theBlobDetection.computeBlobs(img.pixels);
- drawBlobsAndEdges(true,true);
- }
- }
- // ==================================================
- // drawBlobsAndEdges()
- // ==================================================
- void drawBlobsAndEdges(boolean drawBlobs, boolean drawEdges)
- {
- noFill();
- Blob b;
- EdgeVertex eA,eB;
- for (int n=0 ; n<theBlobDetection.getBlobNb() ; n++)
- {
- b=theBlobDetection.getBlob(n);
- if (b!=null)
- {
- // Edges
- if (drawEdges)
- {
- strokeWeight(3);
- stroke(0,255,0);
- for (int m=0;m<b.getEdgeNb();m++)
- {
- eA = b.getEdgeVertexA(m);
- eB = b.getEdgeVertexB(m);
- if (eA !=null && eB !=null)
- line(
- eA.x*width, eA.y*height,
- eB.x*width, eB.y*height
- );
- }
- }
- // Blobs
- if (drawBlobs)
- {
- strokeWeight(1);
- stroke(255,0,0);
- rect(
- b.xMin*width,b.yMin*height,
- b.w*width,b.h*height
- );
- ellipse(b.xMin*width/2+(b.w*width+b.xMin*width)/2, b.yMin*height/2+(b.h*height+b.yMin*height)/2,10,10);
- println("NEW blob");
- println("x of blob =" + b.xMin*width/2+(b.w*width+b.xMin*width)/2);
- println("y of blob =" + b.yMin*height/2+(b.h*height+b.yMin*height)/2);
- }
- }
- }
- }
- // ==================================================
- // Super Fast Blur v1.1
- // by Mario Klingemann
- // <http://incubator.quasimondo.com>
- // ==================================================
/**
 * Super Fast Blur v1.1 by Mario Klingemann <http://incubator.quasimondo.com>
 *
 * Approximate box blur applied in-place to img.pixels, done as two
 * separable sliding-window passes (horizontal, then vertical) so the
 * cost per pixel is constant regardless of radius. Edge pixels are
 * clamped (repeated) rather than wrapped. Alpha is forced to opaque
 * in the output. A radius below 1 is a no-op.
 *
 * NOTE(review): img.pixels is read and written directly; the caller is
 * expected to have a valid pixels[] array (no updatePixels() here).
 */
void fastblur(PImage img,int radius)
{
  if (radius<1){
    return;
  }
  int w=img.width;
  int h=img.height;
  int wm=w-1;              // last valid x index (edge clamp)
  int hm=h-1;              // last valid y index (edge clamp)
  int wh=w*h;              // total pixel count
  int div=radius+radius+1; // box kernel width
  // Intermediate per-pixel channel values produced by the horizontal pass.
  int r[]=new int[wh];
  int g[]=new int[wh];
  int b[]=new int[wh];
  int rsum,gsum,bsum,x,y,i,p,p1,p2,yp,yi,yw;
  // Clamped entering/leaving window offsets; computed once on the first
  // row (or column) and reused for all the others.
  int vmin[] = new int[max(w,h)];
  int vmax[] = new int[max(w,h)];
  int[] pix=img.pixels;
  // Division lookup table: dv[s] == s/div for every reachable channel sum,
  // trading memory for avoiding an integer divide per pixel per channel.
  int dv[]=new int[256*div];
  for (i=0;i<256*div;i++){
    dv[i]=(i/div);
  }
  yw=yi=0;
  // --- Horizontal pass: sliding box sum along each row. ---
  for (y=0;y<h;y++){
    rsum=gsum=bsum=0;
    // Prime the window for x==0, clamping indices to the row edges.
    for(i=-radius;i<=radius;i++){
      p=pix[yi+min(wm,max(i,0))];
      rsum+=(p & 0xff0000)>>16;
      gsum+=(p & 0x00ff00)>>8;
      bsum+= p & 0x0000ff;
    }
    for (x=0;x<w;x++){
      // Averaged channel values for this pixel.
      r[yi]=dv[rsum];
      g[yi]=dv[gsum];
      b[yi]=dv[bsum];
      if(y==0){
        // Same clamped offsets apply to every row; compute once.
        vmin[x]=min(x+radius+1,wm);
        vmax[x]=max(x-radius,0);
      }
      p1=pix[yw+vmin[x]];  // pixel entering the window
      p2=pix[yw+vmax[x]];  // pixel leaving the window
      // The masked values are multiples of the channel's LSB position,
      // so subtracting before the arithmetic shift still yields the
      // correct signed per-channel delta.
      rsum+=((p1 & 0xff0000)-(p2 & 0xff0000))>>16;
      gsum+=((p1 & 0x00ff00)-(p2 & 0x00ff00))>>8;
      bsum+= (p1 & 0x0000ff)-(p2 & 0x0000ff);
      yi++;
    }
    yw+=w;
  }
  // --- Vertical pass: sliding box sum down each column, writing the
  // final opaque ARGB pixel from the horizontally blurred channels. ---
  for (x=0;x<w;x++){
    rsum=gsum=bsum=0;
    yp=-radius*w;
    // Prime the window for y==0, clamping to the top row.
    for(i=-radius;i<=radius;i++){
      yi=max(0,yp)+x;
      rsum+=r[yi];
      gsum+=g[yi];
      bsum+=b[yi];
      yp+=w;
    }
    yi=x;
    for (y=0;y<h;y++){
      pix[yi]=0xff000000 | (dv[rsum]<<16) | (dv[gsum]<<8) | dv[bsum];
      if(x==0){
        // Row offsets (pre-multiplied by w) of the entering/leaving
        // pixels; same for every column, computed once.
        vmin[y]=min(y+radius+1,hm)*w;
        vmax[y]=max(y-radius,0)*w;
      }
      p1=x+vmin[y];  // index entering the window
      p2=x+vmax[y];  // index leaving the window
      rsum+=r[p1]-r[p2];
      gsum+=g[p1]-g[p2];
      bsum+=b[p1]-b[p2];
      yi+=w;
    }
  }
}
1