3d face tracking - smoothing data
in
Contributed Library Questions
•
2 years ago
hey there, as with my previous posts... I'm still a beginner trying to slowly figure out Processing. I'm
building a sketch for a 3D head-tracking piece. I'm using OpenCV face tracking to control objects that
move in 3D space relative to head position. Using other bits of code and examples, I've managed to get
a simple example working; what I'm really noticing is the jumpiness of the readings.
I'm trying to work out how the data can be smoothed; I've been trying to figure out how I can average
the readings for the x, y, z values so the movement is interpolated between each point.
Any ideas on how best to do this — and any feedback — would be appreciated; the code is listed below.
Thanks,
Gareth Bale
P.S apologies for the messy code... im still learning to drive..
//* 3d head tracking environment;
import fullscreen.*;
import japplemenubar.*;
import hypermedia.video.*;// IMPORT HYPERMEDIA
import processing.opengl.*;// IMPORT OPENGL RENDERER
import java.awt.Rectangle; //IMPORT JAVA RECTANGLE
import processing.opengl.*;
OpenCV opencv; // DECLARE OPENCV
FullScreen fs;
// Camera state: eye position (camX/camY/camZ) and look-at target
// (camCenterX/camCenterY/camCenterZ), both driven by the detected face.
float z = 0; // DEFINE declare ALL THE FLOATS AND STARTING POINTS
float camX = 0;
float camY = 0;
float camZ = 1000.0;
float camCenterY = 0;
float camCenterX = 0;
float camCenterZ = 0;
float a = -500; // animated offset; bounces between -500 and 500 in draw()
boolean changedir = false; // DEFINE BOOLEAN ARG
float linerotone = 0; // never read in this sketch -- TODO confirm and remove
float linerottwo = 0; // never read in this sketch -- TODO confirm and remove
int side = 0; // shadowed by the local loop variable of the same name in draw()
float tr; // rotation angle for rotateY(); never incremented (tr+=0)
float maxZ = 2000.0; // largest inverse-distance reading seen so far
// setup(): configure the OpenGL renderer, the OpenCV capture stream,
// the face-detection cascade, drawing defaults, and fullscreen mode.
void setup() {
  size(1024, 768, OPENGL); // OpenGL renderer at 1024x768

  // Initialise OpenCV, open the webcam at quarter resolution (cheaper
  // detection), and load the frontal-face Haar cascade.
  opencv = new OpenCV(this);
  opencv.capture(width / 4, height / 4);
  opencv.cascade(OpenCV.CASCADE_FRONTALFACE_ALT_TREE);

  // Drawing defaults.
  fill(204);
  frameRate(30);
  smooth();

  // Switch the sketch into fullscreen.
  fs = new FullScreen(this);
  fs.enter();
}
// draw(): per-frame loop. Detects a face, eases the camera toward the
// face-derived position with lerp() (this is the smoothing the raw
// per-frame assignment was missing -- the cause of the jumpy motion),
// then renders a back wall and four semi-transparent boxes.
void draw() {
  background(0);

  // Grab a frame, convert to grayscale, run the Haar face detector.
  opencv.read();
  opencv.convert(GRAY);
  Rectangle[] faces = opencv.detect(1.2, 2, OpenCV.HAAR_DO_CANNY_PRUNING, 60, 40);

  lights();
  noFill();

  // Smoothing factor: fraction of the remaining distance covered each
  // frame (0..1). Lower = smoother but slower to respond.
  final float EASE = 0.15f;

  for (int i = 0; i < faces.length; i++) {
    // Targets derived from the face rectangle (same mapping as before),
    // but eased into the camera state instead of assigned directly.
    float targetX = round(-(-width / 2 + faces[i].x * 4) * HALF_PI);
    float targetY = round(((-height / 2 + faces[i].y * 4) * HALF_PI) + 200);
    camX = lerp(camX, targetX, EASE);
    camY = lerp(camY, targetY, EASE);
    camCenterX = lerp(camCenterX, faces[i].x, EASE);
    camCenterY = lerp(camCenterY, round(faces[i].y), EASE);
    camCenterZ = -z;

    // Face height serves as an inverse distance estimate; maxZ tracks
    // the largest reading seen, so z grows as the head moves closer.
    float invDist = mag(0, faces[i].height * 10);
    if (invDist >= maxZ) {
      maxZ = invDist;
    }
    z = maxZ - invDist;
    camZ = lerp(camZ, z, EASE);
  }

  perspective();
  camera(camX, camY, camZ,                   // eyeX, eyeY, eyeZ
         camCenterX, camCenterY, camCenterZ, // centerX, centerY, centerZ
         0.0, 1.0, 0.0);                     // upX, upY, upZ
  //ambientLight(100, 50, 50);
  directionalLight(255, 255, 255, -1, 0.5, 0);

  // Manual dolly: 's' backs away, 'w' moves closer (only effective when
  // no face is detected, since the easing above retargets camZ).
  if (keyPressed) {
    if (key == 's') {
      camZ += 10;
    } else if (key == 'w') {
      camZ -= 10.0;
    }
  }

  // Back wall.
  noFill();
  pushMatrix();
  fill(255, 50);
  pushMatrix();
  translate(2000, 300, 2000);
  rect(-4000, -4000, 4000, 4000);
  popMatrix();
  // The original for-loop here only set stroke state repeatedly and
  // drew nothing; collapsed to a single call.
  stroke(125);
  strokeWeight(2);
  popMatrix();

  noStroke();
  fill(0, 255, 0);

  // Bounce 'a' between -500 and 500 at 5 units per frame.
  if (!changedir) {
    a += 5;
  }
  if (a >= 500) {
    changedir = true;
  }
  if (changedir) {
    a -= 5;
  }
  if (a <= -500) {
    changedir = false;
  }

  rotateY(tr); // tr never changes (the old "tr += 0" was a no-op)
  text("", -400, 0, a); // empty string: kept as a placeholder
  //println(frameRate);

  // Box 1 (at the origin).
  fill(1, 30, 135, 127);
  stroke(255);
  box(200);

  // Box 2.
  pushMatrix();
  fill(234, 23, 45, 75);
  translate(0, 220, 0);
  box(200);
  popMatrix();

  // Box 3. The original fill(2127, ...) was out of range; Processing
  // clamps color channels to 255, so this is the same rendered color.
  pushMatrix();
  fill(255, 57, 45, 75);
  translate(220, 220, 0);
  box(200);
  popMatrix();

  // Box 4.
  pushMatrix();
  fill(27, 157, 45, 55);
  translate(470, 220, 0);
  box(200);
  popMatrix();
}
building a sketch for a 3D head-tracking piece. I'm using OpenCV face tracking to control objects that
move in 3D space relative to head position. Using other bits of code and examples, I've managed to get
a simple example working; what I'm really noticing is the jumpiness of the readings.
I'm trying to work out how the data can be smoothed; I've been trying to figure out how I can average
the readings for the x, y, z values so the movement is interpolated between each point.
Any ideas on how best to do this — and any feedback — would be appreciated; the code is listed below.
Thanks,
Gareth Bale
P.S apologies for the messy code... im still learning to drive..
//* 3d head tracking environment;
import fullscreen.*;
import japplemenubar.*;
import hypermedia.video.*;// IMPORT HYPERMEDIA
import processing.opengl.*;// IMPORT OPENGL RENDERER
import java.awt.Rectangle; //IMPORT JAVA RECTANGLE
import processing.opengl.*;
OpenCV opencv; // DECLARE OPENCV
FullScreen fs;
// Camera state: eye position (camX/camY/camZ) and look-at target
// (camCenterX/camCenterY/camCenterZ), both driven by the detected face.
float z = 0; // DEFINE declare ALL THE FLOATS AND STARTING POINTS
float camX = 0;
float camY = 0;
float camZ = 1000.0;
float camCenterY = 0;
float camCenterX = 0;
float camCenterZ = 0;
float a = -500; // animated offset; bounces between -500 and 500 in draw()
boolean changedir = false; // DEFINE BOOLEAN ARG
float linerotone = 0; // never read in this sketch -- TODO confirm and remove
float linerottwo = 0; // never read in this sketch -- TODO confirm and remove
int side = 0; // shadowed by the local loop variable of the same name in draw()
float tr; // rotation angle for rotateY(); never incremented (tr+=0)
float maxZ = 2000.0; // largest inverse-distance reading seen so far
// setup(): configure the OpenGL renderer, OpenCV capture + face
// cascade, drawing defaults, and fullscreen mode.
void setup() {
size(1024, 768, OPENGL); // SET UP THE OPEN GL RENDERER
opencv = new OpenCV( this ); // DEFINE OPEN CV AS THIS ONE
opencv.capture( width/4, height/4 ); // open video stream
opencv.cascade( OpenCV.CASCADE_FRONTALFACE_ALT_TREE ); // load detection description, here-> front face detection : "haarcascade_frontalface_alt.xml"
fill(204);
frameRate(30); // cap the sketch at 30 fps
smooth();
// Create the fullscreen object
fs = new FullScreen(this);
// enter fullscreen mode
fs.enter();
}
// draw(): per-frame loop. Detects a face, points the camera at the
// face-derived position, then renders a back wall and four boxes.
void draw() {
background(0);
opencv.read(); // grab images from THE CAMERA
opencv.convert( GRAY );// convert image to gray scale
Rectangle[] faces = opencv.detect( 1.2, 2, OpenCV.HAAR_DO_CANNY_PRUNING, 60, 40 ); // detect face
//image( opencv.image(), 400, 400 );// draw the image at set position.
lights();
noFill();
//stroke(0,0,255);
//strokeWeight(1);
// Map each detected face straight onto the camera state. NOTE(review):
// raw per-frame assignment with no averaging is what makes the motion
// jumpy -- easing toward these values (e.g. with lerp()) would smooth it.
for( int i=0; i<faces.length; i++ ) {
camX = round(-(-width/2+faces[i].x*4)*HALF_PI);
camCenterX = (faces[i].x);
camY = round(((-height/2+faces[i].y*4)*HALF_PI)+200);
camCenterY = round(faces[i].y);
//z = faces[i].height*10;
camCenterZ = -z;
// Face height serves as an inverse distance estimate; maxZ tracks the
// largest reading seen, so z grows as the head moves closer.
float invDist = mag(0,faces[i].height*10);
if(invDist >= maxZ)
{
maxZ = invDist;
}
z = maxZ-invDist;
float distance = z;
// println(z);
camZ = distance;
}
perspective();
// Change height of the camera with facedetect
camera(camX, camY, camZ, // eyeX, eyeY, eyeZ
camCenterX, camCenterY, camCenterZ, // centerX, centerY, centerZ
0.0, 1.0, 0.0); // upX, upY, upZ
//ambientLight(100, 50, 50);
directionalLight(255,255,255,-1,0.5,0);
// Manual dolly: 's' backs the camera away, 'w' moves it closer
// (overridden next frame whenever a face is detected).
if(keyPressed) {
if(key == 's') {
camZ+=10;
}
else if(key == 'w') {
camZ-=10.0;
}
}
noFill();
pushMatrix();
fill(255,50);
pushMatrix();
translate(2000,300,2000);
rotateX(0); // no-op rotation
rect(-4000,-4000,4000,4000); // back wall
popMatrix();
// NOTE(review): this loop only sets stroke state and draws nothing;
// the local 'side' also shadows the global field of the same name.
for(int side = 0; side<2000; side+=200) {
stroke(125);
strokeWeight(2);
}
// these are the cubes
popMatrix();
noStroke();
fill(0,255,0);
// Bounce 'a' between -500 and 500 at 5 units per frame.
if(changedir == false) {
a+=5;
}
if(a>=500) {
changedir = true;
}
if(changedir == true) {
a-=5;
}
if(a<=-500) {
changedir = false;
}
tr+=0; // no-op: tr never changes, so the rotateY below is identity
rotateY(tr);
text("",-400,0,a); // draws an empty string -- effectively a no-op
//println(frameRate);
//box1
fill(1,30,135, 127);
stroke(255);
box(200);
//box2
pushMatrix();
fill(234,23,45,75);
translate(0,220,0);
box(200);
popMatrix();
pushMatrix();
fill(2127,57,45,75); // NOTE(review): 2127 is out of range; Processing clamps it to 255
translate(220,220,0);
box(200);
popMatrix();
pushMatrix();
fill(27,157,45,55);
translate(470,220,0);
box(200);
popMatrix();
}
1