
  • How to switch from one text to another according to time with the Geomerative library?!

    Dear Chrisir, in fact my problem is linked to the attractor in my main sketch. I switch from one phrase to the next, and the attractor for the lines works, but not the attractor linked to the words. I can't find out why... I tried to debug, but didn't succeed. Here is the code. Thanks for your help... best, L

    import generativedesign.*;
    import geomerative.*;
    import ddf.minim.analysis.*;
    import ddf.minim.*;
    // List of lists of points: the first index selects the phrase, the second indexes that phrase's points
    RPoint[][] myPoints = new RPoint[5][0];
    RFont font;
    PFont f;
    Attractor attractor;
    
    // Variables for lines
    Attractor_Lines attractor_Lines;
    int xCount=401;
    int yCount=401;
    float gridSizeX=1800;
    float gridSizeY=1000;
    Node [] myNodes;
    float xPos, yPos;
    
    String [][] phraseSets = new String [4][0];
    String [] FR1 = {
      "On me dit de te haïr et je m'y efforce", 
      "Je t'imagine cruel, violent, implacable", 
      "Mais à te voir je n'ai bientôt plus de force", 
      "Et de te blesser je suis bien incapable", 
    };
    String [] FR2 = {
      "Tous mes camarades combattent avec rage", 
      "Et pleurent la nuit au souvenir des supplices", 
      "Infligés à leurs frères qui sont du même âge", 
      "et rêvent comme eux de toucher une peau lisse"
    };
    String [] FR3 =
      {"Et de pouvoir enfin caresser des obus", 
      "Autres que ceux produits par le pouvoir obtus", 
      "Je rêve de quitter ces boyaux infernaux"
    };
    String [] FR4 = {
      "De laisser ces furieux des deux bords du Rhin", 
      "Et de pouvoir embrasser ta chute de rein", 
      "Et porter notre amour sur les fonts baptismaux"
    };
    
    //TEXT
    final color textColor = color(245);
    int fontSize;
    
    //SOUND
    Minim minim;
    AudioPlayer[] sounds;
    FFT fft;
    float bandHeight;
    float soundDuration ;
    float soundDuration1 ;
    String []fileNamesFR= {"FR_01", "FR_02", "FR_03", "FR_04", "FR_05", "FR_06", "FR_07", "FR_08", "FR_09", "FR_10", "FR_11", "FR_12", "FR_13", "FR_14"};
    SoundManager sm;
    
    // TIME
    int startTime;
    int initTime;
    int lineSpacing;
    int index;
    int state;
    float duration;
    float dur1;
    float dur2;
    //----------------SETUP---------------------------------------------------------------------------------------
    
    void setup() {
      size(1920, 1080, JAVA2D);
      //add phrases to list
      phraseSets[0]=FR1;
      phraseSets[1]=FR2;
      phraseSets[2]=FR3;
      phraseSets[3]=FR4;
    
      smooth();
      RG.init(this);
      font = new RFont("FreeSans.ttf", 86, CENTER);
      stroke(textColor);
      strokeWeight(0.05);
      //INIT
      drawPhrases(phraseSets[0]);
    
      // LINES initiate attractor + attractors specs
      myNodes = new Node [xCount*yCount];
      initGrid();
      attractor_Lines = new Attractor_Lines(0, 0);
      attractor_Lines.strength=-160;
      attractor_Lines.ramp = 0.85;
    
      //SOUND
      minim = new Minim(this);
      sounds = new AudioPlayer[fileNamesFR.length];
      for (int idx=0; idx<sounds.length; idx++) {
        sounds[idx] = minim.loadFile(fileNamesFR[idx]+".wav", 2048);
        fft = new FFT(sounds[idx].bufferSize(), sounds[idx].sampleRate());
      }
      soundDuration = 2000;
      sm=new SoundManager(this);
      //}
      // TIME
      startTime=millis();
      initTime=millis();
      index=0;
      lineSpacing =150;
    }
    
    //----------------DRAW---------------------------------------------------------------------------------------------
    
    void draw() {
      background(255);
      state =0;
    
      //SOUND ANALYSIS
      for (int idx=0; idx < sounds.length; idx++) {
        fft.forward(sounds[idx].mix);
        for (int i =0; i< fft.specSize(); i++) {
          float bandDB = 10*log(fft.getBand(i)/fft.timeSize());
          bandDB = constrain(bandDB, -1000, 1000);
          bandHeight = map(bandDB*4, 0, -220, 0, height);
          stroke(0);
          //line(i, height, i, bandHeight-fft.getBand(i)*8);
        }
      }
    
    
      // LINES
      if (millis()-startTime > 0) {
        for (int i = 0; i<myNodes.length; i=i+10) {
          pushMatrix();
          translate(myNodes[i].x, myNodes[i].y);
          stroke(0, 100);
          strokeWeight(0.01);
          float noiseXRange = attractor_Lines.x/100.0;
          float noiseYRange = attractor_Lines.y/1000.0;
          float noiseX = map(myNodes[i].x, 0, xCount, 0, noiseXRange/5);
          float noiseY = map(myNodes[i].y, 0, yCount, 0, noiseYRange/5);
          float noiseValue = noise(noiseX, noiseY);
          float angle = noiseValue*TWO_PI;
          rotate(angle);
          line(0, 0, 10, 10);
          popMatrix();
        }
      }
    
      // TEXTS
      // draw on the center of the screen
      translate(width/2, height/2);
      // draw phrases vertically centered by moving the top up by half the line spaces
      translate(0, -1.0*lineSpacing*(phraseSets[index].length-1)/2.0);
      // loop through lines
      for (int i=0; i< myPoints.length; i++) {
        // draw a line
        for (int j=0; j< myPoints[i].length-1; j++) {
          pushMatrix(); 
          translate(myPoints[i][j].x, myPoints[i][j].y);
          noFill();
          stroke(0, 200);
          strokeWeight(0.25);
          float angle = TWO_PI*10;
          rotate(j/angle);
          bezier(-2*(noise(10)), 10, 25*(noise(10)), -5, 2*noise(5), -15, 10, -3);
          //bezier(-10*(noise(20))+mouseX/15, 30+mouseY/10, -10*(noise(10))+mouseX/15, 20+mouseY/15, -20*noise(20)+mouseX/15, -20+mouseY/5, 10+mouseX/15, -10+mouseY/15);
          popMatrix();
        }
        // move to the next line
        translate(0, lineSpacing);
      }
      //check Timer and redraw phrases if time has passed
    
      changePhraseTimerN(dur1, phraseSets);
      sm.update();
      // changePhraseTimer(duration*4, phraseSets);
    }
    
    //----------------INITIALIZE----------------------------------------------------------------------------------------------------------------------------------------
    void drawPhrases(String [] phrases) {
      myPoints = new RPoint[phrases.length][0];
      for (int j=0; j<phrases.length; j++) {
        RGroup myGroup = font.toGroup(phrases[j]);
        myGroup = myGroup.toPolygonGroup();
        myPoints[j] = myGroup.getPoints();
      }
    }
    
    //----------------TIMER----------------------------------------------------------------------------------------------------------------------------------------
    
    /*void changePhraseTimer( float duration, String [][] phraseSets) {
      duration = sounds[0].length()-150;
      if (millis()-startTime > duration*4) {
        index =(index+1) % phraseSets.length; 
        drawPhrases(phraseSets[index]);
        //startTime = millis();
      }
    }*/
    
    void changePhraseTimerN(float dur1, String [][] phraseSets) {
      dur1 = 11200.;
      dur2=7000;
      if (millis()-startTime < dur1) {
        state=0;
      } else if (millis()-startTime < dur1*2-200.) {
        state=1;
      } else if (millis()-startTime < dur1*3-4500.) {
        state=2;
      } else if (millis()-startTime < dur1*4-9500.) {
        state=3;
      } else {
        state=0;
        startTime = millis();
      }
    
      switch(state) {
    
      case 0:
        drawPhrases(phraseSets[0]);
        //println(0);
        index=0;
        break;
      case 1:
        //drawPhrases(phraseSets[1]); 
        index = (index+1) % phraseSets.length;
        println(index);
        startTime = millis();
        drawPhrases(phraseSets[index]);
        // println(1);
        break;
      case 2:
        drawPhrases(phraseSets[2]);
        // println(2);
        break;
      case 3:
        drawPhrases(phraseSets[3]);
        // println(3);
        break;
      }
    }
    
    
    //----------------TEXT ATTRACTOR INIT----------------------------------------------------------------------------------------------------------------------------------------
    void initAttractor(int i) {
      if (i>=4 && i<8) {
        i-=4;
      } else if (i>=8 && i<11) {
        i-=8;
      } else if (i>=11 && i<14) { 
        i-=11;
      } else if (i>14) {
        i=0;
      }
    
      float x = 0;
      float y =-50; 
      // println(i);
      attractor = new Attractor(x, y, myPoints[i]);
    }
    //----------------LINES ATTRACTOR INIT----------------------------------------------------------------------------------------------------------------------------------------
    void updateAttractorLines(float x, float y) {
      attractor_Lines.x=x;
      attractor_Lines.y=y;
    }
    //----------------LINES GRID INIT----------------------------------------------------------------------------------------------------------------------------------------
    void initGrid() {
      int i =0;
      for (int x=0; x<xCount; x++) {
        for (int y=0; y<yCount; y++) {
    
          xPos = x*(gridSizeX /(xCount-1)) + (width-gridSizeX)/2;
          yPos = y*(gridSizeY /(yCount-1)) + (height-gridSizeY)/2;
          myNodes[i] = new Node(xPos, yPos);
          myNodes[i]. setBoundary(0, 0, width, height);
          myNodes[i].setDamping(0.9);
          i++;
        }
      }
    }
    ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
    
    class Attractor {
    
      float force_radious = 100;
      float maxForce = 15;
      RPoint position;
      RPoint[] points;
    
      Attractor(float x, float y, RPoint[] p) {
        points = p;
        position = new RPoint(x, y);
      }
    
      void attract() {
    
        for (int i =0; i < points.length; i++) {
    
          float d= points[i].dist(position);
         // println ("d : "+d);
          if (d < force_radious) {   
            RPoint desired = new RPoint(points[i]);
            //points[i]= new RPoint(points[i]);
            //println( "avant x : "+ points[i].x +" y: "+points[i].y);
            desired.sub(position);
            desired.normalize();
            desired.scale(map(d, 0, force_radious, maxForce, 0));
            points[i].add(desired);
             //println( "après x : "+ points[i].x +" y: "+points[i].y);
          }
        }
      }
      void display () {
        stroke(0);
       strokeWeight(2);
      // ellipse (position.x, position.y-750, 30, 30);
      }
      void moveTo(float x, float y){
        position.x=x;
        position.y=y;
    
      }
    }
    
    class Attractor_Lines {
      float x=0, y=0;
      float radius =110;
      float strength= 0.55;
      float ramp=0.05;
      float theX;
      float theY;
    
      Attractor_Lines( float theX, float theY) {
        x= theX;
        y = theY;
      }
    
      void attract_Lines (Node theNode) {
    
        float dx = x-theNode.x;
        float dy = y-theNode.y;
        float d= mag(dx, dy);
        if ( d > 0 && d < radius) {
    
          float s = pow(d/radius, 1/ramp);
          float f = s*9*strength*50 * (1/(s+1)+((s-3)/4))/d;
          theNode.velocity.x += dx*f;
          theNode.velocity.y += dy*f;
        }
      }
    }
    
    ////////////////////////////////////////////////////////////////
    
    import ddf.minim.analysis.*;
    import ddf.minim.*;
    
    class SoundManager {
      //SOUND
      Minim minim;
      AudioPlayer[] sounds;
      FFT fft;
      float bandHeight;
      int currentSound;
      String []fileNamesFR1= {"FR_01", "FR_02", "FR_03", "FR_04", "FR_05", "FR_06", "FR_07", "FR_08", "FR_09", "FR_10", "FR_11", "FR_12", "FR_13", "FR_14"};
      float []linesYPositions ={300., 450., 600., 750., 300., 450., 600., 750., 450., 600., 750., 450., 600., 750.};
    
      SoundManager(PApplet app) {
    
        minim = new Minim(app);
        currentSound =-1;
        sounds = new AudioPlayer[fileNamesFR1.length];
        for (int idx=0; idx<sounds.length; idx++) {
          sounds[idx] = minim.loadFile(fileNamesFR1[idx]+".wav", 2048);
          fft = new FFT(sounds[idx].bufferSize(), sounds[idx].sampleRate());
    
        }
      }
    
      void update() {
    
        // SOUND
        if (currentSound ==-1) { 
          startPlaying();
    
        } else if (!sounds[currentSound].isPlaying()) {
          playNext();
        } else { 
    
          fft.forward(sounds[currentSound].mix);
          for (int i =0; i< fft.specSize(); i++) {
            float bandDB = 10*log(fft.getBand(i)/fft.timeSize());
            bandDB = constrain(bandDB, -1000, 1000);
            bandHeight = map(bandDB*4, 0, -220, 0, height);
          }
    
          attractor.moveTo(map(sounds[currentSound].position(), 0, sounds[currentSound].length(), 0, width-100)-width/2, bandHeight/10-300);
          attractor.attract();
    
          updateAttractorLines( attractor_Lines.x = map(sounds[currentSound].position(), 0, sounds[currentSound].length(), 0, width-(100)/2), linesYPositions[currentSound]);
    
          for (int j = 0; j<myNodes.length; j++) {
            attractor_Lines.attract_Lines(myNodes[j]);
            myNodes[j].update();
          }
        }
      }
    
    
      void startPlaying() {
        currentSound=0;
        playCurrentSound();
      }
    
      void playNext() {
    
        currentSound++;
        if (currentSound > sounds.length-1) {
          currentSound=0;
          drawPhrases(phraseSets[0]);
        } 
    
        // function restartAnimation
        //drawPhrases(phraseSets[0]);
       playCurrentSound();
    
      } 
    
      void playCurrentSound() {
        sounds[currentSound].rewind();
        sounds[currentSound].play();
        initAttractor(currentSound);
      }
    }
    
  • How do I record the audio input to a new audio render using minim

    Ok, so implementing @koogs' changes is not enough, as you get back to the business of empty recorded files. However, that gave me an idea (thanks @koogs) which I tested, and it sort of works. I mean, it only works for mp3 files, not for wav files. However, I tried a second idea, and it might work for you, although it doesn't seem to offer much control over the audio while it is being played. That is what I labeled the second solution, using Sampler objects. It works for both mp3 and wav files (tested).

    INSTRUCTIONS: In the code, define the file you want to play. When you run the sketch, press r to begin recording and r again to stop recording. Don't forget to press s to save the recording to an audio file, which will be placed in the data folder.

    Kf


    FIRST solution: Only mp3

    //REFERENCE: https://forum.processing.org/one/topic/how-can-i-detect-sound-with-my-mic-in-my-computer.html
    //REFERENCE: https://forum.processing.org/two/discussion/21842/is-it-possible-to-perform-fft-with-fileplayer-object-minim
    
    /**
     * This sketch demonstrates how to use an <code>AudioRecorder</code> to record audio to disk. 
     * Press 'r' to toggle recording on and off and then press 's' to save to disk. 
     * The recorded file will be placed in the sketch folder of the sketch.
     * <p>
     * For more information about Minim and additional features, 
     * visit http://code.compartmental.net/minim/
     */
    
    import ddf.minim.*;
    import ddf.minim.ugens.*;
    import ddf.minim.analysis.*;
    
    Minim         minim;
    FilePlayer player;
    AudioOutput out;
    AudioRecorder recorder;
    
    void setup()
    {
      size(512, 200, P3D);
      textFont(createFont("Arial", 12));
    
      minim = new Minim(this);  
      player = new FilePlayer(minim.loadFileStream("energeticDJ.mp3"));
      // IT DOESN'T WORK FOR WAV files  ====> player = new FilePlayer(minim.loadFileStream("fair1939.wav"));
      out = minim.getLineOut();
      TickRate rateControl = new TickRate(1.f);
      player.patch(rateControl).patch(out);
      recorder = minim.createRecorder(out, dataPath("myrecording.wav"),true);
    
      player.loop(0);
    
    }
    
    void draw()
    {
      background(0); 
      stroke(255);
    
      // draw a line to show where in the song playback is currently located
      float posx = map(player.position(), 0, player.length(), 0, width);
      stroke(0, 200, 0);
      line(posx, 0, posx, height);
    
    
    
      if ( recorder.isRecording() )
      {
        text("Currently recording...", 5, 15);
      } else
      {
        text("Not recording.", 5, 15);
      }
    }
    
    void keyReleased()
    {
      if ( key == 'r' ) 
      {
        // to indicate that you want to start or stop capturing audio data, you must call
        // beginRecord() and endRecord() on the AudioRecorder object. You can start and stop
        // as many times as you like, the audio data will be appended to the end of the buffer 
        // (in the case of buffered recording) or to the end of the file (in the case of streamed recording). 
        if ( recorder.isRecording() ) 
        {
          recorder.endRecord();
        } else
        {
          recorder.beginRecord();
        }
      }
      if ( key == 's' )
      {
        // we've filled the file out buffer, 
        // now write it to the file we specified in createRecorder
        // in the case of buffered recording, if the buffer is large, 
        // this will appear to freeze the sketch for sometime
        // in the case of streamed recording, 
        // it will not freeze as the data is already in the file and all that is being done
        // is closing the file.
        // the method returns the recorded audio as an AudioRecording, 
        // see the example  AudioRecorder >> RecordAndPlayback for more about that
        recorder.save();
        println("Done saving.");
      }
    }
    

    SECOND solution: Works for both wav and mp3

    //REFERENCE: https://forum.processing.org/one/topic/how-can-i-detect-sound-with-my-mic-in-my-computer.html
    //REFERENCE: https://forum.processing.org/two/discussion/21842/is-it-possible-to-perform-fft-with-fileplayer-object-minim
    //REFERENCE: https://forum.processing.org/two/discussion/21953/why-can-i-only-load-four-audio-files-in-minum
    /**
     * This sketch demonstrates how to use an <code>AudioRecorder</code> to record audio to disk. 
     * Press 'r' to toggle recording on and off and then press 's' to save to disk. 
     * The recorded file will be placed in the sketch folder of the sketch.
     * <p>
     * For more information about Minim and additional features, 
     * visit http://code.compartmental.net/minim/
     */
    
    import ddf.minim.*;
    import ddf.minim.ugens.*;
    import ddf.minim.analysis.*;
    
    Minim         minim;
    AudioRecorder recorder;
    AudioOutput out;
    Sampler  note;
    
    void setup()
    {
      size(512, 200, P3D);
      textFont(createFont("Arial", 12));
    
      minim = new Minim(this);  
      out = minim.getLineOut();
      note = new Sampler( "energeticDJ.mp3", 4, minim );
      //note = new Sampler( "fair1939.wav", 4, minim );
      note.patch( out );
    
      recorder = minim.createRecorder(out, dataPath("myrecording.wav"), true);
    
      note.trigger();
    }
    
    void draw()
    {
      background(0); 
      stroke(255);
    
      if ( recorder.isRecording() )
      {
        text("Currently recording...", 5, 15);
      } else
      {
        text("Not recording.", 5, 15);
      }
    }
    
    void keyReleased()
    {
      if ( key == 'r' ) 
      {
        // to indicate that you want to start or stop capturing audio data, you must call
        // beginRecord() and endRecord() on the AudioRecorder object. You can start and stop
        // as many times as you like, the audio data will be appended to the end of the buffer 
        // (in the case of buffered recording) or to the end of the file (in the case of streamed recording). 
        if ( recorder.isRecording() ) 
        {
          recorder.endRecord();
        } else
        {
          recorder.beginRecord();
        }
      }
      if ( key == 's' )
      {
        // we've filled the file out buffer, 
        // now write it to the file we specified in createRecorder
        // in the case of buffered recording, if the buffer is large, 
        // this will appear to freeze the sketch for sometime
        // in the case of streamed recording, 
        // it will not freeze as the data is already in the file and all that is being done
        // is closing the file.
        // the method returns the recorded audio as an AudioRecording, 
        // see the example  AudioRecorder >> RecordAndPlayback for more about that
        recorder.save();
        println("Done saving.");
      }
    }
    

    Keyword: kf_keyword minim sound recording from wav mp3 files

  • Sound Library

    MacBook Pro, OSX 10.13.4, Processing 3.3.7. Running the Sound Library example sketch below, there is no sound on the left channel, and it crashes on exit.

    /**
     * Processing Sound Library, Example 5
     * 
     * This sketch shows how to use the FFT class to analyze a stream  
     * of sound. Change the variable bands to get more or less 
     * spectral bands to work with. The smooth_factor variable determines 
     * how much the signal will be smoothed on a scale from 0-1.
     */
    
    import processing.sound.*;
    SoundFile   sample;
    FFT         fft;
    AudioDevice device;
    
    // Declare a scaling factor
    int scale = 10;
    
    // Define how many FFT bands we want
    int bands = 16;
    
    // declare a drawing variable for calculating rect width
    float r_width;
    
    // Create a smoothing vector
    float[] sum = new float[bands];
    
    // Create a smoothing factor
    //float smooth_factor = 0.2;
    
    void setup() {
      size(320,240);
      background(0);
    
      // If the Buffersize is larger than the FFT Size, the FFT will fail
      // so we set Buffersize equal to bands
      device=new AudioDevice(this,44100,bands);
    
      // Calculate the width of the rects depending on how many bands we have
      r_width = width/float(bands);
    
      // Load and play a soundfile and loop it. This has to be called 
      // before the FFT is created.
      // loop() crashes at end of file !!!
      sample=new SoundFile(this,"test.mp3");
      sample.play();
      // Create and patch the FFT analyzer
      fft=new FFT(this,bands);
      fft.input(sample);
    }      
    
    void draw() {
      background(0);
      fill(0,128,255);
      noStroke();
      fft.analyze();
      for (int i = 0; i < bands; i++) {
        //sum[i] += (fft.spectrum[i] - sum[i]) * smooth_factor;
        sum[i]=fft.spectrum[i];
        rect( i*r_width, height, r_width, -sum[i]*height*scale );
      }
    }
    
  • How to understand what is slowing down the code on many iterations?

    It's also creating these each time:

    wave = new Oscil( 200, 0.5f, Waves.SINE );
    fftLinA = new FFT (out.bufferSize(), out.sampleRate());
    

    plus, that first loop isn't creating a new Worker, it's creating twenty new Workers.
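
    One way to act on that observation (a sketch under assumptions, not the thread's actual code): since the line out, the Oscil settings and the FFT size are identical for every Worker, those heavyweight Minim objects could be created once and shared, so the loop only builds the cheap per-Worker state. The names sharedWave and sharedFFT below are made up for illustration.

    import ddf.minim.*;
    import ddf.minim.analysis.*;
    import ddf.minim.ugens.*;
    
    Minim minim;
    AudioOutput out;
    Oscil sharedWave;   // created once, reused by every Worker (assumption)
    FFT sharedFFT;      // created once, reused by every Worker (assumption)
    
    void setup() {
      size(400, 400);
      minim = new Minim(this);
      out = minim.getLineOut();
      // build the expensive audio objects a single time, outside any per-Worker loop
      sharedWave = new Oscil(200, 0.5f, Waves.SINE);
      sharedFFT = new FFT(out.bufferSize(), out.sampleRate());
      sharedFFT.linAverages(30);
    }
    
    // a stripped-down Worker that borrows the shared objects instead of allocating its own
    class Worker {
      float x, y;
      Worker(float x_, float y_) {
        x = x_;
        y = y_;
        // no new Oscil / new FFT here, so constructing twenty Workers stays cheap
      }
    }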

  • How to understand what is slowing down the code on many iterations?

    I was able to learn (with the strategic use of "println") that the code slows down when this loop is running:

     for ( int i = 0; i< 20; i++) {
          aliens.add(new Worker(width/2, height/2, elites.get(j).net));
          j++;
          if (j >= elites.size()) {
            j = 0;
          }
        }
    

    So somehow adding a new "Worker" is slow. Here is what the constructor looks like:

      public Worker(float _x, float _y, NeuralNetwork _net) {
    
    
        directionsPI = new float [] {0,QUARTER_PI, PI/2, PI/2 + QUARTER_PI, PI, PI + QUARTER_PI, PI + PI/2, PI*2 - QUARTER_PI, PI};
        directionalValues = new float [directionsPI.length];
        for ( int i = 0; i < directionalValues.length; i++){
          directionalValues [i] = 0;
        }
    
        _layers = new int[] {16, 30, 16, directionalValues.length};
        net = new NeuralNetwork ( _layers, _net);
        type = "Worker";
        diameter = worker_size;
        pos = new PVector (_x, _y);
        speed = worker_speed;
        cor = workerColor;
        hearing_distance = 100;
        controls = new String[] {"Evolve Fighter"};
        out = minim.getLineOut();
        // create a sine wave Oscil, set to 440 Hz, at 0.5 amplitude
        wave = new Oscil( 200, 0.5f, Waves.SINE );
        fftLinA = new FFT (out.bufferSize(), out.sampleRate());
        fftLinA.linAverages(30);
        this.registerObserver(tutorial);
        collidable = true;
        selectable = true;
        soundInterval = 10;
        fitness = 0;
    
         float ran = random(1);
            if ( ran > mutation_chance){
              net.Mutate();
            }
    
      }
    

    As you can see, it contains a call to the constructor of the other class, NeuralNetwork, which creates a copy of an existing net (one of the arguments to Worker). It also runs a "Mutate()" command, which makes random changes to the neural network.
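
    One quick way to see which part of the constructor is the slow one is to time its sections with System.nanoTime() and print the results. A minimal sketch of the idea; the timed blocks below are placeholders standing in for the real sections (array setup, new NeuralNetwork(...), the Minim/FFT calls):

    void setup() {
      long t0 = System.nanoTime();
      float[] arrays = new float[16];      // stand-in for the directional arrays
      long t1 = System.nanoTime();
      int[] heavy = new int[1000000];      // stand-in for new NeuralNetwork(...) or new FFT(...)
      long t2 = System.nanoTime();
    
      println("array setup : " + (t1 - t0) / 1e6 + " ms");
      println("heavy object: " + (t2 - t1) / 1e6 + " ms");
    }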

  • Index size Problem

    Hi, this is a problem that I have been having for a while. My program works fine until you fire too many bullets and the array gets bogged down; the thing is, I don't know why it keeps breaking at its limit.

    //April 28, 2018
    
    float randomside = 0.0;
    
    ArrayList<Integer> bulletup = new ArrayList<Integer>();
    ArrayList<Integer> dotposition = new ArrayList<Integer>();
    
    ArrayList<Integer> balloonX = new ArrayList<Integer>();
    ArrayList<Integer> balloonY = new ArrayList<Integer>();
    
    int moveup = 60;
    float fastdot = mouseX;
    float dotcoordiant = -50;
    int lastTimeCheck;
    int timeIntervalFlag = 6500;
    int timeIntervalFlag2 = 30;
    
    int drawCount = 0;
    
    void setup() {
      // set size of display
    
      size (500, 720);
      background (256, 256, 256);
      strokeWeight(10);
      frameRate(60);
      lastTimeCheck = millis();
    }
    
    
    void mousePressed(){
      println("the mouse was pressed at ",mouseX);
      dotposition.add(mouseX);
      bulletup.add(650);
    }
    
    int distance2(int X1, int Y1, int X2, int Y2){
      int a = X2 - X1;
      int b = Y2 - Y1;
      return(a * a + b * b);
    }
    
    boolean isFirstOffTop(ArrayList<Integer> myBullets){
      if (myBullets.size() == 0){
        return(false);
      }
      if (myBullets.get(0) > 0){
        return(false);
      }
      return(true);
    }
    
    boolean isFirstOffBottom(ArrayList<Integer> myBullets){
      if (myBullets.size() == 0){
        return(false);
      }
      if (myBullets.get(0) < 780){
        return(false);
      }
      return(true);
    }
    
    void draw() {
      background (256, 256, 256);
      randomside = random(450);
      stroke(0, 0, 256);
      point(mouseX, 650);
    
      //bulletcode
    
      // remove front bullets that have reached the top
      while (isFirstOffTop(bulletup)){
        bulletup.remove(0);
        dotposition.remove(0);
      }
    
      // draw the moving bullets
      for (int bi = 0; bi < dotposition.size(); bi++) {
          point(dotposition.get(bi), bulletup.get(bi));
          bulletup.set(bi, bulletup.get(bi) - 6);
      }
    
    
      if ( millis() > lastTimeCheck + timeIntervalFlag ) {
        lastTimeCheck = millis();
        moveup = 60;
        println(lastTimeCheck);
      }
      /*if ( millis() > lastTimeCheck + timeIntervalFlag2 ) {
       lastTimeCheck = millis();
       bulletup = bulletup;
       println(lastTimeCheck);
    
       }
       */
    
       // make another arraylist of "ballons",
       // on each draw create one new balloon at the top of the screen
       // at random X positions
       // slide every balloon down 10 pixels/frame
      drawCount += 1;
      if (drawCount > 1){
        // generate one more balloon at top of screen
        balloonX.add(int(random(500)));
        balloonY.add(0);
        drawCount = 0;
      }
    
      // remove balloons that have reached the bottom
      while (isFirstOffBottom(balloonY)){
        balloonX.remove(0);
        balloonY.remove(0);
      }
    
      // draw the falling balloons
      stroke(256, 0, 0);
      for (int bi = 0; bi < balloonX.size(); bi++) {
          point(balloonX.get(bi), balloonY.get(bi));
          balloonY.set(bi, balloonY.get(bi) + 5);
    
      }
      println("ballon size = ", balloonX.size());
      // pop balloons that have been hit
      int dist = 10;
      int dist2 = dist * dist;
      // loop backwards so I can delete through indexed loop
      for (int bi = balloonX.size() - 1; bi >= 0; bi--) {
          for (int si = 0; si < dotposition.size(); si++){
            println("balloon size = ", balloonX.size());
            if (distance2(dotposition.get(si), bulletup.get(si),
                balloonX.get(bi), balloonY.get(bi)) <= dist2) {
                   balloonX.remove(bi); 
                   balloonY.remove(bi); 
                   // this balloon is gone, so stop comparing index bi against
                   // the remaining bullets; otherwise get(bi) can run past the
                   // end of the shrunken list and throw the index error
                   break;
                }
          }
      }
      // Kill balloons that have reached the bottom
    }
    
  • Repository for soundscape investigation

    sculpture:

    It waits a bit into the song, then records a 3D sound sculpture. Then it stops the song and shuts down Minim.

    Use PeasyCam to rotate and scale the sound sculpture with the mouse.

    Chrisir

    /**
     * VERSION generates a 3D sculpture based on sound. 
    
     * This sketch demonstrates how to use an FFT to analyze
     * the audio being generated by an AudioPlayer.
     * <p>
     * FFT stands for Fast Fourier Transform, which is a 
     * method of analyzing audio that allows you to visualize 
     * the frequency content of a signal. You've seen 
     * visualizations like this before in music players 
     * and car stereos.
     * <p>
     * For more information about Minim and additional features, 
     * visit http://code.compartmental.net/minim/
     */
    
    import ddf.minim.analysis.*;
    import ddf.minim.*;
    
    import peasy.*;
    import peasy.org.apache.commons.math.*;
    import peasy.org.apache.commons.math.geometry.*;
    
    int state=0; 
    int i2=0;
    
    Minim       minim;
    AudioPlayer jingle;
    FFT         fft;
    PeasyCam    cam; 
    
    ArrayList< MyLine> myLines = new ArrayList(); 
    
    int t1; // timer  
    
    void setup() {
      size(1512, 1000, P3D);
    
      cam = new PeasyCam(this, 0, 0, 0, 500);  
    
      minim = new Minim(this);
    
      // specify that we want the audio buffers of the AudioPlayer
      // to be 1024 samples long because our FFT needs to have 
      // a power-of-two buffer size and this is a good size.
      jingle = minim.loadFile("jingle.mp3", 1024);
    
      if (jingle==null) {
        println ("file not found :"
          +"jingle.mp3"
          +" ++++++++++++++++++++++++++++++++++++++++++++++++");
        exit();
        return;
      }
    
      // play the file 
      jingle.play();
    
      background(0);
    
      t1=millis();
      // time to pass BEFORE starting recording sculpture : 3000 millis 
      while (millis() - t1 < 3000) {
        //
      }
      println ("start");
    
      // create an FFT object that has a time-domain buffer 
      // the same size as jingle's sample buffer
      // note that this needs to be a power of two 
      // and that it means the size of the spectrum will be half as large.
      fft = new FFT( jingle.bufferSize(), jingle.sampleRate() );
    
      t1=millis();
    }
    
    void draw() {
      if (state==0) {
        // recording sculpture 
        background(0);
        text("please wait", 22, 22); 
        // recording sculpture 
        initSculpture();
      } else {
    
        // display sculpture 
        background(0);
    
        lights(); 
    
        //  translate(width/2, 0);
        for (MyLine ml : myLines ) {
          ml.display();
        }//for
      }//else 
      //
    }//draw
    
    void initSculpture() {
      // 
      // perform a forward FFT on the samples in jingle's mix buffer,
      // which contains the mix of both the left and right channels of the file
    
      fft.forward( jingle.mix );
    
      // do frequency band i
      for (int i = 0; i < fft.specSize(); i++) {
    
        // angle in degree: 
        float angle = map (i, 0, fft.specSize(), 
          0, 360); 
        float radius =  fft.getBand(i) * 2; 
    
        PVector from = new PVector ( 0, i2+33, 0 ) ; 
        PVector to = new PVector (  cos(radians(angle)) * radius, i2+33, sin(radians(angle)) * radius  ) ;
    
        MyLine line1 = new MyLine ( from, to, radians(angle) );
    
        // store the line for frequency band i
        myLines.add(line1);
      }//for
    
      i2++;
    
      // time HOW LONG we record  
      if (millis() - t1 > 3000) {
        minim.stop();
        jingle.pause();
        jingle=null; 
        fft=null; 
        minim = null;
        state=1;
        println ("stop preparing ");
      }
    }//func
    
    // =========================================================
    
    class MyLine {
    
      PVector from;
      PVector to;
      float weight; 
    
      //constr
      MyLine(PVector from_, 
        PVector to_, 
        float weight_) {
    
        from=from_.copy(); 
        to=to_.copy();
        weight=weight_;
      }//constr
    
      void display() {
        stroke(from.y+33, 2, 2);
        strokeWeight(map(weight, 0, TWO_PI, 
          1, 4) );
        line(from.x, from.y, from.z, 
          to.x, to.y, to.z);
      }//func 
      //
    }//class 
    //
    
  • Repository for soundscape investigation

    Suggestion for visual:

    The map is from above

    Now imagine you stand in the landscape and see the map from the side, like a walking person.

    You would see sound spots on the horizon

    You could look around and see different ones (other angle)

    Their distances are different, so they appear smaller or bigger

    As a symbol you could use a marker with text above OR use an image showing the FFT of the first 40 seconds or so.

    So each sound looks different.

    This could be a 2D image with a transparent background (calculated asynchronously) that rotates towards the user as they walk around, or a 3D shape that you calculate and store as a PShape.

    Use QueasyCam for walking.
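
    A minimal sketch of the "marker that rotates towards the user" idea, without any camera library; the moving cam point below is just a made-up stand-in for the walker's position:

    // Billboard idea: a sound spot sits at its map position and its flat marker
    // is rotated around Y so it always faces the (here: simulated) viewer position.
    PVector spot = new PVector(100, 0, -200);   // one sound spot on the "map"
    
    void setup() {
      size(800, 600, P3D);
    }
    
    void draw() {
      background(0);
      lights();
    
      // stand-in for the walking user's position (would come from the camera library)
      PVector cam = new PVector(cos(frameCount * 0.01) * 400, 0, sin(frameCount * 0.01) * 400);
    
      pushMatrix();
      translate(spot.x, spot.y, spot.z);
      float angle = atan2(cam.x - spot.x, cam.z - spot.z);   // face the viewer
      rotateY(angle);
      fill(255, 0, 0);
      rect(-30, -40, 60, 80);                 // flat marker; could be an image or an FFT thumbnail
      fill(255);
      textAlign(CENTER);
      text("sound spot", 0, -50);
      popMatrix();
    }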

  • Spectrogram to audio?

    But I believe this is the other way around.

    So the code in your first post reads the audio stream, does the FFT, and displays the data? However, from what I understand, you have a graph and you want to create the sound? Is the graph in the frequency domain? Do you have a sample data set?

    Please also provide details of what you want to do. There is no need for everybody to go and read somebody else's code to try to understand what you would like to do. Provide a brief description; the links will also be needed, but they should be used as support for your post.

    Kf

  • Connect Audio context of Howler to p5.sound

    Hi There,

    I'm experimenting with p5.sound. Basically I was trying to connect a Howler instance to p5.js sound to get the FFT to analyze it; so far I've been unsuccessful. Is there any way I can connect the audio context of Howler to p5?

  • FFT to identify bird's singing

    Thank you all for your answers. So there's this website called xeno-canto, which is a database of bird sounds, the biggest one I found, and the community is very big, so I'm using that as my db (I already asked the website's owner, and they even have an API to help with downloading sounds). Oh, and most of the recordings are without noise.

    I know that the recognition won't be perfect and I'm ok with that. As for recording a bird's singing in real life, it'll depend on the user. Most birds sing in cycles; for example, I recorded a bird yesterday: he was singing for 5 seconds, then he stopped for 5 seconds, and so on for a minute, then he took a break and went again. The only problem is the noise, but I think I can work on that.

    I thought about TensorFlow, but it's a pain to implement in Java; maybe in a later version.

    I started coding the FFT. I'm using a library in Java, so it's not that hard; I just need to understand the output, as my mp3 array is 125000 bytes long and so is the output array.
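
    For what it's worth, a minimal sketch of how an FFT's complex output is usually turned into readable numbers (the shayam code's actual classes may differ, so take the array layout as an assumption): bin k of an N-point FFT at sample rate fs corresponds to the frequency k*fs/N, and its amplitude is the magnitude of the complex value in that bin.

    // Hypothetical helpers: re[] and im[] are assumed to hold the real and
    // imaginary parts of one N-point FFT frame.
    float[] magnitudes(float[] re, float[] im) {
      float[] mag = new float[re.length / 2];          // only the first N/2 bins are unique
      for (int k = 0; k < mag.length; k++) {
        mag[k] = sqrt(re[k] * re[k] + im[k] * im[k]);  // amplitude of bin k
      }
      return mag;
    }
    
    float binFrequency(int k, float sampleRate, int fftSize) {
      return k * sampleRate / fftSize;                 // e.g. bin 10 of a 4096-point FFT at 44100 Hz is about 107.7 Hz
    }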

    I'll post some code later.

  • FFT to identify bird's singing

    In the meantime, you are very welcome to start building your database of sounds. An even better project at this stage would be to allow people from other regions/locations to upload their data. You need a website, some content management, and a good "sales pitch" to get people involved and volunteering their efforts to contribute to your database by donating recorded sounds with bird information.

    Implementing the FFT by itself could prove to be a fun and simple task (cough cough), but the matching will be a daunting one (no experience here). If you have sample code and a few sample files, you can post them here and I can have a look. Just don't forget to tag me with @kfrajer.

    Kf

  • FFT to identify bird's singing

    @zoulou===

    I do agree with @jeremydouglass: that is not at all a simple project, neither for Android nor for Java. Why? Because the right way is to use TensorFlow, and while you can import pre-trained models into AS (as for P5, I don't know), there is no way to create and train a model with Java: you need Python to do that. As for me, I have successfully tried with Android TensorFlow, but now I am stuck with the pre-trained models. So I have tried with Python. It works... But for birds you need a) a database of bird songs, which can be huge! Then b) to construct your model and train it, which is not only FFT (because sound models are constructed like images, so it is quite unpredictable whether the time unit in which a bird repeats its song is the right one...). So, though I am very interested in your project, I think that it is a real challenge... and not for P5!

  • FFT to identify bird's singing

    Hello, I'm sorry if I'm not posting in the right category; I'm new to the forum.

    I'm working on a hiking-help project for Android, and I thought about a functionality that could be fun: recognizing a bird's singing. The idea is to record a bird that you pass by, and the database tells you which species it is.

    I'm not a pro at sound processing, but I thought that implementing an FFT could help. I was planning on using the FFT to get the max-min amplitude and compare it with the database's pre-processed information; of course I don't plan on using only the min-max indicators.

    I based my code on this: https://github.com/blanche/shayam/blob/master/java/at.lw.shayam/src/at/lw/shayam/AudioAnalysis.java and as much as I understand the maths behind the Fourier transform, I don't get everything in that code.

    So here are my questions:

    1. Are the chunks used to accelerate the computing time of the FFT? If we have 2^n chunks, will we get 2^n smaller FTs processed?

    2. The results[][] 2D Complex array contains... complex numbers. But I don't understand what x and y are in results[x][y], and how you can find the frequency and the amplitude (of course I'll have to convert the complex numbers to doubles).

    3. Do you think this approach is enough? The project is not professional, so I'm not trying to get a recognition rate of 100%!

    Thank you for your answers.

  • A hypnotic, music-reactive Techno visuals show designed for staring at at a party

    @Dizotoff, thank you! Yes indeed I can.

    I am using a library called Minim for this. At first, I was using Minim's built-in beat detection. It has a class for simple beat detection, here's the documentation.

    It worked quite well, but this method proved less flexible than I would have liked — it would miss some beats, or detect a beat when there wasn't one sometimes, and there wasn't enough flexibility with the BeatDetect class (or so I thought, there is an isRange() method, where you are supposed to be able to choose a particular frequency band to react to (i.e. just the bass etc.) but I couldn't get that working well).

    In the end, I used an FFT (Fast Fourier Transform) object (docs). This allows you to split the audio into a spectrum (multiple bouncing frequency bands, like you might see on a graphic equalizer).

    Since my music has a pretty constant pulsing bassy beat, I simply access the value for the lowest band of the FFT (number 0), then set a threshold for what constitutes a beat on that band. I also have a sensitivity value that I can change on the fly, so that 2 beats aren't detected too close together. This works reasonably well, and is something I can quickly mess with at live shows if the beat detection goes wrong (I have threshold and sensitivity mapped to the arrow keys).

    Here's some code for that last bit:

    if (frameCount >= mostRecentBeatFrameCount + sensitivity) {
      if (selectedAverageBandLevel > chosenThreshold) {
        mostRecentBeatFrameCount = frameCount;
      }
    }
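
    For context, a minimal sketch of the band-0-plus-threshold approach described above, assuming Minim and an AudioPlayer; the file name and the threshold/sensitivity values are made up and would need tuning:

    import ddf.minim.*;
    import ddf.minim.analysis.*;
    
    Minim minim;
    AudioPlayer song;
    FFT fft;
    
    float chosenThreshold = 50;           // what counts as a beat on band 0 (tune by ear)
    int sensitivity = 15;                 // minimum frames between two detected beats
    int mostRecentBeatFrameCount = -1000;
    
    void setup() {
      size(400, 400);
      minim = new Minim(this);
      song = minim.loadFile("track.mp3", 1024);   // hypothetical file in the data folder
      fft = new FFT(song.bufferSize(), song.sampleRate());
      song.play();
    }
    
    void draw() {
      background(0);
      fft.forward(song.mix);
      float selectedAverageBandLevel = fft.getBand(0);   // lowest band = the bass pulse
      if (frameCount >= mostRecentBeatFrameCount + sensitivity
          && selectedAverageBandLevel > chosenThreshold) {
        mostRecentBeatFrameCount = frameCount;
        fill(255);
        ellipse(width/2, height/2, 200, 200);            // flash on the beat
      }
    }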

    So yeah, pretty simple solution, works well for live situations with 4x4 bass heavy music. There's definitely, definitely better ways to approach it though. You can find scientific papers and other writings online about better beat detection methods, I would like to implement one one day!

    Hope that helps, thanks for your question.

  • question about p5.Sound library

    Hi guys, I'm developing a program that uses the p5.sound library, and I have a question about analyzing a sound file. I'm trying to analyze the whole sound file with fft.analyze(), but that function depends on draw(), because we call fft.analyze() inside it. As I understand it, fft.analyze() takes a snapshot of about 2048/44100 seconds and analyzes it. What I need help with is a way to analyze every 2048/44100-second slice of the sound file from beginning to end, without even playing the file; I just need to analyze it. Thank you everyone :)

  • Interactive video with sound input

    This is totally doable. For the sound effect, you could use either amplitude over time or frequency over time. The latter requires using an FFT to extract the frequency components of each sound buffer as it is being played.

    If you are a beginner, I would stick to the amplitude vs. time concept, as it is easier to implement. I don't know how much experience you have in Processing, so I encourage you to write your first sketch, demonstrate what you can do, and show which parts you need guidance on.

    For this project, you need to use:

    • Minim: read the input from a mic, or play a sound file and access the current sound buffer. You also need to get the amplitude of the current buffer (all provided by Minim), which is useful to detect sound. When you detect some sound, you can draw it, exactly as demonstrated in the video you provided (see the sketch after this list).
    • Key terms to review in Processing: PGraphics(), mask(), tint(), lerpColor() and map()
    • You need the video library to load and play your video.
    • You will also need an array list to keep track of the objects drawn in the sketch
    • Finally you will need to work with classes to simplify the processing of color over time. This is related to the color fading effect, possibly an effect of using time + a PGraphics buffer + tint.
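
    A minimal sketch of the amplitude-over-time idea from the first bullet, assuming Minim and the default microphone input; the threshold is a made-up value to tune:

    import ddf.minim.*;
    
    Minim minim;
    AudioInput in;
    float threshold = 0.05;   // amplitude level that counts as "sound detected" (tune this)
    
    void setup() {
      size(400, 400);
      minim = new Minim(this);
      in = minim.getLineIn(Minim.MONO, 1024);   // read from the default microphone
    }
    
    void draw() {
      background(0);
      float level = in.mix.level();             // amplitude of the current buffer, roughly 0..1
      if (level > threshold) {
        // sound detected: draw something whose size follows the loudness
        fill(255);
        ellipse(width/2, height/2, level * width, level * width);
      }
    }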

    Check the reference for those key terms and don't forget to check the provided examples accessible through the PDE.

    Kf

  • Beads - Getting different results each time I run FFT

    I am trying to use the Beads ShortFrameSegmenter and FFT in order to determine the pitch and harmonics of a sample, but each time I run my sketch I get different results. Here's my code:

    import beads.*;
    import org.jaudiolibs.beads.*;
    import java.util.Timer;
    import java.util.TimerTask;
    
    AudioContext ac;
    GranularSamplePlayer sample;
    Gain gain;
    
    ShortFrameSegmenter sfs;
    FFT fft;
    PowerSpectrum ps;
    Frequency f;
    SpectralPeaks sp;
    float[][] meanHarmonics;
    
    int numPeaks = 6;
    
    int loops = 0;
    double meanFrequency = 0.0;
    Timer t = new Timer();
    boolean finished = false;
    
    void setup() {
      size(1600, 900);
      ac = new AudioContext();
      ac.start();
      sample = new GranularSamplePlayer(ac, SampleManager.sample(dataPath("") + "\\C2.mp3"));
    
      gain = new Gain(ac, 1, 1);
    
      // input chaining
      gain.addInput(sample);
      ac.out.addInput(gain);
    
      // setup analysis
      // break audio into more manageable chunks
      sfs = new ShortFrameSegmenter(ac);
      sfs.addInput(sample);
    
      // fast fourier transform to analyse the harmonic spectrum
      fft = new FFT();
      sfs.addListener(fft);
    
      // PowerSpectrum turns the raw FFT output into proper audio data.
      ps = new PowerSpectrum();
      fft.addListener(ps);
    
      // Frequency tries to determine the strongest frequency in the wave
      // which is the fundamental that determines the pitch of the sound
      f = new Frequency(44100.0f);
      ps.addListener(f);
    
      // Listens for harmonics
      sp = new SpectralPeaks(ac, numPeaks);
      ps.addListener(sp);
    
      meanHarmonics = new float[numPeaks][2];
    
      // initialise meanHarmonics
      for (int i = 0; i < numPeaks; i++) {
        for (int j = 0; j < 2; j++) {
          meanHarmonics[i][j] = 0;
        }
      }
    
      ac.out.addDependent(sfs);
    
      t.scheduleAtFixedRate(new TimerTask() {
        public void run() {
          loops++;
          if (loops == 1) {
            sample.start(0);
          } else if (loops >= 1500) {
            finished = true;
            t.cancel();
          }
          Float inputFrequency = f.getFeatures();
          if (inputFrequency != null && !Float.isNaN(inputFrequency)) { // a plain != comparison with Float.NaN is always true
            println(inputFrequency);
            meanFrequency += inputFrequency;
          }
          float[][] harmonics = sp.getFeatures();
          if (harmonics != null) {
            for (int feature = 0; feature < numPeaks; feature++) {
              // harmonic must be in human audible range
              // and its amplitude must be large enough to be audible
              if (harmonics[feature][0] < 20000.0 && harmonics[feature][1] > 0.01) {
                // average out the frequencies
                meanHarmonics[feature][0] += harmonics[feature][0];
                // average out the amplitudes
                meanHarmonics[feature][1] += harmonics[feature][1];
              }
            }
          }
        }
      }
      , 0, 1);
      while (!finished) { 
        print("");
      }
      float maxAmp = 0.0;
      float freq = 0.0;
      sample.pause(true);
      meanFrequency /= loops;
      println(meanFrequency);
      for (int feature = 0; feature < numPeaks; feature++) {
        meanHarmonics[feature][0] /= loops;
        meanHarmonics[feature][1] /= loops;
        if (meanHarmonics[feature][1] > maxAmp) {
          freq = meanHarmonics[feature][0];
          maxAmp = meanHarmonics[feature][1];
        }
        println(meanHarmonics[feature][0] + " " + meanHarmonics[feature][1]);
      }
      println(freq + " " + meanFrequency);
      println();
    }
    

    For every run I do, I get different values for the harmonics' frequencies and amplitudes, as well as for the meanFrequency value, which is often NaN. I've also noticed that when printing the inputFrequency on each iteration, its values are not entirely accurate, especially at the lower end of the frequency spectrum. I've tested this both with complex samples and with simple sine waves.

    StackOverflow question: https://stackoverflow.com/questions/48769181/getting-different-results-each-time-i-run-fft-with-processing-and-beads

  • I can't connect my Perfume Dispenser to my music Visualizer. I am using an Arduino Uno.

    import cc.arduino.*;
    //import org.firmata.*;
    
    import ddf.minim.*;
    import ddf.minim.analysis.*;
    import processing.serial.*;
    
    
    
    Minim minim;
    AudioPlayer song;
    FFT fft;
    BeatDetect beat;
    BeatListener bl;
    //Serial myPort; // Create object from Serial class
    Arduino arduino;
    
    
    // Variables that define the "zones" of the spectrum
    // For example, for bass, we take only the first 4% of the total spectrum
    float specLow = 0.03; // 3%
    float specMid = 0.125; // 12.5%
    float specHi = 0.20; // 20%
    float specLLow = 0.009;
    // This leaves 64% of the possible spectrum that will not be used.
    //These values are usually too high for the human ear anyway.
    
    //Scoring values for each zone.
    float scoreLow = 0;
    float scoreMid = 0;
    float scoreHi = 0;
    
    // Previous value, to soften the reduction
    float oldScoreLow = scoreLow;
    float oldScoreMid = scoreMid;
    float oldScoreHi = scoreHi;
    
    // Softening value
    float scoreDecreaseRate = 25;
    
    // Cubes appearing in space
    int nbCubes;
    Cube[] cubes;
    
    //Lines that appear on the sides
    int nbMurs = 500;
    Mur[] murs;
    boolean isPlaying = false;
    //Arduino
    int ledPin = 11; // LED connected to digital pin 11
    float kickSize;
    
    int deltime = 15; //Time to make the relay on after a beat is detected
    boolean delayboolean = false;
    
    
    
    
    void setup()
    {
    
    //Display in 3D on the entire screen
    fullScreen(P3D);
    
    //Load the minim library
    minim = new Minim(this);
    arduino = new Arduino(this, Arduino.list()[0], 57600);
    
    Arduino.list();
    //Load song
    song = minim.loadFile("song.mp3");
    
    //Create the FFT object to analyze the song
    fft = new FFT(song.bufferSize(), song.sampleRate());
    
    //One cube per frequency band
    nbCubes = (int)(fft.specSize()*specHi);
    cubes = new Cube[nbCubes];
    println(nbCubes);
    //As many walls as we want
    murs = new Mur[nbMurs];
    println(nbMurs);
    //Create all objects
    //Create cubic objects
    for (int i = 0; i < nbCubes; i++) {
    cubes[i] = new Cube();
    
    }
    
    //Create wall objects
    //Left walls
    for (int i = 0; i < nbMurs; i+=4) {
    murs[i] = new Mur(0, height/2, 10, height); 
    }
    
    //Straight walls
    for (int i = 1; i < nbMurs; i+=4) {
    murs[i] = new Mur(width, height/2, 10, height); 
    }
    
    //Low walls
    for (int i = 2; i < nbMurs; i+=4) {
    murs[i] = new Mur(width/2, height, width, 10); 
    }
    
    //High walls
    for (int i = 3; i < nbMurs; i+=4) {
    murs[i] = new Mur(width/2, 0, width, 10); 
    }
    
    //Black background
    background(0);
    
    //Start the song
    song.play(0);
    
    // a beat detection object that is FREQ_ENERGY mode that 
    // expects buffers the length of song's buffer size
    // and samples captured at songs's sample rate
    beat = new BeatDetect(song.bufferSize(), song.sampleRate());
    // set the sensitivity to 300 milliseconds
    // After a beat has been detected, the algorithm will wait for 300 milliseconds 
    // before allowing another beat to be reported. You can use this to dampen the 
    // algorithm if it is giving too many false-positives. The default value is 10, 
    // which is essentially no damping. If you try to set the sensitivity to a negative value, 
    // an error will be reported and it will be set to 10 instead. 
    beat.setSensitivity(100); 
    kickSize= 16;
    // make a new beat listener, so that we won't miss any buffers for the analysis
    bl = new BeatListener(beat, song); 
    textFont(createFont("Helvetica", 16));
    textAlign(CENTER);
    
    arduino.pinMode(ledPin, Arduino.OUTPUT); 
    }
    
    
    void draw()
    {
    //Forward the song. One draw () for each "frame" of the song ...
    fft.forward(song.mix);
    
    //Calculation of "scores" (power) for three categories of sound
    //First, save old values
    oldScoreLow = scoreLow;
    oldScoreMid = scoreMid;
    oldScoreHi = scoreHi;
    
    //Reset values
    scoreLow = 0;
    scoreMid = 0;
    scoreHi = 0;
    
    
    
    
    //Calculate the new "scores"
    for(int i = 0; i < fft.specSize()*specLow; i++)
    { 
    scoreLow += fft.getBand(i);
    
    }
    
    for(int i = (int)(fft.specSize()*specLow); i < fft.specSize()*specMid; i++)
    {
    scoreMid += fft.getBand(i);
    
    }
    
    for(int i = (int)(fft.specSize()*specMid); i < fft.specSize()*specHi; i++)
    {
    scoreHi += fft.getBand(i);
    
    }
    
    //To slow down the descent.
    if (oldScoreLow > scoreLow) {
    scoreLow = oldScoreLow - scoreDecreaseRate;
    }
    
    if (oldScoreMid > scoreMid) {
    scoreMid = oldScoreMid - scoreDecreaseRate;
    }
    
    if (oldScoreHi > scoreHi) {
    scoreHi = oldScoreHi - scoreDecreaseRate;
    }
    
    //Volume for all frequencies at this time, with the highest sounds higher.
    //This allows the animation to go faster for more high-pitched sounds, which we would notice more.
    float scoreGlobal = 0.66*scoreLow + 0.8*scoreMid + 1*scoreHi;
    
    //subtle color background
    background(scoreLow/100, scoreMid/100, scoreHi/100);
    
    //Cube for each frequency band
    for(int i = 0; i < nbCubes; i++)
    {
    //Value of the frequency band
    float bandValue = fft.getBand(i);
    //println(bandValue);
    
    //The color is represented as: red for bass, green for medium sounds and blue for high.
    //The opacity is determined by the volume of the band and the overall volume.
    cubes[i].display(scoreLow, scoreMid, scoreHi, bandValue, scoreGlobal);
    }
    
    //Wall lines: here we must keep the value of the previous band and of the next one to connect them together
    float previousBandValue = fft.getBand(0);
    
    //Distance between each line point, negative because on the z dimension
    float dist = -25;
    
    //Multiply the height by this constant
    float heightMult = 2;
    
    //For each band
    for(int i = 1; i < fft.specSize(); i++)
    {
    //Value of the frequency band; we multiply the farther bands to make them more visible.
    float bandValue = fft.getBand(i)*(1 + (i/50));
    
    
    
    
    //Selection of the color according to the strengths of the different types of sounds.
    stroke(100+scoreLow, 100+scoreMid, 100+scoreHi, 255-i);
    strokeWeight(1 + (scoreGlobal/100));
    
    //lower left line
    line(0, height-(previousBandValue*heightMult), dist*(i-1), 0, height-(bandValue*heightMult), dist*i);
    line((previousBandValue*heightMult), height, dist*(i-1), (bandValue*heightMult), height, dist*i);
    line(0, height-(previousBandValue*heightMult), dist*(i-1), (bandValue*heightMult), height, dist*i);
    
    //upper left line
    line(0, (previousBandValue*heightMult), dist*(i-1), 0, (bandValue*heightMult), dist*i);
    line((previousBandValue*heightMult), 0, dist*(i-1), (bandValue*heightMult), 0, dist*i);
    line(0, (previousBandValue*heightMult), dist*(i-1), (bandValue*heightMult), 0, dist*i);
    
    //lower right line
    line(width, height-(previousBandValue*heightMult), dist*(i-1), width, height-(bandValue*heightMult), dist*i);
    line(width-(previousBandValue*heightMult), height, dist*(i-1), width-(bandValue*heightMult), height, dist*i);
    line(width, height-(previousBandValue*heightMult), dist*(i-1), width-(bandValue*heightMult), height, dist*i);
    
    //upper right line
    line(width, (previousBandValue*heightMult), dist*(i-1), width, (bandValue*heightMult), dist*i);
    line(width-(previousBandValue*heightMult), 0, dist*(i-1), width-(bandValue*heightMult), 0, dist*i);
    line(width, (previousBandValue*heightMult), dist*(i-1), width-(bandValue*heightMult), 0, dist*i);
    
    //Save the value for the next loop round
    previousBandValue = bandValue; 
    }
    
    //Rectangular walls
    for(int i = 0; i < nbMurs; i++)
    {
    //Each wall is assigned a band, and its strength is sent to it.
    float intensity = fft.getBand(i%((int)(fft.specSize()*specHi))); 
    murs[i].display(scoreLow, scoreMid, scoreHi, intensity, scoreGlobal);
    }
    }
    
    //Class for cubes floating in space.
    class Cube {
    //Z position of "spawn" and maximum Z position
    float startingZ = -10000;
    float maxZ = 1000;
    
    //Position values
    float x, y, z;
    float rotX, rotY, rotZ;
    float sumRotX, sumRotY, sumRotZ;
    
    //Constructor
    Cube() {
    //Make the cube appear in a random place
    x = random(0, width);
    y = random(0, height);
    z = random(startingZ, maxZ);
    
    //Give the cube a random rotation
    rotX = random(0, 1);
    rotY = random(0, 1);
    rotZ = random(0, 1);
    }
    
    void display(float scoreLow, float scoreMid, float scoreHi, float intensity, float scoreGlobal) {
    //Selection of the color, opacity determined by the intensity (volume of the band)
    color displayColor = color(scoreLow*0.67, scoreMid*0.67, scoreHi*0.67, intensity*5);
    fill(displayColor, 255);
    
    
    //Color lines, they disappear with the individual intensity of the cube
    color strokeColor = color(255, 150-(20*intensity));
    stroke(strokeColor);
    strokeWeight(1 + (scoreGlobal/300));
    
    //Creating a transformation matrix to perform rotations, enlargements
    pushMatrix();
    
    //Displacement
    translate(x, y, z); 
    
    //Calculation of the rotation according to the intensity for the cube
    sumRotX += intensity*(rotX/1000); 
    sumRotY += intensity*(rotY/1000);
    sumRotZ += intensity*(rotZ/1000); 
    
    //Application of the rotation
    rotateX(sumRotX);
    rotateY(sumRotY);
    rotateZ(sumRotY); 
    
    //Creation of the box, variable size according to the intensity for the cube
    box(100+(intensity/2));
    
    //Application of the matrix
    popMatrix();
    
    //Displacement Z
    z+= (1+(intensity/5)+(pow((scoreGlobal/150), 2))); 
    
    //Replace the box at the back when it is no longer visible
    if (z >= maxZ) {
    x = random(0, width);
    y = random(0, height);
    z = startingZ;
    }
    }
    }
    
    
    //Class to display the lines on the sides
    class Mur {
    //Minimum and maximum position Z
    float startingZ = -10000;
    float maxZ = 50;
    
    //Values at the position
    float x, y, z; 
    float sizeX, sizeY;
    
    //Constructor
    Mur(float x, float y, float sizeX, float sizeY) {
    //Make the line appear at the specified place
    
    this.x = x;
    this.y = y;
    //random depth
    this.z = random(startingZ, maxZ); 
    
    //We determine the size because the walls on the floors have a different size than those on the sides
    this.sizeX = sizeX;
    this.sizeY = sizeY;
    }
    
    //Display function
    void display(float scoreLow, float scoreMid, float scoreHi, float intensity, float scoreGlobal) {
    //Color determined by low, medium and high sounds
    //Opacity determined by the overall volume
    color displayColor = color(scoreLow*0.67, scoreMid*0.67, scoreHi*0.67, scoreGlobal);
    
    //Make lines disappear in the distance to give an illusion of fog
    fill(displayColor, ((scoreGlobal-5)/1000)*(255+(z/25)));
    noStroke();
    
    //First band, the one that moves according to the force
    //Transformation Matrix
    pushMatrix();
    
    //Displacement
    translate(x, y, z);
    
    //extension
    if (intensity > 100) intensity = 100;
    scale(sizeX*(intensity/100), sizeY*(intensity/100), 20);
    
    //Creation of the "box"
    box(1);
    popMatrix();
    
    //Second band, the one that is still the same size
    displayColor = color(scoreLow*0.5, scoreMid*0.5, scoreHi*0.5, scoreGlobal);
    fill(displayColor, (scoreGlobal/5000)*(255+(z/25)));
    //Transformation Matrix
    pushMatrix();
    
    //Displacement
    translate(x, y, z);
    
    //Extension
    scale(sizeX, sizeY, 10);
    
    //Creation of the "box"
    box(1);
    popMatrix();
    
    //Displacement Z
    z+= (pow((scoreGlobal/150), 2));
    if (z >= maxZ) {
    z = startingZ; 
    
    if(beat.isKick()) {
    arduino.digitalWrite(ledPin, Arduino.HIGH); // set the LED on
    kickSize = 32;
    delayboolean = true; //Tells a later if statement to delay for long enough for the lights to light up
    }
    
    arduino.digitalWrite(ledPin, Arduino.LOW); // set the LED off
    textSize(kickSize);
    text("KICK", width/4, height/2);
    kickSize = constrain(kickSize * 0.95, 16, 32);
    
    
    // if (bandValue == 0.01) 
    //{ //if we clicked in the window
    //myPort.write('1'); //send a 1
    //println("1"); 
    // } else 
    //{ //otherwise
    //myPort.write('0'); //send a 0
    //} 
    
    //try{
    //Load band values
    // ArrayList = (ArrayList) fft.getBand();
    
    //float bandValue = fft.getBand();
    
    //get first band value
    
    // println(bandValue);
    
    // Ascertain if the top band value is new (i.e. it's new) or if it's the same, has the count changed (i.e. it's the same value again).
    
    // Update band value details.
    
    //Arduino - standard LED blinky action. 
    
    //arduino.digitalWrite(ledPin, Arduino.HIGH);
    //delay(100);
    //arduino.digitalWrite(ledPin, Arduino.LOW);
    //}
    
    // Slow everything down so you don't exceed perfume dispenser's rate limits. Check every 10 secs.
    // delay(10000);
    //} 
    
    
    }
    }
    
    }
    void mousePressed()
    {
    // if the song is already being played
    // we pause it and change the value of isPlaying variable
    if (isPlaying)
    {
    song.pause();
    isPlaying = false;
    }
    
    // and if it is paused we play it and change 'isPlaying' accordingly
    // :P pretty simple right?
    else
    {
    song.play();
    isPlaying = true;
    }}
    void stop() {
    // always close Minim audio classes when you are finished with them
    song.close();
    // always stop Minim before exiting
    minim.stop();
    // this closes the sketch
    super.stop();
    }
    void keyPressed()
    {
    if ( key == 'p' ) song.play(); // Press P to play the song.
    if ( key == 's' ) song.pause(); // Press S to pause playing.
    if ( key == 'h' ) song.rewind(); // Press H to reset to the start.
    if ( key == 'f' ) song.skip(500); // Press F to fast forward.
    if ( key == 'r' ) song.skip(-500); // Press R to rewind.
    if ( key == 'q' ) song.close(); // Press Q to quit playing.
    }