waveform array and spectrum array to soundfile?

I'd like to render a soundfile from a waveform[] and a spectrum[]... does p5.sound or FFT have a function for this?

Answers

  • edited March 2016
    var recorder = new p5.SoundRecorder();
    var audioClip = new p5.SoundFile();
    var audioArray = new p5.AudioIn();

    var buildAudio = function(waveform, spectrum)
    {
      recorder.setInput(audioArray); // point the recorder at the input source
      recorder.record(audioClip);    // record the input into the SoundFile
    }
    

    Would something like this work? If so, how do I get the waveform[] and spectrum[] into audioArray the right way?
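
    If you already have the samples as an array, another option is to skip the recorder and write them straight into the p5.SoundFile. A minimal sketch of that idea, assuming waveform is a plain array of floats in the -1..1 range (the function name and output filename are just placeholders):

    var soundFile = new p5.SoundFile();

    function writeArrayToFile(waveform)
    {
      // setBuffer() takes an array of Float32Arrays, one per channel
      soundFile.setBuffer([new Float32Array(waveform)]);
      // p5's save() can write a p5.SoundFile out as a .wav
      save(soundFile, 'output.wav');
    }

    The spectrum[] doesn't come into it here: a SoundFile only needs time-domain samples, which is what the waveform array already is.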

  • OK, working with the Web Audio API now. If there are any sound-data experts here, I need some help understanding things. To explain in short: I have a neural net that generates images from the waveform and spectrum of my sound. So the input is waveform[n] and spectrum[n], and the output is r, g and b, which I set on an image. Result:

    Now I'd like to try the reverse. I know I can run the neural net in reverse if I save the weights, to get the sound back from the image, and I get two arrays of floats back from the neural net. That's not the problem. But how do I correctly convert this back into the right sound data? Script:

    This is what I try now, but it sounds very choppy, so I feel I'm converting the waveform and spectrum data back the wrong way. I got these arrays from fft.analyze() and fft.waveform(), so I feel I should somehow do the reverse of what those functions did. Also, I wrote the images with the fft's default 1024 bins, so should I feed the audio data array back in windows, or just give it the whole data at once? Very confusing, so any help or insight is greatly appreciated. (There's a sketch of the windowed approach after the script below.)

    var nn;
    var img;
    var fft;
    var layers = [];
    var input;
    var inputs = [];
    var audioClip;
    var audioArray;
    var recorder;
    var bias = 1;
    
    var audioCtx = new (window.AudioContext || window.webkitAudioContext)();
    var channels = 1;
    var frames = audioCtx.sampleRate;
    var myArrayBuffer;// = audioCtx.createBuffer(channels, frames, audioCtx.sampleRate);
    
    var r,g,b,amp,freq;
    var waveform = [];
    var spectrum = [];
    var i=0;
    var pixelArray = [];
    var pixel = [];
    
    function preload()
    {
      img = loadImage("Images/Test8.png");
    }
    
    function setup() 
    {
      input = new NN.layer(4);
      layers.push(input);
      var hidden = new NN.layer(5);
      layers.push(hidden);
      var output = new NN.layer(2);
      layers.push(output);
      nn = new NN.neuralNet(layers);
      createCanvas(img.width,img.height);
      image(img,0,0);
      img.loadPixels(); // populate img.pixels: 4 entries (r,g,b,a) per pixel
      pixelArray = get();
      // one audio frame per image pixel, so divide the pixel array length by 4
      myArrayBuffer = audioCtx.createBuffer(channels, img.pixels.length / 4, audioCtx.sampleRate);
      print(audioCtx.sampleRate);

      waveform = myArrayBuffer.getChannelData(0);

      print(img.pixels.length);

      for(var i = 0; i < img.pixels.length; i += 4)
      {
        r = map(img.pixels[i],   0, 255, -4, 4);
        g = map(img.pixels[i+1], 0, 255, -4, 4);
        b = map(img.pixels[i+2], 0, 255, -4, 4);
        inputs = [bias, r, g, b];

        nn.feedNetwork(inputs);
        amp  = nn.outputLayer.perceptrons[0].outpt;
        freq = nn.outputLayer.perceptrons[1].outpt;
        //print("r: "+r+"/"+"g: "+g+"b: "+b+"f: "+freq+"/"+"a: "+amp);
        // i advances 4 per pixel, so the sample index is i/4; writing to
        // waveform[i] instead leaves 3 of every 4 samples at zero (choppy sound)
        var j = i / 4;
        waveform[j] = freq * amp;
        spectrum[j] = freq;
      }
    
      var source = audioCtx.createBufferSource();
      // set the buffer in the AudioBufferSourceNode
      source.buffer = myArrayBuffer;
      // connect the AudioBufferSourceNode to the
      // destination so we can hear the sound
      source.connect(audioCtx.destination);
      // start the source playing
      source.start();
    }
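
    On the windowing question: fft.waveform() already returns time-domain samples in the -1..1 range, so the closest thing to a reverse is writing each stored 1024-sample window back into the AudioBuffer at consecutive offsets, rather than treating everything as one block. A minimal sketch of that idea, where storedWaveforms is a hypothetical array holding one Float32Array of 1024 samples per analysed frame:

    var WINDOW = 1024; // the fft's default number of bins
    var nWindows = storedWaveforms.length;
    var outBuffer = audioCtx.createBuffer(1, nWindows * WINDOW, audioCtx.sampleRate);
    var outData = outBuffer.getChannelData(0);

    for (var w = 0; w < nWindows; w++)
    {
      // copy window w into its slot in the output buffer
      outData.set(storedWaveforms[w], w * WINDOW);
    }

    Also note that fft.analyze() only gives bin magnitudes (0..255) with no phase information, so the spectrum array alone can't be exactly inverted back into samples; the waveform array is the one that maps directly back to audio.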
    