Free-to-use core code for a basic feed-forward neural network in Processing


These past few days I have been experimenting with deep learning in Processing, and since I haven't been able to find a proper neural network layout for it, I have decided to share the one I've created for any other curious novices like me. Feel free to ask any questions or offer any critiques you may have.

class Neuron {
  float[] inputs = {};     //each neuron takes in a number of inputs...
  float output;            //and produces an output.
  float[] weights = {};    //every input is weighed with its own unique weight...
  float bias;          //every neuron also has a bias. The bias could be trained directly, but it's easier to fix it at one, add a weight for the bias, and train that weight along with all the other weights
  int numberOfInputs;

  Neuron(int tempNumberOfInputs) {
    numberOfInputs = tempNumberOfInputs;
    bias = 1;
    for(int i = 0; i < numberOfInputs; i++) {
      inputs = (float[]) append(inputs, 0);      //add inputs equal to the amount of inputs the neuron expects
    }
    for(int i = 0; i < numberOfInputs+1; i++) {                        //add weights equal to the amount of inputs plus one to weigh the bias, too//
      weights = (float[]) append(weights, random(-1,1));
    }
  }

 float processAndActivateInputs(float[] getInputs) {    //function to activate inputs...
   float processedInputsSum = 0;
   if(getInputs.length!=inputs.length) {
     println("Error: number of inputs changed during the sketch");
     exit();
   }
   for(int i=0; i<numberOfInputs; i++) {
     inputs[i] = getInputs[i];
   }
   for(int i = 0; i<inputs.length; i++) {
     processedInputsSum += inputs[i]*weights[i];       //works by getting all the inputs, multiplying them by their corresponding weight, and summing all the results...
   }
   processedInputsSum += bias*weights[inputs.length];             //don't forget to weigh and add the bias (bias weight is at the end of the weights array)
   //activation function (tanh)//                    //the sum then goes through an activation function to produce an output. NOTE!!! You may have to change this function depending on what the aim of your network is.
   output = (exp(2*processedInputsSum) - 1)/(exp(2*processedInputsSum) + 1);       //I have used a hyperbolic tangent (it returns values from -1 to 1); you could also write (float) Math.tanh(processedInputsSum). Other common functions are a logistic sigmoid (returns values between 0 and 1) or a simple step function
   return output;
 }
}
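
To see what a single neuron does on its own, here is a minimal check you could drop into setup(); the neuron size, the two example inputs and the printed value below are only an illustration, not part of the class:

  Neuron n = new Neuron(2);                          //a neuron expecting two inputs (it gets three weights: one per input plus one for the bias)
  float[] sample = {0.5, -0.25};                     //arbitrary example inputs
  println(n.processAndActivateInputs(sample));       //prints a single value between -1 and 1, since the activation is tanh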

class Layer {
  Neuron[] neurons = {};    //each layer has a number of neurons....
  float[] outputs = {};     //...and one output per neuron...
  Layer(int numberOfInputs, int numberOfNeurons) {
    for(int i = 0; i<numberOfNeurons; i++) {              //for each neuron that the layer expects...
      Neuron tempNeuron = new Neuron(numberOfInputs);     
      neurons = (Neuron[]) append(neurons, tempNeuron);   //add a neuron...
      outputs = (float[]) append(outputs, 0);             //add an output...
    }
  }

  float[] produceOutputs(float[] getInputs) {  //function to get layer's outputs
    for(int i=0; i<neurons.length; i++) {                      //for each neuron...
      outputs[i] = neurons[i].processAndActivateInputs(getInputs);        //pass the layer's inputs to the neuron and get its output (every neuron in the layer processes the same inputs)...
    }
    return outputs;            //return the layer's outputs...//
  }
}
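
Similarly, a quick way to check a layer in setup(): every neuron in the layer receives the same inputs, so you get one output per neuron (the sizes and values below are again just an example):

  Layer hidden = new Layer(2, 3);                    //a layer of three neurons, each expecting two inputs
  float[] sample = {0.5, -0.25};
  float[] layerOutputs = hidden.produceOutputs(sample);
  println(layerOutputs.length);                      //prints 3 - one output per neuron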

class Network {
  Layer[] layers = {};    //each network has a number of layers...
  float[] inputs = {};    //...expects inputs...
  float[] outputs = {};   //..and produces outputs.
  Network() {
  }

  void addLayer(int numberOfInputs, int numberOfNeurons) {                              //function to add layers to the network. This is usually done in the setup function of the program//
    layers = (Layer[]) append(layers, new Layer(numberOfInputs, numberOfNeurons));
  }

  float[] produceOutputs(float[] tempInputs) {                 //function to produce outputs from inputs...
    inputs = tempInputs;                                    
    outputs = layers[0].produceOutputs(inputs);                //the first layer gets its inputs directly from the outside world...
    for(int i = 1; i<layers.length; i++) {                     //the second layer and all other layers take the outputs from the previous layer as inputs...
      outputs = layers[i].produceOutputs(outputs);
    }
    return outputs;                                 //the outputs of the final layer are the outputs of the neural network.
  }
}


Network brain;
float[] inputs;
float[] outputs;
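//the creature and object variables used in the example draw() below are assumptions for illustration - declare and initialize them to fit your own sketch
float creatureX, creatureY;                              //the creature's position
float creatureXspeed, creatureYspeed;                    //its velocity
float creatureXacceleration, creatureYacceleration;      //its acceleration (the decision the network makes)
float objectX, objectY;                                  //the position of the object it reacts to
float maxCreatureSpeed = 5;                              //an example speed limit, used to normalize the speed inputs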
void setup() {
  size(800,800);
  //this is where you want to create your network's layers
  brain = new Network();
  brain.addLayer(4,10);   //the first layer you add actually determines two layers - the input layer and the first hidden layer. This is because the input layer does not actually process the information; it just feeds the raw inputs to the hidden layer
  brain.addLayer(10,2);   //Now we have a neural network with an input layer expecting four inputs, a hidden layer with ten neurons and an output layer producing two outputs.
  //the inputs must be equal to the inputs of the first layer...
  inputs = new float[4];
  //and the outputs must be equal to the outputs of the last layer...
  outputs = new float[2];
}

void draw() {
  //Now you can experiment with the network as much as you want. You need to know what outputs (decisions) it has to make, and what affects that decision (the inputs).
  //For example, let's say you have a creature that can move and accelerate. This is what its properties would be:
  creatureXspeed += creatureXacceleration;
  creatureYspeed += creatureYacceleration;
  creatureX += creatureXspeed;
  creatureY += creatureYspeed;
  //Now you want it to decide where to accelerate relative to another, static object. Then, as inputs, it would need to know where the object is relative to its own position, and the outputs would be its acceleration (if the object were moving, we would also have to add its movement as inputs):
  inputs[0] = map(objectX-creatureX, -width, width, -1, 1);   //when using a hyperbolic tangent as an activation function it is wise to map your inputs between -1 and 1. This will train the network faster (note that the difference can be negative, so the range runs from -width to width)
  inputs[1] = map(objectY-creatureY, -height, height, -1, 1);
  inputs[2] = map(creatureXspeed, -maxCreatureSpeed, maxCreatureSpeed, -1, 1);
  inputs[3] = map(creatureYspeed, -maxCreatureSpeed, maxCreatureSpeed, -1, 1);
  //now that we've created the inputs, we're going to feed them into the creature's brain and get its decision...
  outputs = brain.produceOutputs(inputs);
  //instead of making four outputs, one for each direction of acceleration (up, down, left, right), we're only going to use two outputs: one deciding movement along the X axis and one along the Y axis.
  //remember that the hyperbolic tangent function returns values between -1 and 1, so we need to map the outputs..
  creatureXacceleration = map(outputs[0], -1, 1, -0.1, 0.1);
  creatureYacceleration = map(outputs[1], -1, 1, -0.1, 0.1);
  //Voila! Now our creature can decide where to accelerate, knowing how fast it's currently going, where it's going and where the object is. 
  //The best part is that if you change how the creature interacts with the object (for example, having one case where the creature is rewarded for hitting the object and another where it has to avoid it), you don't need to change anything about the creature; given a proper training algorithm, it will learn to perform well on its own.
  //From then on, it's just a matter of training the network accordingly. This is the harder part, and is usually done through backpropagation (an algorithm that is heavy on calculus), but it can also be done more simply with some creativity.
  //For example, I've found that genetic algorithms are simple and produce good results in cases where creatures have to interact with a simple environment (see the rough mutation sketch after draw() below).
  //Good luck!
}
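
As a rough illustration of the genetic-algorithm idea mentioned in the comments above, here is a minimal mutation sketch. The mutate() helper and the rate/amount parameters are my own assumptions for illustration, not part of the network itself; a full genetic algorithm would also need a fitness score for each creature and a way to copy the best-performing networks into the next generation.

void mutate(Network net, float rate, float amount) {          //randomly nudge some of the network's weights
  for (Layer layer : net.layers) {
    for (Neuron neuron : layer.neurons) {
      for (int i = 0; i < neuron.weights.length; i++) {
        if (random(1) < rate) {                               //only a fraction of the weights change each generation...
          neuron.weights[i] += random(-amount, amount);       //...and each change is a small random nudge
        }
      }
    }
  }
}

For example, mutate(brain, 0.1, 0.05); would change roughly 10% of the weights, each by up to 0.05 in either direction.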