// nlayer.cpp
// David R. Morrison

#include "nlayer.hpp"

#include <cassert>

// Constructor
NLayer::NLayer(int input_vector_size, int size)
{
    layerSize = size;
    inputVectorSize = input_vector_size;
    neurons.resize(layerSize);
    signals.resize(layerSize);
    outputs.resize(layerSize);

    // Initialize all the neurons
    for (int i = 0; i < layerSize; ++i)
        neurons[i] = new Neuron(inputVectorSize);
}

// Delete all the neurons
NLayer::~NLayer()
{
    for (int i = 0; i < layerSize; ++i)
        delete neurons[i];
}

// Return the number of neurons
int NLayer::size() { return layerSize; }

// Compute the output of the layer in response to a given input; each neuron sums up
// its weighted inputs. (We assume a linear activation here, though this could easily
// be modified to use a squashing function.)
vector<double> NLayer::calcLayerOutput(vector<double> in)
{
    inputs = in;
    for (int i = 0; i < layerSize; ++i)
        outputs[i] = neurons[i]->evaluateSample(inputs);
    return outputs;
}

// Compute the error signals (target - actual)
vector<double> NLayer::calcSignals(vector<double> targets)
{
    assert(targets.size() == outputs.size());
    for (int i = 0; i < layerSize; ++i)
        signals[i] = targets[i] - outputs[i];
    return signals;
}

// Update the weights based on the signals and the learning rate eta
void NLayer::updateWeights(double eta)
{
    for (int i = 0; i < layerSize; ++i)
    {
        // First take care of the bias.
        neurons[i]->changeWeight(0, eta * signals[i]);

        // Then do everything else.
        for (int j = 1; j < inputVectorSize + 1; ++j)
            neurons[i]->changeWeight(j, eta * signals[i] * inputs[j - 1]);
    }
}

// Return a 2D array of input weights, arranged so that they can be referenced as
// output weights from the previous layer. Note that the bias is not included in these
// weights.
vector< vector<double> > NLayer::getInputWeights()
{
    vector< vector<double> > inputWeights(inputVectorSize);

    // Since this is being used to calculate the error, we need to load things into
    // our vector backwards; we want it ordered by the output weights of the previous
    // layer. So inputWeights[i][j] is the weight going to the jth neuron in this
    // layer from the ith neuron/cell in the previous layer.
    //
    // Also, make sure to ignore biases, so we start at 1 instead of 0.
    for (int i = 1; i < inputVectorSize + 1; ++i)
    {
        inputWeights[i - 1].resize(layerSize);
        for (int j = 0; j < layerSize; ++j)
            inputWeights[i - 1][j] = neurons[j]->getWeight(i);
    }

    return inputWeights;
}

// Print each neuron in the layer
ostream& NLayer::print(ostream& out)
{
    for (size_t i = 0; i < neurons.size(); ++i)
        out << "Neuron " << i << ": " << *neurons[i];
    return out;
}

// Stream output operator; delegates to print()
ostream& operator<<(ostream& out, NLayer& n)
{
    return n.print(out);
}
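
// ---------------------------------------------------------------------------
// Illustrative usage sketch (assumes NLayer is declared in nlayer.hpp and that
// Neuron provides the evaluateSample/changeWeight/getWeight interface referenced
// above). One delta-rule training step on a single-neuron layer might look
// roughly like this:
//
//     NLayer layer(2, 1);                      // 2 inputs, 1 neuron
//     vector<double> sample = {0.5, -0.3};     // one input pattern
//     vector<double> target = {1.0};           // desired output
//     layer.calcLayerOutput(sample);           // forward pass (linear activation)
//     layer.calcSignals(target);               // error = target - actual
//     layer.updateWeights(0.1);                // weight update with eta = 0.1
// ---------------------------------------------------------------------------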