Repository: https://github.com/BlackLight/neuralpp.git
Commit 7b16294784 (parent 49b5472480): Fixin' namespace neuralpp

6 changed files with 18 additions and 197 deletions
ChangeLog:

@@ -6,7 +6,8 @@
 * neural++.hpp: Changed header name, added BETA0 macro
 * synapsis.cpp: Added momentum() method to compute the inertial momentum
   of a synapsis
-* everything: Data type changed from float to double for everything
+* everything: Data type changed from float to double for everything,
+  fixing neuralpp namespace
 
 --- Release 0.2.2 ---
 
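The hunks that follow all apply the same change: each implementation file's definitions are wrapped in a namespace neuralpp { ... } block (closed by the lone +} at the end of each file) instead of sitting in the global namespace behind a using-directive. Older compilers tolerated member definitions reached only through a using-directive, but standard C++ requires them to appear in an enclosing namespace, which is presumably what this commit fixes. A minimal sketch of the shape of the change, using a one-line member that appears later in this diff:

    // Before: the definition lands in the global namespace.
    using namespace neuralpp;
    double Neuron::getProp() { return prop_val; }

    // After: the definition is enclosed in the library's namespace.
    namespace neuralpp {
        double Neuron::getProp() { return prop_val; }
    }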
neural++.hpp:

@@ -28,13 +28,13 @@
 #include "neural++_exception.hpp"
 using namespace std;
 
+//! Default rand value: |sin(rand)|, always >= 0 and <= 1
+#define RAND ( abs( sin(rand()) ) )
+
+//! Initial value for the inertial momentum of the synapses
+#define BETA0 0.7
+
 namespace neuralpp {
-//! Default rand value: |sin(rand)|, always >= 0 and <= 1
-#define RAND ( (double) abs( sinf((double) rand()) ) )
-
-//! Initial value for the inertial momentum of the synapses
-#define BETA0 0.7
-
     class Synapsis;
     class Neuron;
     class Layer;
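Since #define is handled by the preprocessor, moving RAND and BETA0 outside the namespace block changes nothing at the call sites; macros never belonged to the namespace in the first place, so this is purely a tidying of the header. A small self-contained sketch of what the new RAND expands to (assuming <cmath> is in scope so the double overload of abs is selected, as the header's using namespace std suggests):

    #include <cmath>
    #include <cstdio>
    #include <cstdlib>
    using namespace std;

    // RAND expands to ( abs( sin(rand()) ) ): |sin(n)| for a random integer n,
    // so every value falls in [0, 1] -- suitable for initial synapse weights.
    int main() {
        srand(42);
        for (int i = 0; i < 3; i++)
            printf("%f\n", abs( sin(rand()) ));  // the exact expression RAND produces
        return 0;
    }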
layer.cpp:

@@ -15,12 +15,7 @@
 #include "neural++.hpp"
 using namespace neuralpp;
 
-/**
- * @brief Constructor
- * @param sz Size of the layer
- * @param a Activation function
- * @param d Its derivate
- */
+namespace neuralpp {
 Layer::Layer (size_t sz, double(*a)(double), double(*d)(double)) {
     for (size_t i=0; i<sz; i++) {
         Neuron n(a,d);

@@ -31,30 +26,16 @@ Layer::Layer (size_t sz, double(*a)(double), double(*d)(double)) {
     deriv=d;
 }
 
-/**
- * @brief Alternative constructor. It directly gets a vector of neurons to build
- * the layer
- */
 Layer::Layer (vector< Neuron > &el, double (*a)(double), double(*d)(double)) {
     elements=el;
     actv_f=a;
     deriv=d;
 }
 
-/**
- * @return Number of neurons in the layer
- */
 size_t Layer::size() { return elements.size(); }
 
-/**
- * @brief Redefinition for operator []. It gets the neuron at <i>i</i>
- */
 Neuron& Layer::operator[] (size_t i) { return elements[i]; }
 
-/**
- * @brief It links a layer to another
- * @param l Layer to connect to the current as input layer
- */
 void Layer::link (Layer& l) {
     srand ((unsigned) time(NULL));
 

@@ -71,27 +52,16 @@ void Layer::link (Layer& l) {
         }
 }
 
-/**
- * @brief It sets a vector of propagation values to all its neurons
- * @param v Vector of values to write as propagation values
- */
 void Layer::setProp (vector<double> &v) {
     for (size_t i=0; i<size(); i++)
         elements[i].setProp(v[i]);
 }
 
-/**
- * @brief It sets a vector of activation values to all its neurons
- * @param v Vector of values to write as activation values
- */
 void Layer::setActv (vector<double> &v) {
     for (size_t i=0; i<size(); i++)
         elements[i].setActv(v[i]);
 }
 
-/**
- * @brief It propagates its activation values to the output layers
- */
 void Layer::propagate() {
     for (size_t i=0; i<size(); i++) {
         Neuron *n = &(elements[i]);

@@ -100,4 +70,5 @@ void Layer::propagate() {
         n->setActv( actv_f(n->getProp()) );
     }
 }
+}
 
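The final +} closes the namespace neuralpp { block opened in this file's first hunk. The propagation step visible in the last hunk amounts to squashing each neuron's accumulated propagation value through the layer's activation function; a self-contained sketch of that same logic, using plain vectors rather than the library's types:

    #include <vector>
    using namespace std;

    double actv_f(double x) { return x; }     // the library's default: f(x) = x

    // What Layer::propagate() does per neuron, minus the Neuron indirection:
    void propagate(const vector<double>& prop, vector<double>& actv) {
        for (size_t i = 0; i < prop.size(); i++)
            actv[i] = actv_f(prop[i]);        // actv = f(prop)
    }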
neuralnet.cpp:

@@ -14,28 +14,13 @@
 #include "neural++.hpp"
 #include "Markup.h"
 #include <iostream>
-using namespace neuralpp;
 
-/**
- * @brief Built-in function. The default activation function: f(x)=x
- */
+namespace neuralpp {
+
 double __actv(double prop) { return prop; }
 
-/**
- * @brief Default derivate for default activation function: f'(x)=1
- */
 double __deriv(double prop) { return 1; }
 
-/**
- * @brief Constructor
- * @param in_size Size of the input layer
- * @param hidden_size Size of the hidden layer
- * @param out_size Size of the output layer
- * @param l learn rate (get it after doing some experiments, but generally try to
- * keep its value quite low to be more accurate)
- * @param e Epochs (cycles) to execute (the most you execute, the most the network
- * can be accurate for its purpose)
- */
 NeuralNet::NeuralNet (size_t in_size, size_t hidden_size, size_t out_size, double l, int e) {
     epochs=e;
     ref_epochs=epochs;

@@ -49,18 +34,6 @@ NeuralNet::NeuralNet (size_t in_size, size_t hidden_size, size_t out_size, doubl
     link();
 }
 
-/**
- * @brief Constructor
- * @param in_size Size of the input layer
- * @param hidden_size Size of the hidden layer
- * @param out_size Size of the output layer
- * @param actv Activation function to use (default: f(x)=x)
- * @param deriv Derivate for the activation function to use (default: f'(x)=1)
- * @param l learn rate (get it after doing some experiments, but generally try to
- * keep its value quite low to be more accurate)
- * @param e Epochs (cycles) to execute (the most you execute, the most the network
- * can be accurate for its purpose)
- */
 NeuralNet::NeuralNet (size_t in_size, size_t hidden_size, size_t out_size,
         double(*a)(double), double(*d)(double), double l, int e) {
     epochs=e;

@@ -76,15 +49,8 @@ NeuralNet::NeuralNet (size_t in_size, size_t hidden_size, size_t out_size,
     link();
 }
 
-/**
- * @brief It gets the output of the network (note: the layer output should contain
- * an only neuron)
- */
 double NeuralNet::getOutput() { return (*output)[0].getActv(); }
 
-/**
- * @brief It gets the output of the network in case the output layer contains more neurons
- */
 vector<double> NeuralNet::getVectorOutput() {
     vector<double> v;
 

@@ -93,58 +59,30 @@ vector<double> NeuralNet::getVectorOutput() {
     return v;
 }
 
-/**
- * @brief It get the error made on the expected result as |v-v'|/v
- * @param Expected value
- * @return Mean error
- */
 double NeuralNet::error(double expected) {
     return abs( (getOutput() - expected*
             deriv(getOutput())) / (abs(expected)) );
 }
 
-/**
- * @brief It propagates values through the network. Use this when you want to give
- * an already trained network some new values the get to the output
- */
 void NeuralNet::propagate() {
     hidden->propagate();
     output->propagate();
 }
 
-/**
- * @brief It sets the input for the network
- * @param v Vector of doubles, containing the values to give to your network
- */
 void NeuralNet::setInput(vector<double>& v) {
     input->setProp(v);
     input->setActv(v);
 }
 
-/**
- * @brief It links the layers of the network (input, hidden, output). Don't use unless
- * you exactly know what you're doing, it is already called by the constructor
- */
 void NeuralNet::link() {
     hidden->link(*input);
     output->link(*hidden);
 }
 
-/**
- * @brief It sets the value you expect from your network
- */
 void NeuralNet::setExpected(double e) { ex=e; }
 
-/**
- * @brief It gets the value expected. Of course you should specify this when you
- * build your network by using setExpected.
- */
 double NeuralNet::expected() { return ex; }
 
-/**
- * @brief It updates the weights of the net's synapsis through back-propagation.
- * In-class use only
- */
 void NeuralNet::updateWeights() {
     double out_delta;
 
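A note on error(): with the library's default derivative __deriv (f'(x) = 1, shown in this file's first hunk), the expression reduces to |output - expected| / |expected|, the relative error the removed comment describes as |v-v'|/v. For instance, an output of 4.5 against an expected value of 5 gives |4.5 - 5| / 5 = 0.1, i.e. a 10% error.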
@@ -180,11 +118,6 @@ void NeuralNet::updateWeights() {
     }
 }
 
-/**
- * @brief It commits the changes made by updateWeights() to the layer l.
- * In-class use only
- * @param l Layer to commit the changes
- */
 void NeuralNet::commitChanges (Layer *l) {
     for (size_t i=0; i<l->size(); i++) {
         Neuron *n = &(*l)[i];

@@ -197,11 +130,6 @@ void NeuralNet::commitChanges (Layer *l) {
     }
 }
 
-/**
- * @brief It updates through back-propagation the weights of the synapsis and
- * computes again the output value for <i>epochs</i> times, calling back
- * updateWeights and commitChanges functions
- */
 void NeuralNet::update() {
     while ((epochs--)>0) {
         updateWeights();

@@ -211,11 +139,6 @@ void NeuralNet::update() {
     }
 }
 
-/**
- * @brief Save an already trained neural network to a binary file
- * @param fname Name of the file to write
- * @return true in case of success, false otherwise
- */
 bool NeuralNet::save(const char *fname) {
     FILE *fp;
     struct netrecord record;

@@ -309,12 +232,6 @@ bool NeuralNet::save(const char *fname) {
     return true;
 }
 
-/**
- * @brief Constructs a neural network from a previously saved file
- * (saved using 'save()' method)
- * @param fname File name to load the network from
- * @throw NetworkFileNotFoundException
- */
 NeuralNet::NeuralNet (const char *fname) throw() {
     struct netrecord record;
     FILE *fp;
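The removed comment on update() is the only description of the training loop: for each of the <i>epochs</i> iterations it calls back updateWeights() and commitChanges(), then recomputes the output. The loop body is elided from this diff, so the following is a hedged reconstruction from that comment, not the repository's code:

    // Assumed shape of NeuralNet::update(); only the while header and the
    // updateWeights() call are actually visible in the hunks above.
    void NeuralNet::update() {
        while ((epochs--) > 0) {
            updateWeights();          // back-propagate the error into synapse deltas
            commitChanges(hidden);    // apply the pending deltas to the hidden layer
            commitChanges(output);    // ...and to the output layer
            propagate();              // recompute the output with the new weights
        }
    }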
@@ -423,13 +340,6 @@ NeuralNet::NeuralNet (const char *fname) throw() {
     fclose(fp);
 }
 
-/**
- * @brief Train a network using a training set loaded from an XML file. A sample XML file
- * is available in examples/adder.xml
- * @param xml XML file containing our training set
- * @param src Source type from which the XML will be loaded (from a file [default] or from a string)
- * @throw InvalidXMLException
- */
 void NeuralNet::train (string xmlsrc, NeuralNet::source src = file) throw() {
     double out;
     CMarkup xml;

@@ -487,10 +397,6 @@ void NeuralNet::train (string xmlsrc, NeuralNet::source src = file) throw() {
     return;
 }
 
-/**
- * @brief Initialize the training XML for the neural network
- * @param xml String that will contain the XML
- */
 void NeuralNet::initXML (string& xml) {
     xml.append("<?xml version=\"1.0\" encoding=\"iso-8859-1\"?>\n"
         "<!DOCTYPE NETWORK SYSTEM \"http://blacklight.gotdns.org/prog/neuralpp/trainer.dtd\">\n"

@@ -499,12 +405,6 @@ void NeuralNet::initXML (string& xml) {
     );
 }
 
-/**
- * @brief Splits a string into a vector of doubles, given a delimitator
- * @param delim Delimitator
- * @param str String to be splitted
- * @return Vector of doubles containing splitted values
- */
 vector<double> NeuralNet::split (char delim, string str) {
     char tmp[1024];
     vector<double> v;

@@ -522,18 +422,6 @@ vector<double> NeuralNet::split (char delim, string str) {
     return v;
 }
 
-/**
- * @brief Get a training set from a string and copies it to an XML
- * For example, these strings could be training sets for making sums:
- * "2,3;5" - "5,6;11" - "2,2;4" - "4,5:9"
- * This method called on the first string will return an XML such this:
- * '<training id="0"><input id="0">2</input><input id="1">3</input><output id="0">5</output>
- * </training>'
- *
- * @param id ID for the given training set (0,1,..,n)
- * @param set String containing input values and expected outputs
- * @return XML string
- */
 string NeuralNet::XMLFromSet (int id, string set) {
     string xml;
     vector<double> in, out;
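The removed comment on XMLFromSet() documents the training-set string format: comma-separated inputs, a semicolon, then the expected outputs (the "4,5:9" colon in the old comment looks like a typo for "4,5;9"). A hedged usage sketch built only from the signatures visible in this diff; layer sizes, learn rate and epoch count are illustrative:

    #include "neural++.hpp"
    using namespace neuralpp;

    int main() {
        string xml;
        NeuralNet net(2, 2, 1, 0.005, 1000);  // 2 inputs, 2 hidden, 1 output (illustrative)

        net.initXML(xml);                     // XML prolog + DOCTYPE
        xml += net.XMLFromSet(0, "2,3;5");    // <training id="0">...<output id="0">5</output>...
        xml += net.XMLFromSet(1, "5,6;11");
        net.closeXML(xml);                    // appends </NETWORK>

        // File-based training, per the removed comment; the enumerator for
        // training directly from the in-memory string is not shown in this diff.
        net.train("examples/adder.xml", NeuralNet::file);
        return 0;
    }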
@@ -579,11 +467,8 @@ string NeuralNet::XMLFromSet (int id, string set) {
     return xml;
 }
 
-/**
- * @brief Closes an open XML document generated by "initXML" and "XMLFromSet"
- * @param XML string to close
- */
 void NeuralNet::closeXML(string &xml) {
     xml.append("</NETWORK>\n\n");
 }
+}
 
neuron.cpp:

@@ -12,21 +12,14 @@
 **************************************************************************************************/
 
 #include "neural++.hpp"
-using namespace neuralpp;
 
-/**
- * @brief Constructor
- * @param a Activation function
- * @param d Its derivate
- */
+namespace neuralpp {
+
 Neuron::Neuron (double (*a)(double), double (*d)(double)) {
     actv_f=a;
     deriv=d;
 }
 
-/**
- * @brief Alternative constructor, that gets also the synapsis linked to the neuron
- */
 Neuron::Neuron (vector< Synapsis > i, vector< Synapsis > o, double (*a)(double), double(*d)(double)) {
     in=i;
     out=o;

@@ -35,59 +28,26 @@ Neuron::Neuron (vector< Synapsis > i, vector< Synapsis > o, double (*a)(double),
     deriv=d;
 }
 
-/**
- * @brief Gets the i-th synapsis connected on the input of the neuron
- */
 Synapsis& Neuron::synIn (size_t i) { return in[i]; }
 
-/**
- * @brief Gets the i-th synapsis connected on the output of the neuron
- */
 Synapsis& Neuron::synOut (size_t i) { return out[i]; }
 
-/**
- * @brief It pushes a new input synapsis
- */
 void Neuron::push_in (Synapsis& s) { in.push_back(s); }
 
-/**
- * @brief It pushes a new output synapsis
- */
 void Neuron::push_out (Synapsis& s) { out.push_back(s); }
 
-/**
- * @brief Change the propagation value of the neuron
- */
 void Neuron::setProp (double val) { prop_val=val; }
 
-/**
- * @brief Change the activation value of the neuron
- */
 void Neuron::setActv (double val) { actv_val=actv_f(val); }
 
-/**
- * @return Number of input synapsis
- */
 size_t Neuron::nIn() { return in.size(); }
 
-/**
- * @return Number of output synapsis
- */
 size_t Neuron::nOut() { return out.size(); }
 
-/**
- * @brief It gets the propagation value of the neuron
- */
 double Neuron::getProp() { return prop_val; }
 
-/**
- * @brief It gets the activation value of the neuron
- */
 double Neuron::getActv() { return actv_val; }
 
-/**
- * @brief Propagate a neuron's activation value to the connected neurons
- */
 double Neuron::propagate() {
     double aux=0;
 

@@ -95,4 +55,5 @@ double Neuron::propagate() {
         aux += (in[i].getWeight() * in[i].getIn()->actv_val);
     return aux;
 }
+}
 
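Neuron::propagate() in the last hunk returns the weighted sum over the input synapses, prop = sum_i (weight_i * actv_i), i.e. the classic perceptron pre-activation; the caller then passes it through actv_f. For example, weights (0.5, -0.25) on input activations (1.0, 0.8) give 0.5 * 1.0 + (-0.25) * 0.8 = 0.3.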
synapsis.cpp:

@@ -15,6 +15,8 @@
 #include "neural++.hpp"
 using namespace neuralpp;
 
+namespace neuralpp {
+
 Synapsis::Synapsis(Neuron* i, Neuron* o, double w, double d) {
     in=i; out=o;
     weight=w;

@@ -66,4 +68,5 @@ void Synapsis::setDelta(double d) {
 double Synapsis::momentum(int N, int x) {
     return (BETA0*N)/(20*x + N);
 }
+}
 
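momentum() implements the inertial-momentum schedule mentioned in the ChangeLog: momentum(N, x) = (BETA0 * N) / (20 * x + N), with BETA0 = 0.7 from neural++.hpp. It starts at BETA0 and decays as x grows: for N = 100, momentum(100, 0) = 0.7 and momentum(100, 5) = 70 / 200 = 0.35. How callers choose N and x is not shown in this diff; reading N as the reference epoch count and x as the elapsed epochs is an assumption.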