From 1aa4ec764688236fde7783e4b164bd5b8e16ca10 Mon Sep 17 00:00:00 2001
From: blacklight
Date: Sat, 8 Aug 2009 18:05:02 +0200
Subject: [PATCH] float -> double for everything, learning rate now always
 considered >= 0

---
 examples/adder.net            | Bin 188 -> 332 bytes
 examples/adderFromScratch.cpp |  14 ++---
 examples/doAdd.cpp            |  14 ++---
 examples/learnAdd.cpp         |   6 +-
 include/neural++.hpp          | 102 +++++++++++++++++-----------------
 src/layer.cpp                 |   8 +--
 src/neuralnet.cpp             |  48 ++++++++--------
 src/neuron.cpp                |  16 +++---
 src/synapsis.cpp              |  12 ++--
 9 files changed, 110 insertions(+), 110 deletions(-)

diff --git a/examples/adder.net b/examples/adder.net
index ae58fe01faa5ab649e3b043a20b2c9fa32d4609d..17cde54ff77d1b00fb4895a01d7dc7495adf757f 100644
GIT binary patch
literal 332
zcmZQ#U|?VZVn!eqP-kGM7Fp;1u&Tr!$Yo$~2zCI|4GyR@gByeoq!lZ!h?TAKgwx6Q
z6!p@%0vup;gw(a42mOK_U^K`sAUJSx<^_ly5Prj=ukYb}s2T?^X@nS1Swr&1XDDix
z)So)KvISwz&AVo)pI;#KfXrFn7BS-%l3ia?oF0WCnF~^r@lNpFAtd{!J(W_Of@Bvc
OY=8j8%}_BEcLM;|@krVL

literal 188
zcmZQ#U|?VZVn!eqP-kG^y1sZf1H&vQ1_l-=HUQEFPW%3_fU#F_mlFuTVVnoXAie7>
tL=dl!?w$XU>YP=J|o`-Bqn<<8ZI`^ArZ_@RkeVNHJ*a#z4v_*SUpIuT6_^$-e-Oc

diff --git a/examples/adderFromScratch.cpp b/examples/adderFromScratch.cpp
--- a/examples/adderFromScratch.cpp
+++ b/examples/adderFromScratch.cpp
@@ ... @@
-	net->train(xml, NeuralNet::str);
-	vector<float> v;
+	net.train(xml, NeuralNet::str);
+	vector<double> v;
 
 	cout << "Network status: trained\n\n";
 	cout << "First number to add: ";
@@ -34,9 +34,9 @@ int main() {
 	cin >> tmp;
 	v.push_back(tmp);
 
-	net->setInput(v);
-	net->propagate();
-	cout << "Output: " << net->getOutput() << endl;
+	net.setInput(v);
+	net.propagate();
+	cout << "Output: " << net.getOutput() << endl;
 	return 0;
 }
diff --git a/examples/doAdd.cpp b/examples/doAdd.cpp
index 2203d3c..9b6d3ee 100644
--- a/examples/doAdd.cpp
+++ b/examples/doAdd.cpp
@@ -12,11 +12,11 @@ using namespace neuralpp;
 #define NETFILE "adder.net"
 
 int main() {
-	float a,b;
-	NeuralNet *net = NULL;
+	double a,b;
+	NeuralNet net;
 
 	try {
-		net = new NeuralNet(NETFILE);
+		net = NeuralNet(NETFILE);
 	}
 
 	catch (NetworkFileNotFoundException e) {
@@ -30,13 +30,13 @@ int main() {
 	cout << "Second number to add: ";
 	cin >> b;
 
-	vector<float> v;
+	vector<double> v;
 	v.push_back(a);
 	v.push_back(b);
 
-	net->setInput(v);
-	net->propagate();
-	cout << "Neural net output: " << net->getOutput() << endl;
+	net.setInput(v);
+	net.propagate();
+	cout << "Neural net output: " << net.getOutput() << endl;
 	return 0;
 }
diff --git a/examples/learnAdd.cpp b/examples/learnAdd.cpp
index 2ea674c..157e960 100644
--- a/examples/learnAdd.cpp
+++ b/examples/learnAdd.cpp
@@ -11,12 +11,12 @@ using namespace neuralpp;
 
 int main() {
-	NeuralNet *net = new NeuralNet (2, 2, 1, -0.005, 10000);
+	NeuralNet net(2, 2, 1, 0.005, 10000);
 
 	cout << "Training in progress - This may take a while...if it gets stuck, interrupt and restart the app\n";
-	net->train("adder.xml", NeuralNet::file);
+	net.train("adder.xml", NeuralNet::file);
 
-	net->save("adder.net");
+	net.save("adder.net");
 	cout << "Network trained. You can use adder.net file now to load this network\n";
 	return 0;
 }
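
The learnAdd.cpp hunk above is the caller-side half of the commit's second change: the learning rate is now passed as a non-negative value (0.005 instead of -0.005), and the negation happens inside NeuralNet::updateWeights() instead (see the src/neuralnet.cpp hunks below). A minimal standalone sketch of that sign convention, using made-up error and activation values rather than anything from the library:

    #include <iostream>

    int main() {
    	double l_rate  = 0.005;   // callers now always pass a rate >= 0
    	double err     = 0.4;     // hypothetical output error
    	double actv_in = 0.9;     // hypothetical activation of the input-side neuron

    	// Mirrors the post-patch update: out_delta = actv * error * (-l_rate),
    	// so the weight still moves against the error, just with the sign
    	// applied in one place.
    	double out_delta = actv_in * err * (-l_rate);
    	std::cout << "out_delta = " << out_delta << std::endl;   // prints -0.0018
    	return 0;
    }
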
diff --git a/include/neural++.hpp b/include/neural++.hpp
index 85d6e7e..e915dca 100644
--- a/include/neural++.hpp
+++ b/include/neural++.hpp
@@ -30,7 +30,7 @@ using namespace std;
 
 namespace neuralpp {
 	//! Default rand value: |sin(rand)|, always >= 0 and <= 1
-	#define RAND ( (float) abs( sinf((float) rand()) ) )
+	#define RAND ( (double) abs( sin((double) rand()) ) )
 
 	class Synapsis;
 	class Neuron;
@@ -46,8 +46,8 @@ namespace neuralpp {
 	class NeuralNet {
 		int epochs;
 		int ref_epochs;
-		float l_rate;
-		float ex;
+		double l_rate;
+		double ex;
 
 		Layer* input;
 		Layer* hidden;
@@ -71,10 +71,10 @@ namespace neuralpp {
 		 * @param Expected value
 		 * @return Mean error
 		 */
-		float error(float);
+		double error(double);
 
-		float (*actv_f)(float);
-		float (*deriv)(float);
+		double (*actv_f)(double);
+		double (*deriv)(double);
 
 	public:
 		/**
 		 * @brief Constructor
@@ -94,7 +94,7 @@ namespace neuralpp {
 		 * @param e Epochs (cycles) to execute (the most you execute, the most the network
 		 * can be accurate for its purpose)
 		 */
-		NeuralNet (size_t, size_t, size_t, float, int);
+		NeuralNet (size_t, size_t, size_t, double, int);
 
 		/**
 		 * @brief Constructor
@@ -116,29 +116,29 @@ namespace neuralpp {
 		 * @param e Epochs (cycles) to execute (the most you execute, the most the network
 		 * can be accurate for its purpose)
 		 */
-		NeuralNet (size_t, size_t, size_t, float(*)(float), float(*)(float), float, int);
+		NeuralNet (size_t, size_t, size_t, double(*)(double), double(*)(double), double, int);
 
 		/**
 		 * @brief It gets the output of the network (note: the layer output should contain
 		 * an only neuron)
 		 */
-		float getOutput();
+		double getOutput();
 
 		/**
 		 * @brief It gets the output of the network in case the output layer contains more neurons
 		 */
-		vector<float> getVectorOutput();
+		vector<double> getVectorOutput();
 
 		/**
 		 * @brief It gets the value expected. Of course you should specify this when you
 		 * build your network by using setExpected.
 		 */
-		float expected();
+		double expected();
 
 		/**
 		 * @brief It sets the value you expect from your network
 		 */
-		void setExpected(float);
+		void setExpected(double);
 
 		/**
 		 * @brief It updates through back-propagation the weights of the synapsis and
@@ -155,9 +155,9 @@ namespace neuralpp {
 
 		/**
 		 * @brief It sets the input for the network
-		 * @param v Vector of floats, containing the values to give to your network
+		 * @param v Vector of doubles, containing the values to give to your network
 		 */
-		void setInput (vector<float>&);
+		void setInput (vector<double>&);
 
 		/**
 		 * @brief It links the layers of the network (input, hidden, output). Don't use unless
@@ -186,12 +186,12 @@ namespace neuralpp {
 		static void initXML (string&);
 
 		/**
-		 * @brief Splits a string into a vector of floats, given a delimitator
+		 * @brief Splits a string into a vector of doubles, given a delimitator
 		 * @param delim Delimitator
 		 * @param str String to be splitted
-		 * @return Vector of floats containing splitted values
+		 * @return Vector of doubles containing splitted values
 		 */
-		static vector<float> split (char, string);
+		static vector<double> split (char, string);
 
 		/**
 		 * @brief Get a training set from a string and copies it to an XML
@@ -220,17 +220,17 @@ namespace neuralpp {
 	 * you're doing, use NeuralNet instead
 	 */
 	class Synapsis {
-		float delta;
-		float weight;
+		double delta;
+		double weight;
 
 		Neuron *in;
 		Neuron *out;
 
-		float (*actv_f)(float);
-		float (*deriv)(float);
+		double (*actv_f)(double);
+		double (*deriv)(double);
 
 	public:
-		Synapsis(Neuron* i, Neuron* o, float w, float d) {
+		Synapsis(Neuron* i, Neuron* o, double w, double d) {
 			in=i; out=o;
 			weight=w; delta=d;
 		}
@@ -242,7 +242,7 @@ namespace neuralpp {
 		 * @param a Activation function
 		 * @param d Derivate for activation function
 		 */
-		Synapsis (Neuron* i, Neuron* o, float(*)(float), float(*)(float));
+		Synapsis (Neuron* i, Neuron* o, double(*)(double), double(*)(double));
 
 		/**
 		 * @brief Constructor
@@ -252,7 +252,7 @@ namespace neuralpp {
 		 * @param a Activation function
 		 * @param d Derivate for activation function
 		 */
-		Synapsis (Neuron* i, Neuron* o, float w, float(*)(float), float(*)(float));
+		Synapsis (Neuron* i, Neuron* o, double w, double(*)(double), double(*)(double));
 
 		/**
 		 * @return Reference to input neuron of the synapsis
@@ -267,23 +267,23 @@ namespace neuralpp {
 		/**
 		 * @brief It sets the weight of the synapsis
 		 */
-		void setWeight(float);
+		void setWeight(double);
 
 		/**
 		 * @brief It sets the delta (how much to change the weight after an update)
 		 * of the synapsis
 		 */
-		void setDelta(float);
+		void setDelta(double);
 
 		/**
 		 * @return Weight of the synapsis
 		 */
-		float getWeight();
+		double getWeight();
 
 		/**
 		 * @return Delta of the synapsis
 		 */
-		float getDelta();
+		double getDelta();
 	};
 
 	/**
@@ -292,26 +292,26 @@ namespace neuralpp {
 	 * you're doing, use NeuralNet instead
 	 */
 	class Neuron {
-		float actv_val;
-		float prop_val;
+		double actv_val;
+		double prop_val;
 
 		vector< Synapsis > in;
 		vector< Synapsis > out;
 
-		float (*actv_f)(float);
-		float (*deriv)(float);
+		double (*actv_f)(double);
+		double (*deriv)(double);
 
 	public:
 		/**
 		 * @brief Constructor
 		 * @param a Activation function
 		 * @param d Its derivate
 		 */
-		Neuron (float (*)(float), float(*)(float));
+		Neuron (double (*)(double), double(*)(double));
 
 		/**
 		 * @brief Alternative constructor, that gets also the synapsis linked to the neuron
 		 */
-		Neuron (vector< Synapsis >, vector< Synapsis >, float (*)(float), float(*)(float));
+		Neuron (vector< Synapsis >, vector< Synapsis >, double (*)(double), double(*)(double));
 
 		/**
 		 * @brief Gets the i-th synapsis connected on the input of the neuron
@@ -336,27 +336,27 @@ namespace neuralpp {
 		/**
 		 * @brief Change the activation value of the neuron
 		 */
-		void setActv (float);
+		void setActv (double);
 
 		/**
 		 * @brief Change the propagation value of the neuron
 		 */
-		void setProp (float);
+		void setProp (double);
 
 		/**
 		 * @brief It gets the activation value of the neuron
 		 */
-		float getActv();
+		double getActv();
 
 		/**
 		 * @brief It gets the propagation value of the neuron
 		 */
-		float getProp();
+		double getProp();
 
 		/**
 		 * @brief It propagates its activation value to the connected neurons
 		 */
-		float propagate();
+		double propagate();
 
 		/**
 		 * @return Number of input synapsis
@@ -383,8 +383,8 @@ namespace neuralpp {
 		vector< Neuron > elements;
 		void (*update_weights)();
 
-		float (*actv_f)(float);
-		float (*deriv)(float);
+		double (*actv_f)(double);
+		double (*deriv)(double);
 
 	public:
 		/**
@@ -393,13 +393,13 @@ namespace neuralpp {
 		 * @param a Activation function
 		 * @param d Its derivate
 		 */
-		Layer (size_t sz, float (*)(float), float(*)(float));
+		Layer (size_t sz, double (*)(double), double(*)(double));
 
 		/**
 		 * @brief Alternative constructor. It directly gets a vector of neurons to build
 		 * the layer
 		 */
-		Layer (vector< Neuron >&, float(*)(float), float(*)(float));
+		Layer (vector< Neuron >&, double(*)(double), double(*)(double));
 
 		/**
 		 * @brief Redefinition for operator []. It gets the neuron at i
@@ -416,13 +416,13 @@ namespace neuralpp {
 		 * @brief It sets a vector of propagation values to all its neurons
 		 * @param v Vector of values to write as propagation values
 		 */
-		void setProp (vector<float>&);
+		void setProp (vector<double>&);
 
 		/**
 		 * @brief It sets a vector of activation values to all its neurons
 		 * @param v Vector of values to write as activation values
 		 */
-		void setActv (vector<float>&);
+		void setActv (vector<double>&);
 
 		/**
 		 * @brief It propagates its activation values to the output layers
@@ -441,18 +441,18 @@ namespace neuralpp {
 		int output_size;
 
 		int epochs;
-		float l_rate;
-		float ex;
+		double l_rate;
+		double ex;
 	};
 
 	struct neuronrecord {
-		float prop;
-		float actv;
+		double prop;
+		double actv;
 	};
 
 	struct synrecord {
-		float w;
-		float d;
+		double w;
+		double d;
 	};
 }
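
The neuronrecord/synrecord structs at the end of the header are what the save/load code writes to disk, which is presumably why adder.net grows from 188 to 332 bytes in this commit: the 144-byte difference is consistent with 36 stored values widening from 4 to 8 bytes while the int fields of netrecord stay the same size. A quick sanity check of that arithmetic (assuming the records are serialized back to back, padding aside):

    #include <iostream>

    // Pre-patch and post-patch layouts of the per-neuron record
    // from neural++.hpp (names suffixed here for comparison only).
    struct neuronrecord_float  { float prop;  float actv;  };
    struct neuronrecord_double { double prop; double actv; };

    int main() {
    	// Typically 8 vs 16 bytes: each field doubles from 4 to 8 bytes.
    	std::cout << sizeof(neuronrecord_float)  << std::endl;
    	std::cout << sizeof(neuronrecord_double) << std::endl;
    	return 0;
    }
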
diff --git a/src/layer.cpp b/src/layer.cpp
index c41370a..46de72d 100644
--- a/src/layer.cpp
+++ b/src/layer.cpp
@@ -21,7 +21,7 @@ using namespace neuralpp;
  * @param a Activation function
  * @param d Its derivate
  */
-Layer::Layer (size_t sz, float(*a)(float), float(*d)(float)) {
+Layer::Layer (size_t sz, double(*a)(double), double(*d)(double)) {
 	for (size_t i=0; i<sz; i++)
 		elements.push_back(Neuron(a, d));
 }
@@ ... @@
-Layer::Layer (vector< Neuron > &el, float (*a)(float), float(*d)(float)) {
+Layer::Layer (vector< Neuron > &el, double (*a)(double), double(*d)(double)) {
 	elements=el;
 	actv_f=a;
 	deriv=d;
@@ -75,7 +75,7 @@ void Layer::link (Layer& l) {
  * @brief It sets a vector of propagation values to all its neurons
  * @param v Vector of values to write as propagation values
  */
-void Layer::setProp (vector<float> &v) {
+void Layer::setProp (vector<double> &v) {
 	for (size_t i=0; i<size(); i++)
 		elements[i].setProp(v[i]);
 }
@@ ... @@
  * @brief It sets a vector of activation values to all its neurons
  * @param v Vector of values to write as activation values
  */
-void Layer::setActv (vector<float> &v) {
+void Layer::setActv (vector<double> &v) {
 	for (size_t i=0; i<size(); i++)
 		elements[i].setActv(v[i]);
 }
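
Layer::setProp() and setActv() copy v[i] onto each neuron with no bounds check on v, so the caller is expected to supply one value per neuron (as doAdd.cpp does with its two inputs). A free-standing sketch of that contract, using a stand-in struct rather than the real Neuron class:

    #include <iostream>
    #include <vector>

    struct StubNeuron { double prop; };   // stand-in for neuralpp::Neuron

    // Same shape as Layer::setProp(): element-wise copy over the layer's
    // own size, so v must hold at least elements.size() values.
    void setProp(std::vector<StubNeuron>& elements, std::vector<double>& v) {
    	for (size_t i = 0; i < elements.size(); i++)
    		elements[i].prop = v[i];
    }

    int main() {
    	std::vector<StubNeuron> layer(2);
    	std::vector<double> v;
    	v.push_back(3.0);
    	v.push_back(4.0);
    	setProp(layer, v);
    	std::cout << layer[0].prop << " " << layer[1].prop << std::endl;  // 3 4
    	return 0;
    }
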
diff --git a/src/neuralnet.cpp b/src/neuralnet.cpp
--- a/src/neuralnet.cpp
+++ b/src/neuralnet.cpp
@@ ... @@
-vector<float> NeuralNet::getVectorOutput() {
-	vector<float> v;
+vector<double> NeuralNet::getVectorOutput() {
+	vector<double> v;
 
 	for (size_t i=0; i<output->size(); i++)
 		v.push_back( (*output)[i].getActv() );
@@ -98,7 +98,7 @@ vector<float> NeuralNet::getVectorOutput() {
  * @param Expected value
  * @return Mean error
  */
-float NeuralNet::error(float expected) {
+double NeuralNet::error(double expected) {
 	return abs( (getOutput() - expected * deriv(getOutput())) / (abs(expected)) );
 }
@@ -114,9 +114,9 @@ void NeuralNet::propagate() {
 
 /**
  * @brief It sets the input for the network
- * @param v Vector of floats, containing the values to give to your network
+ * @param v Vector of doubles, containing the values to give to your network
  */
-void NeuralNet::setInput(vector<float>& v) {
+void NeuralNet::setInput(vector<double>& v) {
 	input->setProp(v);
 	input->setActv(v);
 }
@@ -133,38 +133,38 @@ void NeuralNet::link() {
 
 /**
  * @brief It sets the value you expect from your network
 */
-void NeuralNet::setExpected(float e) { ex=e; }
+void NeuralNet::setExpected(double e) { ex=e; }
 
 /**
  * @brief It gets the value expected. Of course you should specify this when you
  * build your network by using setExpected.
 */
-float NeuralNet::expected() { return ex; }
+double NeuralNet::expected() { return ex; }
 
 /**
  * @brief It updates the weights of the net's synapsis through back-propagation.
  * In-class use only
 */
 void NeuralNet::updateWeights() {
-	float out_delta;
+	double out_delta;
 
 	for (size_t i=0; i<output->size(); i++) {
 		Neuron *n = &(*output)[i];
 
 		for (size_t j=0; j<n->nIn(); j++) {
 			Synapsis *s = &(n->synIn(j));
-			out_delta = s->getIn()->getActv() * error(ex) * l_rate;
+			out_delta = s->getIn()->getActv() * error(ex) * (-l_rate);
 			s->setDelta(out_delta);
 		}
 	}
 
 	for (size_t i=0; i<hidden->size(); i++) {
 		Neuron *n = &(*hidden)[i];
-		float d = deriv(n->getProp()) * n->synOut(0).getWeight() * out_delta;
+		double d = deriv(n->getProp()) * n->synOut(0).getWeight() * out_delta;
 
 		for (size_t j=0; j<n->nIn(); j++) {
 			Synapsis *s = &(n->synIn(j));
-			s->setDelta(l_rate * d * s->getIn()->getActv());
+			s->setDelta((-l_rate) * d * s->getIn()->getActv());
 		}
 	}
 }
@@ -420,7 +420,7 @@ NeuralNet::NeuralNet (const char *fname) throw() {
  * @throw InvalidXMLException
  */
 void NeuralNet::train (string xmlsrc, NeuralNet::source src = file) throw() {
-	float out;
+	double out;
 	CMarkup xml;
 
 	if (src == file)
@@ -435,8 +435,8 @@ void NeuralNet::train (string xmlsrc, NeuralNet::source src = file) throw() {
 
 	if (xml.FindElem("NETWORK")) {
 		while (xml.FindChildElem("TRAINING")) {
-			vector<float> input;
-			float output;
+			vector<double> input;
+			double output;
 			bool valid = false;
 
 			xml.IntoElem();
@@ -489,14 +489,14 @@ void NeuralNet::initXML (string& xml) {
 }
 
 /**
- * @brief Splits a string into a vector of floats, given a delimitator
+ * @brief Splits a string into a vector of doubles, given a delimitator
  * @param delim Delimitator
  * @param str String to be splitted
- * @return Vector of floats containing splitted values
+ * @return Vector of doubles containing splitted values
  */
-vector<float> NeuralNet::split (char delim, string str) {
+vector<double> NeuralNet::split (char delim, string str) {
 	char tmp[1024];
-	vector<float> v;
+	vector<double> v;
 
 	memset (tmp, 0x0, sizeof(tmp));
 	for (unsigned int i=0, j=0; i <= str.length(); i++) {
@@ -525,7 +525,7 @@ vector<double> NeuralNet::split (char delim, string str) {
  */
 string NeuralNet::XMLFromSet (int id, string set) {
 	string xml;
-	vector<float> in, out;
+	vector<double> in, out;
 	unsigned int delimPos = -1;
 	char delim=';';
 	char tmp[1024];
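
NeuralNet::split() above is the helper that turns the ';'-separated value lists of a training set into numbers, and after this patch it returns vector<double>. Assuming the library is installed and the method remains callable as the public static declared in the header, usage would look like:

    #include <neural++.hpp>
    #include <iostream>

    using namespace neuralpp;
    using namespace std;

    int main() {
    	// "2;3;5" is the kind of delimited string XMLFromSet() feeds to split().
    	vector<double> v = NeuralNet::split(';', "2;3;5");

    	for (size_t i = 0; i < v.size(); i++)
    		cout << v[i] << endl;   // prints 2, 3 and 5
    	return 0;
    }
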
diff --git a/src/neuron.cpp b/src/neuron.cpp
index 73de928..8ebc7f0 100644
--- a/src/neuron.cpp
+++ b/src/neuron.cpp
@@ -19,7 +19,7 @@ using namespace neuralpp;
  * @param a Activation function
  * @param d Its derivate
  */
-Neuron::Neuron (float (*a)(float), float (*d)(float)) {
+Neuron::Neuron (double (*a)(double), double (*d)(double)) {
 	actv_f=a;
 	deriv=d;
 }
@@ -27,7 +27,7 @@ Neuron::Neuron (float (*a)(float), float (*d)(float)) {
 /**
  * @brief Alternative constructor, that gets also the synapsis linked to the neuron
  */
-Neuron::Neuron (vector< Synapsis > i, vector< Synapsis > o, float (*a)(float), float(*d)(float)) {
+Neuron::Neuron (vector< Synapsis > i, vector< Synapsis > o, double (*a)(double), double(*d)(double)) {
 	in=i;
 	out=o;
@@ -58,12 +58,12 @@ void Neuron::push_out (Synapsis& s) { out.push_back(s); }
 /**
  * @brief Change the propagation value of the neuron
 */
-void Neuron::setProp (float val) { prop_val=val; }
+void Neuron::setProp (double val) { prop_val=val; }
 
 /**
  * @brief Change the activation value of the neuron
 */
-void Neuron::setActv (float val) { actv_val=actv_f(val); }
+void Neuron::setActv (double val) { actv_val=actv_f(val); }
 
 /**
  * @return Number of input synapsis
@@ -78,18 +78,18 @@ size_t Neuron::nOut() { return out.size(); }
 /**
  * @brief It gets the propagation value of the neuron
 */
-float Neuron::getProp() { return prop_val; }
+double Neuron::getProp() { return prop_val; }
 
 /**
  * @brief It gets the activation value of the neuron
 */
-float Neuron::getActv() { return actv_val; }
+double Neuron::getActv() { return actv_val; }
 
 /**
  * @brief Propagate a neuron's activation value to the connected neurons
 */
-float Neuron::propagate() {
-	float aux=0;
+double Neuron::propagate() {
+	double aux=0;
 
 	for (size_t i=0; i<nIn(); i++)
 		aux += (in[i].getWeight() * in[i].getIn()->actv_val);
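
Neuron::propagate() above is the inner product at the heart of the forward pass: each neuron sums weight times upstream activation over its input synapses, and setActv() then runs the activation function over that sum. A tiny worked example of the same arithmetic, with two hypothetical inputs feeding one neuron:

    #include <iostream>

    int main() {
    	// Two input neurons with activations 2 and 3 (as in the adder example),
    	// connected through hypothetical weights 0.25 and 0.5:
    	double w[]    = { 0.25, 0.5 };
    	double actv[] = { 2.0,  3.0 };

    	double aux = 0;
    	for (int i = 0; i < 2; i++)
    		aux += w[i] * actv[i];          // weighted sum, as in propagate()

    	std::cout << "prop = " << aux << std::endl;   // 0.25*2 + 0.5*3 = 2
    	return 0;
    }
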
diff --git a/src/synapsis.cpp b/src/synapsis.cpp
index a2ff47f..89d10e5 100644
--- a/src/synapsis.cpp
+++ b/src/synapsis.cpp
@@ -22,7 +22,7 @@ using namespace neuralpp;
  * @param a Activation function
  * @param d Derivate for activation function
 */
-Synapsis::Synapsis (Neuron* i, Neuron* o, float(*a)(float), float(*d)(float)) {
+Synapsis::Synapsis (Neuron* i, Neuron* o, double(*a)(double), double(*d)(double)) {
 	srand((unsigned) time(NULL));
 
 	delta=0;
@@ -42,7 +42,7 @@ Synapsis::Synapsis (Neuron* i, Neuron* o, float(*a)(float), float(*d)(float)) {
  * @param a Activation function
  * @param d Derivate for activation function
 */
-Synapsis::Synapsis (Neuron* i, Neuron* o, float w, float(*a)(float), float(*d)(float)) {
+Synapsis::Synapsis (Neuron* i, Neuron* o, double w, double(*a)(double), double(*d)(double)) {
 	delta=0;
 	weight=w;
 	in=i;
@@ -65,21 +65,21 @@ Neuron* Synapsis::getOut() { return out; }
 /**
  * @return Weight of the synapsis
 */
-float Synapsis::getWeight() { return weight; }
+double Synapsis::getWeight() { return weight; }
 
 /**
  * @return Delta of the synapsis
 */
-float Synapsis::getDelta() { return delta; }
+double Synapsis::getDelta() { return delta; }
 
 /**
  * @brief It sets the weight of the synapsis
 */
-void Synapsis::setWeight(float w) { weight=w; }
+void Synapsis::setWeight(double w) { weight=w; }
 
 /**
  * @brief It sets the delta (how much to change the weight after an update)
  * of the synapsis
 */
-void Synapsis::setDelta(float d) { delta=d; }
+void Synapsis::setDelta(double d) { delta=d; }
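
Finally, a note on why the float -> double sweep is worth 110 changed lines even for a network this small: a float carries roughly 7 significant decimal digits against roughly 15 for a double, and over 10000 epochs of multiply-accumulate weight updates with a rate like 0.005 the rounding error compounds. The representation gap is easy to see directly:

    #include <iomanip>
    #include <iostream>

    int main() {
    	float  f = 0.005f;   // the learning rate stored as a float
    	double d = 0.005;    // the same constant stored as a double

    	std::cout << std::setprecision(17)
    	          << f << std::endl    // something like 0.0049999998882412910
    	          << d << std::endl;   // something like 0.0050000000000000001
    	return 0;
    }
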