Making everything cooler

blacklight 2009-08-16 19:25:58 +02:00
parent 458eab5e99
commit d52976e74e
7 changed files with 41 additions and 21 deletions

BUGS (new file)

@@ -0,0 +1,12 @@
Sometimes the training phase of the network breaks in the middle. This happens
because the synaptic weights are initialized with random values, and sometimes
updating them pushes those values to >= 1. When that happens, the output values
of the network diverge instead of converging to the desired, expected values.
The library recognizes this behaviour: as soon as a weight becomes >= 1 it
throws an InvalidSynapticalWeightException. So far there is no way to prevent
this odd, random behaviour. The network uses an inertial momentum coefficient
to damp strong oscillations in the training phase and make the phenomenon
rarer, but even with this mechanism there is a roughly 10% chance of ending up
with a diverging network, and therefore a training phase aborted by an
InvalidSynapticalWeightException.
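
For illustration only, a minimal sketch of the momentum-damped update rule
described above (hypothetical names, not the library's actual code):

#include <stdexcept>

// beta plays the role of the inertial momentum coefficient: a fraction of
// the previous update is carried over, damping strong oscillations of the
// weight between epochs.
void updateWeight(double &weight, double &prevDelta,
                  double learnRate, double gradient, double beta) {
    double delta = -learnRate * gradient + beta * prevDelta;
    weight += delta;
    prevDelta = delta;

    // The failure mode described above: once the weight reaches 1 the
    // outputs diverge, and training aborts (the library throws an
    // InvalidSynapticalWeightException at this point).
    if (weight >= 1.0)
        throw std::runtime_error("synaptic weight >= 1: network diverging");
}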

INSTALL (new file)

@@ -0,0 +1,3 @@
$ make
# make install
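
The install location can be changed by overriding PREFIX on the make command
line (standard make behaviour; the default prefix is set in the Makefile):

# make PREFIX=/usr/local install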

Makefile

@@ -17,10 +17,20 @@ all:
install:
mkdir -p ${PREFIX}/lib
mkdir -p ${PREFIX}/${INCLUDEDIR}
mkdir -p ${PREFIX}/share
mkdir -p ${PREFIX}/share/${LIB}
install -m 0644 README ${PREFIX}/share/${LIB}/README
install -m 0644 INSTALL ${PREFIX}/share/${LIB}/INSTALL
install -m 0644 BUGS ${PREFIX}/share/${LIB}/BUGS
install -m 0644 VERSION ${PREFIX}/share/${LIB}/VERSION
install -m 0644 ChangeLog ${PREFIX}/share/${LIB}/ChangeLog
cp -r examples ${PREFIX}/share/${LIB}
cp -r doc ${PREFIX}/share/${LIB}
install -m 0755 lib${LIB}.so.0.0.0 ${PREFIX}/lib/lib${LIB}.so.0.0.0
install -m 0644 lib${LIB}.a ${PREFIX}/lib/lib${LIB}.a
install -m 0644 ${INCLUDEDIR}/${LIB}.hpp ${PREFIX}/${INCLUDEDIR}
install -m 0644 ${INCLUDEDIR}/${LIB}_exception.hpp ${PREFIX}/${INCLUDEDIR}
ln -sf ${PREFIX}/lib/lib${LIB}.so.0.0.0 ${PREFIX}/lib/lib${LIB}.so.0
uninstall:
@@ -29,8 +39,17 @@ uninstall:
rm ${PREFIX}/${INCLUDEDIR}/${LIB}_exception.hpp
rm ${PREFIX}/lib/lib${LIB}.so.0.0.0
rm ${PREFIX}/lib/lib${LIB}.so.0
rm ${PREFIX}/share/${LIB}/README
rm ${PREFIX}/share/${LIB}/INSTALL
rm ${PREFIX}/share/${LIB}/BUGS
rm ${PREFIX}/share/${LIB}/VERSION
rm ${PREFIX}/share/${LIB}/ChangeLog
rm -r ${PREFIX}/share/${LIB}/doc
rm -r ${PREFIX}/share/${LIB}/examples
rmdir ${PREFIX}/share/${LIB}
clean:
rm *.o
rm lib${LIB}.so.0.0.0
rm lib${LIB}.a

examples/Makefile

@@ -1,5 +1,6 @@
all:
g++ -Wall -o learnAdd learnAdd.cpp -lneural++
g++ -Wall -o doAnd doAnd.cpp -lneural++
g++ -Wall -o doAdd doAdd.cpp -lneural++
g++ -Wall -o adderFromScratch adderFromScratch.cpp -lneural++


@@ -15,14 +15,14 @@ using namespace std;
using namespace neuralpp;
double f (double x) {
return (1.0/(1.0 + pow(M_E,x)));
return (x <= 0) ? 1 : 0;
}
int main() {
int id = 0;
string xml;
time_t t1, t2;
NeuralNet net(2, 2, 1, 0.005, 100);
NeuralNet net(2, 2, 1, 0.005, 1000, 0.1, f);
NeuralNet::initXML(xml);
xml += NeuralNet::XMLFromSet(id, "2,3;5");
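
As a side note, the new seven-argument constructor used above can be
exercised in isolation; a minimal sketch, assuming the header is installed
as neural++.hpp (per the Makefile) and the program is linked with -lneural++:

#include <neural++.hpp>
using namespace neuralpp;

// Same step activation the updated example uses
double f(double x) {
    return (x <= 0) ? 1 : 0;
}

int main() {
    // 2 input, 2 hidden and 1 output neurons; learn rate 0.005;
    // 1000 epochs; threshold 0.1; custom activation f
    NeuralNet net(2, 2, 1, 0.005, 1000, 0.1, f);
    return 0;
}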

neural++.hpp

@@ -28,7 +28,7 @@
#define RAND (double) ( (rand() / (RAND_MAX/2)) - 1)
//! Initial value for the inertial momentum of the synapses
#define BETA0 0.8
#define BETA0 1.0
/**
* @namespace neuralpp
@@ -108,7 +108,8 @@ namespace neuralpp {
* 'sensitive' to variations of the input values
* @param a Activation function to use (default: f(x)=x)
*/
NeuralNet (size_t in_size, size_t hidden_size, size_t out_size, double l, int e, double th = 0.0, double (*a)(double) = __actv);
NeuralNet (size_t in_size, size_t hidden_size, size_t out_size, double l,
int e, double th = 0.0, double (*a)(double) = __actv);
/**
* @brief Constructor
@@ -117,23 +118,6 @@
*/
NeuralNet (const std::string file) throw(NetworkFileNotFoundException);
/**
* @brief Constructor
* @param in_size Size of the input layer
* @param hidden_size Size of the hidden layer
* @param out_size Size of the output layer
* @param actv Activation function to use (default: f(x)=x)
* @param l Learn rate (tune it experimentally, but generally keep its
* value quite low for better accuracy)
* @param e Epochs (cycles) to execute (the more you execute, the more
* accurate the network can be for its purpose)
* @param th Threshold, a value in [0,1] that establishes how 'sensitive'
* a neuron is to variations of the input values
*/
//NeuralNet (size_t in_size, size_t hidden_size, size_t out_size,
// double(*actv)(double), double l, int e, double th);
/**
* @brief Gets the output of the network (note: the output layer should
* contain only one neuron
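
One aside on the first hunk of this file (not something this commit touches):
in the RAND macro the cast to double happens after an integer division, so
rand() / (RAND_MAX/2) essentially evaluates to 0 or 1 and RAND to -1, 0 or 1,
rather than a uniform value in [-1, 1]. A sketch of the presumably intended
form, casting before dividing:

// Produces a value in [-1, 1) instead of just {-1, 0, 1}
#define RAND ( ((double) rand() / (RAND_MAX / 2.0)) - 1.0 )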


@@ -77,6 +77,7 @@ namespace neuralpp {
for (size_t i = 0; i < nIn(); i++)
aux += (in[i].getWeight() * in[i].getIn()->actv_val);
aux -= threshold;
setProp(aux);
setActv( actv_f(aux) );
}
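
The added setProp(aux) call stores the raw propagation value before the
activation function is applied. As a standalone sketch of the same step
(hypothetical names, not the library's classes):

#include <vector>
#include <cstddef>

// Weighted sum of the inputs minus the neuron's threshold; the raw value
// is kept (what setProp stores) and its activation is returned (what
// setActv stores).
double propagate(const std::vector<double> &weights,
                 const std::vector<double> &inputs,
                 double threshold, double (*actv_f)(double),
                 double &prop_val) {
    double aux = 0.0;
    for (size_t i = 0; i < weights.size(); i++)
        aux += weights[i] * inputs[i];
    aux -= threshold;
    prop_val = aux;
    return actv_f(aux);
}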