(******************************************************************
   [LibNN - Neural Networks Library]
   http://libnn.org
   Copyright (C) 2002 - 2003 LAGACHERIE Matthieu RICORDEAU Olivier

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License
   as published by the Free Software Foundation; either version 2
   of the License, or (at your option) any later version. This
   program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details. You should have
   received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
   USA.

   SPECIAL NOTE (the beerware clause):
   This software is free software. However, it also falls under the beerware
   special category. That is, if you find this software useful, or use it
   every day, or want to thank us for our modest contribution to the
   free software community, feel free to send us a beer from one of
   your local breweries. Our preference goes to Belgian abbey beers and
   Irish stout (Guinness for strength!), but we like to try new stuff.

   Authors:
   LAGACHERIE Matthieu
   Paper mail : 7 rue Delescluzes 94280 LE KREMLIN BICETRE, FRANCE
   E-mail : matthieu@libnn.org

   RICORDEAU Olivier
   Paper mail : 69 avenue d'Italie 75013 PARIS, FRANCE
   E-mail : olivier@libnn.org

*****************************************************************)

(**
   The errorTdnnVisitor class.
   Computes each neuron's output error.

   @author Matthieu Lagacherie
   @author Olivier Ricordeau
   @since 10/08/2003
*)

open Nn
open ErrorVisitor
open TdNN

class errorTdnnVisitor =
object
  inherit [tdNN] errorVisitor
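
  (* _transfertFunction below is the standard logistic sigmoid,
     f(x) = 1 / (1 + exp(-x)). *)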

  val mutable _transfertFunction = function x
    -> (1. /. (1. +. exp (-.x)))
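
  (* _derivateFunction below is the derivative of that sigmoid, written
     out in full: f'(x) = f(x) * (1 - f(x)). *)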

  val mutable _derivateFunction = function x
    -> ((1. /. (1. +. exp (-.x))) *. (1. -. (1. /. (1. +. exp (-.x)))))

  method visit (network : tdNN) =
    let error = network#getError and
        gradients = network#getGradients and
        output = network#getOutputLearnVector and
        outputActivation = network#getOutputActivation and
        inputSum = network#getInputSum and
        weights = network#getWeights and
        derivate = _derivateFunction and
        delay = network#getDelay and
        timeNb = network#getTimeNb and
        featuresNb = network#getFeaturesNb and
        fieldSize = network#getFieldSize and
        (**
           Computes the range of neurons in layer l + 1 that are connected
           to neuron [state] of layer l, returned as a (startState, endState)
           pair of time indices.

           state -> index of the neuron of layer l in the time direction.
           field -> the receptive field size of layer l.
           delay -> the delay of layer l.
           currentTimeNb -> the number of neurons in the time direction of layer l.
           nextTimeNb -> the number of neurons in the time direction of layer l + 1.
        *)
        nbConnected (state, field, delay, currentTimeNb, nextTimeNb) =
          let step = ref 0 and
              startState = ref 0 and
              endState = ref 0 and
              stop = ref 0 in
          begin
            endState := -1;
            startState := -1;
            for i = 0 to nextTimeNb - 1 do
              stop := if ((!step + field - 1) >= currentTimeNb)
                      then (currentTimeNb - 1) else (!step + field - 1);
              startState := if ((!step <= state) && (state <= !stop)
                                && (!startState == -1))
                            then i else !startState;
              endState := if ((state < !step) && (!endState == -1))
                          then (i - 1) else !endState;
              step := !step + delay;
              Printf.printf "Index [i=%d] [stop=%d] [step=%d] [startState=%d] [endState=%d] [state=%d] [field=%d] [delay=%d] [current=%d] [next=%d]\n"
                i !stop !step !startState !endState state field delay currentTimeNb nextTimeNb
            done;
            endState := if (!endState == -1) then nextTimeNb - 1 else !endState
          end; (!startState, !endState) and
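
        (* Worked example: with field = 3, delay = 1, currentTimeNb = 5 and
           nextTimeNb = 3, the receptive fields of layer l + 1 start at time
           steps 0, 1 and 2, and neuron 2 of layer l falls inside all three,
           so nbConnected (2, 3, 1, 5, 3) evaluates to (0, 2). *)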
        startEnd = ref (0, 0) and
        stepDelay = ref 0 in
    begin
      (**
         Compute the error of the output layer.
         The output vector is mapped first along the feature direction,
         then along the time direction.
      *)
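
      (* The assignment below is the usual output-layer delta rule for a
         sigmoid unit: error = f'(inputSum) *. (activation -. target), the
         target being read from the flattened output learn vector. *)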
      Printf.printf "Starting Backpropagation out \n\n";
      for i = 0 to featuresNb.(network#getLayerNb - 1) - 1 do
        for j = 0 to timeNb.(network#getLayerNb - 1) - 1 do
          !error.(network#getLayerNb - 1).(i).(j)
            <- derivate(!inputSum.(network#getLayerNb - 1).(i).(j))
               *. (!outputActivation.(network#getLayerNb - 1).(i).(j) -. output.(i + j))
        done
      done;
      (**
         Compute the error and gradient of the hidden layers.
         l the layer
         i neuron of layer l + 1 in the feature direction
         j neuron of layer l + 1 in the time direction
         k neuron of layer l in the feature direction
         m neuron of layer l in the time direction
         stepDelay is used to preserve the delay between receptive fields
      *)
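
      (* In delta-rule terms, each hidden unit accumulates
         error(l) = f'(inputSum(l)) *. (sum, over the connected next-layer
         units found by nbConnected, of error(l + 1) *. weight), while each
         gradient entry is set to error(l + 1) *. outputActivation(l). *)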
      Printf.printf "Starting Backpropagation hidden \n\n";
      for l = network#getLayerNb - 2 downto 0 do
        for m = 0 to timeNb.(l) - 1 do
          startEnd := nbConnected (m, fieldSize.(l), !delay.(l), timeNb.(l), timeNb.(l + 1));
          for k = 0 to featuresNb.(l) - 1 do
            (**
               Initialization of the error term.
            *)
            !error.(l).(k).(m) <- 0.;
            for j = fst !startEnd to snd !startEnd do
              for i = 0 to featuresNb.(l + 1) - 1 do
                (**
                   Backpropagation of the error term.
                *)
                Printf.printf "Index [l=%d] [m=%d] [k=%d] [j=%d] [i=%d] [start=%d] [endD=%d]\n"
                  l m k j i (fst !startEnd) (snd !startEnd);
                !error.(l).(k).(m)
                  <- !error.(l).(k).(m)
                     +. !error.(l + 1).(i).(j) *. !weights.(l).(k).(m).(i);
                Printf.printf "!error.(%d).(%d).(%d) <- !error.(%d).(%d).(%d) +. !error.(%d).(%d).(%d) *. !weights.(%d).(%d).(%d).(%d);\n"
                  l k m l k m (l + 1) i j l k m i;
                (**
                   We compute the gradient with the error term of
                   the layer l + 1.
                *)
                !gradients.(l).(k).(m).(i)
                  <- !error.(l + 1).(i).(j) *. !outputActivation.(l).(k).(m);
                Printf.printf "!gradients.(%d).(%d).(%d).(%d) <- !error.(%d).(%d).(%d) *. !outputActivation.(%d).(%d).(%d)\n\n"
                  l k m i (l + 1) i j l k m
              done
            done;
            !error.(l).(k).(m) <- !error.(l).(k).(m)
                                  *. derivate(!inputSum.(l).(k).(m))
          done
        done
      done
    end
end
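
(* A minimal usage sketch (hypothetical driver code; it assumes a tdNN
   instance has already been built and evaluated forward elsewhere):

     let visitor = new errorTdnnVisitor in
     visitor#visit network

   The visitor fills the network's error and gradient arrays in place;
   a separate weight-update step would then consume the gradients. *)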