NeuralNetwork.java
import java.io.*;
//Trains a 2-3-1 feed-forward network to learn XOR by backpropagation.
//Neuron and NetworkVisualizer are defined elsewhere in this repository.
public class NeuralNetwork{
    //the four XOR input pairs and their expected outputs
    static double[][] input = {
        {0.0, 0.0},
        {0.0, 1.0},
        {1.0, 0.0},
        {1.0, 1.0}
    };
    static double[] output = {
        0.0,
        1.0,
        1.0,
        0.0
    };
    static Neuron[][] network; //network[layer][neuron]
    static NetworkVisualizer brain; //Swing visualizer, defined elsewhere in the repo
    public static void main(String[] args) throws IOException{
        //layer 0: two inputs, layer 1: three hidden neurons, layer 2: one output
        network = new Neuron[3][];
        network[0] = new Neuron[]{
            new Neuron(),
            new Neuron()
        };
        network[1] = new Neuron[]{
            new Neuron(network[0]),
            new Neuron(network[0]),
            new Neuron(network[0])
        };
        network[2] = new Neuron[]{
            new Neuron(network[1])
        };
        //attach the visualizer
        brain = new NetworkVisualizer(network);
        brain.frame.add(brain);
        //test the untrained network
        testNet();
        System.out.println("training");
        //train the network
        for(int epoch = 0; epoch < 600000; epoch++){
            //forward propagation over each of the four examples
            for(int in = 0; in < input.length; in++){
                for(int x = 0; x < network[0].length; x++){
                    network[0][x].value = input[in][x];
                }
                for(int y = 1; y < network.length; y++){
                    for(int x = 0; x < network[y].length; x++){
                        network[y][x].process();
                    }
                }
                //back propagation
                double marginOfError = output[in] - network[network.length-1][0].value; //error at the output layer
                if(epoch % 100000 == 0){
                    brain.frame.repaint();
                    //br.readLine(); <-- used to 'pause' the visualizer (needs a BufferedReader in scope)
                    System.out.println("Epoch " + epoch + ", Error: " + marginOfError);
                }
                //double deltaError = marginOfError * nonLin(network[network.length-1][0].sum, true); //delta for the output layer
                network[network.length-1][0].backProcess(marginOfError, in);
            }
            //apply the weight changes accumulated over the four examples
            network[network.length-1][0].learn();
        }
        System.out.println("finished training");
        //test the trained network
        testNet();
        System.exit(0);
    }
    public static void testNet() throws IOException{
        BufferedReader br = new BufferedReader(new InputStreamReader(System.in));
        //feed user-supplied pairs through the network; a blank line (or EOF) exits
        while(true){
            String line = br.readLine();
            if(line == null || line.isEmpty()){
                break;
            }
            String[] in = line.split(" ");
            //load the pair into the input layer
            for(int x = 0; x < network[0].length; x++){
                network[0][x].value = Double.parseDouble(in[x]);
            }
            //forward propagation
            for(int y = 1; y < network.length; y++){
                for(int x = 0; x < network[y].length; x++){
                    network[y][x].process();
                }
            }
            brain.frame.repaint();
            System.out.println(network[network.length-1][0].value);
        }
    }
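    //Example session with testNet() (output values illustrative): after training,
    //entering "1 0" should print something close to 1.0 and "1 1" something close
    //to 0.0; a blank line returns to the caller.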
    //Math functions
    public static double sigmoid(double i){
        return 1 / (1 + Math.exp(-i));
    }
    //derivative of the sigmoid when d is true (i must already be a sigmoid output), otherwise the sigmoid itself
    public static double nonLin(double i, boolean d){
        return d ? i * (1 - i) : sigmoid(i);
    }
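    //Quick check of the two functions above: sigmoid(0) = 0.5, and
    //nonLin(0.5, true) = 0.5 * (1 - 0.5) = 0.25, the sigmoid's slope at its midpoint.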
}
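//The Neuron class used above lives elsewhere in this repository, so the sketch
//below is an assumption, not the actual implementation: a minimal neuron whose
//field and method names (value, sum, process, backProcess, learn) are inferred
//from how NeuralNetwork calls them. It accumulates weight changes during
//backProcess and applies them once per epoch in learn(), matching the call
//pattern in main.
class NeuronSketch{
    static final java.util.Random RNG = new java.util.Random();
    NeuronSketch[] inputs;  //previous layer; null for input neurons
    double[] weights;       //one weight per input, plus weights[inputs.length] for the bias
    double[] gradient;      //weight changes accumulated across the examples
    double value, sum;
    NeuronSketch(){}        //input neuron: value is assigned directly
    NeuronSketch(NeuronSketch[] prev){
        inputs = prev;
        weights = new double[prev.length + 1]; //+1 for the bias weight
        gradient = new double[weights.length];
        for(int i = 0; i < weights.length; i++){
            weights[i] = RNG.nextGaussian(); //Gaussian init, as the notes suggest
        }
    }
    void process(){ //forward pass (hidden/output neurons only): weighted sum plus bias, then sigmoid
        sum = weights[inputs.length];
        for(int i = 0; i < inputs.length; i++){
            sum += inputs[i].value * weights[i];
        }
        value = 1 / (1 + Math.exp(-sum));
    }
    //the example index parameter is unused here; it is kept only to mirror the call in main
    void backProcess(double error, int example){
        double delta = error * value * (1 - value); //error scaled by the sigmoid derivative
        for(int i = 0; i < inputs.length; i++){
            gradient[i] += delta * inputs[i].value;
            if(inputs[i].inputs != null){ //recurse into hidden neurons only
                inputs[i].backProcess(delta * weights[i], example);
            }
        }
        gradient[inputs.length] += delta; //bias gradient
    }
    void learn(){ //apply and reset the accumulated weight changes, then recurse
        for(int i = 0; i < weights.length; i++){
            weights[i] += gradient[i];
            gradient[i] = 0;
        }
        for(NeuronSketch n : inputs){
            if(n.inputs != null){
                n.learn();
            }
        }
    }
}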
/*
feed forward
get values from prev neurons (including bias)
multiply values with weights
summation
sigmoid function

2 inputs {i}, 3 hidden {h}, 1 output {o}, bias nodes []:

 {i1} --- {h1} \
        X {h2} -- {o}
 {i2} --- {h3} /
            |      |
           []     []
*/
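/*
The feed-forward steps above, written out for one neuron with two inputs and a
bias (the weights and input values here are made up for illustration):
  sum   = 0.4 * 0.9 + 1.0 * (-0.2) + 0.1 (bias) = 0.26
  value = sigmoid(0.26) = 1 / (1 + e^(-0.26))  ~= 0.565
*/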
/*
notes
FORWARD PROPAGATION
-apply weights (on the first iteration, use a Gaussian distribution to set the weights)
-calculate output
BACK PROPAGATION
-calculate error
-adjust weights to decrease error
ITERATION RUNTHROUGH
(0. Set weights using a Gaussian/normal distribution)
1. Input data to the input layer
2. Hidden layer calculates its values (i1 * w1 + i2 * w2 + ... = sum)
3. Apply 'activation' function to the hidden layer neurons
   - SIGMOID: F(x) = 1 / (1 + e^(-x))
   Other activation functions include:
   -(Linear)
   -(Hyperbolic Tangent)
4. Output layer calculates its value (h1 * w1 + h2 * w2 + ... = sum)
5. Apply 'activation' function to the output layer neuron(s)
--------OUTPUT RECEIVED----------
6. Calculate the Margin of Error (Target: 0, Result: 0.77, Error: -0.77)
7. Calculate the DOS (apply the derivative of the activation function to the output sum)
   DELTA OUTPUT SUM: the change in output sum {DOS = F'(outputSum[no activation]) * marginOfError}
8. Calculate the weight change for each hidden->output edge
   (chW = DOS * neuronValue, edge.weight += chW)
   DELTA HIDDEN SUM: the same idea as the DOS, one layer back
   {DHS = DOS * originalHiddenWeight * F'(hiddenSum[no activation])}
9. Calculate the weight change for each input->hidden edge
   (chW = DHS * input)
*/
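//A worked instance of steps 6-9 above for a single hidden neuron, using the
//example numbers from the notes (target 0.0, output 0.77). The hidden value,
//hidden weight, and input below are made-up illustrative numbers, and the
//variable names mirror the abbreviations in the notes (DOS, DHS, chW).
class BackpropExample{
    public static void main(String[] args){
        double target = 0.0, outputValue = 0.77;
        //step 6: margin of error
        double marginOfError = target - outputValue; //-0.77
        //step 7: DOS = F'(outputSum) * error; F'(s) = s * (1 - s) when s is the sigmoid output
        double dos = outputValue * (1 - outputValue) * marginOfError;
        //step 8: weight change on a hidden->output edge
        double hiddenValue = 0.68; //assumed hidden activation
        double chW = dos * hiddenValue;
        //DHS: the DOS pushed back through the pre-update hidden->output weight
        double hiddenWeight = 0.5; //assumed original weight
        double dhs = dos * hiddenWeight * hiddenValue * (1 - hiddenValue);
        //step 9: weight change on an input->hidden edge
        double in = 1.0;
        double chWHidden = dhs * in;
        System.out.println("DOS=" + dos + " chW=" + chW + " DHS=" + dhs + " chW(hidden)=" + chWHidden);
    }
}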