practice2.py
# -*- coding: utf-8 -*-
"""Fit a noisy truncated Fourier sine series with a deep tanh MLP
(TensorFlow 1.x graph API), plotting the fit as training progresses."""
import tensorflow as tf
import numpy as np
import matplotlib
matplotlib.use('TkAgg')  # select the GUI backend before importing pyplot
import matplotlib.pyplot as plt
# hyperparameters
x_range = 400           # length of the full curve
num_point = 100         # number of sampled training points
num_iters = 10000       # training steps
num_dense_layer = 10    # hidden layers
num_hidden_units = 100  # units per hidden layer
learning_rate = 8e-5
# source data: partial sum of a Fourier sine series (square-wave-like)
ax = np.arange(0, x_range)
ay = 0.0
for i in range(1, 30, 2):
    ay += np.sin(i * 0.01 * ax * np.pi) / i
ay = ay + (0.5 * np.random.rand(x_range))  # add uniform noise to the targets
# sample num_point distinct indices (without replacement) as training data
indices = np.sort(np.random.choice(x_range, num_point, replace=False))
x = ax[indices]
y = ay[indices]
# placeholders for the training data
x_placeholder = tf.placeholder(tf.float32, [None])
y_placeholder = tf.placeholder(tf.float32, [None])
# reshape to [batch_size, 1] for the dense layers
bx = tf.expand_dims(x_placeholder, axis=1)
by = tf.expand_dims(y_placeholder, axis=1)
# fully connected layers
output = bx
for i in range(num_dense_layer):
    output = tf.layers.dense(output, num_hidden_units, activation=tf.nn.tanh)
predict_y = tf.layers.dense(output, 1)  # linear output layer
# loss: mean absolute error (MAE)
loss = tf.losses.absolute_difference(by, predict_y)
# minimize the loss with Adam
optimizer = tf.train.AdamOptimizer(learning_rate)
train_op = optimizer.minimize(loss)
with tf.Session() as sess:
    # initialize variables
    sess.run(tf.global_variables_initializer())
    # train for num_iters steps on the full sampled batch
    for epoch in range(num_iters):
        output_x, output_y, output_predict, _ = sess.run(
            [bx, by, predict_y, train_op],
            feed_dict={x_placeholder: x, y_placeholder: y})
        # redraw the fit every 1000 steps
        if epoch % 1000 == 0:
            plt.figure('output')
            plt.clf()
            plt.plot(output_x, output_y, 'o')        # noisy training points
            plt.plot(output_x, output_predict, '-')  # current model fit
            plt.show(block=False)
            plt.pause(0.01)
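# ---------------------------------------------------------------------------
# Note: this script uses the TensorFlow 1.x graph API (tf.placeholder,
# tf.layers, tf.Session), which was removed in TensorFlow 2. As a rough,
# untested sketch only, the same experiment could look like this with the
# TF2/Keras API (run as a separate script under TensorFlow 2):
#
#   model = tf.keras.Sequential(
#       [tf.keras.layers.Dense(num_hidden_units, activation='tanh')
#        for _ in range(num_dense_layer)]
#       + [tf.keras.layers.Dense(1)])
#   model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate),
#                 loss='mae')
#   model.fit(x[:, None].astype('float32'), y[:, None].astype('float32'),
#             epochs=num_iters, batch_size=num_point, verbose=0)
#   predict_y = model.predict(x[:, None].astype('float32'))
# ---------------------------------------------------------------------------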