
Commit 6496a5b

Update ConvNet.py
1 parent 7aafa3d

File tree

1 file changed: 7 additions, 3 deletions


FinalModel/ConvNet.py

@@ -191,11 +191,11 @@ def model(x):
 print 'Done training the model!'

 # validating the model
-J_train = sess.run(cost, feed_dict={x: X_train, y: Y_train})
+# J_train = sess.run(cost, feed_dict={x: X_train, y: Y_train})
 # J_validate = sess.run(cost, feed_dict={x: X_validate, y: Y_validate})
 # J_test = sess.run(cost, feed_dict={x: X_test, y: Y_test})

-print 'Final cost over training set: ', J_train
+# print 'Final cost over training set: ', J_train
 # print 'Final cost over validation set: ', J_validate
 # print 'Final cost over test set: ', J_test

@@ -204,13 +204,17 @@ def model(x):
 accuracy = tf.reduce_mean(tf.cast(corr_pred, tf.float32))

 print '\nPredicting accuracy...'
-print 'Training accuracy: ', sess.run(accuracy, feed_dict={x: X_train, y: Y_train}) * 100, ' %'
+acc = []
+for idx in range(10):
+    acc.append(sess.run(accuracy, feed_dict={x: X_train[idx*6000:(idx+1)*6000], y: Y_train[idx*6000:(idx+1)*6000]}))
+print 'Training accuracy: ', (sum(acc)/10.0) * 100, ' %'
 # print 'Validation accuracy: ', sess.run(accuracy, feed_dict={x: X_validate, y: Y_validate}) * 100
 # print 'Test accuracy: ', sess.run(accuracy, feed_dict={x: X_test, y: Y_test}) * 100

 # save the weights and costs
 np.savez('./weights.npz', wc1=sess.run(wc1), wc2=sess.run(wc2), wf1=sess.run(wf1), wo=sess.run(wo))
 np.savez('./bias.npz', bc1=sess.run(bc1), bc2=sess.run(bc2), bf1=sess.run(bf1), bo=sess.run(bo))
 np.savez('./cost.npz', epoch_cost=cost_vec_epoch, batch_cost=cost_vec_batch)
+np.savez('./accuracy.npz', acc=acc)

 print '\nTotal time taken: ', time.time() - init_time
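Note on the second hunk: instead of one sess.run over the whole training set, accuracy is now evaluated on ten slices of 6,000 examples and the per-slice results are averaged. Because every slice has the same size, the average equals the accuracy over all 60,000 examples, while each feed_dict stays small enough to fit in memory. A minimal sketch of the same chunked-evaluation idea in plain NumPy, independent of the TensorFlow session (chunked_mean, eval_fn and the toy arrays are illustrative names, not part of the repository):

import numpy as np

def chunked_mean(eval_fn, X, Y, chunk_size=6000):
    # eval_fn(xb, yb) returns the mean metric over one chunk; weighting by
    # chunk length keeps the result exact even if the last chunk is shorter.
    total, count = 0.0, 0
    for start in range(0, len(X), chunk_size):
        xb, yb = X[start:start + chunk_size], Y[start:start + chunk_size]
        total += eval_fn(xb, yb) * len(xb)
        count += len(xb)
    return total / count

# toy stand-in for sess.run(accuracy, ...): fraction of labels equal to 1
X_toy = np.zeros((12000, 4), dtype=np.float32)
Y_toy = (np.arange(12000) % 2).astype(np.float32)
acc = chunked_mean(lambda xb, yb: float((yb == 1.0).mean()), X_toy, Y_toy)
print('Training accuracy: %.2f %%' % (acc * 100))

The weighted sum also covers the general case where the dataset size is not a multiple of the chunk size, which the fixed 10 x 6000 split in the commit assumes.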
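The commit also starts persisting the per-slice accuracies alongside the weights, biases and cost history. Each np.savez call writes a .npz archive whose keys are the keyword-argument names, so the arrays can be read back with np.load; a short sketch, assuming the script above has already written the four files listed in the diff:

import numpy as np

weights = np.load('./weights.npz')    # keys: wc1, wc2, wf1, wo
bias = np.load('./bias.npz')          # keys: bc1, bc2, bf1, bo
cost = np.load('./cost.npz')          # keys: epoch_cost, batch_cost
accuracy = np.load('./accuracy.npz')  # key:  acc (the ten per-slice accuracies)

# wc1 is presumably the first conv kernel; the key names come from the
# np.savez calls in the commit, the interpretation is an assumption.
print('conv1 kernel shape: %s' % (weights['wc1'].shape,))
print('mean training accuracy: %.2f %%' % (accuracy['acc'].mean() * 100))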
