# Import TensorFlow and the MNIST tutorial dataset
from __future__ import print_function
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('tmp/data/', one_hot=True)

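# Sanity check (illustrative only): with one_hot=True each label is a
# 10-element vector, e.g. the digit 3 becomes [0 0 0 1 0 0 0 0 0 0],
# and each image is a flattened row of 784 pixel values
print(mnist.train.images.shape)  # (55000, 784)
print(mnist.train.labels[0])     # one-hot label of the first training image
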
# Define hidden layers
n_nodes_hlayer1 = 500
n_nodes_hlayer2 = 500
n_nodes_hlayer3 = 500

# Define the number of output classes, the batch size (how many images are
# fed through per backprop/weight update), and the number of training epochs
n_classes = 10
batch_size = 100
hm_epoch = 10
logs_path = '/tmp/tensorflow_logs/example'

# Inputs: 28x28-pixel images flattened into 784 features (height x width)
x = tf.placeholder('float', [None, 784])
y = tf.placeholder('float')
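# Optional: y can also be given an explicit shape so TensorFlow catches
# label-shape mismatches at graph-construction time (an equivalent sketch,
# not required by the code below):
# y = tf.placeholder('float', [None, n_classes])
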
def nn_model(data):
    # Each layer computes: output = input * weights + biases
    hidden_layer_1 = {'weights': tf.Variable(tf.random_normal([784, n_nodes_hlayer1])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hlayer1]))}

    hidden_layer_2 = {'weights': tf.Variable(tf.random_normal([n_nodes_hlayer1, n_nodes_hlayer2])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hlayer2]))}

    hidden_layer_3 = {'weights': tf.Variable(tf.random_normal([n_nodes_hlayer2, n_nodes_hlayer3])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hlayer3]))}

    output_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hlayer3, n_classes])),
                    'biases': tf.Variable(tf.random_normal([n_classes]))}

    layer1 = tf.add(tf.matmul(data, hidden_layer_1['weights']), hidden_layer_1['biases'])
    layer1 = tf.nn.relu(layer1)

    layer2 = tf.add(tf.matmul(layer1, hidden_layer_2['weights']), hidden_layer_2['biases'])
    layer2 = tf.nn.relu(layer2)

    layer3 = tf.add(tf.matmul(layer2, hidden_layer_3['weights']), hidden_layer_3['biases'])
    layer3 = tf.nn.relu(layer3)

    # The output layer returns raw logits; softmax is applied inside the loss
    output = tf.add(tf.matmul(layer3, output_layer['weights']), output_layer['biases'])

    return output

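# The three hidden layers above repeat one pattern; a small helper like this
# could express it once (a sketch for illustration only; 'dense_layer' is not
# part of the tutorial code and is not used by nn_model above):
def dense_layer(inputs, n_in, n_out, activation=tf.nn.relu):
    weights = tf.Variable(tf.random_normal([n_in, n_out]))
    biases = tf.Variable(tf.random_normal([n_out]))
    layer = tf.add(tf.matmul(inputs, weights), biases)
    return activation(layer) if activation is not None else layer
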
# Define the function that trains this NN
def train_nn(x):
    prediction = nn_model(x)
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
    # Adam is an adaptive variant of SGD; AdaGrad and AdaDelta are alternatives
    optimizer = tf.train.AdamOptimizer().minimize(cost)
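    # AdamOptimizer's default learning rate is 0.001; it can also be set
    # explicitly (equivalent to the line above, shown for illustration):
    # optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)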

    with tf.Session() as sess:
        # tf.global_variables_initializer() replaced tf.initialize_all_variables() in TF 1.0
        sess.run(tf.global_variables_initializer())
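        # logs_path is defined above but never attached to a writer; this
        # sketch uses the TF 1.x summary API to write the graph so the
        # TensorBoard command printed at the end has something to display
        summary_writer = tf.summary.FileWriter(logs_path, graph=sess.graph)
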
        # Training loop: one epoch is a full pass over the training set
        for epoch in range(hm_epoch):
            epoch_loss = 0
            for _ in range(int(mnist.train.num_examples / batch_size)):
                epoch_x, epoch_y = mnist.train.next_batch(batch_size)
                # Run one optimization step; this updates the weights and biases
                _, c = sess.run([optimizer, cost], feed_dict={x: epoch_x, y: epoch_y})
                epoch_loss += c
            print('Epoch', epoch, 'completed out of', hm_epoch, 'loss:', epoch_loss)

        # Testing: compare the predicted class (argmax of the logits) with the true label
        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        print('Accuracy:', accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))

train_nn(x)

# Examine the results with TensorBoard
print("Run the command line:\n"
      "--> tensorboard --logdir=/tmp/tensorflow_logs "
      "\nThen open http://0.0.0.0:6006/ in your web browser")