Neural Network with Eager API
Build a 2-hidden-layer fully connected neural network (a.k.a. multilayer perceptron) with TensorFlow's Eager API.
This example uses some of TensorFlow's higher-level wrappers (tf.estimator, tf.layers, tf.metrics, ...); you can check the 'neural_network_raw' example for a raw, more detailed TensorFlow implementation.
- Author: Aymeric Damien
- Project: https://github.com/aymericdamien/TensorFlow-Examples/
Neural Network Overview
MNIST Dataset Overview
This example uses MNIST handwritten digits. The dataset contains 60,000 examples for training and 10,000 examples for testing. The digits have been size-normalized and centered in a fixed-size image (28x28 pixels) with values from 0 to 1. For simplicity, each image has been flattened and converted to a 1-D numpy array of 784 features (28*28).
More info: http://yann.lecun.com/exdb/mnist/
from __future__ import print_function
import tensorflow as tf
import tensorflow.contrib.eager as tfe
# Set Eager API
tfe.enable_eager_execution()
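With eager execution enabled, operations run immediately and return concrete values instead of building a graph to run later in a session. A minimal sanity check (a sketch, assuming the TF 1.x eager API imported above):
# Operations execute immediately and return concrete values
a = tf.constant(2)
b = tf.constant(3)
print("a + b =", a + b)  # the result is available right away, no session needed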
# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=False)
Extracting /tmp/data/train-images-idx3-ubyte.gz
Extracting /tmp/data/train-labels-idx1-ubyte.gz
Extracting /tmp/data/t10k-images-idx3-ubyte.gz
Extracting /tmp/data/t10k-labels-idx1-ubyte.gz
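Since the data is loaded with one_hot=False, the labels are plain integer class indices (0-9) rather than one-hot vectors, which is what the sparse cross-entropy loss below expects. A quick, optional look at the arrays (a sketch; this loader typically keeps 5,000 of the 60,000 training images aside as a validation split):
# Inspect shapes: flattened 784-feature images with float values in [0, 1]
print(mnist.train.images.shape)  # e.g. (55000, 784)
print(mnist.train.labels.shape)  # e.g. (55000,), integer labels
# A flattened row can be reshaped back to the original 28x28 image
print(mnist.train.images[0].reshape(28, 28).shape)  # (28, 28)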
# Parameters
learning_rate = 0.001
num_steps = 1000
batch_size = 128
display_step = 100
# Network Parameters
n_hidden_1 = 256 # 1st layer number of neurons
n_hidden_2 = 256 # 2nd layer number of neurons
num_input = 784 # MNIST data input (img shape: 28*28)
num_classes = 10 # MNIST total classes (0-9 digits)
# Using TF Dataset to split data into batches
dataset = tf.data.Dataset.from_tensor_slices(
(mnist.train.images, mnist.train.labels)).batch(batch_size)
dataset_iter = tfe.Iterator(dataset)
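Each element yielded by the iterator is an (images, labels) pair of batch_size examples. Pulling one batch from a throwaway iterator shows the shapes (an optional sketch; a fresh iterator is used so dataset_iter above is left untouched):
# Peek at a single batch without advancing the training iterator
images_peek, labels_peek = tfe.Iterator(dataset).next()
print(images_peek.shape)  # (128, 784)
print(labels_peek.shape)  # (128,)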
# Define the neural network. To use the eager API and the tf.layers API together,
# we must instantiate a tfe.Network class as follows:
class NeuralNet(tfe.Network):
    def __init__(self):
        # Define each layer
        super(NeuralNet, self).__init__()
        # Hidden fully connected layer with 256 neurons
        self.layer1 = self.track_layer(
            tf.layers.Dense(n_hidden_1, activation=tf.nn.relu))
        # Hidden fully connected layer with 256 neurons
        self.layer2 = self.track_layer(
            tf.layers.Dense(n_hidden_2, activation=tf.nn.relu))
        # Output fully connected layer with a neuron for each class
        self.out_layer = self.track_layer(tf.layers.Dense(num_classes))

    def call(self, x):
        x = self.layer1(x)
        x = self.layer2(x)
        return self.out_layer(x)
neural_net = NeuralNet()
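With eager execution, the freshly created network can be called like a regular Python function; its weights are created on the first call. A quick hedged check that the output has one logit per class:
# Forward pass on a dummy batch of zeros: expect shape (4, num_classes)
dummy_logits = neural_net(tf.zeros([4, num_input]))
print(dummy_logits.shape)  # (4, 10)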
# Cross-Entropy loss function
def loss_fn(inference_fn, inputs, labels):
    # Using sparse_softmax cross entropy
    return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=inference_fn(inputs), labels=labels))
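sparse_softmax_cross_entropy_with_logits takes raw logits plus integer class labels (hence one_hot=False when loading the data); softmax_cross_entropy_with_logits would instead expect one-hot targets. A small illustration with made-up values (a sketch, not part of the original example):
# When the logit of the true class dominates, the loss is close to zero
toy_logits = tf.constant([[10., 0., 0., 0., 0., 0., 0., 0., 0., 0.]])
toy_labels = tf.constant([0], dtype=tf.int64)
print(tf.nn.sparse_softmax_cross_entropy_with_logits(
    logits=toy_logits, labels=toy_labels))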
# Calculate accuracy
def accuracy_fn(inference_fn, inputs, labels):
    prediction = tf.nn.softmax(inference_fn(inputs))
    correct_pred = tf.equal(tf.argmax(prediction, 1), labels)
    return tf.reduce_mean(tf.cast(correct_pred, tf.float32))
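Both helpers take the network itself as inference_fn, so they can be evaluated at any time. Before training, accuracy on a batch should sit near chance level, about 1/num_classes (an optional sketch; the exact value varies from run to run):
# Accuracy of the untrained network on one batch (roughly 0.10)
x_peek, y_peek = tfe.Iterator(dataset).next()
print(accuracy_fn(neural_net, x_peek, tf.cast(y_peek, tf.int64)))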
# Adam Optimizer
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
# Compute gradients
grad = tfe.implicit_gradients(loss_fn)
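tfe.implicit_gradients wraps loss_fn into a function with the same arguments; calling it evaluates the loss and returns a list of (gradient, variable) pairs for every trainable variable the loss touched, which is the format optimizer.apply_gradients expects. A hedged sketch of a single call:
# Each entry pairs a gradient tensor with the variable it was computed for
xb, yb = tfe.Iterator(dataset).next()
for g, v in grad(neural_net, xb, tf.cast(yb, tf.int64)):
    print(v.name, g.shape)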
# Training
average_loss = 0.
average_acc = 0.
for step in range(num_steps):

    # Iterate through the dataset
    try:
        d = dataset_iter.next()
    except StopIteration:
        # Refill queue
        dataset_iter = tfe.Iterator(dataset)
        d = dataset_iter.next()

    # Images
    x_batch = d[0]
    # Labels
    y_batch = tf.cast(d[1], dtype=tf.int64)

    # Compute the batch loss
    batch_loss = loss_fn(neural_net, x_batch, y_batch)
    average_loss += batch_loss
    # Compute the batch accuracy
    batch_accuracy = accuracy_fn(neural_net, x_batch, y_batch)
    average_acc += batch_accuracy

    if step == 0:
        # Display the initial cost, before optimizing
        print("Initial loss= {:.9f}".format(average_loss))

    # Update the variables following gradients info
    optimizer.apply_gradients(grad(neural_net, x_batch, y_batch))

    # Display info
    if (step + 1) % display_step == 0 or step == 0:
        if step > 0:
            average_loss /= display_step
            average_acc /= display_step
        print("Step:", '%04d' % (step + 1), " loss=",
              "{:.9f}".format(average_loss), " accuracy=",
              "{:.4f}".format(average_acc))
        average_loss = 0.
        average_acc = 0.
Initial loss= 2.340397596
Step: 0001 loss= 2.340397596 accuracy= 0.0703
Step: 0100 loss= 0.586046159 accuracy= 0.8305
Step: 0200 loss= 0.253318846 accuracy= 0.9282
Step: 0300 loss= 0.214748293 accuracy= 0.9377
Step: 0400 loss= 0.180644721 accuracy= 0.9466
Step: 0500 loss= 0.137285724 accuracy= 0.9591
Step: 0600 loss= 0.119845696 accuracy= 0.9636
Step: 0700 loss= 0.113618039 accuracy= 0.9665
Step: 0800 loss= 0.109642141 accuracy= 0.9676
Step: 0900 loss= 0.085067607 accuracy= 0.9746
Step: 1000 loss= 0.079819344 accuracy= 0.9754
# Evaluate model on the test image set
testX = mnist.test.images
testY = mnist.test.labels
test_acc = accuracy_fn(neural_net, testX, testY)
print("Testset Accuracy: {:.4f}".format(test_acc))
Testset Accuracy: 0.9719
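Since the trained model is just a Python object under eager execution, individual predictions are straightforward. A short sketch comparing predictions with the true labels for the first few test images (an addition for illustration, not part of the original run):
# Predict a handful of test digits and compare with the ground truth
n_images = 4
sample_images = mnist.test.images[:n_images]
preds = tf.argmax(neural_net(sample_images), axis=1)
print("Predicted:", preds.numpy())
print("True:     ", mnist.test.labels[:n_images])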