2 High-Level Libraries for TensorFlow
TF Estimator MNIST Example
import os
import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
tf.reset_default_graph()
mnist = input_data.read_data_sets(os.path.join('.', 'mnist'),
                                  one_hot=False)
x_train = mnist.train.images
y_train = mnist.train.labels
x_test = mnist.test.images
y_test = mnist.test.labels
n_classes = 10
batch_size = 100
n_steps = 1000
learning_rate = 0.01
def model_fn(features, labels, mode):
    """Define the model function for the Estimator."""
    espec_op = tf.estimator.EstimatorSpec
    x = features['images']
    # two hidden layers of 32 neurons each, then the logits layer
    layer_1 = tf.layers.dense(x, 32)
    layer_2 = tf.layers.dense(layer_1, 32)
    logits = tf.layers.dense(layer_2, n_classes)
    predicted_classes = tf.argmax(logits, axis=1)
    if mode == tf.estimator.ModeKeys.PREDICT:
        espec = espec_op(mode,
                         predictions=predicted_classes)
    else:
        # loss, optimizer, and accuracy metric for TRAIN and EVAL modes
        entropy_op = tf.nn.sparse_softmax_cross_entropy_with_logits
        loss_op = tf.reduce_mean(entropy_op(
            logits=logits,
            labels=tf.cast(labels, dtype=tf.int32)))
        optimizer = tf.train.GradientDescentOptimizer(
            learning_rate=learning_rate)
        train_op = optimizer.minimize(
            loss_op, global_step=tf.train.get_global_step())
        accuracy_op = tf.metrics.accuracy(
            labels=labels, predictions=predicted_classes)
        espec = espec_op(mode=mode,
                         predictions=predicted_classes,
                         loss=loss_op,
                         train_op=train_op,
                         eval_metric_ops={'accuracy': accuracy_op})
    return espec
model = tf.estimator.Estimator(model_fn)
train_input_fn = tf.estimator.inputs.numpy_input_fn(
    x={'images': x_train},
    y=y_train,
    batch_size=batch_size,
    num_epochs=None,
    shuffle=True)
model.train(train_input_fn, steps=n_steps)
eval_input_fn = tf.estimator.inputs.numpy_input_fn(
    x={'images': x_test},
    y=y_test,
    batch_size=batch_size,
    shuffle=False)
model.evaluate(eval_input_fn)
Extracting ./mnist/train-images-idx3-ubyte.gz
Extracting ./mnist/train-labels-idx1-ubyte.gz
Extracting ./mnist/t10k-images-idx3-ubyte.gz
Extracting ./mnist/t10k-labels-idx1-ubyte.gz
INFO:tensorflow:Using default config.
WARNING:tensorflow:Using temporary folder as model directory: /tmp/tmprvcqgu07
INFO:tensorflow:Using config: {'_save_checkpoints_steps': None, '_task_type': 'worker', '_save_checkpoints_secs': 600, '_service': None, '_task_id': 0, '_master': '', '_session_config': None, '_num_worker_replicas': 1, '_keep_checkpoint_max': 5, '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x7ff9d15f5fd0>, '_keep_checkpoint_every_n_hours': 10000, '_log_step_count_steps': 100, '_is_chief': True, '_save_summary_steps': 100, '_model_dir': '/tmp/tmprvcqgu07', '_num_ps_replicas': 0, '_tf_random_seed': None}
INFO:tensorflow:Create CheckpointSaverHook.
INFO:tensorflow:Saving checkpoints for 1 into /tmp/tmprvcqgu07/model.ckpt.
INFO:tensorflow:loss = 2.4365, step = 1
INFO:tensorflow:global_step/sec: 597.996
INFO:tensorflow:loss = 1.47152, step = 101 (0.168 sec)
INFO:tensorflow:global_step/sec: 553.29
INFO:tensorflow:loss = 0.728581, step = 201 (0.182 sec)
INFO:tensorflow:global_step/sec: 519.498
INFO:tensorflow:loss = 0.89795, step = 301 (0.193 sec)
INFO:tensorflow:global_step/sec: 503.414
INFO:tensorflow:loss = 0.743328, step = 401 (0.202 sec)
INFO:tensorflow:global_step/sec: 539.251
INFO:tensorflow:loss = 0.413222, step = 501 (0.181 sec)
INFO:tensorflow:global_step/sec: 572.327
INFO:tensorflow:loss = 0.416304, step = 601 (0.174 sec)
INFO:tensorflow:global_step/sec: 543.99
INFO:tensorflow:loss = 0.459793, step = 701 (0.184 sec)
INFO:tensorflow:global_step/sec: 687.748
INFO:tensorflow:loss = 0.501756, step = 801 (0.146 sec)
INFO:tensorflow:global_step/sec: 654.217
INFO:tensorflow:loss = 0.666772, step = 901 (0.153 sec)
INFO:tensorflow:Saving checkpoints for 1000 into /tmp/tmprvcqgu07/model.ckpt.
INFO:tensorflow:Loss for final step: 0.426257.
INFO:tensorflow:Starting evaluation at 2017-12-15-02:27:45
INFO:tensorflow:Restoring parameters from /tmp/tmprvcqgu07/model.ckpt-1000
INFO:tensorflow:Finished evaluation at 2017-12-15-02:27:45
INFO:tensorflow:Saving dict for global step 1000: accuracy = 0.8856, global_step = 1000, loss = 0.40996
{'accuracy': 0.88559997, 'global_step': 1000, 'loss': 0.40995964}
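The PREDICT branch of model_fn is defined but never exercised above. As a minimal sketch, assuming the trained model object from this example, predictions can be drawn as follows (the ten-image slice is illustrative):

# run inference with the trained Estimator, exercising the PREDICT
# branch of model_fn; the slice of ten test images is illustrative
predict_input_fn = tf.estimator.inputs.numpy_input_fn(
    x={'images': x_test[:10]},
    num_epochs=1,
    shuffle=False)
for prediction in model.predict(predict_input_fn):
    print(prediction)  # predicted class index from tf.argmax in model_fn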
TF Slim MNIST Example
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.contrib import slim
import tensorflow as tf
import os
tf.reset_default_graph()
n_classes = 10
n_steps = 1000
mnist = input_data.read_data_sets(os.path.join('.', 'mnist'), one_hot=True)
X_train = mnist.train.images
X_train = tf.convert_to_tensor(X_train)
Y_train = mnist.train.labels
Y_train = tf.convert_to_tensor(Y_train)
def mlp(x):
    net = slim.fully_connected(x, 32, scope='fc1')
    net = slim.dropout(net, 0.5, scope='dropout1')
    net = slim.fully_connected(net, 32, scope='fc2')
    net = slim.dropout(net, 0.5, scope='dropout2')
    net = slim.fully_connected(net, n_classes,
                               activation_fn=None, scope='fc3')
    return net
logits = mlp(X_train)
loss = tf.losses.softmax_cross_entropy(logits=logits, onehot_labels=Y_train)
total_loss = tf.losses.get_total_loss()
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
train_op = slim.learning.create_train_op(total_loss, optimizer)
final_loss = slim.learning.train(
    train_op,
    logdir='./slim_logs',
    number_of_steps=n_steps,
    log_every_n_steps=100)
print('final loss={}'.format(final_loss))
Extracting ./mnist/train-images-idx3-ubyte.gz
Extracting ./mnist/train-labels-idx1-ubyte.gz
Extracting ./mnist/t10k-images-idx3-ubyte.gz
Extracting ./mnist/t10k-labels-idx1-ubyte.gz
WARNING:tensorflow:From /usr/local/lib/python3.5/dist-packages/tensorflow/contrib/training/python/training/training.py:412: get_or_create_global_step (from tensorflow.contrib.framework.python.ops.variables) is deprecated and will be removed in a future version.
Instructions for updating:
Please switch to tf.train.get_or_create_global_step
INFO:tensorflow:Starting Session.
INFO:tensorflow:Saving checkpoint to path ./slim_logs/model.ckpt
INFO:tensorflow:global_step/sec: 0
INFO:tensorflow:Starting Queues.
INFO:tensorflow:global step 100: loss = 2.2669 (0.010 sec/step)
INFO:tensorflow:global step 200: loss = 2.2025 (0.010 sec/step)
INFO:tensorflow:global step 300: loss = 2.1257 (0.010 sec/step)
INFO:tensorflow:global step 400: loss = 2.0419 (0.009 sec/step)
INFO:tensorflow:global step 500: loss = 1.9532 (0.009 sec/step)
INFO:tensorflow:global step 600: loss = 1.8733 (0.010 sec/step)
INFO:tensorflow:global step 700: loss = 1.8002 (0.010 sec/step)
INFO:tensorflow:global step 800: loss = 1.7273 (0.010 sec/step)
INFO:tensorflow:global step 900: loss = 1.6688 (0.010 sec/step)
INFO:tensorflow:global step 1000: loss = 1.6132 (0.010 sec/step)
INFO:tensorflow:Stopping Training.
INFO:tensorflow:Finished training! Saving model to disk.
final loss=1.6131552457809448
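TF Slim also provides arg_scope to set shared defaults across layers. A sketch of the same mlp function rewritten with arg_scope follows; the L2 weight regularizer is illustrative and not part of the example above:

# a sketch of mlp using slim.arg_scope to share layer defaults;
# the L2 regularizer is illustrative
def mlp_with_scope(x):
    with slim.arg_scope([slim.fully_connected],
                        activation_fn=tf.nn.relu,
                        weights_regularizer=slim.l2_regularizer(0.0001)):
        net = slim.fully_connected(x, 32, scope='fc1')
        net = slim.fully_connected(net, 32, scope='fc2')
        net = slim.fully_connected(net, n_classes,
                                   activation_fn=None, scope='fc3')
    return net

Regularization losses added this way are collected automatically and picked up by the tf.losses.get_total_loss() call used above.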
TFLearn MNIST Example
import tensorflow as tf
tf.reset_default_graph()
import tflearn
import tflearn.datasets.mnist as mnist
import os
batch_size = 100
n_classes = 10
n_epochs = 10
X_train, Y_train, X_test, Y_test = mnist.load_data(
    data_dir=os.path.join('.', 'mnist'), one_hot=True)
input_layer = tflearn.input_data(shape=[None, 784])
layer1 = tflearn.fully_connected(input_layer,
                                 10,
                                 activation='relu')
layer2 = tflearn.fully_connected(layer1,
                                 10,
                                 activation='relu')
output = tflearn.fully_connected(layer2,
                                 n_classes,
                                 activation='softmax')
net = tflearn.regression(output,
                         optimizer='adam',
                         metric=tflearn.metrics.Accuracy(),
                         loss='categorical_crossentropy')
model = tflearn.DNN(net)
model.fit(X_train,
          Y_train,
          n_epoch=n_epochs,
          batch_size=batch_size,
          show_metric=True,
          run_id='dense_model')
score = model.evaluate(X_test, Y_test)
print('Test accuracy:', score[0])
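A trained TFLearn model can also be persisted and restored through the DNN object's save and load methods. A minimal sketch, with an illustrative filename:

# persist and restore the trained TFLearn model; the filename is illustrative
model.save('dense_model.tflearn')
model.load('dense_model.tflearn')
print(model.predict(X_test[:1]))  # class probabilities for one image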
Pretty Tensor MNIST Example
from __future__ import division
import os
import numpy as np
import tensorflow as tf
tf.reset_default_graph()
import prettytensor as pt
from prettytensor.tutorial import data_utils
data_utils.WORK_DIRECTORY = os.path.join('.', 'mnist')
X_train, Y_train = data_utils.mnist(training=True)
X_test, Y_test = data_utils.mnist(training=False)
batch_size = 100
n_classes = 10
n_epochs = 10
n_batches = int(X_train.shape[0] / batch_size)
n_samples_in_train_batch = 60000 // batch_size
n_samples_in_test_batch = 10000 // batch_size
X = tf.placeholder(tf.float32, [batch_size, 28, 28, 1])
Y = tf.placeholder(tf.float32, [batch_size, 10])
# wrap the input inline so that X stays a plain placeholder
# for use in feed_vars below
model = (pt.wrap(X).
         flatten().
         fully_connected(10).
         softmax_classifier(n_classes, labels=Y))
evaluator = model.softmax.evaluate_classifier(Y)
optimizer = tf.train.GradientDescentOptimizer(0.1)
trainer = pt.apply_optimizer(optimizer, losses=[model.loss])
runner = pt.train.Runner()
with tf.Session() as tfs:
    for epoch in range(0, n_epochs):
        # shuffle the training data at the start of each epoch
        X_train, Y_train = data_utils.permute_data((X_train, Y_train))
        runner.train_model(trainer,
                           model.loss,
                           n_samples_in_train_batch,
                           feed_vars=(X, Y),
                           feed_data=pt.train.feed_numpy(
                               batch_size, X_train, Y_train),
                           print_every=600)
        score = runner.evaluate_model(evaluator,
                                      n_samples_in_test_batch,
                                      feed_vars=(X, Y),
                                      feed_data=pt.train.feed_numpy(
                                          batch_size, X_test, Y_test))
        print('Accuracy after {} epochs {} \n'.format(epoch + 1, score[0]))
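Pretty Tensor's defaults_scope plays a role similar to TF Slim's arg_scope, setting defaults for the chained layer methods. A sketch, with illustrative layer sizes, that could replace the model definition above:

# a sketch using pt.defaults_scope to share defaults across the chain;
# the two 100-unit layers are illustrative
with pt.defaults_scope(activation_fn=tf.nn.relu):
    model = (pt.wrap(X).
             flatten().
             fully_connected(100).
             fully_connected(100).
             softmax_classifier(n_classes, labels=Y))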
Sonnet MNIST Example
import tensorflow as tf
tf.reset_default_graph()
import os
import sonnet as snt
from tensorflow.examples.tutorials.mnist import input_data
tf.logging.set_verbosity(tf.logging.INFO)
class MNIST(snt.AbstractModule):
    def __init__(self, mnist_part, batch_size, name='MNIST'):
        super(MNIST, self).__init__(name=name)
        self._X = tf.constant(mnist_part.images, dtype=tf.float32)
        self._Y = tf.constant(mnist_part.labels, dtype=tf.float32)
        self._batch_size = batch_size
        self._M = mnist_part.num_examples

    def _build(self):
        # draw a random batch of examples each time the module is connected
        idx = tf.random_uniform([self._batch_size], 0, self._M, tf.int64)
        X = tf.gather(self._X, idx)
        Y = tf.gather(self._Y, idx)
        return X, Y

class MLP(snt.AbstractModule):
    def __init__(self, output_sizes, name='mlp'):
        super(MLP, self).__init__(name=name)
        self._layers = []
        for output_size in output_sizes:
            self._layers.append(snt.Linear(output_size=output_size))

    def _build(self, X):
        # sigmoid activations on the hidden layers, softmax on the output
        model = tf.sigmoid(self._layers[0](X))
        for i in range(1, len(self._layers) - 1):
            model = tf.sigmoid(self._layers[i](model))
        model = tf.nn.softmax(self._layers[-1](model))
        return model
batch_size = 100
n_classes = 10
n_epochs = 10
mnist = input_data.read_data_sets(os.path.join('.', 'mnist'),
                                  one_hot=True)
train = MNIST(mnist.train, batch_size=batch_size)
test = MNIST(mnist.test, batch_size=batch_size)
X_train, Y_train = train()
X_test, Y_test = test()
model = MLP([20, n_classes])
Y_train_hat = model(X_train)
Y_test_hat = model(X_test)
def loss(Y_hat, Y):
    return -tf.reduce_sum(Y * tf.log(Y_hat))

L_train = loss(Y_train_hat, Y_train)
L_test = loss(Y_test_hat, Y_test)
optimizer = tf.train.GradientDescentOptimizer(
    learning_rate=0.01).minimize(L_train)
with tf.Session() as tfs:
    tf.global_variables_initializer().run()
    for epoch in range(n_epochs):
        loss_val, _ = tfs.run((L_train, optimizer))
        print('Epoch : {} Training Loss : {}'.format(epoch, loss_val))
    loss_val = tfs.run(L_test)
    print('Test loss : {}'.format(loss_val))
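Because model is a single Sonnet module connected twice, Y_train_hat and Y_test_hat share one set of weights. Assuming Sonnet's AbstractModule.get_variables method, this can be checked after the module has been connected:

# list the variables held by the MLP module; both the train and test
# graphs above reuse these same weights
for v in model.get_variables():
    print(v.name, v.get_shape().as_list())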