11.2 Saving TF Models with SavedModel for TF Serving

import math
import os

import numpy as np
np.random.seed(123)
print("NumPy:{}".format(np.__version__))

import tensorflow as tf
tf.set_random_seed(123)
print("TensorFlow:{}".format(tf.__version__))
NumPy:1.13.3
TensorFlow:1.4.0

DATASETSLIB_HOME = os.path.expanduser('~/dl-ts/datasetslib')
import sys
if DATASETSLIB_HOME not in sys.path:
    sys.path.append(DATASETSLIB_HOME)
%reload_ext autoreload
%autoreload 2
import datasetslib

from datasetslib import util as dsu
datasetslib.datasets_root = os.path.join(os.path.expanduser('~'),'datasets')
models_root = os.path.join(os.path.expanduser('~'),'models')

Serving Models in TensorFlow

Saving a model with SavedModel

# Note: TF flags can be defined only once per process; restart the kernel
# before re-running this cell if you uncomment the flag definition below.
#tf.flags.DEFINE_integer('model_version', 1, 'version number of the model.')
model_name = 'mnist'
model_version = '1'
model_dir = os.path.join(models_root,model_name,model_version)
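
TF Serving discovers models by scanning the model base path for numeric version subdirectories, so with the settings above the export produced later in this section ends up in a layout along these lines (illustrative paths, assuming models_root resolves to ~/models):

~/models/mnist/1/saved_model.pb
~/models/mnist/1/variables/variables.data-00000-of-00001
~/models/mnist/1/variables/variables.index
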
# get the MNIST Data

from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets(os.path.join(datasetslib.datasets_root,'mnist'), one_hot=True)

x_train = mnist.train.images
x_test = mnist.test.images
y_train = mnist.train.labels
y_test = mnist.test.labels

# parameters
pixel_size = 28 
num_outputs = 10  # 0-9 digits
num_inputs = 784  # total pixels
Extracting /home/armando/datasets/mnist/train-images-idx3-ubyte.gz
Extracting /home/armando/datasets/mnist/train-labels-idx1-ubyte.gz
Extracting /home/armando/datasets/mnist/t10k-images-idx3-ubyte.gz
Extracting /home/armando/datasets/mnist/t10k-labels-idx1-ubyte.gz
def mlp(x, num_inputs, num_outputs, num_layers, num_neurons):
    w = []
    b = []
    for i in range(num_layers):
        # weights for hidden layer i
        w.append(tf.Variable(
            tf.random_normal([num_inputs if i == 0 else num_neurons[i - 1],
                              num_neurons[i]]),
            name="w_{0:04d}".format(i)))
        # biases for hidden layer i
        b.append(tf.Variable(tf.random_normal([num_neurons[i]]),
                             name="b_{0:04d}".format(i)))
    # weights and biases for the output layer
    w.append(tf.Variable(
        tf.random_normal([num_neurons[num_layers - 1] if num_layers > 0 else num_inputs,
                          num_outputs]),
        name="w_out"))
    b.append(tf.Variable(tf.random_normal([num_outputs]), name="b_out"))

    # x is the input layer
    layer = x
    # add hidden layers
    for i in range(num_layers):
        layer = tf.nn.relu(tf.matmul(layer, w[i]) + b[i])
    # add output layer (logits)
    layer = tf.matmul(layer, w[num_layers]) + b[num_layers]
    model = layer
    probs = tf.nn.softmax(model)

    return model, probs
tf.reset_default_graph()
# input images
serialized_tf_example = tf.placeholder(tf.string, name='tf_example')
feature_configs = {'x': tf.FixedLenFeature(shape=[784], dtype=tf.float32),}
tf_example = tf.parse_example(serialized_tf_example, feature_configs)
x_p = tf.identity(tf_example['x'], name='x_p')  # use tf.identity() to assign name

# target output
y_p = tf.placeholder(dtype=tf.float32, name="y_p", shape=[None, num_outputs])
num_layers = 2
num_neurons = []
for i in range(num_layers):
    num_neurons.append(256)

learning_rate = 0.01
n_epochs = 50
batch_size = 100
n_batches = mnist.train.num_examples//batch_size

model,probs = mlp(x=x_p, 
            num_inputs=num_inputs, 
            num_outputs=num_outputs, 
            num_layers=num_layers, 
            num_neurons=num_neurons)

# loss function
#loss = tf.reduce_mean(-tf.reduce_sum(y * tf.log(model), axis=1))
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=model, labels=y_p))
# optimizer function
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)

train_op = optimizer.minimize(loss)

predictions_check = tf.equal(tf.argmax(probs,1), tf.argmax(y_p,1))
accuracy_function = tf.reduce_mean(tf.cast(predictions_check, tf.float32))

values, indices = tf.nn.top_k(probs, 10)
table = tf.contrib.lookup.index_to_string_table_from_tensor(
  tf.constant([str(i) for i in range(10)]))
prediction_classes = table.lookup(tf.to_int64(indices))

with tf.Session() as tfs:
    tfs.run(tf.global_variables_initializer())
    for epoch in range(n_epochs):
        epoch_loss = 0.0
        for batch in range(n_batches):
            x_batch, y_batch = mnist.train.next_batch(batch_size)
            _,batch_loss = tfs.run([train_op,loss], feed_dict={x_p: x_batch, y_p: y_batch})
            epoch_loss += batch_loss 
        average_loss = epoch_loss / n_batches
        print("epoch: {0:04d}   loss = {1:0.6f}".format(epoch,average_loss))
    accuracy_score = tfs.run(accuracy_function, feed_dict={x_p: x_test, y_p: y_test })
    print("accuracy={0:.8f}".format(accuracy_score))

    # save the model

    # definitions for saving the models
    builder = tf.saved_model.builder.SavedModelBuilder(model_dir)

    # build signature_def_map

    classification_inputs = tf.saved_model.utils.build_tensor_info(
      serialized_tf_example)
    classification_outputs_classes = tf.saved_model.utils.build_tensor_info(
      prediction_classes)
    classification_outputs_scores = tf.saved_model.utils.build_tensor_info(values)

    classification_signature = (
      tf.saved_model.signature_def_utils.build_signature_def(
          inputs={
              tf.saved_model.signature_constants.CLASSIFY_INPUTS:
                  classification_inputs
          },
          outputs={
              tf.saved_model.signature_constants.CLASSIFY_OUTPUT_CLASSES:
                  classification_outputs_classes,
              tf.saved_model.signature_constants.CLASSIFY_OUTPUT_SCORES:
                  classification_outputs_scores
          },
          method_name=tf.saved_model.signature_constants.CLASSIFY_METHOD_NAME))

    tensor_info_x = tf.saved_model.utils.build_tensor_info(x_p)
    tensor_info_y = tf.saved_model.utils.build_tensor_info(probs)

    prediction_signature = (
          tf.saved_model.signature_def_utils.build_signature_def(
              inputs={'inputs': tensor_info_x},
              outputs={'outputs': tensor_info_y},
              method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME))

    legacy_init_op = tf.group(tf.tables_initializer(), name='legacy_init_op')
    builder.add_meta_graph_and_variables(
      tfs, [tf.saved_model.tag_constants.SERVING],
      signature_def_map={
          'predict_images':
              prediction_signature,
          tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
              classification_signature,
      },
      legacy_init_op=legacy_init_op)

    builder.save()

print('Run the following command:')
print('tensorflow_model_server --port=9000 --model_name=mnist --model_base_path={}'
      .format(os.path.join(models_root, model_name)))
epoch: 0000   loss = 58.275672
epoch: 0001   loss = 12.980265
epoch: 0002   loss = 7.946372
epoch: 0003   loss = 5.640132
epoch: 0004   loss = 4.194648
epoch: 0005   loss = 3.293190
epoch: 0006   loss = 2.660178
epoch: 0007   loss = 2.217029
epoch: 0008   loss = 1.806662
epoch: 0009   loss = 1.527517
epoch: 0010   loss = 1.308219
epoch: 0011   loss = 1.115864
epoch: 0012   loss = 0.957490
epoch: 0013   loss = 0.846164
epoch: 0014   loss = 0.733891
epoch: 0015   loss = 0.622274
epoch: 0016   loss = 0.540578
epoch: 0017   loss = 0.473720
epoch: 0018   loss = 0.416551
epoch: 0019   loss = 0.364625
epoch: 0020   loss = 0.326988
epoch: 0021   loss = 0.272674
epoch: 0022   loss = 0.243471
epoch: 0023   loss = 0.207583
epoch: 0024   loss = 0.183325
epoch: 0025   loss = 0.153376
epoch: 0026   loss = 0.135717
epoch: 0027   loss = 0.119947
epoch: 0028   loss = 0.103426
epoch: 0029   loss = 0.085863
epoch: 0030   loss = 0.076752
epoch: 0031   loss = 0.069757
epoch: 0032   loss = 0.056827
epoch: 0033   loss = 0.046720
epoch: 0034   loss = 0.041128
epoch: 0035   loss = 0.033346
epoch: 0036   loss = 0.029719
epoch: 0037   loss = 0.026037
epoch: 0038   loss = 0.022546
epoch: 0039   loss = 0.018932
epoch: 0040   loss = 0.016667
epoch: 0041   loss = 0.013022
epoch: 0042   loss = 0.013127
epoch: 0043   loss = 0.008736
epoch: 0044   loss = 0.006944
epoch: 0045   loss = 0.004991
epoch: 0046   loss = 0.004136
epoch: 0047   loss = 0.003154
epoch: 0048   loss = 0.002946
epoch: 0049   loss = 0.002317
accuracy=0.92979997
INFO:tensorflow:No assets to save.
INFO:tensorflow:No assets to write.
INFO:tensorflow:SavedModel written to: b'/home/armando/models/mnist/1/saved_model.pb'
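
The exported signatures can be verified with the saved_model_cli tool that ships with TensorFlow (a quick sanity check; the path below assumes the default models_root used above):

saved_model_cli show --dir ~/models/mnist/1 --all

This lists both the default serving signature (the classification signature) and the predict_images signature, together with their input and output tensors.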
Run the following command:
tensorflow_model_server --port=9000 --model_name=mnist --model_base_path=/home/armando/models/mnist
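
With the server running, the model can be queried over gRPC. The following is a minimal client sketch for the predict_images signature defined above; it assumes the grpcio and tensorflow-serving-api packages are installed, that the server is listening on localhost:9000, and that it is run in the same notebook so the mnist dataset object is still available. Exact module paths vary slightly across tensorflow-serving-api versions.

import grpc
import numpy as np
import tensorflow as tf
from tensorflow_serving.apis import predict_pb2, prediction_service_pb2_grpc

# connect to the model server started above
channel = grpc.insecure_channel('localhost:9000')
stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)

# build a PredictRequest against the 'predict_images' signature
request = predict_pb2.PredictRequest()
request.model_spec.name = 'mnist'
request.model_spec.signature_name = 'predict_images'

# the prediction signature expects the flattened 784-pixel image under the key 'inputs'
image = mnist.test.images[0]
request.inputs['inputs'].CopyFrom(
    tf.contrib.util.make_tensor_proto(image, shape=[1, image.size]))

# the response carries the softmax scores under the key 'outputs'
result = stub.Predict(request, timeout=10.0)
scores = np.array(result.outputs['outputs'].float_val)
print('predicted digit:', scores.argmax())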
