Tight Integration

As of TensorFlow 1.1, Keras ships inside TensorFlow itself (as tf.contrib.keras), so the two APIs can be used side by side from a single install.

import tensorflow as tf
tf.__version__
'1.1.0'
from tensorflow.contrib import keras

TensorBoard Integration

from keras.datasets import cifar100

(X_train, Y_train), (X_test, Y_test) = cifar100.load_data(label_mode='fine')
Using TensorFlow backend.
from keras import backend as K

img_rows, img_cols = 32, 32

if K.image_data_format() == 'channels_first':
    shape_ord = (3, img_rows, img_cols)
else:  # channel_last
    shape_ord = (img_rows, img_cols, 3)
shape_ord
(32, 32, 3)
X_train.shape
(50000, 32, 32, 3)
import numpy as np
nb_classes = len(np.unique(Y_train))
from keras.applications import vgg16
from keras.layers import Input
vgg16_model = vgg16.VGG16(weights='imagenet', include_top=False, 
                          input_tensor=Input(shape_ord))
vgg16_model.summary()
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_1 (InputLayer)         (None, 32, 32, 3)         0         
_________________________________________________________________
block1_conv1 (Conv2D)        (None, 32, 32, 64)        1792      
_________________________________________________________________
block1_conv2 (Conv2D)        (None, 32, 32, 64)        36928     
_________________________________________________________________
block1_pool (MaxPooling2D)   (None, 16, 16, 64)        0         
_________________________________________________________________
block2_conv1 (Conv2D)        (None, 16, 16, 128)       73856     
_________________________________________________________________
block2_conv2 (Conv2D)        (None, 16, 16, 128)       147584    
_________________________________________________________________
block2_pool (MaxPooling2D)   (None, 8, 8, 128)         0         
_________________________________________________________________
block3_conv1 (Conv2D)        (None, 8, 8, 256)         295168    
_________________________________________________________________
block3_conv2 (Conv2D)        (None, 8, 8, 256)         590080    
_________________________________________________________________
block3_conv3 (Conv2D)        (None, 8, 8, 256)         590080    
_________________________________________________________________
block3_pool (MaxPooling2D)   (None, 4, 4, 256)         0         
_________________________________________________________________
block4_conv1 (Conv2D)        (None, 4, 4, 512)         1180160   
_________________________________________________________________
block4_conv2 (Conv2D)        (None, 4, 4, 512)         2359808   
_________________________________________________________________
block4_conv3 (Conv2D)        (None, 4, 4, 512)         2359808   
_________________________________________________________________
block4_pool (MaxPooling2D)   (None, 2, 2, 512)         0         
_________________________________________________________________
block5_conv1 (Conv2D)        (None, 2, 2, 512)         2359808   
_________________________________________________________________
block5_conv2 (Conv2D)        (None, 2, 2, 512)         2359808   
_________________________________________________________________
block5_conv3 (Conv2D)        (None, 2, 2, 512)         2359808   
_________________________________________________________________
block5_pool (MaxPooling2D)   (None, 1, 1, 512)         0         
=================================================================
Total params: 14,714,688
Trainable params: 14,714,688
Non-trainable params: 0
_________________________________________________________________
for layer in vgg16_model.layers:
    layer.trainable = False  # freeze layer
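As a quick sanity check (not part of the original flow), the parameters held by the frozen layers can be tallied directly. The sum comes out to VGG16's full 14,714,688 parameters; the compiled model's summary below reports 14,722,880 non-trainable parameters because BatchNormalization contributes a further 8,192 non-trainable moving statistics.

# Every VGG16 layer was marked non-trainable above, so this sums to the
# network's full parameter count.
frozen_params = sum(layer.count_params() for layer in vgg16_model.layers
                    if not layer.trainable)
frozen_params
14714688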
from keras.layers.core import Dense, Dropout, Flatten
from keras.layers.normalization import BatchNormalization
x = Flatten()(vgg16_model.output)  # the shape is inferred from the tensor
x = Dense(4096, activation='relu', name='ft_fc1')(x)
x = Dropout(0.5)(x)
x = BatchNormalization()(x)
predictions = Dense(nb_classes, activation='softmax')(x)
from keras.models import Model
#create graph of your new model
model = Model(inputs=vgg16_model.input, outputs=predictions)

#compile the model
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_1 (InputLayer)         (None, 32, 32, 3)         0         
_________________________________________________________________
block1_conv1 (Conv2D)        (None, 32, 32, 64)        1792      
_________________________________________________________________
block1_conv2 (Conv2D)        (None, 32, 32, 64)        36928     
_________________________________________________________________
block1_pool (MaxPooling2D)   (None, 16, 16, 64)        0         
_________________________________________________________________
block2_conv1 (Conv2D)        (None, 16, 16, 128)       73856     
_________________________________________________________________
block2_conv2 (Conv2D)        (None, 16, 16, 128)       147584    
_________________________________________________________________
block2_pool (MaxPooling2D)   (None, 8, 8, 128)         0         
_________________________________________________________________
block3_conv1 (Conv2D)        (None, 8, 8, 256)         295168    
_________________________________________________________________
block3_conv2 (Conv2D)        (None, 8, 8, 256)         590080    
_________________________________________________________________
block3_conv3 (Conv2D)        (None, 8, 8, 256)         590080    
_________________________________________________________________
block3_pool (MaxPooling2D)   (None, 4, 4, 256)         0         
_________________________________________________________________
block4_conv1 (Conv2D)        (None, 4, 4, 512)         1180160   
_________________________________________________________________
block4_conv2 (Conv2D)        (None, 4, 4, 512)         2359808   
_________________________________________________________________
block4_conv3 (Conv2D)        (None, 4, 4, 512)         2359808   
_________________________________________________________________
block4_pool (MaxPooling2D)   (None, 2, 2, 512)         0         
_________________________________________________________________
block5_conv1 (Conv2D)        (None, 2, 2, 512)         2359808   
_________________________________________________________________
block5_conv2 (Conv2D)        (None, 2, 2, 512)         2359808   
_________________________________________________________________
block5_conv3 (Conv2D)        (None, 2, 2, 512)         2359808   
_________________________________________________________________
block5_pool (MaxPooling2D)   (None, 1, 1, 512)         0         
_________________________________________________________________
flatten_1 (Flatten)          (None, 512)               0         
_________________________________________________________________
ft_fc1 (Dense)               (None, 4096)              2101248   
_________________________________________________________________
dropout_1 (Dropout)          (None, 4096)              0         
_________________________________________________________________
batch_normalization_1 (Batch (None, 4096)              16384     
_________________________________________________________________
dense_1 (Dense)              (None, 100)               409700    
=================================================================
Total params: 17,242,020
Trainable params: 2,519,140
Non-trainable params: 14,722,880
_________________________________________________________________

TensorBoard Callback

from keras.callbacks import TensorBoard

# Arguments
    log_dir: the path of the directory where to save the log
        files to be parsed by TensorBoard.
    histogram_freq: frequency (in epochs) at which to compute activation
        and weight histograms for the layers of the model. If set to 0,
        histograms won't be computed. Validation data (or split) must be
        specified for histogram visualizations.
    write_graph: whether to visualize the graph in TensorBoard.
        The log file can become quite large when
        write_graph is set to True.
    write_grads: whether to visualize gradient histograms in TensorBoard.
        `histogram_freq` must be greater than 0.
    write_images: whether to write model weights to visualize as
        image in TensorBoard.
    embeddings_freq: frequency (in epochs) at which selected embedding
        layers will be saved.
    embeddings_layer_names: a list of names of layers to keep an eye on.
        If None or an empty list, all the embedding layers will be watched.
    embeddings_metadata: a dictionary which maps a layer name to a file name
        in which the metadata for that embedding layer is saved.

See the details about the metadata file format. If the same metadata file is to be used for all embedding layers, a single string can be passed.
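For example, a minimal sketch wiring these arguments together might look like this (the 'metadata.tsv' file name is hypothetical; it would map each embedding row to a label):

# A minimal sketch: log histograms every epoch and watch one embedding
# layer; 'metadata.tsv' is a hypothetical per-sample metadata file.
tb_callback = TensorBoard(log_dir='./tf_logs',
                          histogram_freq=1,
                          write_graph=True,
                          embeddings_freq=1,
                          embeddings_layer_names=['ft_fc1'],
                          embeddings_metadata='metadata.tsv')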

## One-hot encoding of labels (100 fine-grained classes)
from keras.utils import np_utils
Y_train.shape
(50000, 1)
Y_train = np_utils.to_categorical(Y_train)
Y_train.shape
(50000, 100)
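The test labels need the same encoding before the model can be evaluated on them (not used below, but easy to forget):

Y_test = np_utils.to_categorical(Y_test)  # same 100-way one-hot encoding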
def generate_batches(X, Y, batch_size=128):
    """Yield consecutive (X, Y) batches, cycling over the data indefinitely."""
    # fit_generator expects the generator to loop forever
    start = 0
    while True:
        yield (X[start:start + batch_size], Y[start:start + batch_size])
        start += batch_size
        if start >= X.shape[0]:  # wrap around after a full pass
            start = 0

batch_size = 64
steps_per_epoch = X_train.shape[0] // batch_size  # whole batches per epoch
model.fit_generator(generate_batches(X_train, Y_train, batch_size=batch_size),
                    steps_per_epoch=steps_per_epoch, epochs=20, verbose=1, 
                    callbacks=[TensorBoard(log_dir='./tf_logs', histogram_freq=10, 
                                           write_graph=True, write_images=True, 
                                           embeddings_freq=10, 
                                           embeddings_layer_names=['block1_conv2', 
                                                                   'block5_conv1', 
                                                                   'ft_fc1'], 
                                           embeddings_metadata=None)])
INFO:tensorflow:Summary name block1_conv1/kernel:0 is illegal; using block1_conv1/kernel_0 instead.
INFO:tensorflow:Summary name block1_conv1/bias:0 is illegal; using block1_conv1/bias_0 instead.
INFO:tensorflow:Summary name block1_conv2/kernel:0 is illegal; using block1_conv2/kernel_0 instead.
INFO:tensorflow:Summary name block1_conv2/bias:0 is illegal; using block1_conv2/bias_0 instead.
INFO:tensorflow:Summary name block2_conv1/kernel:0 is illegal; using block2_conv1/kernel_0 instead.
INFO:tensorflow:Summary name block2_conv1/bias:0 is illegal; using block2_conv1/bias_0 instead.
INFO:tensorflow:Summary name block2_conv2/kernel:0 is illegal; using block2_conv2/kernel_0 instead.
INFO:tensorflow:Summary name block2_conv2/bias:0 is illegal; using block2_conv2/bias_0 instead.
INFO:tensorflow:Summary name block3_conv1/kernel:0 is illegal; using block3_conv1/kernel_0 instead.
INFO:tensorflow:Summary name block3_conv1/bias:0 is illegal; using block3_conv1/bias_0 instead.
INFO:tensorflow:Summary name block3_conv2/kernel:0 is illegal; using block3_conv2/kernel_0 instead.
INFO:tensorflow:Summary name block3_conv2/bias:0 is illegal; using block3_conv2/bias_0 instead.
INFO:tensorflow:Summary name block3_conv3/kernel:0 is illegal; using block3_conv3/kernel_0 instead.
INFO:tensorflow:Summary name block3_conv3/bias:0 is illegal; using block3_conv3/bias_0 instead.
INFO:tensorflow:Summary name block4_conv1/kernel:0 is illegal; using block4_conv1/kernel_0 instead.
INFO:tensorflow:Summary name block4_conv1/bias:0 is illegal; using block4_conv1/bias_0 instead.
INFO:tensorflow:Summary name block4_conv2/kernel:0 is illegal; using block4_conv2/kernel_0 instead.
INFO:tensorflow:Summary name block4_conv2/bias:0 is illegal; using block4_conv2/bias_0 instead.
INFO:tensorflow:Summary name block4_conv3/kernel:0 is illegal; using block4_conv3/kernel_0 instead.
INFO:tensorflow:Summary name block4_conv3/bias:0 is illegal; using block4_conv3/bias_0 instead.
INFO:tensorflow:Summary name block5_conv1/kernel:0 is illegal; using block5_conv1/kernel_0 instead.
INFO:tensorflow:Summary name block5_conv1/bias:0 is illegal; using block5_conv1/bias_0 instead.
INFO:tensorflow:Summary name block5_conv2/kernel:0 is illegal; using block5_conv2/kernel_0 instead.
INFO:tensorflow:Summary name block5_conv2/bias:0 is illegal; using block5_conv2/bias_0 instead.
INFO:tensorflow:Summary name block5_conv3/kernel:0 is illegal; using block5_conv3/kernel_0 instead.
INFO:tensorflow:Summary name block5_conv3/bias:0 is illegal; using block5_conv3/bias_0 instead.
INFO:tensorflow:Summary name ft_fc1/kernel:0 is illegal; using ft_fc1/kernel_0 instead.
INFO:tensorflow:Summary name ft_fc1/bias:0 is illegal; using ft_fc1/bias_0 instead.
INFO:tensorflow:Summary name batch_normalization_1/gamma:0 is illegal; using batch_normalization_1/gamma_0 instead.
INFO:tensorflow:Summary name batch_normalization_1/beta:0 is illegal; using batch_normalization_1/beta_0 instead.
INFO:tensorflow:Summary name batch_normalization_1/moving_mean:0 is illegal; using batch_normalization_1/moving_mean_0 instead.
INFO:tensorflow:Summary name batch_normalization_1/moving_variance:0 is illegal; using batch_normalization_1/moving_variance_0 instead.
INFO:tensorflow:Summary name dense_1/kernel:0 is illegal; using dense_1/kernel_0 instead.
INFO:tensorflow:Summary name dense_1/bias:0 is illegal; using dense_1/bias_0 instead.
Epoch 1/20
781/781 [==============================] - 49s - loss: 0.0161 - acc: 0.9974    
Epoch 2/20
781/781 [==============================] - 48s - loss: 1.1923e-07 - acc: 1.0000    
Epoch 3/20
781/781 [==============================] - 47s - loss: 1.1922e-07 - acc: 1.0000
Epoch 4/20
781/781 [==============================] - 47s - loss: 1.1922e-07 - acc: 1.0000    
Epoch 5/20
781/781 [==============================] - 48s - loss: 1.1922e-07 - acc: 1.0000    
Epoch 6/20
781/781 [==============================] - 48s - loss: 1.1921e-07 - acc: 1.0000    
Epoch 7/20
781/781 [==============================] - 47s - loss: 1.1921e-07 - acc: 1.0000    
Epoch 8/20
781/781 [==============================] - 48s - loss: 1.1922e-07 - acc: 1.0000    
Epoch 9/20
781/781 [==============================] - 48s - loss: 1.1921e-07 - acc: 1.0000    
Epoch 10/20
781/781 [==============================] - 47s - loss: 1.1921e-07 - acc: 1.0000
Epoch 11/20
781/781 [==============================] - 48s - loss: 1.1921e-07 - acc: 1.0000    
Epoch 12/20
781/781 [==============================] - 47s - loss: 1.1921e-07 - acc: 1.0000    
Epoch 13/20
781/781 [==============================] - 47s - loss: 1.1921e-07 - acc: 1.0000    
Epoch 14/20
781/781 [==============================] - 48s - loss: 1.1921e-07 - acc: 1.0000    
Epoch 15/20
781/781 [==============================] - 46s - loss: 1.1921e-07 - acc: 1.0000
Epoch 16/20
781/781 [==============================] - 47s - loss: 1.1921e-07 - acc: 1.0000    
Epoch 17/20
781/781 [==============================] - 47s - loss: 1.1921e-07 - acc: 1.0000
Epoch 18/20
781/781 [==============================] - 47s - loss: 1.1921e-07 - acc: 1.0000    
Epoch 19/20
781/781 [==============================] - 47s - loss: 1.1921e-07 - acc: 1.0000    
Epoch 20/20
781/781 [==============================] - 47s - loss: 1.1921e-07 - acc: 1.0000    

<keras.callbacks.History at 0x7fdb8f8f2be0>

Running TensorBoard

%%bash
python -m tensorflow.tensorboard --logdir=./tf_logs
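If the tensorflow.tensorboard module path is unavailable (it moved in later TensorFlow releases), the standalone tensorboard --logdir=./tf_logs command does the same job; either way, TensorBoard serves its dashboards on http://localhost:6006 by default.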

tf.Queue integration with Keras

Source: https://gist.github.com/Dref360/43e20eda5eb5834b61bc06a4c1855b29

The example below times the same training loop twice: first feeding every batch through feed_dict, then dequeuing batches from a tf.FIFOQueue kept full by background threads, so input preparation overlaps with computation.

import operator
import threading
from functools import reduce

import keras
import keras.backend as K
from keras.engine import Model
import numpy as np
import tensorflow as tf
import time
from keras.layers import Conv2D
from tqdm import tqdm
Using TensorFlow backend.
def prod(factors):
    """Return the product of an iterable of factors."""
    return reduce(operator.mul, factors, 1)
TRAINING = True
with K.get_session() as sess:
    shp = [10, 200, 200, 3]  # input batch: 10 RGB images of 200x200
    shp1 = [10, 7, 7, 80]    # target batch: a 7x7 grid with 80 channels
    inp = K.placeholder(shp)
    inp1 = K.placeholder(shp1)
    # FIFO queue holding up to 20 (input, target) batch pairs
    queue = tf.FIFOQueue(20, [tf.float32, tf.float32], [shp, shp1])
    x1, y1 = queue.dequeue()
    enqueue = queue.enqueue([inp, inp1])
    # Build ResNet50 directly on the dequeued tensor, then strip the
    # classification head (last three layers) to expose the conv features
    model = keras.applications.ResNet50(False, "imagenet", x1, shp[1:])
    for i in range(3):
        model.layers.pop()
        model.layers[-1].outbound_nodes = []
        model.outputs = [model.layers[-1].output]
    output = model.outputs[0]  # 7x7 feature map
    # Reduce filter size to avoid OOM
    output = Conv2D(32, (1, 1), padding="same", activation='relu')(output)
    output3 = Conv2D(5 * (4 + 11 + 1), (1, 1), padding="same", activation='relu')(
        output)  # YOLO output B (4 + nb_class +1)
    cost = tf.reduce_sum(tf.abs(output3 - y1))
    optimizer = tf.train.RMSPropOptimizer(0.001).minimize(cost)
    sess.run(tf.global_variables_initializer())


    def get_input():
        # Stand-in for a slow I/O or preprocessing pipeline
        return (np.arange(prod(shp)).reshape(shp).astype(np.float32),
                np.arange(prod(shp1)).reshape(shp1).astype(np.float32))


    def generate(coord, enqueue_op):
        # Producer thread: keeps the queue filled until the coordinator stops it
        while not coord.should_stop():
            inp_feed, inp1_feed = get_input()
            sess.run(enqueue_op, feed_dict={inp: inp_feed, inp1: inp1_feed})


    # Baseline: feed every batch through feed_dict (input prep blocks the run)
    start = time.time()
    for i in tqdm(range(10)):  # EPOCH
        for j in range(30):  # Batch
            x, y = get_input()
            optimizer_, s = sess.run([optimizer, queue.size()],
                                     feed_dict={x1: x, y1: y, K.learning_phase(): int(TRAINING)})
    print("Took : ", time.time() - start)


    # Queue-fed version: background threads keep the FIFOQueue full, so
    # sess.run() dequeues ready-made batches instead of waiting on get_input()
    coordinator = tf.train.Coordinator()
    threads = [threading.Thread(target=generate, args=(coordinator, enqueue)) for i in range(10)]
    for t in threads:
        t.start()
    start = time.time()
    for i in tqdm(range(10)):  # EPOCH
        for j in range(30):  # Batch
            optimizer_, s = sess.run([optimizer, queue.size()],
                                     feed_dict={K.learning_phase(): int(TRAINING)})
    print("Took : ", time.time() - start)

    def clear_queue(queue, threads):
        # Drain the queue so producer threads blocked on enqueue can see
        # the coordinator's stop request and exit
        while any([t.is_alive() for t in threads]):
            _, s = sess.run([queue.dequeue(), queue.size()])
            print(s)


    coordinator.request_stop()
    clear_queue(queue, threads)

    coordinator.join(threads)
    print("DONE Queue")
