Graphpipe向けMNISTモデルのテスト (for Expert)

TensorFlow Deep MNIST for ML Beginnersでは、正答率30%と残念な結果だったので、for Expertのモデルも試してみました。

1.学習用ソースコード

ここを参考とさせていただいています。

※Python3で動かすため、構文エラー箇所に修正を加えています。

from __future__ import absolute_import, unicode_literals
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
import shutil
import os.path

export_dir = ‘./tmp/expert-export’

if os.path.exists(export_dir):
shutil.rmtree(export_dir)

def weight_variable(shape):
    """Create a weight Variable of `shape`, initialized from a
    truncated normal distribution with stddev 0.1 (breaks symmetry)."""
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    """Create a bias Variable of `shape`, initialized to a small positive
    constant (0.1) to avoid dead ReLU units at the start of training."""
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

def conv2d(x, W):
    """2-D convolution with stride 1 and SAME padding (output keeps the
    input's spatial size)."""
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
    """2x2 max pooling with stride 2, halving each spatial dimension."""
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')

# Load MNIST (downloads into ./data/ on first run).
mnist = input_data.read_data_sets("data/", one_hot=True)

# ---- Training graph ----
g = tf.Graph()
with g.as_default():
    # Flattened 28x28 input images and one-hot labels.
    x = tf.placeholder("float", shape=[None, 784])
    y_ = tf.placeholder("float", shape=[None, 10])

    # First convolutional layer: 5x5 kernels, 1 input channel -> 32 filters.
    W_conv1 = weight_variable([5, 5, 1, 32])
    b_conv1 = bias_variable([32])
    x_image = tf.reshape(x, [-1, 28, 28, 1])
    h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
    h_pool1 = max_pool_2x2(h_conv1)

    # Second convolutional layer: 32 -> 64 filters.
    W_conv2 = weight_variable([5, 5, 32, 64])
    b_conv2 = bias_variable([64])
    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
    h_pool2 = max_pool_2x2(h_conv2)

    # Fully connected layer: 7*7*64 (two 2x2 poolings of 28x28) -> 1024 units.
    W_fc1 = weight_variable([7 * 7 * 64, 1024])
    b_fc1 = bias_variable([1024])
    h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

    # Dropout; disabled at evaluation time by feeding keep_prob=1.0.
    keep_prob = tf.placeholder("float")
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

    # Readout layer producing class probabilities.
    W_fc2 = weight_variable([1024, 10])
    b_fc2 = bias_variable([10])
    y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)

    cross_entropy = -tf.reduce_sum(y_ * tf.log(y_conv))
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

    sess = tf.Session()
    sess.run(tf.initialize_all_variables())

    # Train on 20000 mini-batches of 50, logging accuracy every 100 steps.
    for i in range(20000):
        batch = mnist.train.next_batch(50)
        if i % 100 == 0:
            train_accuracy = accuracy.eval(
                {x: batch[0], y_: batch[1], keep_prob: 1.0}, sess)
            print("step %d, training accuracy %g" % (i, train_accuracy))
        train_step.run(
            {x: batch[0], y_: batch[1], keep_prob: 0.5}, sess)

    print("test accuracy %g" % accuracy.eval(
        {x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}, sess))

    # Store variable values as numpy arrays so they can be frozen into
    # constants in the export graph below.
    _W_conv1 = W_conv1.eval(sess)
    _b_conv1 = b_conv1.eval(sess)
    _W_conv2 = W_conv2.eval(sess)
    _b_conv2 = b_conv2.eval(sess)
    _W_fc1 = W_fc1.eval(sess)
    _b_fc1 = b_fc1.eval(sess)
    _W_fc2 = W_fc2.eval(sess)
    _b_fc2 = b_fc2.eval(sess)

    sess.close()

# ---- Create new graph for exporting (weights frozen as constants) ----
g_2 = tf.Graph()
with g_2.as_default():
    x_2 = tf.placeholder("float", shape=[None, 784], name="input")

    W_conv1_2 = tf.constant(_W_conv1, name="constant_W_conv1")
    b_conv1_2 = tf.constant(_b_conv1, name="constant_b_conv1")
    x_image_2 = tf.reshape(x_2, [-1, 28, 28, 1])
    h_conv1_2 = tf.nn.relu(conv2d(x_image_2, W_conv1_2) + b_conv1_2)
    h_pool1_2 = max_pool_2x2(h_conv1_2)

    W_conv2_2 = tf.constant(_W_conv2, name="constant_W_conv2")
    b_conv2_2 = tf.constant(_b_conv2, name="constant_b_conv2")
    h_conv2_2 = tf.nn.relu(conv2d(h_pool1_2, W_conv2_2) + b_conv2_2)
    h_pool2_2 = max_pool_2x2(h_conv2_2)

    W_fc1_2 = tf.constant(_W_fc1, name="constant_W_fc1")
    b_fc1_2 = tf.constant(_b_fc1, name="constant_b_fc1")
    h_pool2_flat_2 = tf.reshape(h_pool2_2, [-1, 7 * 7 * 64])
    h_fc1_2 = tf.nn.relu(tf.matmul(h_pool2_flat_2, W_fc1_2) + b_fc1_2)

    W_fc2_2 = tf.constant(_W_fc2, name="constant_W_fc2")
    b_fc2_2 = tf.constant(_b_fc2, name="constant_b_fc2")

    # Dropout is skipped in the exported (inference-only) graph.
    y_conv_2 = tf.nn.softmax(tf.matmul(h_fc1_2, W_fc2_2) + b_fc2_2, name="output")

    sess_2 = tf.Session()
    init_2 = tf.initialize_all_variables()
    sess_2.run(init_2)

    # Serialize the frozen graph so Graphpipe can serve it.
    graph_def = g_2.as_graph_def()
    tf.train.write_graph(graph_def, export_dir, 'expert-graph.pb', as_text=False)

    # Sanity-check the exported graph's accuracy on the test set.
    y__2 = tf.placeholder("float", [None, 10])
    correct_prediction_2 = tf.equal(tf.argmax(y_conv_2, 1), tf.argmax(y__2, 1))
    accuracy_2 = tf.reduce_mean(tf.cast(correct_prediction_2, "float"))

    print("check accuracy %g" % accuracy_2.eval(
        {x_2: mnist.test.images, y__2: mnist.test.labels}, sess_2))

2.Graphpipeの実行

Expert用のpbファイルを指定してdockerを起動します。

docker run -it --rm \
> -v "$PWD:/models/" \
> -p 9002:9002 \
> sleepsonthefloor/graphpipe-tf:cpu \
> --model=/models/expert-graph.pb \
> --listen=0.0.0.0:9002
3.テストコード

for Beginnerと基本的に同じですが、確率も表示するように変えました。

from io import BytesIO
from PIL import Image, ImageOps
import os, sys
import numpy as np
import requests
from graphpipe import remote
from matplotlib import pylab as plt

# Directory containing hand-drawn 28x28 PNG test images.
data_dir_path = u"/tmp/mnist_test"
file_list = os.listdir(r'/tmp/mnist_test')
for file_name in file_list:
    root, ext = os.path.splitext(file_name)
    if ext == u'.png':
        test_file = data_dir_path + '/' + file_name

        # Load as grayscale and shrink to 28x28 to match MNIST input.
        img = Image.open(test_file).convert('L')
        img.thumbnail((28, 28))
        # Invert (MNIST is white-on-black) and flatten to a 1x784 float
        # vector scaled to [0, 1], mirroring the training preprocessing.
        img = map(lambda x: 255 - x, img.getdata())
        img = np.fromiter(img, dtype=np.uint8)
        img = img.reshape(1, 784)
        img = img.astype(np.float32)
        img = np.multiply(img, 1.0 / 255.0)
        # Send the vector to the Graphpipe server for inference.
        pred = remote.execute("http://desktop.scpepper.tokyo:9002", img)
        print('----------')
        print(test_file)
        # print(np.argmax(pred, axis=1))

4.テスト実行
# python mnist_expert_gp.py
———-
/tmp/mnist_test/five001.png
[5 0 1 2 3 4 6 7 8 9] [100. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
———-
/tmp/mnist_test/four001.png
[4 0 1 2 3 5 6 7 8 9] [100. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
———-
/tmp/mnist_test/nine001.png
[7 9 4 8 0 1 2 3 5 6] [75. 11. 9. 4. 0. 0. 0. 0. 0. 0.]
———-
/tmp/mnist_test/one001.png
[1 0 2 3 4 5 6 7 8 9] [100. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
———-
/tmp/mnist_test/seven001.png
[7 1 2 8 0 3 4 5 6 9] [89. 7. 3. 1. 0. 0. 0. 0. 0. 0.]
———-
/tmp/mnist_test/six001.png
[5 9 0 1 2 3 4 6 7 8] [99. 1. 0. 0. 0. 0. 0. 0. 0. 0.]
———-
/tmp/mnist_test/three001.png
[3 0 1 2 4 5 6 7 8 9] [100. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
———-
/tmp/mnist_test/two001.png
[2 0 1 3 4 5 6 7 8 9] [100. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
———-
/tmp/mnist_test/zero001.png
[0 4 9 1 2 3 5 6 7 8] [88. 11. 1. 0. 0. 0. 0. 0. 0. 0.]
———-
/tmp/mnist_test/eight001.png
[6 8 0 1 2 3 4 5 7 9] [65. 34. 0. 0. 0. 0. 0. 0. 0. 0.]

正答率60%に改善されました。

とはいってもまだ60%、文字のサイズや線の太さなど学習データとの差異が結構あるんでしょうね。

シェアする

  • このエントリーをはてなブックマークに追加

フォローする