Preface

  This section describes how to save a trained model and restore it when it is needed again, and how to build a convolutional neural network and implement it in TensorFlow.

TensorFlow Model Persistence

  TensorFlow provides the tf.train.Saver class for saving a TensorFlow computation graph. The following code shows how to save one.

import tensorflow as tf

v1 = tf.Variable(tf.constant(1.0, shape=[1]), name="v1")
v2 = tf.Variable(tf.constant(2.0, shape=[1]), name="v2")
result = v1 + v2

init_op = tf.global_variables_initializer()
# The Saver can be told which variables to save. If only v1 were saved,
# restoring result later would fail because v2 would be missing.
saver = tf.train.Saver()

with tf.Session() as sess:
    sess.run(init_op)
    saver.save(sess, "Saved_model/model.ckpt")

  The code above persists a simple TensorFlow model. In this snippet, saver.save writes the model to Saved_model/model.ckpt, but several files actually appear in that directory: model.ckpt.meta stores the structure of the computation graph; model.ckpt stores the value of every variable in the program (in newer TensorFlow 1.x versions these values are split across model.ckpt.data-* and model.ckpt.index); and the checkpoint file keeps a list of all model files in the directory.
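
  For example, the variables stored in a checkpoint can be inspected with tf.train.NewCheckpointReader. A minimal sketch, assuming the Saved_model/model.ckpt file written by the code above exists:

import tensorflow as tf

# List every variable stored in the checkpoint together with its value.
reader = tf.train.NewCheckpointReader("Saved_model/model.ckpt")
for name in reader.get_variable_to_shape_map():
    print(name, reader.get_tensor(name))
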
  The following code shows how to load this saved TensorFlow model.

import tensorflow as tf

v1 = tf.Variable(tf.constant(1.0, shape=[1]), name="v1")
v2 = tf.Variable(tf.constant(2.0, shape=[1]), name="v2")
result = v1 + v2

saver = tf.train.Saver()

with tf.Session() as sess:
    saver.restore(sess, "Saved_model/model.ckpt")
    print(result.eval())

  This code is almost identical to the code that saves the model. The difference is that the loading code does not run variable initialization; instead, the variable values are loaded from the saved model. If you do not want to define the graph operations again, you can also load the persisted graph directly, as follows:

import tensorflow as tf

saver = tf.train.import_meta_graph("Saved_model/model.ckpt.meta")
with tf.Session() as sess:
    saver.restore(sess, "Saved_model/model.ckpt")
    print(sess.run(tf.get_default_graph().get_tensor_by_name("add:0")))
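
  The comment in the first example mentioned that a Saver can be told which variables to handle. A minimal sketch of that idea, assuming the Saved_model/model.ckpt checkpoint saved above: only v1 is restored, so v2 has to be initialized by hand, and the dictionary form shows how names in the checkpoint can be mapped to differently named variables in the current graph.

import tensorflow as tf

v1 = tf.Variable(tf.constant(1.0, shape=[1]), name="v1")
v2 = tf.Variable(tf.constant(2.0, shape=[1]), name="v2")

# This Saver only handles v1; v2 is not read from the checkpoint.
saver = tf.train.Saver([v1])
# Dictionary form: the key is the variable name in the checkpoint file,
# the value is the variable in the current graph.
# saver = tf.train.Saver({"v1": v1})

with tf.Session() as sess:
    sess.run(v2.initializer)
    saver.restore(sess, "Saved_model/model.ckpt")
    print(sess.run(v1 + v2))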

TensorFlow Best-Practice Example Program

mnist_inference.py

import tensorflow as tf

# Sizes of the input layer, output layer and hidden layer.
INPUT_NODE = 784
OUTPUT_NODE = 10
LAYER1_NODE = 500

def get_weight_variable(shape, regularizer):
    weights = tf.get_variable("weights", shape, initializer=tf.truncated_normal_initializer(stddev=0.1))
    # When a regularizer is given, add the regularization loss of the
    # weights to the 'losses' collection.
    if regularizer is not None:
        tf.add_to_collection('losses', regularizer(weights))
    return weights

def inference(input_tensor, regularizer):
    with tf.variable_scope('layer1'):
        weights = get_weight_variable([INPUT_NODE, LAYER1_NODE], regularizer)
        biases = tf.get_variable("biases", [LAYER1_NODE], initializer=tf.constant_initializer(0.0))
        layer1 = tf.nn.relu(tf.matmul(input_tensor, weights) + biases)

    with tf.variable_scope('layer2'):
        weights = get_weight_variable([LAYER1_NODE, OUTPUT_NODE], regularizer)
        biases = tf.get_variable("biases", [OUTPUT_NODE], initializer=tf.constant_initializer(0.0))
        layer2 = tf.matmul(layer1, weights) + biases

    return layer2

mnist_train.py

import os
import time

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

import mnist_inference

BATCH_SIZE = 100
LEARNING_RATE_BASE = 0.8
LEARNING_RATE_DECAY = 0.99
REGULARIZATION_RATE = 0.0001
TRAINING_STEPS = 30000
MOVING_AVERAGE_DECAY = 0.99
MODEL_SAVE_PATH = "MNIST_model/"
MODEL_NAME = "mnist_model"

def train(mnist):
    x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')

    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    y = mnist_inference.inference(x, regularizer)
    global_step = tf.Variable(0, trainable=False)

    # Loss function, learning rate, moving averages and the training op.
    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE,
        global_step,
        mnist.train.num_examples / BATCH_SIZE, LEARNING_RATE_DECAY,
        staircase=True)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
    with tf.control_dependencies([train_step, variables_averages_op]):
        train_op = tf.no_op(name='train')

    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        start_time = time.time()
        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: xs, y_: ys})
            if i % 1000 == 0:
                print("Spend %.2f s, after %d training step(s), loss on training batch is %g." % (time.time() - start_time, step, loss_value))
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)
                start_time = time.time()

def main(argv=None):
    mnist = input_data.read_data_sets("data", one_hot=True)
    train(mnist)

if __name__ == '__main__':
    tf.app.run()

mnist_eval.py

import time

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

import mnist_inference
import mnist_train

# Interval, in seconds, between two evaluations.
EVAL_INTERVAL_SECS = 1

def evaluate(mnist):
    with tf.Graph().as_default() as g:
        x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name='x-input')
        y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')
        validate_feed = {x: mnist.validation.images, y_: mnist.validation.labels}

        y = mnist_inference.inference(x, None)
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        # Restore the moving-average (shadow) values into the variables.
        variable_averages = tf.train.ExponentialMovingAverage(mnist_train.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        while True:
            with tf.Session() as sess:
                ckpt = tf.train.get_checkpoint_state(mnist_train.MODEL_SAVE_PATH)
                if ckpt and ckpt.model_checkpoint_path:
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    for v in tf.global_variables():
                        print(v.name, ":", v.eval())
                    print("#####################")
                    global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                    accuracy_score = sess.run(accuracy, feed_dict=validate_feed)
                    print("After %s training step(s), validation accuracy = %g" % (global_step, accuracy_score))
                else:
                    print('No checkpoint file found')
                    return
            time.sleep(EVAL_INTERVAL_SECS)

def main(argv=None):
    mnist = input_data.read_data_sets("data", one_hot=True)
    evaluate(mnist)

if __name__ == '__main__':
    main()

Convolutional Neural Networks

Convolutional Layer

  The theoretical background is not repeated here; this article only describes how to build a CNN structure in TensorFlow. A convolutional layer is usually constructed in the following steps (a combined sketch follows the list):

  1. Create the filter (kernel) weights. The shape is four-dimensional: the first two numbers give the filter size, the third is the depth of the current layer, and the fourth is the depth (number of filters) of this layer.

    filter_weight = tf.get_variable('weights', [5, 5, 3, 16],
        initializer=tf.truncated_normal_initializer(stddev=0.1))

  2. Create the bias term; its size equals the filter depth.

    biases = tf.get_variable('biases', [16], initializer=tf.constant_initializer(0.1))

  3. Build the convolution with tf.nn.conv2d(). Its arguments are, in order: the input batch, the filter weights, the strides in each dimension, and the padding method. In the strides argument the first and last numbers must be 1, because the stride only applies to the height and width of the matrix. The padding argument has two options: SAME (zero padding) and VALID (no padding).

    conv = tf.nn.conv2d(input, filter_weight, strides=[1, 1, 1, 1], padding='SAME')

  4. Add the bias with tf.nn.bias_add(); the first argument is the convolution output, the second is the bias.

    bias = tf.nn.bias_add(conv, biases)

  5. Pass the result through an activation function to add non-linearity.

    actived_conv = tf.nn.relu(bias)
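
  Putting the five steps together, a minimal sketch of a complete convolutional layer; the input placeholder, the filter shape [5, 5, 3, 16] and the scope name conv1 are illustrative assumptions rather than fixed choices:

import tensorflow as tf

def conv_layer(input_tensor):
    # Steps 1-5 combined inside one variable scope.
    with tf.variable_scope('conv1'):
        filter_weight = tf.get_variable('weights', [5, 5, 3, 16],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        biases = tf.get_variable('biases', [16], initializer=tf.constant_initializer(0.1))
        conv = tf.nn.conv2d(input_tensor, filter_weight, strides=[1, 1, 1, 1], padding='SAME')
        bias = tf.nn.bias_add(conv, biases)
        actived_conv = tf.nn.relu(bias)
    return actived_conv

# Usage: a batch of 32*32 RGB images; with SAME padding and stride 1 the
# output feature map keeps the spatial size, i.e. shape [None, 32, 32, 16].
images = tf.placeholder(tf.float32, [None, 32, 32, 3], name='images')
feature_map = conv_layer(images)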

Pooling Layer

  A pooling layer effectively shrinks the matrix size and thereby reduces the number of parameters in the final fully connected layers. Using pooling both speeds up computation and helps prevent overfitting.
  A pooling layer performs a much simpler maximum or average computation, and is accordingly called a max-pooling layer (max pooling) or an average-pooling layer (average pooling). The filter in a pooling layer moves in the same way as in a convolutional layer; the only difference is that a convolutional filter spans the whole depth of the input, while a pooling filter only affects nodes at one depth. The following TensorFlow code implements the forward pass of a max-pooling layer:

pool = tf.nn.max_pool(actived_conv, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')

  In tf.nn.max_pool, the first argument is the node matrix of the current layer; the second is the filter size (ksize), whose first and last numbers must be 1; the third is the stride information, whose first and last numbers must also be 1; padding again selects the padding scheme.
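
  For average pooling the counterpart is tf.nn.avg_pool, which takes the same arguments; a one-line sketch assuming the actived_conv tensor from the convolutional-layer example above:

# Same argument layout as tf.nn.max_pool, but averages the values in each window.
pool = tf.nn.avg_pool(actived_conv, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')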

CNN

  The structure of classic convolutional neural networks used for image classification can be summarized with the following regular expression: $$\text{input layer}\to(\text{convolutional layer}+ \to \text{pooling layer}?)+\to \text{fully connected layer}+$$
  In this formula, "convolutional layer+" stands for one or more consecutive convolutional layers; most CNNs use at most three in a row. "pooling layer?" stands for zero or one pooling layer. The LeNet-5 model can then be written as: $$\text{input layer}\to\text{convolutional layer}\to\text{pooling layer}\to\text{convolutional layer}\to\text{pooling layer}\to\text{fully connected layer}\to\text{fully connected layer}\to\text{output layer}$$
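
  As a quick size check: with SAME (zero) padding the output side length of a convolution or pooling layer depends only on the stride, while with VALID padding the filter size also matters: $$\text{out}_{SAME} = \left\lceil \frac{\text{in}}{\text{stride}} \right\rceil \qquad \text{out}_{VALID} = \left\lceil \frac{\text{in} - \text{filter} + 1}{\text{stride}} \right\rceil$$
  For the LeNet-5 implementation below this gives 28 → 28 (conv, stride 1) → 14 (pool, stride 2) → 14 (conv) → 7 (pool), so the flattened vector fed into the first fully connected layer has 7 * 7 * 64 = 3136 nodes.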

LeNet-5

  Source code for solving MNIST handwritten digit classification:

LeNet5_infernece.py:

import tensorflow as tf

# The input is a 28*28 image flattened into 784 values.
INPUT_NODE = 784
# The output gives the score for each of the 10 digits.
OUTPUT_NODE = 10

# Image size.
IMAGE_SIZE = 28
# Number of color channels; MNIST images are grayscale, so 1.
NUM_CHANNELS = 1
# Number of labels.
NUM_LABELS = 10

# Depth of the first convolutional layer.
CONV1_DEEP = 32
# Filter size of the first convolutional layer.
CONV1_SIZE = 5

# Depth of the second convolutional layer.
CONV2_DEEP = 64
# Filter size of the second convolutional layer.
CONV2_SIZE = 5

# Number of nodes in the fully connected layer.
FC_SIZE = 512


# Common convolutional network structure:
#   input -> (conv+ -> pool?)+ -> FC+
# This model: input -> conv -> pool -> conv -> pool -> FC -> FC


def inference(input_tensor, train, regularizer):
    # Layer 1: convolution 1
    # Input: [x-size=28, y-size=28, channel=1]
    # Filter: [x-size=5, y-size=5, channel=1, deep=32], stride 1
    # Output: [x-size=28, y-size=28, deep=32]
    with tf.variable_scope('layer1-conv1'):
        conv1_weights = tf.get_variable(
            name="weight",
            shape=[CONV1_SIZE, CONV1_SIZE, NUM_CHANNELS, CONV1_DEEP],
            initializer=tf.truncated_normal_initializer(stddev=0.1)
        )
        conv1_biases = tf.get_variable(
            name="bias",
            shape=[CONV1_DEEP],
            initializer=tf.constant_initializer(0.0)
        )
        conv1 = tf.nn.conv2d(input_tensor, conv1_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))

    # Layer 2: pooling 1
    # Input: [x-size=28, y-size=28, deep=32]
    # Filter: [x-size=2, y-size=2], stride 2
    # Output: [x-size=14, y-size=14, deep=32]
    with tf.name_scope("layer2-pool1"):
        pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")

    # Layer 3: convolution 2
    # Input: [x-size=14, y-size=14, deep=32]
    # Filter: [x-size=5, y-size=5, channel=32, deep=64], stride 1
    # Output: [x-size=14, y-size=14, deep=64]
    with tf.variable_scope("layer3-conv2"):
        conv2_weights = tf.get_variable(
            name="weight",
            shape=[CONV2_SIZE, CONV2_SIZE, CONV1_DEEP, CONV2_DEEP],
            initializer=tf.truncated_normal_initializer(stddev=0.1)
        )
        conv2_biases = tf.get_variable(
            name="bias",
            shape=[CONV2_DEEP],
            initializer=tf.constant_initializer(0.0)
        )
        conv2 = tf.nn.conv2d(pool1, conv2_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))

    # Layer 4: pooling 2
    # Input: [x-size=14, y-size=14, deep=64]
    # Filter: [x-size=2, y-size=2], stride 2
    # Output: [x-size=7, y-size=7, deep=64]
    with tf.name_scope("layer4-pool2"):
        pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

    # Flatten the 4-D [batch, x-size, y-size, deep] matrix into a 2-D
    # [batch, vector] matrix: length * width * depth becomes one vector.
    pool_shape = pool2.get_shape().as_list()
    nodes = pool_shape[1] * pool_shape[2] * pool_shape[3]
    reshaped = tf.reshape(pool2, [pool_shape[0], nodes])

    # Layer 5: fully connected layer 1
    with tf.variable_scope('layer5-fc1'):
        fc1_weights = tf.get_variable(
            name="weight",
            shape=[nodes, FC_SIZE],
            initializer=tf.truncated_normal_initializer(stddev=0.1)
        )
        # Only the fully connected weights are regularized.
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(fc1_weights))
        fc1_biases = tf.get_variable("bias", [FC_SIZE], initializer=tf.constant_initializer(0.1))

        fc1 = tf.nn.relu(tf.matmul(reshaped, fc1_weights) + fc1_biases)
        # Dropout randomly sets part of the outputs to 0 during training.
        # It helps avoid overfitting and is normally used only on fully
        # connected layers, not on convolutional or pooling layers.
        if train:
            fc1 = tf.nn.dropout(fc1, 0.5)

    # Layer 6: fully connected layer 2
    # Input: a vector of length 512; output: a vector of length 10.
    with tf.variable_scope('layer6-fc2'):
        fc2_weights = tf.get_variable(
            name="weight",
            shape=[FC_SIZE, NUM_LABELS],
            initializer=tf.truncated_normal_initializer(stddev=0.1)
        )
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(fc2_weights))
        fc2_biases = tf.get_variable("bias", [NUM_LABELS], initializer=tf.constant_initializer(0.1))
        logit = tf.matmul(fc1, fc2_weights) + fc2_biases

    return logit


LeNet5_train.py

import os
import time

import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

import LeNet5_infernece

BATCH_SIZE = 1000
LEARNING_RATE_BASE = 0.01
LEARNING_RATE_DECAY = 0.99
REGULARIZATION_RATE = 0.0001
TRAINING_STEPS = 6000
MOVING_AVERAGE_DECAY = 0.99


def train(mnist):
    # Define the input placeholder as a 4-D matrix.
    x = tf.placeholder(tf.float32, [
        BATCH_SIZE,
        LeNet5_infernece.IMAGE_SIZE,
        LeNet5_infernece.IMAGE_SIZE,
        LeNet5_infernece.NUM_CHANNELS],
        name='x-input')
    y_ = tf.placeholder(tf.float32, [None, LeNet5_infernece.OUTPUT_NODE], name='y-input')

    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    # Passing True instead of False here would enable dropout during training
    # (see the train argument of LeNet5_infernece.inference).
    y = LeNet5_infernece.inference(x, False, regularizer)
    global_step = tf.Variable(0, trainable=False)

    # Loss function, learning rate, moving averages and the training op.
    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE,
        global_step,
        mnist.train.num_examples / BATCH_SIZE, LEARNING_RATE_DECAY,
        staircase=True)

    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
    with tf.control_dependencies([train_step, variables_averages_op]):
        train_op = tf.no_op(name='train')

    # Initialize the TensorFlow persistence class.
    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        start_time = time.time()
        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)

            # Reshape the flat input batch into a 4-D matrix.
            reshaped_xs = np.reshape(xs, (
                BATCH_SIZE,
                LeNet5_infernece.IMAGE_SIZE,
                LeNet5_infernece.IMAGE_SIZE,
                LeNet5_infernece.NUM_CHANNELS))
            _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: reshaped_xs, y_: ys})

            if i % 50 == 0:
                print("Spend %.2f s, after %d training step(s), loss on training batch is %g." % (time.time() - start_time, step, loss_value))
                start_time = time.time()

def main(argv=None):
    mnist = input_data.read_data_sets("data", one_hot=True)
    train(mnist)

if __name__ == '__main__':
    main()