TensorFlow Mechanics 101

Studying mnist.py: Building the Graph

Typing the code out while following the tutorial steps makes it easier to remember.

Builds the MNIST network.
Implements the inference/loss/training pattern for model building.

  1. inference() — Builds the model as far as required for running the network
    forward to make predictions.
  2. loss() — Adds to the inference model the layers required to generate loss.
  3. training() — Adds to the loss model the Ops required to generate and
    apply gradients.
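
As a rough sketch of how these three functions chain together, the snippet below wires them up in order. This is a minimal sketch assuming the mnist module defined in this section is importable; the placeholder shapes and hyper-parameters simply mirror the defaults used later in fully_connected_feed.py.

import tensorflow as tf
import mnist  # the module built in this section

# Placeholders for one batch of flattened images and their integer labels.
images = tf.placeholder(tf.float32, shape=(100, mnist.IMAGE_PIXELS))
labels = tf.placeholder(tf.int32, shape=(100,))

logits = mnist.inference(images, hidden1_units=128, hidden2_units=32)  # 1. forward pass
loss_op = mnist.loss(logits, labels)                                   # 2. loss layered on the logits
train_op = mnist.training(loss_op, learning_rate=0.01)                 # 3. gradient descent + global step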
import math
import tensorflow as tf

NUM_CLASSES = 10
IMAGE_SIZE = 28
IMAGE_PIXELS = IMAGE_SIZE * IMAGE_SIZE


def inference(images, hidden1_units, hidden2_units):
    # inference() builds the graph as far as needed to return the tensor
    # containing the output predictions.
    '''
    Args:
        images: Images placeholder, from inputs().
        hidden1_units: Size of the first hidden layer.
        hidden2_units: Size of the second hidden layer.
    Returns:
        softmax_linear: Output tensor with the computed logits.
    '''
    # Hidden 1
    # Because these layers are created inside the hidden1 scope, the unique
    # name given to the weights variable will be "hidden1/weights".
    with tf.name_scope('hidden1'):
        weights = tf.Variable(
            tf.truncated_normal([IMAGE_PIXELS, hidden1_units],
                                stddev=1.0 / math.sqrt(float(IMAGE_PIXELS))),
            name='weights')  # truncated_normal(shape, mean, stddev) initializes the weights from a truncated normal distribution
        biases = tf.Variable(tf.zeros([hidden1_units]),
                             name='biases')
        hidden1 = tf.nn.relu(tf.matmul(images, weights) + biases)

    # Hidden 2
    with tf.name_scope('hidden2'):
        weights = tf.Variable(
            tf.truncated_normal([hidden1_units, hidden2_units],
                                stddev=1.0 / math.sqrt(float(hidden1_units))),
            name='weights')
        biases = tf.Variable(tf.zeros([hidden2_units]),
                             name='biases')
        hidden2 = tf.nn.relu(tf.matmul(hidden1, weights) + biases)

    # Linear
    with tf.name_scope('softmax_linear'):
        weights = tf.Variable(
            tf.truncated_normal([hidden2_units, NUM_CLASSES],
                                stddev=1.0 / math.sqrt(float(hidden2_units))),
            name='weights')
        biases = tf.Variable(tf.zeros([NUM_CLASSES]),
                             name='biases')
        logits = tf.matmul(hidden2, weights) + biases

    return logits  # the logits tensor holding the network's output


def loss(logits, labels):
    '''
    Calculates the loss from the logits and the labels.
    Args:
        logits: Logits tensor, float - [batch_size, NUM_CLASSES].
        labels: Labels tensor, int32 - [batch_size].
    Returns:
        loss: Loss tensor of type float.
    '''
    labels = tf.to_int64(labels)
    return tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)


def training(loss, learning_rate):
    '''
    Sets up the training Ops.
    Creates a summarizer to track the loss over time in TensorBoard.
    Creates an optimizer and applies the gradients to all trainable variables.
    The Op returned by this function is what must be passed to the
    `sess.run()` call to cause the model to train.
    Args:
        loss: Loss tensor, from loss().
        learning_rate: The learning rate to use for gradient descent.
    Returns:
        train_op: The Op for training.
    '''
    # Take the loss tensor from loss() and hand it to tf.summary.scalar, which,
    # together with the summary FileWriter (see below), emits summary values
    # into the events file.
    tf.summary.scalar('loss', loss)
    # Apply gradient descent.
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    # Create a variable to hold the global training step counter.
    # minimize() both updates the trainable weights and increments the global step.
    global_step = tf.Variable(0, name='global_step', trainable=False)
    train_op = optimizer.minimize(loss, global_step=global_step)

    return train_op  # the Op whose execution performs one training step


def evaluation(logits, labels):
    '''
    Evaluate the quality of the logits at predicting the label.
    Args:
        logits: Logits tensor, float - [batch_size, NUM_CLASSES].
        labels: Labels tensor, int32 - [batch_size], with values in the
            range [0, NUM_CLASSES).
    Returns:
        A scalar int32 tensor with the number of examples (out of batch_size)
        that were predicted correctly.
    '''
    correct = tf.nn.in_top_k(logits, labels, 1)
    # Return the number of true entries.
    return tf.reduce_sum(tf.cast(correct, tf.int32))
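
To make the evaluation Op concrete, here is a small hedged check: evaluation() counts a prediction as correct when the true label is the top-scoring class, and returns how many predictions in the batch were correct. The toy logits and labels below are illustrative only (in_top_k only requires a [batch, classes] tensor, so three classes suffice here).

import tensorflow as tf
import mnist

# Two examples, three "classes" each; both rows score their true label highest.
toy_logits = tf.constant([[0.1, 2.0, 0.3],
                          [1.5, 0.2, 0.1]])
toy_labels = tf.constant([1, 0])
correct_count = mnist.evaluation(toy_logits, toy_labels)

with tf.Session() as sess:
    print(sess.run(correct_count))  # expected: 2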

Studying fully_connected_feed.py: Training the Model

The mnist.py file from the previous section builds the graph. Once the graph is built, the user code in fully_connected_feed.py drives iterative training and evaluation in a loop.

Trains and Evaluates the MNIST network using a feed dictionary.
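
Before walking through the file, here is a minimal sketch of the feed-dictionary mechanism it relies on (the values are illustrative): a placeholder fixes the shape of an input, and sess.run() substitutes a real NumPy array for it at execution time via feed_dict.

import numpy as np
import tensorflow as tf

x = tf.placeholder(tf.float32, shape=(2, 3))
y = tf.reduce_sum(x)

with tf.Session() as sess:
    batch = np.ones((2, 3), dtype=np.float32)
    print(sess.run(y, feed_dict={x: batch}))  # -> 6.0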

import argparse
import os
import sys
import time

import tensorflow as tf
import input_data  # downloads and reads the MNIST data sets
import mnist

FLAGS = None

# placeholder_inputs() creates two tf.placeholder ops that define the shapes of
# the inputs fed into the graph, including the batch_size; the actual training
# examples are fed in later.

# In each step of the training loop below, the full image and label data sets
# are sliced to match the batch_size configured for each op, the placeholder
# ops are filled up to that batch_size, and the data is then passed to
# sess.run() through the feed_dict argument.
def placeholder_inputs(batch_size):
    '''
    Generate placeholder variables to represent the input tensors.
    These placeholders are used as inputs by the rest of the model building
    code and will be fed from the downloaded data in the .run() loop, below.
    Args:
        batch_size: The batch size will be baked into both placeholders.
    Returns:
        images_placeholder: Images placeholder.
        labels_placeholder: Labels placeholder.
    '''
    images_placeholder = tf.placeholder(tf.float32,
                                        shape=(batch_size, mnist.IMAGE_PIXELS))
    labels_placeholder = tf.placeholder(tf.int32, shape=(batch_size))

    return images_placeholder, labels_placeholder


def fill_feed_dict(data_set, images_pl, labels_pl):
    '''
    Fills the feed_dict for training the given step.
    A feed_dict takes the form of:
    feed_dict = {
        <placeholder>: <tensor of values to be passed for placeholder>,
        ....
    }
    Args:
        data_set: The set of images and labels, from input_data.read_data_sets()
        images_pl: The images placeholder, from placeholder_inputs().
        labels_pl: The labels placeholder, from placeholder_inputs().
    Returns:
        feed_dict: The feed dictionary mapping from placeholders to values.
    '''
    # Ask for the next batch_size images and labels; the tensors matched to the
    # placeholders will hold this next batch.
    images_feed, labels_feed = data_set.next_batch(FLAGS.batch_size,
                                                   FLAGS.fake_data)
    feed_dict = {
        images_pl: images_feed,
        labels_pl: labels_feed,
    }
    return feed_dict


def do_eval(sess, eval_correct, images_placeholder, labels_placeholder, data_set):
    # Runs one evaluation against the full epoch of data.
    '''
    Args:
        sess: The session in which the model has been trained.
        eval_correct: The Tensor that returns the number of correct predictions.
        images_placeholder: The images placeholder.
        labels_placeholder: The labels placeholder.
        data_set: The set of images and labels to evaluate, from
            input_data.read_data_sets().
    '''
    true_count = 0  # Counts the number of correct predictions.
    steps_per_epoch = data_set.num_examples // FLAGS.batch_size
    num_examples = steps_per_epoch * FLAGS.batch_size
    for step in range(steps_per_epoch):
        feed_dict = fill_feed_dict(data_set, images_placeholder, labels_placeholder)
        true_count += sess.run(eval_correct, feed_dict=feed_dict)
    precision = float(true_count) / num_examples
    print('Num examples: %d  Num correct: %d  Precision @ 1: %0.04f' %
          (num_examples, true_count, precision))


def run_training():
    """Train MNIST for a number of steps."""
    data_sets = input_data.read_data_sets(FLAGS.input_data_dir, FLAGS.fake_data)
    with tf.Graph().as_default():
        # This statement attaches every op built below to the default global
        # tf.Graph instance.
        # Generate placeholders for the images and labels.
        images_placeholder, labels_placeholder = placeholder_inputs(FLAGS.batch_size)
        # Build a Graph that computes predictions from the inference model.
        logits = mnist.inference(images_placeholder, FLAGS.hidden1, FLAGS.hidden2)
        # Add to the Graph the Ops for loss calculation.
        loss = mnist.loss(logits, labels_placeholder)
        # Add to the Graph the Ops that calculate and apply gradients.
        train_op = mnist.training(loss, FLAGS.learning_rate)
        # Add the Op to compare the logits to the labels during evaluation.
        eval_correct = mnist.evaluation(logits, labels_placeholder)
        # Build the summary Tensor based on the TF collection of Summaries.
        summary = tf.summary.merge_all()
        # Add the variable initializer Op.
        init = tf.global_variables_initializer()
        # Create a saver for writing training checkpoints.
        saver = tf.train.Saver()
        # Create a session for running Ops on the Graph.
        sess = tf.Session()
        # Instantiate a SummaryWriter to output summaries and the Graph.
        summary_writer = tf.summary.FileWriter(FLAGS.log_dir, sess.graph)

        sess.run(init)

        # Start training.
        for step in range(FLAGS.max_steps):
            start_time = time.time()
            feed_dict = fill_feed_dict(data_sets.train, images_placeholder,
                                       labels_placeholder)
            _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict)
            duration = time.time() - start_time

            if step % 100 == 0:
                print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration))
                # Update the events file.
                summary_str = sess.run(summary, feed_dict=feed_dict)
                summary_writer.add_summary(summary_str, step)
                summary_writer.flush()

            if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps:
                checkpoint_file = os.path.join(FLAGS.log_dir, 'model.ckpt')
                saver.save(sess, checkpoint_file, global_step=step)
                # saver.restore(sess, FLAGS.train_dir) would reload saved model
                # parameters so that training can resume from a checkpoint.
                # Evaluate against the training set.
                print('Training Data Eval:')
                do_eval(sess,
                        eval_correct,
                        images_placeholder,
                        labels_placeholder,
                        data_sets.train)
                # Evaluate against the validation set.
                print('Validation Data Eval:')
                do_eval(sess,
                        eval_correct,
                        images_placeholder,
                        labels_placeholder,
                        data_sets.validation)
                # Evaluate against the test set.
                print('Test Data Eval:')
                do_eval(sess,
                        eval_correct,
                        images_placeholder,
                        labels_placeholder,
                        data_sets.test)


def main(_):
    if tf.gfile.Exists(FLAGS.log_dir):
        tf.gfile.DeleteRecursively(FLAGS.log_dir)
    tf.gfile.MakeDirs(FLAGS.log_dir)
    run_training()


if __name__ == '__main__':
    parser = argparse.ArgumentParser()  # command-line flag definitions
    parser.add_argument(
        '--learning_rate',
        type=float,
        default=0.01,
        help='Initial learning rate.')
    parser.add_argument(
        '--max_steps',
        type=int,
        default=200,
        help='Number of steps to run trainer.')
    parser.add_argument(
        '--hidden1',
        type=int,
        default=128,
        help='Number of units in hidden layer 1.')
    parser.add_argument(
        '--hidden2',
        type=int,
        default=32,
        help='Number of units in hidden layer 2.')
    parser.add_argument(
        '--batch_size',
        type=int,
        default=100,
        help='Batch size. Must divide evenly into the dataset sizes.')
    parser.add_argument(
        '--input_data_dir',
        type=str,
        default=os.path.join(os.getenv('TEST_TMPDIR', '/tmp'),
                             'tensorflow/mnist/input_data'),
        help='Directory to put the input data.')
    parser.add_argument(
        '--log_dir',  # referenced above as FLAGS.log_dir for summaries and checkpoints
        type=str,
        default=os.path.join(os.getenv('TEST_TMPDIR', '/tmp'),
                             'tensorflow/mnist/logs/fully_connected_feed'),
        help='Directory to put the log data.')
    parser.add_argument(
        '--fake_data',
        default=False,
        help='If true, uses fake data for unit testing.',
        action='store_true')
    FLAGS, unparsed = parser.parse_known_args()
    tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
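
As noted in the comment next to saver.save() above, a saved checkpoint can be reloaded to continue training. A hedged sketch, assuming the same graph has already been rebuilt and that run_training() previously wrote a checkpoint under FLAGS.log_dir:

checkpoint_path = tf.train.latest_checkpoint(FLAGS.log_dir)  # newest model.ckpt-* file, if any
saver = tf.train.Saver()
with tf.Session() as sess:
    saver.restore(sess, checkpoint_path)
    # ...continue the training loop with sess.run([train_op, loss], feed_dict=...)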