TensorFlow's tf.train.SummaryWriter can be used to write the data recorded from TensorFlow tensors out to event files. These event files can then be read by the TensorBoard visualization tool for deeper analysis.
1. Purpose of the function
tf.train.SummaryWriter saves the data that TensorBoard needs during training. Note that in TensorFlow 1.x this class was renamed to tf.summary.FileWriter, which is the name used in the signature and examples below.
2. Parameters
tf.summary.FileWriter(logdir, graph=None, max_queue=10, flush_secs=120, filename_suffix=None)
The parameters are as follows:
- logdir: the directory to which the event files are written.
- graph (default None): the graph to write; it can be obtained with tf.get_default_graph().
- max_queue: the maximum number of pending events and summaries held in the queue. Defaults to 10.
- flush_secs: the maximum number of seconds between two flushes of pending events to disk. Defaults to 120 seconds.
- filename_suffix: a suffix appended to the event file names.
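For example, a writer that keeps at most 5 pending events in the queue and flushes them to disk every 10 seconds could be created as follows (a minimal sketch; the log directory is a placeholder):
import tensorflow as tf

# A minimal sketch: a FileWriter with explicit queue and flush settings.
# '/tmp/example_logs' is a placeholder path.
writer = tf.summary.FileWriter('/tmp/example_logs',
                               graph=tf.get_default_graph(),
                               max_queue=5,
                               flush_secs=10)
writer.close()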
3. Usage
The following example code shows how to record data while the program runs:
import tensorflow as tf
import numpy as np

# Build the graph and the data
with tf.name_scope("input"):
    x = tf.placeholder(tf.float32, [None, 1], name="x_input")
    y = tf.placeholder(tf.float32, [None, 1], name="y_input")
with tf.name_scope("L1"):
    W1 = tf.Variable(tf.random_normal([1, 10]), name="Weights")
    b1 = tf.Variable(tf.zeros([1, 10]), name="bias")
    L1 = tf.matmul(x, W1) + b1
    # Apply a sigmoid activation
    L1 = tf.nn.sigmoid(L1)
with tf.name_scope("L2"):
    W2 = tf.Variable(tf.random_normal([10, 1]), name="Weights")
    b2 = tf.Variable(tf.zeros([1, 1]), name="bias")
    L2 = tf.matmul(L1, W2) + b2
loss = tf.reduce_mean(tf.square(y - L2))
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
# Initialize the variables
init = tf.global_variables_initializer()
# Create the writer that records the graph for TensorBoard
log_path = "/tmp/tensorflow_logs/chapter2_logs"
writer = tf.summary.FileWriter(log_path, tf.get_default_graph())
# Run the model
data = np.linspace(-1, 1, 100)[:, np.newaxis]
with tf.Session() as sess:
    sess.run(init)
    for i in range(1000):
        sess.run(train_step, feed_dict={x: data, y: data})
        if i % 10 == 0:
            l, result = sess.run([loss, L2], feed_dict={x: data, y: data})
            print(l)
# Close the writer
writer.close()
After running the code above, the data recorded during training can be found under "/tmp/tensorflow_logs/chapter2_logs".
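To visualize it, start TensorBoard pointed at that directory, for example with tensorboard --logdir=/tmp/tensorflow_logs/chapter2_logs, then open the URL it prints in a browser; the GRAPHS tab shows the graph written above.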
We can also use tf.summary to build a more detailed example:
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets("data/", one_hot=True)
def variable_summaries(var, name):
    """Attach a lot of summaries to a Tensor."""
    with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var)
        tf.summary.scalar('mean/' + name, mean)
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar('stddev/' + name, stddev)
        tf.summary.scalar('max/' + name, tf.reduce_max(var))
        tf.summary.scalar('min/' + name, tf.reduce_min(var))
        tf.summary.histogram(name, var)
def nn_layer(input_tensor, input_dim, output_dim, layer_name, act=tf.nn.relu):
    with tf.name_scope(layer_name):
        with tf.name_scope('weights'):
            # Use a small random init: all-zero weights would leave this
            # ReLU layer with zero outputs and zero gradients, so it could
            # never learn.
            weights = tf.Variable(
                tf.truncated_normal([input_dim, output_dim], stddev=0.1))
            variable_summaries(weights, layer_name + '/weights')
        with tf.name_scope('biases'):
            biases = tf.Variable(tf.zeros([output_dim]))
            variable_summaries(biases, layer_name + '/biases')
        with tf.name_scope('Wx_plus_b'):
            preactivate = tf.matmul(input_tensor, weights) + biases
            tf.summary.histogram(layer_name + '/pre_activations', preactivate)
        activations = act(preactivate, name='activation')
        tf.summary.histogram(layer_name + '/activations', activations)
        return activations
with tf.name_scope('input'):
    x = tf.placeholder(tf.float32, [None, 784], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, 10], name='y-input')

# Build the network
hidden1 = nn_layer(x, 784, 500, 'layer1')

with tf.name_scope('softmax_linear'):
    with tf.name_scope('weights'):
        weights = tf.Variable(tf.zeros([500, 10]), name='weights')
        variable_summaries(weights, 'softmax_linear/weights')
    with tf.name_scope('biases'):
        biases = tf.Variable(tf.zeros([10]), name='biases')
        variable_summaries(biases, 'softmax_linear/biases')
    with tf.name_scope('Wx_plus_b'):
        logits = tf.matmul(hidden1, weights) + biases
        tf.summary.histogram('softmax_linear/pre_activations', logits)
    y = tf.nn.softmax(logits, name='softmax')
with tf.name_scope('cross_entropy'):
    diff = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_)
    with tf.name_scope('total'):
        cross_entropy = tf.reduce_mean(diff)
    tf.summary.scalar('cross_entropy', cross_entropy)

with tf.name_scope('train'):
    train_step = tf.train.AdamOptimizer(0.002).minimize(cross_entropy)

with tf.name_scope('accuracy'):
    with tf.name_scope('correct_prediction'):
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    with tf.name_scope('accuracy'):
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    tf.summary.scalar('accuracy', accuracy)

# Merge all summary ops into a single op, and create one writer per split.
# Note: the session does not exist yet at this point, so pass the default
# graph rather than sess.graph.
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter('/tmp/mnist_logs/train',
                                     tf.get_default_graph())
test_writer = tf.summary.FileWriter('/tmp/mnist_logs/test')
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(10000):
        if i % 10 == 0:  # Record summaries on the test set
            summary, acc = sess.run(
                [merged, accuracy],
                feed_dict={x: mnist.test.images, y_: mnist.test.labels})
            test_writer.add_summary(summary, i)
            print('Accuracy at step %s: %s' % (i, acc))
        else:  # Train on a mini-batch and record summaries on it
            xs, ys = mnist.train.next_batch(100)
            summary, _ = sess.run(
                [merged, train_step],
                feed_dict={x: xs, y_: ys})
            train_writer.add_summary(summary, i)

train_writer.close()
test_writer.close()
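Besides viewing these logs in TensorBoard, the event files can also be read back programmatically. Below is a minimal sketch that prints the recorded scalar summaries; the glob pattern is an assumption based on TensorFlow's default events.out.tfevents.* file naming:
import glob
import tensorflow as tf

# A minimal sketch: iterate over the scalar summaries in the training
# event files (the file-name pattern is TensorFlow's default, assumed here).
for event_file in glob.glob('/tmp/mnist_logs/train/events.out.tfevents.*'):
    for event in tf.train.summary_iterator(event_file):
        for value in event.summary.value:
            if value.HasField('simple_value'):  # scalar summaries only
                print(event.step, value.tag, value.simple_value)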
This code records summary data at every training iteration and writes it to the event files; TensorBoard then renders these as a web page. Opening that page in TensorBoard shows all of the metrics recorded for the network above.
The resulting TensorBoard charts show how far training has progressed, and if any odd behavior appears they reveal which variable changed and how. In this particular case, the charts track how the network's weights evolve during training. This is a key use of the metrics: compare what you observe against your expectations before continuing to train and test.
Finally, there is much more analysis that could be done with these metrics, but that is a topic for a separate TensorBoard tutorial. At this point we can build neural networks fit for engineers and scientists to use, and we know how to feed their metrics into TensorBoard for inspection.