Saver & Restore
Saver and restore come into play when you have trained on only part of the data: to continue training later, you need to save the model and then reload it from the last saved state.
On disk, each saved model generally consists of three files, plus a checkpoint file.
The checkpoint file is used to locate the current training model when the directory contains many saved models; with it, TensorFlow can find the newest one, e.g. model.ckpt-100.
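As a quick illustration of that lookup, here is a minimal sketch (assuming the checkpoints live in a hypothetical ./ckpt_dir directory) that asks TensorFlow to resolve the newest model path from the checkpoint file:

import tensorflow as tf

# "./ckpt_dir" is a hypothetical directory that already holds
# model.ckpt-50, model.ckpt-100 and the checkpoint file
latest = tf.train.latest_checkpoint("./ckpt_dir")
print(latest)  # e.g. ./ckpt_dir/model.ckpt-100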
Here is a small test script (note: for a local path you must prepend "./", otherwise restore will raise an error):
# -*- coding:utf-8 -*-
import tensorflow as tf
import numpy as np
isTrain = True
train_steps = 100
checkpoint_steps = 50
checkpoint_dir = ''
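# a toy linear model: learn w and b so that w * x + b approximates y = 4 * x + 4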
x = tf.placeholder(tf.float32, shape=[None, 1])
y = 4 * x + 4
w = tf.Variable(tf.random_normal([1], -1, 1))
b = tf.Variable(tf.zeros([1]))
y_predict = w * x + b
loss = tf.reduce_mean(tf.square(y - y_predict))
optimizer = tf.train.GradientDescentOptimizer(0.5)
train = optimizer.minimize(loss)
# the next four lines switch to restore/test mode; comment them out when training
isTrain = False
train_steps = 100
checkpoint_steps = 50
checkpoint_dir = ''
i = 0
saver = tf.train.Saver() # defaults to saving all variables - in this case w and b
x_data = np.reshape(np.random.rand(10).astype(np.float32), (10, 1))
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())  # initialize_all_variables() is deprecated
    print("isTrain: %s" % isTrain)
    if isTrain:
        for i in range(train_steps):
            sess.run(train, feed_dict={x: x_data})
            # save a checkpoint every checkpoint_steps steps
            if (i + 1) % checkpoint_steps == 0:
                saver.save(sess, checkpoint_dir + 'model.ckpt', global_step=i + 1)
    else:
        ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
        if ckpt and ckpt.model_checkpoint_path:
            print("gongjia : %s" % ckpt.model_checkpoint_path)
            # for a local path, be sure to prepend "./", otherwise restore will fail
            saver.restore(sess, "./%s" % ckpt.model_checkpoint_path)
        else:
            print("model not found")
    print(sess.run(w))
    print(sess.run(b))
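To double-check what the Saver actually wrote, you can read a checkpoint back without rebuilding the graph. A minimal sketch, assuming the script above ran in training mode and left a model.ckpt-100 in the current directory (adjust the step number to whatever was saved last):

import tensorflow as tf

# "./model.ckpt-100" is the checkpoint prefix written by the Saver above
reader = tf.train.NewCheckpointReader("./model.ckpt-100")
for name, shape in reader.get_variable_to_shape_map().items():
    # print every stored variable with its shape and value
    print(name, shape, reader.get_tensor(name))

Because w and b were created without explicit names, they appear under TensorFlow's default names (Variable, Variable_1); passing name='w' and name='b' to tf.Variable makes checkpoints easier to inspect and to restore selectively.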