#Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("data/",one_hot=True)
import tensorflow as tf
#Parameters
learning_rate = 0.001
batch_size = 100
display_step = 1
model_path = "save/model.ckpt"
#Network Parameters
n_hidden_1 = 256 #1st layer number of features
n_hidden_2 = 256 #2nd layer number of features
n_input = 784 #MNIST data input (img shape:28*28)
n_classes = 10 #MNIST total classes (0-9 digits)
#tf Graph input
x = tf.placeholder("float",[None,n_input])
y = tf.placeholder("float",[None,n_classes])
#Create model
def multilayer_perceptron(x,weights,biases):
    #Hidden layer with RELU activation
    layer_1 = tf.add(tf.matmul(x,weights['h1']),biases['b1'])
    layer_1 = tf.nn.relu(layer_1)
    #Hidden layer with RELU activation
    layer_2 = tf.add(tf.matmul(layer_1,weights['h2']),biases['b2'])
    layer_2 = tf.nn.relu(layer_2)
    #Output layer with linear activation
    out_layer = tf.matmul(layer_2,weights['out']) + biases['out']
    return out_layer
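#(The network is thus 784 -> 256 -> 256 -> 10: two ReLU hidden layers
# and a linear output layer whose logits feed the softmax loss below)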
#Store layers' weights & biases
weights = {
    'h1': tf.Variable(tf.random_normal([n_input,n_hidden_1])),
    'h2': tf.Variable(tf.random_normal([n_hidden_1,n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_hidden_2,n_classes]))
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}
#Construct model
pred = multilayer_perceptron(x,weights,biases)
#Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred,labels=y))
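#(softmax_cross_entropy_with_logits applies the softmax itself, which
# is why the model's output layer above is left linear)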
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
#Initialize the variables
init = tf.global_variables_initializer()
#Saver op to save the model
saver = tf.train.Saver()
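#(For reference: with the default V2 checkpoint format, saver.save
# writes model.ckpt.index, model.ckpt.data-* and model.ckpt.meta files
# plus a "checkpoint" bookkeeping file next to model_path, and
# saver.restore reads them back via the same path prefix)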
print("运行第一个Session...")
with tf.Session() as sess:
    sess.run(init)
    #Training cycle
    for epoch in range(3):
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples/batch_size)
        #Loop over all batches
        for i in range(total_batch):
            batch_x,batch_y = mnist.train.next_batch(batch_size)
            #Run optimization op (backprop) and cost op (to get loss value)
            _, c = sess.run([optimizer,cost],feed_dict={x:batch_x,y:batch_y})
            #Compute average loss
            avg_cost += c / total_batch
        #Display logs per epoch step
        if epoch % display_step == 0:
            print("Epoch:",'%04d' % (epoch+1),"cost=","{:.9f}".format(avg_cost))
    print("First run finished")
    #Save the model's weights and biases
    save_path = saver.save(sess,model_path)
    print("Model saved in file: %s" % save_path)
print("运行第二个Session...")
with tf.Session() as sess:
    sess.run(init)
    #Restore model weights from the previously saved model
    #(saver.restore returns None, so there is nothing to assign)
    saver.restore(sess,model_path)
    print("Model restored from: %s" % model_path)
    #Resume training
    for epoch in range(7):
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples/batch_size)
        #Loop over all batches
        for i in range(total_batch):
            batch_x,batch_y = mnist.train.next_batch(batch_size)
            #Run optimization op (backprop) and cost op (to get loss value)
            _, c = sess.run([optimizer,cost],feed_dict={x:batch_x,y:batch_y})
            #Compute average loss
            avg_cost += c / total_batch
        #Display logs per epoch step
        if epoch % display_step == 0:
            print("Epoch:",'%04d' % (epoch+1),"cost=","{:.9f}".format(avg_cost))
    print("Second run finished")