First, we need to download the MNIST dataset and then read it in:
from tensorflow.examples.tutorials.mnist import input_data
data=input_data.read_data_sets("具体的路径",one_hot=True)
#当然如果我们没有下载mnist数据集的话,可以首先选在这儿将具体的路径设置为"data/mnist",那么将会自动为我们在当前项目目录下下载mnist数据集,网速很慢
Now let's take a look at the array dimensions of the data we just downloaded:
train_imgs=data.train.images
train_labels=data.train.labels
test_imgs=data.test.images
test_labels=data.test.labels
print(train_imgs.shape)
print(train_labels.dtype)
print(type(train_imgs))
train_imgs=train_imgs.reshape([-1,28*28,1])
print(train_imgs.shape)
The output is:
This gives us the type and shape of the input data, which will make it easier to build our own datasets later.
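The screenshot of the output is not reproduced here. For reference, with the standard MNIST training split of 55,000 examples and one_hot=True, the prints above should look roughly like this (a sketch of my own; the label dtype in particular may vary with the TensorFlow version):

(55000, 784)
float64
<class 'numpy.ndarray'>
(55000, 784, 1)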
We can also choose to plot some of the images and inspect them along with their true labels:
#coding=utf-8
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
data=input_data.read_data_sets("/home/jobs/Desktop/code/tensorflow-mnist-master/mnist/data",one_hot=True)
def plot_images(images,cls_true,cls_pred=None):
    assert len(images)==len(cls_true)==9
    # Plot 9 images in a 3x3 grid, writing the true (and predicted) class below each image
    fig,axes=plt.subplots(3,3)
    fig.subplots_adjust(hspace=0.3,wspace=0.3)
    for i,ax in enumerate(axes.flat):
        # Draw the image
        ax.imshow(images[i].reshape([28,28]),cmap="binary")
        # Show the true and the predicted class
        if cls_pred is None:
            xlabel="True:{0}".format(np.where(cls_true[i]==1)[0])
        else:
            xlabel="True:{0},Pred:{1}".format(cls_true[i],cls_pred[i])
        ax.set_xlabel(xlabel)
        ax.set_xticks([])
        ax.set_yticks([])
    plt.show()

images=data.test.images[0:9]
cls_true=data.test.labels[0:9]
plot_images(images=images,cls_true=cls_true)
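One note on the label format before looking at the output: because the labels were loaded with one_hot=True, cls_true[i] is a 10-element one-hot vector, which is why the true class is extracted with np.where above. An equivalent and slightly cleaner variant (a sketch of my own, not part of the original code) converts the one-hot labels to integer class ids with np.argmax before plotting:

# Sketch: convert the one-hot labels to integer class ids first
cls_true_int=np.argmax(data.test.labels[0:9],axis=1)
# cls_true_int is now an array of 9 integer class ids,
# so the xlabel could simply be "True:{0}".format(cls_true_int[i])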
The output:
Now we can use the TensorFlow framework end to end to build the CNN and update its parameters with gradient descent.
Before walking through the whole model, let's first look at a schematic of this process of building the model and updating its weights with gradient descent:
(Schematic diagram)
model.py
Build the CNN model. Note that we reshape the images from a 2-D array into a 4-D tensor, because the convolutional layers expect a 4-D input tensor:
def convolutional(x,keep_prob):
    def conv2d(x,w):
        return tf.nn.conv2d(x,w,strides=[1,1,1,1],padding="SAME")
    def max_pool(x):
        return tf.nn.max_pool(x,ksize=[1,2,2,1],strides=[1,2,2,1],padding="SAME")
    def weight_variable(shape):
        initial=tf.truncated_normal(shape,stddev=0.1)
        return tf.Variable(initial)
    def bias_variable(shape):
        initial=tf.constant(0.1,shape=shape)
        return tf.Variable(initial)
    x_image=tf.reshape(x,[-1,28,28,1])
    w_conv1=weight_variable([5,5,1,32])
    b_conv1=bias_variable([32])
    h_conv1=tf.nn.relu(conv2d(x_image,w_conv1)+b_conv1)
    h_pool1=max_pool(h_conv1)
    print("Building layer 1 of the network")
    w_conv2=weight_variable([5,5,32,64])
    b_conv2=bias_variable([64])
    h_conv2=tf.nn.relu(conv2d(h_pool1,w_conv2)+b_conv2)
    h_pool2=max_pool(h_conv2)
    print("Building layer 2 of the network")
    w_fc1=weight_variable([7*7*64,1024])
    b_fc1=bias_variable([1024])
    h_pool2_flat=tf.reshape(h_pool2,[-1,7*7*64])
    h_fc1=tf.nn.relu(tf.matmul(h_pool2_flat,w_fc1)+b_fc1)
    print("Building layer 3 of the network")
    h_fc1_drop=tf.nn.dropout(h_fc1,keep_prob)
    w_fc2=weight_variable([1024,10])
    b_fc2=bias_variable([10])
    y=tf.nn.softmax(tf.matmul(h_fc1_drop,w_fc2)+b_fc2)
    return y,[w_conv1,b_conv1,w_conv2,b_conv2,w_fc1,b_fc1,w_fc2,b_fc2]
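The fully connected layer size 7*7*64 comes from the two rounds of 2x2 max-pooling: each pooling halves the spatial resolution, so 28x28 becomes 14x14 and then 7x7. A rough shape walkthrough of the graph above (my own annotation, not from the original post):

# x_image:       [batch, 28, 28, 1]
# h_conv1:       [batch, 28, 28, 32]   (5x5 conv, SAME padding keeps 28x28)
# h_pool1:       [batch, 14, 14, 32]   (2x2 max-pool, stride 2)
# h_conv2:       [batch, 14, 14, 64]
# h_pool2:       [batch, 7, 7, 64]
# h_pool2_flat:  [batch, 3136]         (7*7*64 = 3136)
# h_fc1:         [batch, 1024]
# y:             [batch, 10]           (softmax over the 10 digit classes)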
convolutional.py
# Define placeholder variables for the input images. Placeholders let us feed different tensors into the TensorFlow graph at run time; here we define placeholders for the actual input x and the ground-truth output y_.
with tf.variable_scope("convolutional"):
x=tf.placeholder(tf.float32,[None,784])
keep_prob=tf.placeholder(tf.float32)
y,variables=convolutional(x,keep_prob)
y_=tf.placeholder(tf.float32,[None,10])
#定义loss/cost function
cross_entropy=-tf.reduce_sum(y_*tf.log(y))
#创建accuracy op和train_step op操作,用于在sess会话中进行运行
train_step=tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction=tf.equal(tf.argmax(y,1),tf.argmax(y_,1))
accuracy=tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
saver=tf.train.Saver(variables)
#初始化所有变量
init_op=tf.global_variables_initializer()
#启动Session会话
with tf.Session() as sess:
sess.run(init_op)
for i in range(200):
batch=data.train.next_batch(50)
if i%100==0:
#在tensorflow中,在一个With tf.Session() as sess底下执行一个op操作执行eval()函数等价于执行sess.run(op)操作
train_accuracy=accuracy.eval(feed_dict={x:batch[0],y_:batch[1],keep_prob:1.0})
print("step %d,training accuracy %g"%(i,train_accuracy))
sess.run(train_step,feed_dict={x:batch[0],y_:batch[1],keep_prob:0.5})
print(sess.run(accuracy,feed_dict={x:data.test.images,y_:data.test.labels,keep_prob:1.0}))
path=saver.save(sess,os.path.join(os.path.dirname(__file__),'data','convolutional.ckpt'),write_meta_graph=False,write_state=False)
print("Saved:",path)
The output is as follows:
By the way, I did not actually create the Saver checkpoint in my own run!
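If the checkpoint does get written, it can be loaded back later with the same Saver and variable list. A minimal restore sketch (assuming the graph, saver, and accuracy ops defined above, and that the path matches the one passed to saver.save):

# Minimal restore sketch (assumes the checkpoint above was actually written)
with tf.Session() as sess:
    saver.restore(sess,os.path.join(os.path.dirname(__file__),'data','convolutional.ckpt'))
    print(sess.run(accuracy,feed_dict={x:data.test.images,y_:data.test.labels,keep_prob:1.0}))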