A small convolutional neural network test

The training set is MNIST.
The test set is a collection of images downloaded from the internet.
Measured accuracy:
single-layer network ---------------------------------- 89%
two-layer network ------------------------------------- 93%
two-layer convolutional network ----------------------- 97%
To try it on your own image, call writeByMyself(url, sess); a short usage sketch follows.
url is the image path; the image must be 28*28 with a black background.
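
A minimal usage sketch (assuming the script below has already been run in the same process, so that sess, xx, y and yo exist; the file name is just a placeholder):

# "my_digit.bmp" is a hypothetical 28*28 black-background image of a digit
writeByMyself("my_digit.bmp", sess)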


# coding=utf8
#       98%    
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # suppress TF logging: 1 = info, 2 = warnings, 3 = errors
import tensorflow as tf
from PIL import Image
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
# Activation function: ReLU (tf.nn.relu after each convolution and in the fully connected layer)
# Loss function: -log(p), i.e. log loss on a softmax classification
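# For logits z and a one-hot label y, the loss used below is
#   p = softmax(z),  p_k = exp(z_k) / sum_j exp(z_j)
#   L = -sum_k y_k * log(p_k) = -log(p at the true class)
# tf.nn.softmax_cross_entropy_with_logits computes exactly this from the raw logits.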

#    single-image test -------------------------------------------------------
def writeByMyself(url,sess):
    wimg = ImgToArr(url).reshape(1, 784)
    #sess.run(tf.global_variables_initializer)
    wpred = sess.run(yo, feed_dict={xx: wimg, y: np.zeros([1, 10])})
    outofvalue = np.argmax(wpred, 1)
    print("Predicted digit:", outofvalue)

def ImgToArr(url):
    return np.array(Image.open(url).convert('L'))
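# Note: Image.convert('L') yields grayscale pixels in 0-255, while the MNIST training
# images fed below are scaled to 0-1; depending on your test images you may want to
# divide the array by 255.0 before feeding it.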

#  ------------  saving  -------------------------
# The Saver is created further down, right after the variables it has to track are defined;
# calling tf.train.Saver() here, before any tf.Variable exists, raises an error.


mnist=input_data.read_data_sets('data/',one_hot=True)
# mnist is a DataSet object, split into train, test, and validation sets
# trainimg = mnist.train.images
# #print(np.array(trainimg).shape)   # np.ndarray, can be converted with np.array and read
# trainlab = mnist.train.labels
# testimg = mnist.test.images
# testlab = mnist.test.labels

xx = tf.placeholder('float',[None,784])
x = tf.reshape(xx,[-1,28,28,1])
y = tf.placeholder('float',[None,10]) #---------------------------

W = {
    'Wc1':tf.Variable(tf.random_normal([3,3,1,10],stddev=0.1)),    # 3x3 conv, 1 input channel -> 10 feature maps
    'Wc2':tf.Variable(tf.random_normal([3,3,10,20],stddev=0.1)),   # 3x3 conv, 10 channels -> 20 feature maps
    'W1' : tf.Variable(tf.random_normal([7*7*20,100],stddev=0.1)), # fully connected: flattened 7*7*20 -> 100
    'Wout': tf.Variable(tf.random_normal([100,10],stddev=0.1))     # output layer: 100 -> 10 classes
}
b = {
    'bc1' : tf.Variable(tf.random_normal([10],stddev=0.1)),
    'bc2' : tf.Variable(tf.random_normal([20],stddev=0.1)),
    'b1' : tf.Variable(tf.random_normal([100],stddev=0.1)),
    'bout':tf.Variable(tf.random_normal([10],stddev=0.1))
}
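
# Shape flow through the network (this is where the 7*7*20 input size of W1 comes from):
#   28x28x1 -> conv 3x3 (SAME) -> 28x28x10 -> max pool 2x2 -> 14x14x10
#           -> conv 3x3 (SAME) -> 14x14x20 -> max pool 2x2 -> 7x7x20 -> flatten (980) -> 100 -> 10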

c1_layyer = tf.nn.conv2d(x,W['Wc1'],strides = [1,1,1,1],padding='SAME')
c1_layyer = tf.nn.relu(tf.nn.bias_add(c1_layyer,b['bc1']))    # key point: bias_add()
p1_layyer = tf.nn.max_pool(c1_layyer,ksize=[1,2,2,1],strides=[1,2,2,1],padding = 'SAME')

c2_layyer = tf.nn.conv2d(p1_layyer,W['Wc2'],strides = [1,1,1,1],padding='SAME')
c2_layyer = tf.nn.relu(tf.nn.bias_add(c2_layyer,b['bc2']))
p2_layyer = tf.nn.max_pool(c2_layyer,ksize=[1,2,2,1],strides=[1,2,2,1],padding = 'SAME')


y2 = tf.reshape(p2_layyer,[-1,W['W1'].get_shape().as_list()[0]])
y1 = tf.nn.relu(tf.nn.bias_add(tf.matmul(y2,W['W1']),b['b1']))
yo = (tf.nn.bias_add(tf.matmul(y1,W['Wout']),b['bout']))
# yo = tf.nn.softmax(tf.add(tf.matmul(hidden2,W['Wout']),b['bout']))


init = tf.global_variables_initializer()
# the Saver must be constructed after the variables above have been defined
saver = tf.train.Saver()

# with tf.Session() as sess:
#     sess.run(init)
#     feed_x,feed_y = mnist.train.next_batch(100)
#     print('-------------------------------------')
#     print(sess.run(yo,feed_dict={x:feed_x,y:feed_y}))
#     print(yo.shape,)#[?,10]
#     print(y.shape,) #[?,10]
#     #print(sess.run(yo2, feed_dict={x:feed_x,y:feed_y}))
#     print('-------------------------------------')

predict = tf.argmax(yo,1)
accuracy = tf.reduce_mean(tf.cast(tf.equal(predict,tf.argmax(y,1)),'float'))
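# e.g. predict = [3, 7] and argmax(y, 1) = [3, 1]  ->  equal = [1., 0.]  ->  accuracy = 0.5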

cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=yo,labels=y))
# cost = tf.reduce_mean(tf.reduce_sum(-y*tf.log(yo),reduction_indices=1))

optimizer = tf.train.GradientDescentOptimizer(0.1).minimize(cost)
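# softmax_cross_entropy_with_logits applies the softmax internally, so yo is kept as raw
# logits above and the explicit tf.nn.softmax line stays commented out.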

train_number = 31   # number of training epochs
batch_size = 100    # mini-batch size
display_step = 5    # how often (in epochs) to print progress


sess = tf.Session()
sess.run(init)
for step in range(train_number):
    print("step:",step)
    avg_cost = 0
    numbatch = int(mnist.train.num_examples/batch_size)  # number of batches per epoch
    for i in range(numbatch):
        batch_xs ,batch_ys = mnist.train.next_batch(batch_size)
        #print("batch_xs is :",batch_xs)    # image matrix
        #print("batch_ys is :", batch_ys)   # label matrix
        feeds = {xx: batch_xs, y: batch_ys}
        #print(type(batch_xs))   #numpy.ndarray
        #print("------------------")
        sess.run(optimizer,feed_dict=feeds)
        avg_cost += sess.run(cost,feed_dict=feeds)/numbatch   # accumulate the average cost over the epoch
    if step % display_step == 0:
        feeds_train = {xx:batch_xs,y:batch_ys}
        feed_test = {xx:mnist.test.images,y:mnist.test.labels}
        train_acc = sess.run(accuracy, feed_dict=feeds_train)
        test_acc = sess.run(accuracy, feed_dict=feed_test)
        # print(yo.shape)  #(?,10)
        # print(y.shape)   #(?,10)
        print("Epoch: %03d/%03d cost: %9f train_acc: %3f  test_acc: %3f"%(step,train_number,avg_cost,train_acc,test_acc))





mytestnum = 100         # number of test images per digit
alloftest = 0
trueoftest = 0

for i in range(10):
    labnow = np.zeros([1,10])
    #print(type(labnow))  # np.ndarray
    labnow[0][i]=1
    #print("lab is :",labnow)  # label array
    src ="C:/Users/Administrator/Desktop/trainimage/testimage/pic2/"+ str(i)+"/"
    for j in range(1,mytestnum+1):
        thissrc=src+str(j)+".bmp"
        test_x=ImgToArr(thissrc).reshape([1,784])
        test_y=labnow
        # print(testx)
        feed_test = {xx:test_x,y:test_y}
        #test_acc = sess.run(accr, feed_dict=feed_test)   # not needed here; comparing yo with the label directly is enough
        predval1 = sess.run(yo, feed_dict=feed_test)
        predval2 = np.argmax(predval1,1)
        # print("-------------")
        # print(yo.shape)      (?,10)
        # print(labnow.shape)  (1 ,10)
        reallab=np.argmax(labnow,1)
        print("Pred: ",predval2," True Value: ",reallab)
        # print("True Value: ", argmax(labnow, 1))
        #print?
        alloftest += 1
        if predval2[0]==reallab[0]:
            trueoftest+=1
print("true:",trueoftest," all:",alloftest," percision:",trueoftest/alloftest)
sess.close()