# tensorflow 1

# -*- coding: utf-8 -*-

import numpy as np
import tensorflow as tf
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
import matplotlib.pylab as plt
from tensorflow.examples.tutorials.mnist import input_data

# 用Tensorflow计算 a = (b + c) * (c + 2)
def test01():
	"""Compute a = (b + c) * (c + 2) with TensorFlow 1.x graph ops."""
	# constant node => 2.0
	const = tf.constant(2.0, name='const')

	# graph variables b and c
	b = tf.Variable(2.0, name='b')
	c = tf.Variable(1.0, dtype=tf.float32, name='c')

	# build the operations: a = (b + c) * (c + const)
	sum_bc = tf.add(b, c, name='d')
	sum_c2 = tf.add(c, const, name='e')
	a = tf.multiply(sum_bc, sum_c2, name='a')

	# variable-initializer op must run before any variable is read
	init_op = tf.global_variables_initializer()

	with tf.Session() as sess:
		sess.run(init_op)
		print("Variable a is {}".format(sess.run(a)))


def test02():
	"""Same graph as test01, but b is a placeholder fed at run time."""
	# constant node => 2.0
	const = tf.constant(2.0, name='const')

	# b is supplied through feed_dict; c is a graph variable
	b = tf.placeholder(tf.float32, [None, 1], name='b')
	c = tf.Variable(1.0, dtype=tf.float32, name='c')

	# operations: a = (b + c) * (c + const)
	sum_bc = tf.add(b, c, name='d')
	sum_c2 = tf.add(c, const, name='e')
	a = tf.multiply(sum_bc, sum_c2, name='a')

	# variable-initializer op
	init_op = tf.global_variables_initializer()

	with tf.Session() as sess:
		sess.run(init_op)
		# feed b with the column vector [0, 1, ..., 9]
		b_values = np.arange(10).reshape(-1, 1)
		a_out = sess.run(a, feed_dict={b: b_values})
		print("Variable a is {}".format(a_out))


def test03():
	"""Train a one-hidden-layer (784 -> 300 -> 10) MLP on MNIST."""
	mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
	# hyper-parameters
	learning_rate = 0.5
	epochs = 10
	batch_size = 100

	# placeholders
	# input images are 28 x 28 pixels = 784 features per sample
	x = tf.placeholder(tf.float32, [None, 784])
	# output is the one-hot encoding of digits 0-9
	y = tf.placeholder(tf.float32, [None, 10])
	
	# hidden layer => w, b
	W1 = tf.Variable(tf.random_normal([784, 300], stddev=0.03), name='W1')
	b1 = tf.Variable(tf.random_normal([300]), name='b1')
	# output layer => w, b
	W2 = tf.Variable(tf.random_normal([300, 10], stddev=0.03), name='W2')
	b2 = tf.Variable(tf.random_normal([10]), name='b2')
	
	# hidden layer: ReLU(x @ W1 + b1)
	hidden_out = tf.add(tf.matmul(x, W1), b1)
	hidden_out = tf.nn.relu(hidden_out)
	
	# network output: softmax over the 10 classes
	out = tf.add(tf.matmul(hidden_out, W2), b2)
	y_ = tf.nn.softmax(out)
	
	# tf.clip_by_value(A, min, max) clamps every element of A into
	# [min, max]; this keeps tf.log away from log(0) = -inf below.
	y_clipped = tf.clip_by_value(y_, 1e-10, 0.9999999)
	cross_entropy = -tf.reduce_mean(tf.reduce_sum(y * tf.log(y_clipped) + (1 - y) * tf.log(1 - y_clipped), axis=1))
	
	# create the optimizer and set the optimization target
	optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cross_entropy)
	
	# init operator
	init_op = tf.global_variables_initializer()

	# accuracy node
	# argmax axis: 0 = per column, 1 = per row
	# correct_prediction is a length-m tensor of True/False per-sample hits
	correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
	accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

	# create the session
	with tf.Session() as sess:
		# initialize variables
		sess.run(init_op)
		total_batch = int(len(mnist.train.labels) / batch_size)
		for epoch in range(epochs):
			avg_cost = 0
			for i in range(total_batch):
				batch_x, batch_y = mnist.train.next_batch(batch_size=batch_size)
				_, c = sess.run([optimizer, cross_entropy], feed_dict={x: batch_x, y: batch_y})
				avg_cost += c / total_batch
			print("Epoch:", (epoch + 1), "cost = ", "{:.3f}".format(avg_cost))
			
		# evaluate on the test set
		print(sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels}))


# 线性回归
def test04():
	#create data
	x_data=np.random.rand(100).astype(np.float32)
	y_data=x_data*0.1+0.3

	#create tensorflow structure
	Weights=tf.Variable(tf.random_uniform([1],-1.0,1.0)) #一维,范围[-1,1]
	biases=tf.Variable(tf.zeros([1]))

	y=Weights*x_data+biases

	# 均方误差
	loss=tf.reduce_mean(tf.square(y-y_data))

	#建立优化器,减小误差,提高参数准确度,每次迭代都会优化
	optimizer=tf.train.GradientDescentOptimizer(0.5) #学习效率<1
	train=optimizer.minimize(loss)

	#初始化变量
	init=tf.global_variables_initializer()

	with tf.Session() as sess:
		sess.run(init)
		#train
		for step in range(201):
			sess.run(train)
			if step%20==0:
				print(step,sess.run(Weights),sess.run(biases))


def test05():
	"""Multiply a 1x2 by a 2x1 constant matrix (like np.dot)."""
	matrix1 = tf.constant([[3, 3]])
	matrix2 = tf.constant([[2], [2]])

	# matrix product, equivalent to np.dot(m1, m2)
	product = tf.matmul(matrix1, matrix2)

	# A plain sess = tf.Session(); sess.run(product); sess.close() works too;
	# the `with` form closes the session automatically.
	with tf.Session() as sess:
		print(sess.run(product))


def test06():
	"""Increment a counter variable three times via tf.assign."""
	state = tf.Variable(0, name='counter')
	# variable + constant => variable
	one = tf.constant(1)
	new_value = tf.add(state, one)

	# op that overwrites state with new_value when run
	update = tf.assign(state, new_value)

	# variables must be initialized before use
	init = tf.global_variables_initializer()

	with tf.Session() as sess:
		sess.run(init)
		for _ in range(3):
			sess.run(update)
			print(sess.run(state))


# placeholder
def test07():
	"""Feed two placeholders and multiply them at run time."""
	# give a dtype; most TF 1.x ops handle float32
	input1 = tf.placeholder(tf.float32)
	input2 = tf.placeholder(tf.float32)

	# TF 1.0 renames: tf.mul -> tf.multiply, tf.sub -> tf.subtract,
	# tf.neg -> tf.negative
	output = tf.multiply(input1, input2)

	with tf.Session() as sess:
		# placeholder values arrive through feed_dict at sess.run() time
		for feed in ({input1: [7.], input2: [2.]},
		             {input1: [7., 2], input2: [[2.], [2]]}):
			print(sess.run(output, feed_dict=feed))


def add_layer(inputs, in_size, out_size, activation_function=None):
    """Add a fully-connected layer: activation(inputs @ W + b)."""
    # weight matrix of shape [in_size, out_size], normally distributed
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))
    # biases start slightly positive (0.1) rather than at zero
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    # pre-activation value W*x + b
    Wx_plus_b = tf.matmul(inputs, Weights) + biases
    # None means a linear (identity-activated) layer
    if activation_function is None:
        return Wx_plus_b
    return activation_function(Wx_plus_b)


def test08():
	"""Fit y = x^2 - 0.5 with a 1-10-1 network, printing the loss."""
	# 300 column-vector samples evenly spaced over (-1, 1)
	x_data = np.linspace(-1, 1, 300)[:, np.newaxis]

	# Gaussian noise (mean 0, std 0.05) matching x_data's shape
	noise = np.random.normal(0, 0.05, x_data.shape)
	y_data = np.square(x_data) - 0.5 + noise

	xs = tf.placeholder(tf.float32, [None, 1])
	ys = tf.placeholder(tf.float32, [None, 1])

	# network: ReLU hidden layer (1 -> 10), linear output layer (10 -> 1)
	hidden = add_layer(xs, 1, 10, activation_function=tf.nn.relu)
	prediction = add_layer(hidden, 10, 1, activation_function=None)

	# loss: per-sample squared error summed across the row, then averaged
	loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction), reduction_indices=[1]))

	# gradient descent with learning rate 0.08
	train_step = tf.train.GradientDescentOptimizer(0.08).minimize(loss)

	init = tf.global_variables_initializer()

	with tf.Session() as sess:
		sess.run(init)
		feed = {xs: x_data, ys: y_data}
		for i in range(5000):
			sess.run(train_step, feed_dict=feed)
			if i % 100 == 0:
				print(sess.run(loss, feed_dict=feed))


def test09():
	"""定义数据形式"""
	# (-1,1)之间,有300个单位,后面的是维度,x_data是有300行(300个例子)
	x_data=np.linspace(-1,1,300)[:,np.newaxis]
	
	# 加噪声,均值为0,方差为0.05,大小和x_data一样
	noise=np.random.normal(0,0.05,x_data.shape)
	y_data=np.square(x_data)-0.5+noise

	xs=tf.placeholder(tf.float32,[None,1])
	ys=tf.placeholder(tf.float32,[None,1])

	"""建立网络"""
	#定义隐藏层,输入1个节点,输出10个节点
	l1=add_layer(xs,1,10,activation_function=tf.nn.relu)
	#定义输出层
	prediction=add_layer(l1,10,1,activation_function=None)

	"""预测"""
	#损失函数,算出的是每个例子的平方,要求和(reduction_indices=[1],按行求和),再求均值
	loss=tf.reduce_mean(tf.reduce_sum(tf.square(ys-prediction),reduction_indices=[1]))

	"""训练"""
	#优化算法,minimize(loss)以0.1的学习率对loss进行减小
	train_step=tf.train.GradientDescentOptimizer(0.08).minimize(loss)

	init=tf.global_variables_initializer()

	with tf.Session() as sess:
		sess.run(init)
		
		fig=plt.figure()
		#连续性的画图
		ax=fig.add_subplot(1,1,1)
		ax.scatter(x_data,y_data)
		# 不暂停
		plt.ion()
		# plt.show()绘制一次就会暂停
		# plt.show() #也可以用plt.show(block=False)来取消暂停,但是python3.5以后提供了ion的功能,更方便
			
		for i in range(5000):
			sess.run(train_step,feed_dict={xs:x_data,ys:y_data})
			if i%100==0:
				print(sess.run(loss,feed_dict={xs:x_data,ys:y_data}))
				try:
					# 画出一条后抹除掉,去除第一个线段,但是只有一个,也就是抹除当前的线段
					ax.lines.remove(lines[0])
				except Exception:
					pass
				
				prediction_value=sess.run(prediction,feed_dict={xs:x_data})
				lines=ax.plot(x_data,prediction_value,'r-',lw=5) #lw线宽

				# 暂停0.1s
				plt.pause(0.1)	
				

# handwritten digits
def test10():
	"""Softmax regression on MNIST (784 -> 10), printing test accuracy.

	Fixes over the original: uses tf.argmax (tf.arg_max is deprecated and
	removed in later releases), and builds the accuracy sub-graph once
	instead of adding new tf.equal/tf.reduce_mean nodes every 100 steps,
	which grew the graph without bound.
	"""
	mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

	# placeholders: any number of samples, each a flattened 28*28 image
	xs = tf.placeholder(tf.float32, [None, 784])
	ys = tf.placeholder(tf.float32, [None, 10])

	# single softmax output layer
	prediction = add_layer(xs, 784, 10, activation_function=tf.nn.softmax)

	# cross-entropy between prediction and the one-hot labels
	cross_entropy = tf.reduce_mean(-tf.reduce_sum(ys * tf.log(prediction), reduction_indices=[1]))
	train_step = tf.train.GradientDescentOptimizer(0.3).minimize(cross_entropy)

	# accuracy sub-graph, built once outside the training loop
	correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(ys, 1))
	accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

	init = tf.global_variables_initializer()

	with tf.Session() as sess:
		sess.run(init)
		for i in range(3000):
			batch_xs, batch_ys = mnist.train.next_batch(100)
			sess.run(train_step, feed_dict={xs: batch_xs, ys: batch_ys})

			if i % 100 == 0:
				result = sess.run(accuracy, feed_dict={xs: mnist.test.images, ys: mnist.test.labels})
				print(i, " accuracy:", result)


def add_layer_EX(inputs,in_size,out_size,layer_name,activation_function=None):
    """Fully-connected layer that also records its outputs as a histogram summary."""
    # weight matrix [in_size, out_size] drawn from a normal distribution
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))
    # biases initialized to 0.1 (a non-zero start is recommended)
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    # pre-activation value W*x + b
    Wx_plus_b = tf.matmul(inputs, Weights) + biases

    if activation_function is None:
        # no activation => linear layer
        outputs = Wx_plus_b
    else:
        outputs = activation_function(Wx_plus_b)

    # record the layer's output distribution for TensorBoard
    tf.summary.histogram(layer_name+'/outputs',outputs)

    return outputs


# dropout not applied:
def test11():
	"""Two-layer net on sklearn digits, logging TensorBoard summaries.

	Fixes over the original: uses tf.argmax (tf.arg_max is deprecated and
	removed in later releases) and builds the accuracy sub-graph once
	instead of re-creating tf.equal/tf.reduce_mean nodes every 50 steps,
	which grew the graph without bound.
	"""
	# load the 8x8 digit images: 64 features per sample
	digits=load_digits()
	X=digits.data
	print(type(X))
	print("X.shape:", X.shape)

	# one-hot encode the labels: digit d => 1 at position d, 0 elsewhere
	y=digits.target
	print(type(y))
	print("y.shape:", y.shape)
	y=LabelBinarizer().fit_transform(y)
	print(type(y))
	print("y.shape:", y.shape)

	# hold out ~20% of the samples for testing
	X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=round(X.shape[0]*0.2))
	print("X_train.shape:", X_train.shape)
	print("y_train.shape:", y_train.shape)
	print("X_test.shape:", X_test.shape)
	print("y_test.shape:", y_test.shape)

	# NOTE(review): keep_prob is fed below but no tf.nn.dropout op consumes
	# it, so this run performs NO dropout (the function name says as much).
	keep_prob=tf.placeholder(tf.float32)

	# define placeholders for inputs to the network
	xs=tf.placeholder(tf.float32,[None,64])
	ys=tf.placeholder(tf.float32,[None,10])

	# hidden layer widened to 100 units to make overfitting easier to see
	l1=add_layer_EX(xs,64,100,'l1',activation_function=tf.nn.tanh)
	prediction=add_layer_EX(l1,100,10,'l2',activation_function=tf.nn.softmax)

	# cross-entropy loss, recorded as a scalar summary
	cross_entropy=tf.reduce_mean(-tf.reduce_sum(ys*tf.log(prediction),reduction_indices=[1]))
	tf.summary.scalar('loss',cross_entropy)
	train_step=tf.train.GradientDescentOptimizer(0.4).minimize(cross_entropy)

	# accuracy sub-graph, built once outside the training loop
	correct_prediction=tf.equal(tf.argmax(prediction,1),tf.argmax(ys,1))
	accuracy=tf.reduce_mean(tf.cast(correct_prediction,tf.float32))

	init=tf.global_variables_initializer()

	with tf.Session() as sess:
		sess.run(init)
		# one op that runs all summaries, so each need not be run by hand
		merged=tf.summary.merge_all()
		# summary writers; "train"/"test" are subfolders under logs/
		train_writer=tf.summary.FileWriter("logs/train",sess.graph)
		test_writer=tf.summary.FileWriter("logs/test",sess.graph)
		for i in range(2500):
			# keep_prob=0.4 would keep 40% of activations if dropout were wired in
			sess.run(train_step,feed_dict={xs:X_train,ys:y_train,keep_prob:0.4})
			if i%50==0:
				# record loss/accuracy on the held-out set
				result=sess.run(accuracy,feed_dict={xs:X_test,ys:y_test})
				print("epoch ", i, ",accuracy:", result)

				train_result=sess.run(merged,feed_dict={xs:X_train,ys:y_train})
				test_result = sess.run(merged, feed_dict={xs: X_test, ys: y_test})
				train_writer.add_summary(train_result,i)
				test_writer.add_summary(test_result,i)

		
def main():
	"""Entry point: uncomment exactly one demo below to run it."""
	#test01()
	#test02()
	#test03()
	#test04()
	#test05()
	#test06()
	#test07()
	#test08()
	#test09()
	#test10()
	test11()

# Run the demos only when executed as a script, not on import.
if __name__ == '__main__':
	main()

 
