本章内容:
run、eval、placeholder
tf语句的变量、常量、占位符及简单运算
sess/with 会话的用法
基础运算法则
tf.cast类型转换、tf.reduce函数(矩阵求均值,最大值,求和)
FLAGS定义全局变量
tf.variable_scope、tf.name_scope的区别+reuse的使用
tf.squared_difference()、tf.where()、tf.control_dependencies()、tf.no_op()
run:tf习惯预先定义好各种参数,运行函数,然后通过会话session,用.run()来启动计算。
eval:也是一种启动计算的方式,与.run()类似;但是有限制条件,只能用于tf.Tensor类对象(即有输出的计算)。
run与eval的区别:
1.在计算一个张量时,eval相当于run的缩写,等价关系;
2.run可以同时计算多个张量,eval每次只能计算一个。
例1---等价:
#例1---等价:
# Example 1 -- equivalence: for a single tensor, t.eval() is shorthand
# for sess.run(t) once a default session is installed.
import tensorflow as tf

a_tensor = tf.cast(tf.constant([1, 2, 3]), dtype=tf.float32)
t = a_tensor * a_tensor  # element-wise square

sess = tf.Session()
with sess.as_default():
    # Both lines print the same result: [1. 4. 9.]
    print(t.eval())
    print(sess.run(t))
sess.close()  # a bare tf.Session() must be closed explicitly
例2---区别:
# Example 2 -- difference: eval() computes one tensor per call, while
# run() can fetch several tensors in a single call.
import tensorflow as tf

a_tensor = tf.cast(tf.constant([1, 2, 3]), dtype=tf.float32)
t = a_tensor * a_tensor

sess = tf.Session()
with sess.as_default():
    # Two tensors: eval() must be invoked separately on each one ...
    print(t.eval(), a_tensor.eval())
    # ... while run() accepts one collection of fetches: run((t, a_tensor)).
    print(sess.run((t, a_tensor)))
sess.close()  # release session resources
placeholder:相当于占位符,在构建graph模型时先占位,用以让系统分配内存,后面再喂入数据。
#例3---placeholder
# Example 3 -- placeholder: reserves a node in the graph at build time;
# concrete values are supplied at run time via feed_dict.
import tensorflow as tf
import numpy as np

a = tf.placeholder(tf.float32)
b = tf.placeholder(tf.float32)
output = tf.multiply(a, b)

with tf.Session() as sess:
    # feed_dict maps each placeholder to its runtime value -> [12.]
    print(sess.run(output, feed_dict={a: [3.], b: [4.]}))
tf语句的变量、常量、占位符及简单运算
# #例1:常量
# import tensorflow as tf
# a=tf.constant(10)
# b=tf.constant('ggg')
# with tf.Session() as sess:
# print(sess.run(a,b))
#例2:变量
# Example 2: variables must be explicitly initialized before they can
# be read inside a session.
import tensorflow as tf

x = tf.Variable(3)
y = tf.Variable(5)
z = x + y

# tf.initialize_all_variables() is deprecated; use the modern op.
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)        # run the initializer before reading any variable
    print(sess.run(z))    # -> 8
# #例3---placeholder
# import tensorflow as tf
# import numpy as np
# a=tf.placeholder(tf.float32)
# b=tf.placeholder(tf.float32)
# output=tf.multiply(a,b)
# with tf.Session() as sess:
# print(sess.run(output,feed_dict={a:[3.],b:[4.]})) #placeholder先预占位置,再通过feed_dict来喂入数据
sess/with 会话的用法
用with~as的形式,可以自动结束会话;
否则需要sess.close()来手动关闭
# Session usage: `with tf.Session() as sess:` closes the session
# automatically on exit; a bare tf.Session() must be closed manually.
import tensorflow as tf
import numpy as np

a = tf.placeholder(tf.float32)
b = tf.placeholder(tf.float32)
output = tf.multiply(a, b)

# Method 1: context manager (auto-close on block exit)
# with tf.Session() as sess:
#     print(sess.run(output, feed_dict={a: [3.], b: [4.]}))

# Method 2: manual lifetime management
sess = tf.Session()
print(sess.run(output, feed_dict={a: [3.], b: [4.]}))
sess.close()  # must be called explicitly to release resources
基础运算法则
# Basic arithmetic ops on scalar constants/variables/placeholders.
import tensorflow as tf

a = tf.constant(3.0)            # constant (fixed value)
b = tf.Variable(1.0)            # variable (needs initialization)
c = tf.placeholder(tf.float32)  # placeholder (fed at run time)

d1 = tf.add(a, b)        # a + b
d2 = tf.subtract(a, b)   # a - b
d3 = tf.multiply(a, b)   # a * b
d4 = tf.div(a, b)        # a / b (legacy op; tf.divide is the modern form)
e1 = tf.mod(a, b)        # a % b
e2 = tf.abs(c)           # |c|
e3 = tf.sqrt(c)          # square root of c
e4 = tf.pow(a, b)        # a ** b
f1 = tf.maximum(a, b)    # larger of a, b
f2 = tf.minimum(a, b)    # smaller of a, b

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())  # initialize b
    # e2/e3 depend on placeholder c and would need a feed_dict,
    # so only the placeholder-free ops are fetched here.
    result = sess.run([d1, d2, d3, d4, e1, e4, f1, f2])
    print(result)
tf.cast 转换数据类型
tf.reduce_mean(data,axis,keep_dims=False) 矩阵求均值,最大值,求和
data:输入数据
axis:不写默认计算全部数字的均值 0:代表计算每一列的均值 1:代表计算每一行均值
keep_dims:是否降维度,设置为True,输出的结果保持输入tensor的形状,设置为False,输出结果会降低维度
# import tensorflow as tf
# x = [[1,2,3],
# [3,4,5]]
# xx = tf.cast(x,tf.float32) #把数据转为float
# mean_all = tf.reduce_mean(xx, keep_dims=False)
# mean_0 = tf.reduce_mean(xx, axis=0, keep_dims=False)
# mean_1 = tf.reduce_mean(x, axis=1, keep_dims=False)
# with tf.Session() as sess:
# m_all,m_0,m_1 = sess.run([mean_all, mean_0, mean_1])
# print (m_all)
# print (m_0)
# print (m_1)
#延伸---求和、最大值
# Extension -- reduce_sum / reduce_max along axis 0 (per-column).
import tensorflow as tf

x = [[1, 2, 3],
     [3, 4, 5]]

# Renamed from `sum`/`max` to avoid shadowing the Python built-ins.
# NOTE: keep_dims is the legacy spelling; newer TF uses keepdims.
col_sum = tf.reduce_sum(x, axis=0, keep_dims=False)
col_max = tf.reduce_max(x, axis=0, keep_dims=False)

with tf.Session() as sess:
    results = sess.run([col_sum, col_max])
    print(results)  # [array([4, 6, 8]), array([3, 4, 5])]
flags:统一设置好参数,供后面使用;此方法好处是:可以在命令行控制改变这些参数。
例如:运行python test.py --str_name mj 则str_name=mj
# FLAGS: declare command-line-configurable globals, e.g.
#   python test.py --str_name mj   ->  FLAGS.str_name == 'mj'
import tensorflow as tf

FLAGS = tf.flags.FLAGS
# DEFINE_*(name, default_value, help_text)
tf.flags.DEFINE_string('str_name', 'def_v_1', "descrip1")
tf.flags.DEFINE_integer('int_name', 10, "descript2")
tf.flags.DEFINE_boolean('bool_name', False, "descript3")


def main(_):
    # main must accept one positional argument (argv), otherwise
    # tf.app.run raises TypeError; the parameter name is arbitrary.
    print(FLAGS.str_name)
    print(FLAGS.int_name)
    print(FLAGS.bool_name)


if __name__ == '__main__':
    tf.app.run()  # parses flags, then invokes main()
tf.variable_scope() 上下文变量管理
tf.Variable:直接生成新的变量
tf.get_variable:获取已经存在的变量,如果不存在,就新建一个
reuse参数的使用: true:直接获取已创建的变量 False:直接创建新的变量(若已经有变量,新建则会报错)
tf.variable_scope:可以让变量有相同的命名,包括tf.get_variable得到的变量,还有tf.Variable的变量
tf.name_scope:可以让变量有相同的命名,只是限于tf.Variable的变量,tf.get_variable不行
# # 1.False
# import tensorflow as tf
# with tf.variable_scope("foo",reuse=False):
# v=tf.get_variable("v",[1],initializer=tf.constant_initializer(1.0)) #直接创建一个新变量
# v1=tf.get_variable("v",[1]) #已经有"v"变量,此时会报错
# #2.True
# import tensorflow as tf
# with tf.variable_scope("foo"): #默认reuse=False
# v=tf.get_variable("v",[1],initializer=tf.constant_initializer(1.0))
# with tf.variable_scope("foo",reuse=True):
# v1=tf.get_variable("v",[1]) #此时reuse=True,表示获取"v"变量,即v1=v 若v不存在,则报错
# #3.tf.variable_scope---可以让变量有相同的命名(tf.get_variable和tf.Variable均可)
# import tensorflow as tf;
# import numpy as np;
# import matplotlib.pyplot as plt;
# with tf.variable_scope('V1'):
# a1 = tf.get_variable(name='a1', shape=[1], initializer=tf.constant_initializer(1))
# a2 = tf.Variable(tf.random_normal(shape=[2,3], mean=0, stddev=1), name='a2')
# with tf.variable_scope('V2'):
# a3 = tf.get_variable(name='a1', shape=[1], initializer=tf.constant_initializer(1))
# a4 = tf.Variable(tf.random_normal(shape=[2,3], mean=0, stddev=1), name='a2')
# with tf.Session() as sess:
# sess.run(tf.initialize_all_variables())
# print(a1.name)
# print(a2.name)
# print(a3.name)
# print(a4.name)
#4.tf.name_scope---只有tf.Variable可以同样命名,tf.get_variable不可以
# 4. tf.name_scope -- prefixes names created with tf.Variable, but NOT
# names from tf.get_variable (reusing 'a1' there would raise instead).
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

with tf.name_scope('V1'):
    # a1 = tf.get_variable(name='a1', ...)  # would collide across scopes
    a2 = tf.Variable(tf.random_normal(shape=[2, 3], mean=0, stddev=1), name='a2')
with tf.name_scope('V2'):
    # a3 = tf.get_variable(name='a1', ...)  # same: name_scope is ignored
    a4 = tf.Variable(tf.random_normal(shape=[2, 3], mean=0, stddev=1), name='a2')

with tf.Session() as sess:
    # tf.initialize_all_variables() is deprecated; use the modern op.
    sess.run(tf.global_variables_initializer())
    print(a2.name)  # V1/a2:0 -- scope prefix applied
    print(a4.name)  # V2/a2:0 -- same base name, different scope
1.tf.squared_difference():两个矩阵的每个元素先进行相减,然后对再求平方,输出一个同样大小的矩阵
2.tf.where(condition, x=None, y=None, name=None):
condition,x,y的维度相同,condition是bool型值,True/False
返回值是对应元素,condition中元素为True的元素替换为x中的元素,为False的元素替换为y中对应元素
x只负责对应替换True的元素,y只负责对应替换False的元素,x,y各有分工
3.tf.control_dependencies(control_inputs):返回一个控制依赖的上下文管理器
例如:with tf.control_dependencies([a, b]):
c = ....
d = ...
在执行完 a,b 操作之后,才能执行 c,d 操作。意思就是 c,d 操作依赖 a,b 操作
4.tf.no_op():什么都不做,仅作为占位符使用,控制执行边界。
# 1.tf.squared_difference()
# import tensorflow as tf
# import numpy as np
# x=tf.constant([[1,2],[3,4]])
# y=tf.constant([[2,3],[4,5]])
# a=tf.squared_difference(x,y)
# with tf.Session() as sess:
# sess.run(a)
# print(sess.run(a))
# # 2.tf.where(condition, x=None, y=None, name=None)
# import tensorflow as tf
# x = [[1,2,3],[4,5,6]]
# y = [[7,8,9],[10,11,12]]
# condition3 = [[True,False,False],
# [False,True,True]]
# condition4 = [[True,False,False],
# [True,True,False]]
# with tf.Session() as sess:
# print(sess.run(tf.where(condition3,x,y)))
# print(sess.run(tf.where(condition4,x,y)))
#3.tf.control_dependencies:在执行完a、b之后,才能执行c、d操作
#4.tf.no_op()
# 3/4. tf.control_dependencies + tf.no_op: ops created inside the
# block only execute after the listed ops (a, b) have run.
import tensorflow as tf

x = tf.constant(3)
y = tf.constant(2)
a = x + y
b = x - y

with tf.Session() as sess:
    with tf.control_dependencies([a, b]):
        c = x * y       # depends on a and b completing first
        d = tf.no_op()  # does nothing; serves as a control barrier
    print(sess.run(a))  # -> 5