Tensorflow

This article covers how to install TensorFlow (via pip and the C library) and walks through a number of commonly used APIs such as tf.InteractiveSession and tf.placeholder, touching on sessions, placeholders, and variables, as well as functions from modules such as tf.dtypes and tf.initializers.


Install

pip

pip install --upgrade pip
pip install tensorflow
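
To check that the pip package installed correctly (the version shown is only an example):
python -c "import tensorflow as tf; print(tf.__version__)"		# e.g. 1.15.0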

C

wget https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-cpu-linux-x86_64-1.15.0.tar.gz
sudo tar -C /usr/local -xzf libtensorflow-cpu-linux-x86_64-1.15.0.tar.gz
sudo ldconfig		# refresh the dynamic linker cache

tf

  • tf.InteractiveSession
    A TensorFlow Session for use in interactive contexts, such as a shell; it installs itself as the default session on construction.
__init__(
	target='',
	graph=None,
	config=None
)
# methods
run(
	fetches,
	feed_dict=None,				# a dictionary that maps graph elements to values
	options=None,
	run_metadata=None
)
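
A minimal usage sketch, assuming the signatures above are those of tf.InteractiveSession, which becomes the default session so tensors can be evaluated without passing a session explicitly:
import tensorflow as tf

sess = tf.InteractiveSession()
a = tf.constant(5.0)
b = tf.constant(6.0)
c = a * b
print(c.eval())		# 30.0, evaluated against the default (interactive) session
sess.close()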

  • tf.placeholder
    Inserts a placeholder for a tensor that will always be fed.
    Its value must be fed using the feed_dict optional argument to Session.run(), Tensor.eval(), or Operation.run().
    Returns: a tensor that may be used as a handle for feeding a value, but not evaluated directly.
tf.placeholder(
	dtype,			# the type of elements in the tensor to be fed
	shape=None,		# the shape of the tensor to be fed (optional). If the shape is not specified, you can feed a tensor of any shape
	name=None		# the name for the operation (optional)
)

# for example
import numpy as np
import tensorflow as tf

x = tf.placeholder(tf.float32, shape=(1024, 1024))
y = tf.matmul(x, x)

with tf.Session() as sess:
	print(sess.run(y))		# ERROR: will fail because x was not fed.
	
	rand_array = np.random.rand(1024, 1024)
	print(sess.run(y, feed_dict={x: rand_array}))		# will succeed
  • tf.Session
    A class for running TensorFlow operations.
    A Session object encapsulates the environment in which Operation objects are executed and Tensor objects are evaluated.
__init__(
	target='',
	graph=None,
	config=None
)
# for example:
# 1 using the 'close()' method
sess = tf.Session()
sess.run(...)
sess.close()
# 2 using the context manager
with tf.Session() as sess:
	sess.run(...)
# methods: run(...) and close(), as used in the examples above
  • tf.Variable
    A variable maintains state in the graph across calls to run(). Add a variable to the graph by constructing an instance of the class Variable (see the sketch after tf.zeros below).
  • tf.zeros
    Creates a tensor with all elements set to zero; returns a tensor of type dtype with shape shape and all elements set to zero.
tf.zeros(
	shape,
	dtype=tf.dtypes.float32,
	name=None
)
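
A minimal sketch combining tf.Variable and tf.zeros; variables must be initialized before they are read:
import tensorflow as tf

counter = tf.Variable(tf.zeros(shape=(), dtype=tf.float32), name='counter')	# scalar variable starting at 0
increment = tf.assign_add(counter, 1.0)						# op that adds 1 to the variable each time it runs

with tf.Session() as sess:
	sess.run(tf.global_variables_initializer())	# run the variable initializers first
	for _ in range(3):
		sess.run(increment)
	print(sess.run(counter))			# 3.0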

tf.dtypes

  • cast
    Casts a tensor to a new type.
tf.dtypes.cast(
	x,
	dtype,
	name=None
)
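For example, casting a float tensor to an integer type truncates the fractional part:
import tensorflow as tf

x = tf.constant([1.8, 2.2], dtype=tf.float32)
y = tf.dtypes.cast(x, tf.int32)		# -> [1 2]

with tf.Session() as sess:
	print(sess.run(y))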
  • DType
    Represents the type of the elements in a Tensor.
tf.float32: 32-bit single-precision floating-point

tf.initializers
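
A minimal sketch of how this module is typically used in TF 1.x, assuming tf.initializers.zeros (the TF 1.x alias of tf.zeros_initializer) and a variable name w chosen only for illustration:
import tensorflow as tf

w = tf.get_variable('w', shape=(2, 2), initializer=tf.initializers.zeros())	# assumption: tf.initializers.zeros as in TF 1.x

with tf.Session() as sess:
	sess.run(tf.global_variables_initializer())	# runs every variable's initializer op
	print(sess.run(w))				# [[0. 0.] [0. 0.]]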

tf.linalg

  • matmul
    Multiplies matrix a by matrix b, producing a * b.
tf.linalg.matmul(
	a,
	b,
	transpose_a=False,
	transpose_b=False,
	adjoint_a=False,
	adjoint_b=False,
	a_is_sparse=False,
	b_is_sparse=False,
	name=None
)
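
For example, multiplying a 2x3 matrix by a 3x2 matrix yields a 2x2 result:
import tensorflow as tf

a = tf.constant([[1., 2., 3.], [4., 5., 6.]])		# shape (2, 3)
b = tf.constant([[7., 8.], [9., 10.], [11., 12.]])	# shape (3, 2)
c = tf.linalg.matmul(a, b)				# shape (2, 2)

with tf.Session() as sess:
	print(sess.run(c))	# [[ 58.  64.] [139. 154.]]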

tf.math

  • argmax
    Returns the index with the largest value across axes of a tensor. (deprecated arguments)
tf.math.argmax(
	input,
	axis=None,
	name=None,
	dimension=None,				# the old (deprecated) name for axis
	output_type=tf.dtypes.int64
)
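For example, taking argmax along axis 1 returns, for each row, the column index of the largest value (the logits here are made up):
import tensorflow as tf

logits = tf.constant([[0.1, 0.9, 0.0],
		      [0.8, 0.1, 0.1]])
pred = tf.math.argmax(logits, axis=1)	# -> [1 0], dtype int64

with tf.Session() as sess:
	print(sess.run(pred))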
  • equal
    Returns the truth value of (x == y) element-wise, as a Tensor of type bool.
tf.math.equal(
	x,
	y,
	name=None
)
  • exp
    Computes exponential of x element-wise.
tf.math.exp(
	x,
	name=None
)
  • log
    Computes natural logarithm of x element-wise.
tf.math.log(
	x,
	name=None
)
  • reduce_mean
    Computes the mean of elements across dimensions of a tensor. (deprecated arguments)
tf.math.reduce_mean(
	input_tensor,
	axis=None,
	keepdims=None,				# if true, retains reduced dimensions with length 1
	name=None,
	reduction_indices=None,		# the old (deprecated) name for axis
	keep_dims=None				# deprecated alias for keepdims
)
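
These reductions are often combined with argmax, equal, and cast to measure classification accuracy; a sketch with made-up logits and labels:
import tensorflow as tf

logits = tf.constant([[2.0, 0.5], [0.1, 3.0], [1.5, 1.0]])	# hypothetical model outputs
labels = tf.constant([0, 1, 1])					# hypothetical true classes

correct = tf.math.equal(tf.math.argmax(logits, axis=1), tf.dtypes.cast(labels, tf.int64))
accuracy = tf.math.reduce_mean(tf.dtypes.cast(correct, tf.float32))

with tf.Session() as sess:
	print(sess.run(accuracy))	# 0.6666667, two of the three predictions match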

  • reduce_sum
    Computes the sum of elements across dimensions of a tensor. (deprecated arguments)
tf.math.reduce_sum(
	input_tensor,
	axis=None,
	keepdims=None,
	name=None,
	reduction_indices=None,
	keep_dims=None
)
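For example, with a 2x2 input the axis argument selects which dimension is summed out:
import tensorflow as tf

x = tf.constant([[1, 1], [2, 2]])
s_all = tf.math.reduce_sum(x)		# 6, sum of every element
s_col = tf.math.reduce_sum(x, axis=0)	# [3 3], summed down each column
s_row = tf.math.reduce_sum(x, axis=1)	# [2 4], summed across each row

with tf.Session() as sess:
	print(sess.run([s_all, s_col, s_row]))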

tf.nn

  • softmax
    Computes softmax activations. (deprecated arguments)
tf.nn.softmax(
	logits,
	axis=None,
	name=None,
	dim=None			# the old (deprecated) name for axis
)
softmax = tf.math.exp(logits) / tf.math.reduce_sum(tf.math.exp(logits), axis)
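
A quick numerical check of the identity above, using made-up logits:
import tensorflow as tf

logits = tf.constant([[1.0, 2.0, 3.0]])
via_nn = tf.nn.softmax(logits, axis=-1)
via_formula = tf.math.exp(logits) / tf.math.reduce_sum(tf.math.exp(logits), axis=-1, keepdims=True)

with tf.Session() as sess:
	print(sess.run(via_nn))		# [[0.09003057 0.24472848 0.66524094]]
	print(sess.run(via_formula))	# same values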

tf.train

  • tf.train.GradientDescentOptimizer
    Optimizer that implements the gradient descent algorithm.
__init__(
	learning_rate,
	use_locking=False,				# if True, use locks for update operations
	name='GradientDescent'
)
# methods
minimize(
	loss,
	global_step=None,						# optional Variable to increment by one after the variables have been updated
	var_list=None,							# optional list or tuple of Variable objects to update; defaults to the graph's trainable variables
	gate_gradients=GATE_OP,					# how to gate the computation of gradient terms; can be GATE_NONE, GATE_OP, or GATE_GRAPH
	aggregation_method=None,				# specifies the method used to combine gradient terms; valid values are defined in the class AggregationMethod
	colocate_gradients_with_ops=False,		# if True, try colocating gradients with the corresponding op
	name=None,
	grad_loss=None							# optional, a tensor holding the gradient computed for loss
)
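
A minimal sketch of the optimizer in use, assuming the __init__ above belongs to tf.train.GradientDescentOptimizer, minimizing a toy quadratic loss:
import tensorflow as tf

w = tf.Variable(5.0)
loss = tf.square(w - 3.0)	# minimized at w == 3
train_op = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(loss)

with tf.Session() as sess:
	sess.run(tf.global_variables_initializer())
	for _ in range(100):
		sess.run(train_op)
	print(sess.run(w))	# close to 3.0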
  • tf.train.Saver
    Saves and restores variables.
# create a saver
saver = tf.train.Saver(...variables...)
# launch the graph and train, saving the model every 1000 steps
sess = tf.Session()
for step in range(1000000):
	sess.run(...training_op...)
	if step % 1000 == 0:
		saver.save(sess, 'my-model', global_step=step)

__init__(
	var_list=None,
	reshape=False,
	sharded=False,
	max_to_keep=5,		# Maximum number of recent checkpoints to keep. Defaults to 5
	keep_checkpoint_every_n_hours=10000.0,
	name=None,
	restore_sequentially=False,
	saver_def=None,
	builder=None,
	defer_build=False,
	allow_empty=False,
	write_version=tf.train.SaverDef.V2,
	pad_step_number=False,
	save_relative_paths=False,
	filename=None
)

# methods
save(
	sess,
	save_path,
	global_step=None,		# if provided the global step number is appended to save_path to create the checkpoint filenames
	latest_filename=None,
	meta_graph_suffix='meta',
	write_meta_graph=True,
	write_state=True,
	strip_default_attrs=False
)
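
Restoring is the mirror image of saving; restore() loads the saved variables back into a session (the checkpoint name here is only an example matching the save loop above, and the graph must already define the same variables):
saver = tf.train.Saver()
with tf.Session() as sess:
	saver.restore(sess, 'my-model-999000')	# loads the saved variables; no initializer needs to be run
	# the graph's variables now hold the checkpointed values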