TensorFlow 2 Basic Operations (Part 2)

This article introduces and demonstrates core TensorFlow features: tensor sorting, padding and tiling, clipping, and higher-order operations. Examples show how to use functions such as softmax, argmax, pad, tile, and relu, and explain the usage of where, scatter_nd, and meshgrid.


import tensorflow as tf

1 Tensor Sorting Example

output = tf.random.normal([10,6])  # draw samples from a standard normal distribution
output
<tf.Tensor: id=5, shape=(10, 6), dtype=float32, numpy=
array([[ 0.76310456, -1.1337202 ,  1.0353428 ,  1.0621719 , -1.3435235 ,
        -0.8340364 ],
       [-0.47014472, -0.2422621 ,  0.2518393 , -0.23825932, -1.0721133 ,
         0.24922352],
       [ 0.44158378,  0.6831124 , -0.54429495,  1.1736444 , -0.26429346,
         0.7027973 ],
       [-0.7662839 ,  1.4855492 ,  0.42412958, -0.29403406,  1.043192  ,
         1.0433921 ],
       [ 0.44711986,  0.7275903 ,  0.31700605,  0.2726328 , -0.16104753,
        -1.1572416 ],
       [-0.90731895, -0.2063934 ,  0.952755  ,  0.6108949 ,  2.9908571 ,
        -1.7417939 ],
       [-0.17370664, -0.03342665,  0.5075081 ,  1.3842217 ,  0.4897528 ,
        -0.37641558],
       [-1.0750929 , -0.03694849,  1.319619  ,  1.5328622 ,  1.5641279 ,
        -0.2661516 ],
       [ 0.4573638 ,  0.8733606 ,  0.0470969 ,  1.0739747 , -0.8262296 ,
        -0.59460646],
       [ 1.420134  ,  1.216806  ,  0.7350984 , -2.035839  ,  0.675249  ,
         0.5640526 ]], dtype=float32)>
output = tf.math.softmax(output, axis = 1)  # normalize so the six class probabilities sum to 1
output
<tf.Tensor: id=7, shape=(10, 6), dtype=float32, numpy=
array([[0.24179898, 0.03628056, 0.31745782, 0.32609025, 0.02941423,
        0.04895815],
       [0.12230478, 0.15360723, 0.25176606, 0.15422331, 0.0669903 ,
        0.25110835],
       [0.15342492, 0.19533965, 0.05724456, 0.31902578, 0.07574209,
        0.199223  ],
       [0.03621496, 0.34422845, 0.11909077, 0.05807425, 0.22117367,
        0.22121793],
       [0.20889905, 0.27653062, 0.18341242, 0.17545176, 0.11371368,
        0.04199244],
       [0.01568617, 0.03161731, 0.10077128, 0.07159271, 0.7735231 ,
        0.00680941],
       [0.08595599, 0.09890062, 0.16987287, 0.4082027 , 0.16688333,
        0.07018443],
       [0.02241746, 0.06330627, 0.24580826, 0.30423334, 0.3138957 ,
        0.05033905],
       [0.17673211, 0.26790482, 0.117257  , 0.3274207 , 0.04896186,
        0.0617235 ],
       [0.30757383, 0.2509835 , 0.15503944, 0.00970598, 0.14603263,
        0.1306646 ]], dtype=float32)>
target = tf.random.uniform([10],maxval = 6,dtype = tf.int32)  # generate random labels
target
<tf.Tensor: id=12, shape=(10,), dtype=int32, numpy=array([4, 5, 5, 5, 4, 1, 3, 3, 4, 3])>
print('prob:',output.numpy())
prob: [[0.24179898 0.03628056 0.31745782 0.32609025 0.02941423 0.04895815]
 [0.12230478 0.15360723 0.25176606 0.15422331 0.0669903  0.25110835]
 [0.15342492 0.19533965 0.05724456 0.31902578 0.07574209 0.199223  ]
 [0.03621496 0.34422845 0.11909077 0.05807425 0.22117367 0.22121793]
 [0.20889905 0.27653062 0.18341242 0.17545176 0.11371368 0.04199244]
 [0.01568617 0.03161731 0.10077128 0.07159271 0.7735231  0.00680941]
 [0.08595599 0.09890062 0.16987287 0.4082027  0.16688333 0.07018443]
 [0.02241746 0.06330627 0.24580826 0.30423334 0.3138957  0.05033905]
 [0.17673211 0.26790482 0.117257   0.3274207  0.04896186 0.0617235 ]
 [0.30757383 0.2509835  0.15503944 0.00970598 0.14603263 0.1306646 ]]
pred = tf.argmax(output,axis = 1)
print('pred:',pred.numpy())
pred: [3 2 3 1 1 4 3 4 3 0]
print('label:',target.numpy())
label: [4 5 5 5 4 1 3 3 4 3]
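
As a quick sanity check before the top-k computation below, top-1 accuracy can be computed directly by comparing the argmax predictions against the labels (a minimal sketch, assuming the output and target tensors built above):

pred = tf.argmax(output,axis = 1)                    # predicted class per sample, int64
correct = tf.cast(tf.equal(pred,tf.cast(target,pred.dtype)),tf.float32)
top1_acc = tf.reduce_mean(correct)*100.0             # percentage over the batch
print('top-1 accuracy:',top1_acc.numpy())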

Computing top-k accuracy

topk = (1,2,3,4,5,6)
maxk = max(topk)
batch_size = target.shape[0]
print(batch_size)
10
pred = tf.math.top_k(output,maxk).indices
print(pred.numpy())
[[3 2 0 5 1 4]
 [2 5 3 1 0 4]
 [3 5 1 0 4 2]
 [1 5 4 2 3 0]
 [1 0 2 3 4 5]
 [4 2 3 1 0 5]
 [3 2 4 1 0 5]
 [4 3 2 1 5 0]
 [3 1 0 2 5 4]
 [0 1 2 4 5 3]]
pred = tf.transpose(pred,perm = [1,0])
target_ = tf.broadcast_to(target, pred.shape)
print(target_.numpy())
[[4 5 5 5 4 1 3 3 4 3]
 [4 5 5 5 4 1 3 3 4 3]
 [4 5 5 5 4 1 3 3 4 3]
 [4 5 5 5 4 1 3 3 4 3]
 [4 5 5 5 4 1 3 3 4 3]
 [4 5 5 5 4 1 3 3 4 3]]
correct = tf.equal(pred,target_)
print(correct.numpy())
[[False False False False False False  True False False False]
 [False  True  True  True False False False  True False False]
 [False False False False False False False False False False]
 [False False False False False  True False False False False]
 [False False False False  True False False False False False]
 [ True False False False False False False False  True  True]]
res = []
print(correct[:1])
# In tf.reshape(), -1 means we don't have to specify that dimension's size
# ourselves; the function computes it automatically. Only one -1 may appear
# in the shape list (multiple -1s would make the shape ambiguous).
correct_1 = tf.cast(tf.reshape(correct[:1],[-1]),dtype = tf.float32) 
print(correct_1.numpy())
correct_1 = tf.reduce_sum(correct_1)
print(correct_1.numpy())
acc = float(correct_1*(100.0/batch_size))
print(acc)
res.append(acc)
print(res)
tf.Tensor([[False False False False False False  True False False False]], shape=(1, 10), dtype=bool)
[0. 0. 0. 0. 0. 0. 1. 0. 0. 0.]
1.0
10.0
[10.0]
res = []
for k in topk:
    correct_k = tf.cast(tf.reshape(correct[:k],[-1]),dtype = tf.float32) 
    correct_k = tf.reduce_sum(correct_k)
    acc = float(correct_k*(100.0/batch_size))
    res.append(acc)
print(res)
[10.0, 50.0, 50.0, 60.0, 70.0, 100.0]
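
The steps above can be folded into a reusable helper. This is a minimal sketch (the function name accuracy is my own, not a TensorFlow API), assuming output is [batch, classes] probabilities and target is [batch] integer labels:

def accuracy(output,target,topk = (1,)):
    maxk = max(topk)
    batch_size = target.shape[0]
    pred = tf.math.top_k(output,maxk).indices        # [batch, maxk], best guesses first
    pred = tf.transpose(pred,perm = [1,0])           # [maxk, batch]
    target_ = tf.broadcast_to(target,pred.shape)
    correct = tf.equal(pred,target_)                 # True where the k-th guess hits
    res = []
    for k in topk:
        correct_k = tf.cast(tf.reshape(correct[:k],[-1]),dtype = tf.float32)
        res.append(float(tf.reduce_sum(correct_k)*(100.0/batch_size)))
    return res

print(accuracy(output,target,topk = (1,2,3,4,5,6)))  # e.g. [10.0, 50.0, ..., 100.0]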

2 Padding and Tiling

Padding: pad
(frequently used when padding images)

a = tf.reshape(tf.range(9),[3,3])
b = tf.pad(a,[[0,0],[0,0]])
c = tf.pad(a,[[1,1],[1,1]])
print('a\n',a.numpy())
print('b\n',b.numpy())
print('c\n',c.numpy())
a
 [[0 1 2]
 [3 4 5]
 [6 7 8]]
b
 [[0 1 2]
 [3 4 5]
 [6 7 8]]
c
 [[0 0 0 0 0]
 [0 0 1 2 0]
 [0 3 4 5 0]
 [0 6 7 8 0]
 [0 0 0 0 0]]
a = tf.random.normal([4,28,28,3])
b = tf.pad(a,[[0,0],[2,2],[2,2],[0,0]])
print(b.shape)
(4, 32, 32, 3)
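
tf.pad fills with zeros by default; the constant_values argument (a small sketch of an option not used above) pads with another value:

a = tf.reshape(tf.range(9),[3,3])
b = tf.pad(a,[[1,1],[1,1]],constant_values = -1)     # one ring of -1 instead of 0
print(b.numpy())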

Tiling: tile
(broadcast_to also replicates data, but without copying it in real memory)

a = [[0,1,2],[3,4,5],[5,3,3]]
b = tf.tile(a,[1,2])
c = tf.tile(a,[2,1])
d = tf.tile(a,[2,2])
print(a)
print(b)
print(c)
print(d)
[[0, 1, 2], [3, 4, 5], [5, 3, 3]]
tf.Tensor(
[[0 1 2 0 1 2]
 [3 4 5 3 4 5]
 [5 3 3 5 3 3]], shape=(3, 6), dtype=int32)
tf.Tensor(
[[0 1 2]
 [3 4 5]
 [5 3 3]
 [0 1 2]
 [3 4 5]
 [5 3 3]], shape=(6, 3), dtype=int32)
tf.Tensor(
[[0 1 2 0 1 2]
 [3 4 5 3 4 5]
 [5 3 3 5 3 3]
 [0 1 2 0 1 2]
 [3 4 5 3 4 5]
 [5 3 3 5 3 3]], shape=(6, 6), dtype=int32)
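
To illustrate the note above: when the repeated dimension has size 1, broadcast_to yields the same values as tile without physically copying the data (a minimal sketch):

a = tf.constant([[0,1,2]])                           # shape [1, 3]
t = tf.tile(a,[3,1])                                 # real copies -> shape [3, 3]
b = tf.broadcast_to(a,[3,3])                         # virtual expansion, same values
print(tf.reduce_all(tf.equal(t,b)).numpy())          # True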

3 Tensor Clipping

a = tf.range(10)
b = tf.maximum(a,2)                                  # clamp from below at 2
c = tf.minimum(a,8)                                  # clamp from above at 8
d = tf.clip_by_value(a,2,8)                          # both bounds in one call
print(a)
print(b)
print(c)
print(d)
tf.Tensor([0 1 2 3 4 5 6 7 8 9], shape=(10,), dtype=int32)
tf.Tensor([2 2 2 3 4 5 6 7 8 9], shape=(10,), dtype=int32)
tf.Tensor([0 1 2 3 4 5 6 7 8 8], shape=(10,), dtype=int32)
tf.Tensor([2 2 2 3 4 5 6 7 8 8], shape=(10,), dtype=int32)
a = a-5
d = tf.nn.relu(a)
e = tf.maximum(a,0)
print(a)
print(d)
print(e)
tf.Tensor([-5 -4 -3 -2 -1  0  1  2  3  4], shape=(10,), dtype=int32)
tf.Tensor([0 0 0 0 0 0 1 2 3 4], shape=(10,), dtype=int32)
tf.Tensor([0 0 0 0 0 0 1 2 3 4], shape=(10,), dtype=int32)

Proportional scaling: clip_by_norm rescales the tensor so its norm is capped while the vector's direction stays unchanged

a = tf.random.normal([2,2],mean = 10)
b = tf.norm(a)
c = tf.clip_by_norm(a,15)
d = tf.norm(c)
print(a)
print(b)
print(c)
print(d)
tf.Tensor(
[[ 9.901226 11.381033]
 [10.905721  9.152639]], shape=(2, 2), dtype=float32)
tf.Tensor(20.742897, shape=(), dtype=float32)
tf.Tensor(
[[7.1599636 8.23007  ]
 [7.8863535 6.618632 ]], shape=(2, 2), dtype=float32)
tf.Tensor(14.999999, shape=(), dtype=float32)
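
When the norm exceeds the limit, clip_by_norm is just a rescale by clip_norm/norm(a), so the result can be reproduced by hand (a quick check, reusing a and c from above):

manual = a*(15.0/tf.norm(a))                         # same direction, norm 15
print(tf.reduce_max(tf.abs(manual-c)).numpy())       # ~0, up to float error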

Gradient Clipping

from tensorflow.keras import datasets
(x,y),_ = datasets.mnist.load_data()                 # load MNIST as numpy arrays
x = tf.convert_to_tensor(x,dtype = tf.float32)       # images as a float tensor
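
The snippet above stops after loading the data; a minimal sketch of the clipping step itself might look like the following (the model, optimizer, and batch size here are assumptions for illustration, not from the original):

from tensorflow.keras import layers,optimizers

model = tf.keras.Sequential([layers.Flatten(),layers.Dense(10)])  # hypothetical model
optimizer = optimizers.SGD(learning_rate = 0.01)

with tf.GradientTape() as tape:
    logits = model(x[:32]/255.)                      # small batch, scaled to [0, 1]
    loss = tf.reduce_mean(tf.losses.sparse_categorical_crossentropy(
        y[:32],logits,from_logits = True))
grads = tape.gradient(loss,model.trainable_variables)
grads,_ = tf.clip_by_global_norm(grads,15)           # rescale so the global norm <= 15
optimizer.apply_gradients(zip(grads,model.trainable_variables))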

4 Higher-Order Ops

where(tensor)

import tensorflow as tf
a = tf.random.normal([3,3])
mask = a>0
mask
<tf.Tensor: id=7, shape=(3, 3), dtype=bool, numpy=
array([[False,  True,  True],
       [False, False,  True],
       [ True,  True,  True]])>
tf.boolean_mask(a,mask)
<tf.Tensor: id=35, shape=(6,), dtype=float32, numpy=
array([0.52404463, 0.62450945, 0.6324052 , 0.4014356 , 1.3766853 ,
       0.8314979 ], dtype=float32)>
indices = tf.where(mask)
indices
<tf.Tensor: id=38, shape=(6, 2), dtype=int64, numpy=
array([[0, 1],
       [0, 2],
       [1, 2],
       [2, 0],
       [2, 1],
       [2, 2]], dtype=int64)>
tf.gather_nd(a,indices)
<tf.Tensor: id=40, shape=(6,), dtype=float32, numpy=
array([0.52404463, 0.62450945, 0.6324052 , 0.4014356 , 1.3766853 ,
       0.8314979 ], dtype=float32)>
A = tf.ones([3,3])
B = tf.zeros([3,3])
tf.where(mask,A,B)
<tf.Tensor: id=59, shape=(3, 3), dtype=float32, numpy=
array([[0., 1., 1.],
       [0., 0., 1.],
       [1., 1., 1.]], dtype=float32)>
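
In its three-argument form, tf.where selects from A where the mask is True and from B elsewhere; for example, relu can be written as a conditional select (a small sketch reusing a and mask from above):

relu_like = tf.where(mask,a,tf.zeros_like(a))        # keep positives, zero the rest
print(relu_like.numpy())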

scatter_nd

indices = tf.constant([[4],[3],[1],[7]])
indices
<tf.Tensor: id=62, shape=(4, 1), dtype=int32, numpy=
array([[4],
       [3],
       [1],
       [7]])>
updates = tf.constant([9,10,11,12])
updates
<tf.Tensor: id=64, shape=(4,), dtype=int32, numpy=array([ 9, 10, 11, 12])>
shape = tf.constant([8])
print(shape.numpy())
[8]
tf.scatter_nd(indices,updates,shape)
<tf.Tensor: id=68, shape=(8,), dtype=int32, numpy=array([ 0, 11,  0, 10,  9,  0,  0, 12])>
indices = tf.constant([[0],[2]])
indices
<tf.Tensor: id=71, shape=(2, 1), dtype=int32, numpy=
array([[0],
       [2]])>
updates = tf.constant([[[5,5,5,5],[6,6,6,6],[7,7,7,7],[8,8,8,8]],[[5,5,5,5],[6,6,6,6],[7,7,7,7],[8,8,8,8]]])
updates
<tf.Tensor: id=73, shape=(2, 4, 4), dtype=int32, numpy=
array([[[5, 5, 5, 5],
        [6, 6, 6, 6],
        [7, 7, 7, 7],
        [8, 8, 8, 8]],

       [[5, 5, 5, 5],
        [6, 6, 6, 6],
        [7, 7, 7, 7],
        [8, 8, 8, 8]]])>
updates.shape
TensorShape([2, 4, 4])
shape = tf.constant([4,4,4])
print(shape.numpy())
[4 4 4]
tf.scatter_nd(indices,updates,shape)
<tf.Tensor: id=78, shape=(4, 4, 4), dtype=int32, numpy=
array([[[5, 5, 5, 5],
        [6, 6, 6, 6],
        [7, 7, 7, 7],
        [8, 8, 8, 8]],

       [[0, 0, 0, 0],
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [0, 0, 0, 0]],

       [[5, 5, 5, 5],
        [6, 6, 6, 6],
        [7, 7, 7, 7],
        [8, 8, 8, 8]],

       [[0, 0, 0, 0],
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [0, 0, 0, 0]]])>
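
scatter_nd always writes into an all-zero tensor; to scatter updates into an existing non-zero tensor, tf.tensor_scatter_nd_update can be used instead (a minimal sketch, not from the original):

base = tf.ones([8],dtype = tf.int32)
indices = tf.constant([[4],[3],[1],[7]])
updates = tf.constant([9,10,11,12])
print(tf.tensor_scatter_nd_update(base,indices,updates).numpy())
# [ 1 11  1 10  9  1  1 12]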

meshgrid: building coordinate grids for 3D plots

y = tf.linspace(-2.,2,5)
y
<tf.Tensor: id=83, shape=(5,), dtype=float32, numpy=array([-2., -1.,  0.,  1.,  2.], dtype=float32)>
x = tf.linspace(-2.,2,5)
x
<tf.Tensor: id=93, shape=(5,), dtype=float32, numpy=array([-2., -1.,  0.,  1.,  2.], dtype=float32)>
points_x,points_y = tf.meshgrid(x,y)
points_x.shape
TensorShape([5, 5])
points_x
<tf.Tensor: id=115, shape=(5, 5), dtype=float32, numpy=
array([[-2., -1.,  0.,  1.,  2.],
       [-2., -1.,  0.,  1.,  2.],
       [-2., -1.,  0.,  1.,  2.],
       [-2., -1.,  0.,  1.,  2.],
       [-2., -1.,  0.,  1.,  2.]], dtype=float32)>
points_y
<tf.Tensor: id=116, shape=(5, 5), dtype=float32, numpy=
array([[-2., -2., -2., -2., -2.],
       [-1., -1., -1., -1., -1.],
       [ 0.,  0.,  0.,  0.,  0.],
       [ 1.,  1.,  1.,  1.,  1.],
       [ 2.,  2.,  2.,  2.,  2.]], dtype=float32)>
points = tf.stack([points_x,points_y],axis = 2)
points
<tf.Tensor: id=119, shape=(5, 5, 2), dtype=float32, numpy=
array([[[-2., -2.],
        [-1., -2.],
        [ 0., -2.],
        [ 1., -2.],
        [ 2., -2.]],

       [[-2., -1.],
        [-1., -1.],
        [ 0., -1.],
        [ 1., -1.],
        [ 2., -1.]],

       [[-2.,  0.],
        [-1.,  0.],
        [ 0.,  0.],
        [ 1.,  0.],
        [ 2.,  0.]],

       [[-2.,  1.],
        [-1.,  1.],
        [ 0.,  1.],
        [ 1.,  1.],
        [ 2.,  1.]],

       [[-2.,  2.],
        [-1.,  2.],
        [ 0.,  2.],
        [ 1.,  2.],
        [ 2.,  2.]]], dtype=float32)>
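
With the [5, 5, 2] grid of (x, y) points, a function z = f(x, y) can be evaluated and visualized; a minimal sketch using matplotlib (an assumption, not used elsewhere in this article):

import matplotlib.pyplot as plt

x = tf.linspace(-2.,2,100)                           # denser grid for a smooth plot
y = tf.linspace(-2.,2,100)
px,py = tf.meshgrid(x,y)
z = tf.sin(px)+tf.sin(py)                            # example surface z = f(x, y)
plt.contourf(px.numpy(),py.numpy(),z.numpy(),levels = 20)
plt.colorbar()
plt.show()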