tf.nn.in_top_k

This article explains how TensorFlow's tf.nn.in_top_k function works and how to use it, walking through an example that checks whether predictions match the true labels.


tf.nn.in_top_k(predictions, targets, k, name=None)

predictions: the prediction results (usually the network outputs), with shape [num_samples, num_classes].
targets: the true class labels, with shape [num_samples].
k: for each sample, check whether the index given by targets is among the indices of the k largest values in that sample's predictions; the result is a boolean vector of length num_samples.
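For intuition, here is a minimal NumPy sketch of the same check (my own illustration, not TensorFlow's implementation; note that the TensorFlow docs describe in_top_k as counting all classes tied at the k-th boundary as being in the top k, whereas this sketch simply breaks ties in favour of the lower index). The TensorFlow example that follows shows the op itself.

import numpy as np

def in_top_k_np(predictions, targets, k):
    # indices of the k largest values in each row (ties broken by lower index)
    topk_idx = np.argsort(-predictions, axis=1, kind='stable')[:, :k]
    # True where the target index of a row appears among its top-k indices
    return np.array([t in row for t, row in zip(targets, topk_idx)])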

import tensorflow as tf

# TensorFlow 1.x style code
X = tf.Variable([[0.4, 0.2, 0.3, 0.1], [0.1, 0.1, 0.2, 0.6], [0.7, 0.1, 0.1, 0.1]])  # predictions: 3 samples, 4 classes
Y = tf.Variable([2, 1, 1])                                                           # targets: true class index of each sample
k = tf.placeholder(tf.int32, shape=None)                                             # k is fed at run time
result = tf.nn.in_top_k(X, Y, k)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())  # initialize_all_variables() is deprecated
    print(sess.run(X))
    print(sess.run(Y))
    print(sess.run(result, feed_dict={k: 1}))
    print(sess.run(result, feed_dict={k: 2}))

# Output:
# [[0.4 0.2 0.3 0.1]
#  [0.1 0.1 0.2 0.6]
#  [0.7 0.1 0.1 0.1]]
# [2 1 1]
# [False False False]
# [ True False  True]

Analysis of the results:

When k = 1:
Row [0.4 0.2 0.3 0.1] of X: the largest element is 0.4, at index 0; the target Y is 2, which is not among {0}, so False.
Row [0.1 0.1 0.2 0.6]: the largest element is 0.6, at index 3; Y is 1, not among {3}, so False.
Row [0.7 0.1 0.1 0.1]: the largest element is 0.7, at index 0; Y is 1, not among {0}, so False.

When k = 2:
Row [0.4 0.2 0.3 0.1]: the two largest elements are 0.4 and 0.3, at indices 0 and 2; Y is 2, which is among {0, 2}, so True.
Row [0.1 0.1 0.2 0.6]: the two largest elements are 0.6 and 0.2, at indices 3 and 2; Y is 1, not among {3, 2}, so False.
Row [0.7 0.1 0.1 0.1]: the two largest elements are 0.7 and 0.1, at indices 0 and 1; Y is 1, which is among {0, 1}, so True.
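In practice, in_top_k is most often used to build a top-k accuracy metric. Below is a minimal sketch in the same TF 1.x style; the logits and labels placeholders here are my own illustration, not part of the example above.

import tensorflow as tf

logits = tf.placeholder(tf.float32, shape=[None, 10])  # network outputs, e.g. 10 classes
labels = tf.placeholder(tf.int64, shape=[None])        # true class indices

# True where the label is among the 5 highest-scoring classes of each sample
correct = tf.nn.in_top_k(logits, labels, k=5)
# Fraction of samples whose label is in the top 5
top5_accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))

In TensorFlow 2.x the same op is exposed as tf.math.in_top_k, but note that its argument order is (targets, predictions, k).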
