tf.reduce_sum() and np.sum() give different results

This post compares TensorFlow and NumPy on the same sum-of-squares computation over a matrix and looks at how the two libraries differ in their floating-point rounding.


Original matrix: sim = [[ 0.0633368   0.03025951 -0.00220987 -0.0531667   0.03444977 -0.0488556
  -0.00196008  0.07881159  0.03389797  0.08372366  0.0333223   0.0176071
  -0.00224441  0.0824874  -0.03343089 -0.04426372  0.07569032  0.04527829
  -0.06080772 -0.03149401 -0.04424602  0.03829231  0.08600915  0.01092609
   0.06797898 -0.05491981  0.0820925  -0.04554598  0.1065703  -0.04199364
  -0.06205101  0.0771563   0.01580388 -0.06705654  0.04348055  0.00977025
   0.00630135  0.03439317 -0.10059236 -0.0058723   0.07199351 -0.01509629
   0.04180547  0.05150584 -0.10040252 -0.0202249   0.02953287 -0.07901669
   0.05349912 -0.13867164  0.02644693  0.04152147  0.04740985  0.04323491
  -0.04674747  0.01901413  0.03496659  0.05445549 -0.01283339 -0.01178931
   0.03019426  0.11259495  0.02909229 -0.07703097 -0.01920692 -0.05572202
  -0.0536941  -0.02376463 -0.01435714 -0.00367904  0.00169768 -0.01333818
  -0.00412208 -0.01937822  0.11194922  0.04181561 -0.08908901  0.01521138
   0.03944519 -0.06595123 -0.01376847 -0.03565291  0.05950167  0.08433112
   0.02374262 -0.08566118 -0.08323887 -0.04420728 -0.07242703 -0.02334067
  -0.06204451  0.01135593  0.08844964 -0.00414931 -0.12452846  0.01702958
   0.00906508  0.08146071  0.05649486  0.06206311 -0.02805709  0.00536422
   0.00845915 -0.02146988  0.02263701 -0.04352717 -0.00483911 -0.09436607
  -0.05710883 -0.07486961  0.01584719 -0.10638418 -0.05693468 -0.02661438
  -0.06290082 -0.06862796  0.00480995  0.01318355 -0.06308029 -0.11313859
  -0.05549913 -0.02006303 -0.0336807  -0.11503669 -0.04958578  0.03850305
   0.06475405 -0.00147124 -0.02259531 -0.07379939  0.04325223 -0.06571346
  -0.00782108 -0.00115696 -0.06043857 -0.06248773 -0.052317    0.07274361
   0.08502913  0.00510302 -0.02960619 -0.06978994  0.01938855 -0.03513198
  -0.07154845  0.02094    -0.03970449  0.01795545  0.03318511  0.02465078
   0.04738654 -0.08066574 -0.00388275 -0.02099387  0.03754564 -0.07722694
   0.01730175  0.03879887 -0.08431429  0.06048522  0.05777646 -0.06644108
  -0.04969462 -0.04525338 -0.01366917 -0.01003728 -0.03181716  0.00738974
   0.01788789 -0.02119223  0.00441297 -0.02415969  0.11627045  0.02513858
   0.06692413  0.03697169 -0.01973801  0.07337175 -0.03809317  0.03373428
   0.04252695  0.05552882  0.01617196  0.10927352 -0.10627645  0.05459184
  -0.04589703  0.11513919  0.01229725  0.01107223  0.03752197  0.01446257
   0.00427283  0.00117563  0.06214455 -0.01300458 -0.12091196 -0.00387319
  -0.00456788 -0.0734743  -0.08124372 -0.0977838  -0.09568566  0.01595367
   0.07937328 -0.08260249 -0.08759715  0.00026855 -0.09273349 -0.02556868
  -0.04740639  0.11204395 -0.0700826   0.10068378  0.03814976 -0.01333561
   0.0469743  -0.01917796  0.0642072  -0.10448973 -0.04106322 -0.00085127
   0.02731112 -0.09183019 -0.04996088 -0.01655298  0.01971691  0.00147668
  -0.03487838  0.04323542  0.01614081 -0.04805927 -0.09906296  0.0971921
   0.1017317   0.03685566  0.02913323 -0.00236775  0.01005206 -0.06212483
   0.07081419 -0.04770667  0.08412839 -0.12682422  0.01647955  0.03767439
  -0.1054775   0.04018845 -0.14415762  0.04647692  0.00675798  0.01562993
   0.08388472  0.08248888  0.11877951 -0.04089082 -0.06427614  0.03757285
  -0.04088425 -0.0090223  -0.11968314 -0.08466517 -0.05562597  0.07813478
   0.05071322 -0.04363221  0.03459674 -0.03363322 -0.02520343 -0.04668384
   0.06159448  0.00868899 -0.03694644 -0.09774038  0.06127869 -0.06427088
   0.07525009 -0.05450463  0.01831613 -0.02957821  0.00138609  0.04013913
  -0.05819475  0.12136054 -0.05239633 -0.01001451  0.06543468 -0.03257976
  -0.04370891  0.07321297 -0.00927433 -0.04305443 -0.04572316 -0.0351896
   0.0540113   0.12926351  0.0190832  -0.02427365 -0.02213438  0.03767746]]

import numpy as np
import tensorflow as tf  # TF 1.x API: tf.Session and the keep_dims argument (spelled keepdims in TF 2.x)

# sim is the 1 x 300 row vector printed above (float32, judging by the printed precision)
session = tf.Session()

temp = tf.square(sim)                                        # element-wise square (TensorFlow)
temp1 = np.square(sim)                                       # element-wise square (NumPy)
print('temp', session.run(temp), '\n')
print('temp1', temp1, '\n')

temp2 = tf.reduce_sum(tf.square(sim), 1, keep_dims=True)     # row-wise sum of squares (TensorFlow)
temp3 = np.sum(np.square(sim), 1, keepdims=True)             # row-wise sum of squares (NumPy)
print('temp2', session.run(temp2), '\n')
print('temp3', temp3, '\n')

temp4 = tf.square(tf.reduce_sum(tf.square(sim), 1, keep_dims=True))   # square of the TF sum
temp5 = np.square(np.sum(np.square(sim), 1, keepdims=True))           # square of the NumPy sum
print('temp4', session.run(temp4), '\n')
print('temp5', temp5, '\n')

Output:

temp2 [[ 1.]] 

temp3 [[ 0.99999994]] 

temp4 [[ 1.]] 

temp5 [[ 0.99999988]]

I expected temp2 == temp3 and temp4 == temp5, but the actual results are not equal.
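A quick check on the printed values (1.0 and 0.99999994 are copied from the output above, not recomputed from sim) shows that temp4 and temp5 are simply the float32 squares of temp2 and temp3, so the only genuine discrepancy is between the two sums themselves:

import numpy as np

print(np.square(np.float32(1.0)))         # 1.0          -> temp4 is just temp2 squared
print(np.square(np.float32(0.99999994)))  # ~0.99999988  -> temp5 is just temp3 squared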

A possible cause: tf.reduce_sum() and np.sum() do not round in exactly the same way. In single precision, the result of adding up ~300 terms depends on the order of accumulation, and the two libraries do not have to use the same order.
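Here is a minimal sketch of that effect, using made-up numbers rather than sim: float32 addition is not associative, so two reductions that add the same squares in different orders (NumPy's np.sum uses pairwise summation; TensorFlow's kernel may accumulate differently) can legitimately land one unit in the last place apart, which is exactly the 1.0 vs 0.99999994 gap seen above.

import numpy as np

a = np.float32(1.0)
b = np.float32(2.0 ** -24)         # half an ulp of 1.0 in float32

left = (a + b) + b                 # each partial sum rounds back to 1.0 (ties to even)
right = a + (b + b)                # b + b = 2**-23 is a full ulp, so it survives

print(left, right, left == right)  # 1.0 1.0000001 False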

If anyone knows the precise reason, please point it out in the comments.
