tensorflow 池化操作实例 tf.nn.max_pool

本文通过一个具体的示例介绍了如何使用TensorFlow实现最大池化操作,并展示了池化前后的数据变化。示例中详细解释了池化窗口大小、滑动步长等参数设置及其对结果的影响。

摘要生成于 C知道 ,由 DeepSeek-R1 满血版支持, 前往体验 >


#encoding:utf-8
import numpy as np
import tensorflow as tf


# Demo: 2x2 max pooling with stride 2 over a 4x4 input (TensorFlow 1.x graph mode).
#
# x:       pooling input, reshaped to NHWC layout [1, 4, 4, 1]
# ksize:   pooling window size per dimension
# strides: window stride per dimension, conventionally [1, stride, stride, 1]
# padding: 'VALID' (no padding) or 'SAME'
x_image = tf.placeholder(tf.float32, shape=[4, 4])
x = tf.reshape(x_image, [1, 4, 4, 1])

ksize = [1, 2, 2, 1]
strides = [1, 2, 2, 1]
padding = 'VALID'

y = tf.nn.max_pool(x, ksize, strides, padding)

x_data = np.array([
    [4, 3, 1, 8],
    [7, 2, 6, 3],
    [2, 0, 1, 1],
    [3, 4, 2, 5]
])

with tf.Session() as sess:
    # BUG FIX: evaluate both tensors in ONE run() call instead of two separate
    # graph executions. The original global_variables_initializer() call was a
    # no-op (the graph has no variables) and is dropped. Results are bound to
    # new names so the graph tensors x/y are not shadowed.
    x_val, y_val = sess.run([x, y], feed_dict={x_image: x_data})

# print() with a single argument behaves identically under Python 2 and 3;
# the original Python-2-only print statements are a SyntaxError on Python 3.
print("The shape of x: {}".format(x_val.shape))
print(x_val.reshape(4, 4))
print("")

# BUG FIX: output-message typo "pf" -> "of".
print("The shape of y: {}".format(y_val.shape))
print(y_val.reshape(2, 2))
print("")

输出:

The shape of x: (1, 4, 4, 1)
[[ 4.  3.  1.  8.]
 [ 7.  2.  6.  3.]
 [ 2.  0.  1.  1.]
 [ 3.  4.  2.  5.]]

The shape of y: (1, 2, 2, 1)
[[ 7.  8.]
 [ 4.  5.]]

class CBAMLayer(tf.keras.layers.Layer):
    """Convolutional Block Attention Module (CBAM).

    Applies channel attention (shared MLP over global avg/max pooled
    descriptors) followed by spatial attention (7x7 conv over channel-wise
    avg/max maps), as in Woo et al., "CBAM" (2018).
    """

    def __init__(self, reduction_ratio=16, **kwargs):
        super(CBAMLayer, self).__init__(**kwargs)
        # Factor by which the channel dimension is shrunk in the shared MLP.
        self.reduction_ratio = reduction_ratio

    def build(self, input_shape):
        channel = input_shape[-1]
        # BUG FIX: original referenced a bare `layers` name that was never
        # imported; use the fully qualified tf.keras.layers instead.
        # Shared two-layer MLP applied to both the avg- and max-pooled vectors.
        self.shared_layer_one = tf.keras.layers.Dense(
            channel // self.reduction_ratio, activation='relu')
        self.shared_layer_two = tf.keras.layers.Dense(channel)
        # 7x7 conv producing the single-channel spatial attention map.
        self.conv2d = tf.keras.layers.Conv2D(
            1, kernel_size=7, padding='same', activation='sigmoid')

    def call(self, input_feature):
        # Remember the caller's dtype; compute internally in float32.
        input_dtype = input_feature.dtype
        x = tf.cast(input_feature, tf.float32)

        # --- Channel attention ---
        avg_pool = tf.reduce_mean(x, axis=[1, 2], keepdims=True)
        max_pool = tf.reduce_max(x, axis=[1, 2], keepdims=True)
        avg_out = self.shared_layer_two(self.shared_layer_one(avg_pool))
        max_out = self.shared_layer_two(self.shared_layer_one(max_pool))
        cbam_feature = tf.nn.sigmoid(avg_out + max_out)
        channel_refined = x * cbam_feature

        # --- Spatial attention ---
        avg_spatial = tf.reduce_mean(channel_refined, axis=-1, keepdims=True)
        max_spatial = tf.reduce_max(channel_refined, axis=-1, keepdims=True)
        concat = tf.concat([avg_spatial, max_spatial], axis=-1)
        spatial_attention = self.conv2d(concat)
        refined_feature = channel_refined * spatial_attention

        # BUG FIX: the original returned `channel_refined` before applying
        # spatial attention (leaving duplicated spatial-attention code
        # unreachable) and referenced `input_dtype` without defining it.
        return tf.cast(refined_feature, input_dtype)

    def get_config(self):
        # BUG FIX: the original ended with a bare `return` (returning None),
        # which breaks layer serialization/deserialization.
        config = super(CBAMLayer, self).get_config()
        config.update({'reduction_ratio': self.reduction_ratio})
        return config
最新发布
03-27
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值