padding='post' vs padding='pre': where the padding goes

This post walks through preprocessing training data with Keras's pad_sequences function, focusing on the 'pre' and 'post' padding modes and on the maxlen argument, which is used here to normalize every sequence to a fixed length of 256.

 

from tensorflow import keras

# Pad (or truncate) every sequence to exactly 256 tokens, filling with the <PAD> token id at the end.
train_data = keras.preprocessing.sequence.pad_sequences(train_data,
                                                        value=word_index["<PAD>"],
                                                        padding='post',
                                                        maxlen=256)
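To see what maxlen actually does, here is a minimal sketch with made-up sequences (the data below is hypothetical, not from the post). pad_sequences both pads short sequences and truncates long ones, and truncation happens at the front by default (truncating='pre'):

from tensorflow import keras

seqs = [[1, 2, 3, 4, 5, 6], [7, 8]]
out = keras.preprocessing.sequence.pad_sequences(seqs, maxlen=4, padding='post')
print(out)
# [[3 4 5 6]    <- truncated from the front (truncating='pre' is the default)
#  [7 8 0 0]]   <- padded at the end because padding='post'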

padding: 'pre' or 'post'; whether values are added at the beginning or at the end of each sequence ('pre' is the default).

maxlen: the length every sequence is padded (or truncated) to.

value: the value to pad with (0 by default).
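The difference between the two modes is easiest to see side by side; a minimal sketch with hypothetical sequences:

from tensorflow import keras

seqs = [[1, 2, 3], [4, 5]]

print(keras.preprocessing.sequence.pad_sequences(seqs, maxlen=5, padding='pre'))
# [[0 0 1 2 3]
#  [0 0 0 4 5]]

print(keras.preprocessing.sequence.pad_sequences(seqs, maxlen=5, padding='post'))
# [[1 2 3 0 0]
#  [4 5 0 0 0]]

As a rule of thumb, 'pre' padding is often preferred for recurrent models, since it keeps the informative tokens closest to the final timestep, while 'post' padding is common with convolutional models or when masking is used.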

From the comments (07-29), a reader posted the following time-series model and asked for feedback. The code has been reflowed for readability (it was pasted as a single run-on line), the six structurally identical dilated-convolution blocks have been collapsed into a loop without changing behavior, and imports have been added so the snippet parses:

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.regularizers import l2

l2_all = l2(0)  # 0.0001
dropoutRate = 0.25
filters = 256

inputs = tf.keras.Input(shape=(64, 210))
# Commented-out stem experiments from the original:
# inputs_cnn = tf.keras.layers.Dropout(0.1)(inputs)
# dr = tf.keras.layers.Dropout(0.2)(inputs)
# inputs_cnn = tf.keras.layers.Conv1D(filters=256, kernel_size=1, padding='causal', activation='gelu',
#                                     dilation_rate=1, kernel_initializer='he_normal',
#                                     kernel_regularizer=l2_all)(inputs_cnn)
# inputs_cnn = tf.keras.layers.LayerNormalization()(inputs_cnn)

# Six residual blocks of two dilated causal convolutions each
# (dilation rates 1, 2, 4, 8, 16, 32). The 1x1-conv shortcut is
# always taken from the raw inputs, exactly as in the original.
add1 = inputs
for rate in (1, 2, 4, 8, 16, 32):
    cnn1 = tf.keras.layers.Conv1D(filters=filters, kernel_size=3, padding='causal',
                                  dilation_rate=rate, kernel_initializer='he_normal',
                                  kernel_regularizer=l2_all)(add1)
    cnn1 = tf.keras.layers.Activation('gelu')(cnn1)
    cnn1 = tf.keras.layers.LayerNormalization()(cnn1)
    cnn1 = tf.keras.layers.Dropout(dropoutRate)(cnn1)
    cnn1 = tf.keras.layers.Conv1D(filters=filters, kernel_size=3, padding='causal',
                                  dilation_rate=rate, kernel_initializer='he_normal',
                                  kernel_regularizer=l2_all)(cnn1)
    cnna = tf.keras.layers.Conv1D(filters=filters, kernel_size=1, padding='causal',
                                  dilation_rate=1, kernel_initializer='he_normal',
                                  kernel_regularizer=l2_all)(inputs)
    add1 = tf.add(cnn1, cnna)
    add1 = tf.keras.layers.Activation('gelu')(add1)
    add1 = tf.keras.layers.LayerNormalization()(add1)
    add1 = tf.keras.layers.Dropout(dropoutRate)(add1)

# Two stacked LSTMs with another shortcut from the inputs
lstm1 = tf.keras.layers.LSTM(filters, return_sequences=True, kernel_regularizer=l2_all)(add1)
lstm1 = tf.keras.layers.LayerNormalization()(lstm1)
lstm1 = tf.keras.layers.Dropout(dropoutRate)(lstm1)
lstm1 = tf.keras.layers.LSTM(filters, return_sequences=True, kernel_regularizer=l2_all)(lstm1)

cnna = tf.keras.layers.Conv1D(filters=filters, kernel_size=1, padding='causal',
                              dilation_rate=1, kernel_initializer='he_normal',
                              kernel_regularizer=l2_all)(inputs)
add1 = tf.add(lstm1, cnna)
add1 = tf.keras.layers.LayerNormalization()(add1)
add1 = tf.keras.layers.Dropout(dropoutRate)(add1)

# Self-attention block, again with an input shortcut
ma1 = tf.keras.layers.MultiHeadAttention(num_heads=16, key_dim=64,
                                         kernel_regularizer=l2_all, dropout=0.1)(add1, add1)  # 0.2
cnna = tf.keras.layers.Conv1D(filters=filters, kernel_size=1, padding='causal',
                              dilation_rate=1, kernel_initializer='he_normal',
                              kernel_regularizer=l2_all)(inputs)
ma1 = tf.keras.layers.Dropout(dropoutRate)(ma1)
add1 = tf.add(ma1, cnna)
add1 = tf.keras.layers.LayerNormalization()(add1)

# Position-wise feed-forward block with a 1x1-conv shortcut
dense = tf.keras.layers.Dense(4096, activation='gelu', kernel_regularizer=l2_all)(add1)
dense = tf.keras.layers.Dropout(dropoutRate)(dense)
dense = tf.keras.layers.Dense(1024, kernel_regularizer=l2_all)(dense)
cnna = tf.keras.layers.Conv1D(filters=1024, kernel_size=1, padding='causal',
                              dilation_rate=1, kernel_initializer='he_normal',
                              kernel_regularizer=l2_all)(inputs)
dense = tf.keras.layers.Dropout(dropoutRate)(dense)
add1 = tf.add(dense, cnna)
add1 = tf.keras.layers.LayerNormalization()(add1)

# Pooling head
# fl1 = tf.keras.layers.Flatten()(add1)
gp1 = tf.keras.layers.GlobalMaxPooling1D()(add1)
ap1 = tf.keras.layers.GlobalAveragePooling1D()(add1)
cat1 = tf.concat([gp1, ap1], axis=1)
cat1 = tf.keras.layers.Dropout(0.25)(cat1)
out = tf.keras.layers.Dense(8, activation='linear', kernel_regularizer=l2_all)(cat1)  # linear

model = tf.keras.models.Model(inputs=inputs, outputs=out)
model.summary()
optimizer = keras.optimizers.Adadelta(learning_rate=2.0, rho=0.95, epsilon=1e-8)

Is this 8-class time-series classification model reasonable? Is there anything that should be improved?
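One gap worth noting before judging the design: the snippet defines an optimizer but never compiles the model. A minimal, hypothetical completion for 8-class training (assuming integer-coded class labels; from_logits=True matches the linear output layer):

# Hypothetical completion, not part of the reader's original code:
model.compile(optimizer=optimizer,
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])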