Method 1: Sequential construction:

import tensorflow as tf
from tensorflow import keras

model = keras.Sequential([
    keras.layers.Flatten(input_shape=(28, 28)),        # flatten 28x28 images into 784-dim vectors
    keras.layers.Dense(128, activation=tf.nn.relu),
    keras.layers.Dense(10, activation=tf.nn.softmax)   # 10-class probability output
])
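A minimal training sketch for the Sequential model above, assuming 28x28 grayscale inputs such as MNIST (the optimizer, loss, and epoch count are illustrative choices, not from the original):

model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',  # integer labels 0-9
              metrics=['accuracy'])
# (x_train, y_train), _ = tf.keras.datasets.mnist.load_data()
# model.fit(x_train / 255.0, y_train, epochs=5)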
Method 2: Functional API based on tf.keras.Model

import tensorflow as tf

inputs = tf.keras.layers.Input(shape=(3,))  # Input() requires a shape; (3,) is an example feature size
x = tf.keras.layers.Dense(4, activation=tf.nn.relu)(inputs)
outputs = tf.keras.layers.Dense(5, activation=tf.nn.softmax)(x)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
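Because the functional API wires layers into an explicit graph, the model can be inspected immediately. A quick sanity check (standard Keras calls only; the dummy batch matches the example Input shape above):

model.summary()             # prints the Input -> Dense(4) -> Dense(5) topology and parameter counts
dummy = tf.zeros((2, 3))    # batch of 2 examples, 3 features each
print(model(dummy).shape)   # (2, 5): softmax over 5 classes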
Method 3: Custom model by subclassing tf.keras.Model; this is the most flexible approach and lets you define your own model methods.
import tensorflow as tf


class ConvMaxPooling1d(tf.keras.layers.Layer):
    def __init__(self, filters, kernel):
        super(ConvMaxPooling1d, self).__init__()
        self.kernel_size = kernel
        # (batch_size, steps, embedding_size) -> (batch_size, steps - kernel_size + 1, filters)
        self.conv = tf.keras.layers.Conv1D(filters=filters, kernel_size=kernel, activation='relu')
        # (batch_size, steps - kernel_size + 1, filters) -> (batch_size, filters)
        self.pool = tf.keras.layers.GlobalMaxPool1D()

    def call(self, inputs, masks=None):
        conv_out = self.conv(inputs)
        # if masks is not None:
        #     masks_exp = tf.expand_dims(masks, axis=-1)
        #     # masking operation: apply the mask to the conv output before pooling
        #     conv_out += masks_exp[:, self.kernel_size - 1:]
        pool_out = self.pool(conv_out)
        return pool_out
class TextCNN(tf.keras.models.Model):
    def __init__(self, vocab, embedding_size, hidden_size, filters_list=[50, 60, 70, 80], kernels=[2, 3, 4, 5],
                 dropout=0.5, sentence_length=20):
        super(TextCNN, self).__init__()
        ind = tf.feature_column.categorical_column_with_vocabulary_file("sentence_vocab", vocabulary_file=vocab,
                                                                        default_value=0)
        self.embedding_size = embedding_size
        self.sentence_length = sentence_length
        self.dense_feature_layer = tf.keras.layers.DenseFeatures(
            [tf.feature_column.embedding_column(ind, dimension=embedding_size)])
        self.conv_maxs = [ConvMaxPooling1d(f, k) for f, k in zip(filters_list, kernels)]
        self.dropout = tf.keras.layers.Dropout(dropout)
        self.dense = tf.keras.layers.Dense(hidden_size, activation='relu')
        self.classifier = tf.keras.layers.Dense(1, activation='sigmoid')

    # @tf.function(input_signature=(tf.TensorSpec(shape=(None, None), dtype=tf.dtypes.string),))
    def call(self, inputs):
        # *************** word token embedding begin ***************
        inputs = tf.convert_to_tensor(inputs)
        inputs_tensor = tf.reshape(inputs, (-1, 1))
        embed_word_vectors1 = self.dense_feature_layer({"sentence_vocab": inputs_tensor})
        embeddings = tf.reshape(embed_word_vectors1, (-1, self.sentence_length, self.embedding_size))
        # *************** word token embedding end ***************
        # For each conv+pool layer: (batch_size, steps, embedding_size) -> (batch_size, filters)
        conv_outs = [layer(embeddings, None) for layer in self.conv_maxs]
        # Concatenate the pooled outputs: [(batch_size, filters_i)] -> (batch_size, sum(filters_list))
        concat_out = tf.concat(conv_outs, axis=-1)
        dense_out = self.dense(concat_out)
        drop_out = self.dropout(dense_out)
        logits = self.classifier(drop_out)
        return logits  # sigmoid output in (0, 1) for binary classification
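A minimal usage sketch for the subclassed model, assuming a plain-text vocabulary file with one token per line and inputs already tokenized and padded to sentence_length (the file name, tokens, and sizes below are illustrative, not from the original):

# Hypothetical vocabulary file; in practice it would be built from your corpus.
with open("vocab.txt", "w") as f:
    f.write("\n".join(["<pad>", "good", "bad", "movie", "plot"]))

model = TextCNN(vocab="vocab.txt", embedding_size=16, hidden_size=32)
batch = tf.constant([["good", "movie"] + ["<pad>"] * 18])  # (1, sentence_length=20) string tokens
print(model(batch).shape)  # (1, 1): one sigmoid probability per sentence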

