目录
1. 管道PipeLine
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
# 方法1: You can create a Sequential model by passing a list of layers to the Sequential constructor:
# Method 1: pass the full layer list straight to the Sequential constructor.
_layer_stack = [
    layers.Dense(2, activation="relu", name="layer1"),
    layers.Dense(3, activation="relu", name="layer2"),
    layers.Dense(4, name="layer3"),
]
model_1 = keras.Sequential(_layer_stack, name="my_sequential")
# You can also create a Sequential model incrementally via the `add()` method:
# Method 2: build the same Sequential model incrementally via add().
model = keras.Sequential(name="my_sequential")
for width, act, lname in [(2, "relu", "layer1"), (3, "relu", "layer2"), (4, None, "layer3")]:
    model.add(layers.Dense(width, activation=act, name=lname))

# Calling the model on a test input builds its weights.
x = tf.ones((3, 3))
y = model(x)
2. 函数式建模
2.1 函数式API
tf.keras.Input函数参数说明
2. 具体应用教程
num_tags = 12          # number of unique issue tags
num_words = 10000      # vocabulary size obtained when preprocessing text data
num_departments = 4    # number of departments for predictions

# Three named symbolic inputs: two variable-length token sequences + a tag vector.
title_input = keras.Input(shape=(None,), name="title")
body_input = keras.Input(shape=(None,), name="body")
tags_input = keras.Input(shape=(num_tags,), name="tags")

# Embed each text input into 64-d vectors, then reduce the sequence with an LSTM
# (128-d summary for the title, 32-d summary for the body).
title_vec = layers.LSTM(128)(layers.Embedding(num_words, 64)(title_input))
body_vec = layers.LSTM(32)(layers.Embedding(num_words, 64)(body_input))

# Merge every available feature into one large vector via concatenation.
merged = layers.concatenate([title_vec, body_vec, tags_input])

# Two heads on top of the shared features: a 1-unit logistic-regression-style
# priority score and a department classifier.
priority_pred = layers.Dense(1, name="priority")(merged)
department_pred = layers.Dense(num_departments, name="department")(merged)

# End-to-end model predicting both priority and department.
model = keras.Model(
    inputs=[title_input, body_input, tags_input],
    outputs=[priority_pred, department_pred],
)
模型配置
# 使用字典配置
# Configure one loss per output, keyed by layer name; from_logits=True because
# neither output head applies an activation.
_losses = {
    "priority": keras.losses.BinaryCrossentropy(from_logits=True),
    "department": keras.losses.CategoricalCrossentropy(from_logits=True),
}
model.compile(
    optimizer=keras.optimizers.RMSprop(1e-3),
    loss=_losses,
    loss_weights={"priority": 1.0, "department": 0.2},
)
使用 NumPy arrays 格式的数据训练模型
# Dummy input data
# Dummy inputs: random token ids for both text fields and a random
# multi-hot tag matrix.
title_data = np.random.randint(num_words, size=(1280, 10))
body_data = np.random.randint(num_words, size=(1280, 100))
tags_data = np.random.randint(2, size=(1280, num_tags)).astype("float32")

# Dummy targets matching the two output heads.
priority_targets = np.random.random(size=(1280, 1))
dept_targets = np.random.randint(2, size=(1280, num_departments))

# Inputs and targets are both passed as dicts keyed by layer name.
fit_inputs = {"title": title_data, "body": body_data, "tags": tags_data}
fit_targets = {"priority": priority_targets, "department": dept_targets}
model.fit(fit_inputs, fit_targets, epochs=2, batch_size=32)
2.2 Embedding共享层
# A single Embedding instance applied to two inputs: both encodings share
# exactly the same weights.
shared_embedding = layers.Embedding(1000, 128)

text_input_a, text_input_b = (
    keras.Input(shape=(None,), dtype="int32") for _ in range(2)
)
encoded_input_a, encoded_input_b = (
    shared_embedding(t) for t in (text_input_a, text_input_b)
)
3. 继承
3.1 自定义layer
All layers subclass the Layer class and implement:
- call 方法:指定自定义层需要完成的计算
- build 方法:创建层的权重,也可以在 __init__ 中创建
# 方法: 快捷定义layer
class Linear(keras.layers.Layer):
    """A dense layer computing y = x @ w + b, with weights created in __init__.

    Because the weights are created eagerly, `input_dim` must be known when
    the layer is constructed.
    """

    def __init__(self, units=32, input_dim=32):
        super().__init__()
        self.w = self.add_weight(
            shape=(input_dim, units),
            initializer="random_normal",
            trainable=True,
        )
        self.b = self.add_weight(
            shape=(units,),
            initializer="zeros",
            trainable=True,
        )

    def call(self, inputs):
        # Affine transform of the inputs.
        return tf.matmul(inputs, self.w) + self.b
根据 input 的size定义weights 的形状
class Linear(keras.layers.Layer):
    """A dense layer whose weight shapes are derived from the input in build().

    Deferring weight creation to build() lets the layer adapt to whatever
    input size it first receives.
    """

    def __init__(self, units=32):
        super().__init__()
        self.units = units

    def build(self, input_shape):
        # The last input dimension is only known once the layer sees data.
        in_dim = input_shape[-1]
        self.w = self.add_weight(
            shape=(in_dim, self.units),
            initializer="random_normal",
            trainable=True,
        )
        self.b = self.add_weight(
            shape=(self.units,),
            initializer="random_normal",
            trainable=True,
        )

    def call(self, inputs):
        return tf.matmul(inputs, self.w) + self.b
自定义层的__call__()
方法会在第一次被调用时自动运行build
方法。
3.2 自定义model
class Mymodel(keras.Model):
    """Subclassed model: Conv2D(32) -> Flatten -> Dense(128, relu) -> Dense(10).

    Fix: the original referenced bare `Model`, `Conv2D`, `Flatten` and `Dense`,
    none of which are imported anywhere in this file; use the `keras` and
    `layers` namespaces the file already imports.
    """

    def __init__(self):
        super().__init__()
        self.conv1 = layers.Conv2D(32, 3, activation='relu')
        # Intermediate layers need no input_shape argument: the model infers
        # each layer's input shape from the previous layer's output.
        self.flatten = layers.Flatten()
        self.d1 = layers.Dense(128, activation='relu')
        self.d2 = layers.Dense(10)

    def call(self, x):
        x = self.conv1(x)
        x = self.flatten(x)
        x = self.d1(x)
        return self.d2(x)
4. 其他keras层介绍
4.1 基本运算
tf.keras.layers.Add
|tf.keras.layers.add
对input list按元素求和(小写的是大写的功能实现)tf.keras.layers.Average
对input list按元素求平均tf.keras.layers.Dot
计算两个张量中样本之间的点积tf.keras.layers.Dot(axes=(1, 2))([x, y])
# 输入层 1
input1 = tf.keras.layers.Input(shape=(16,))
x1 = tf.keras.layers.Dense(8, activation='relu')(input1)
# 输入层 2
input2 = tf.keras.layers.Input(shape=(32,))
x2 = tf.keras.layers.Dense(8, activation='relu')(input2)
# 对其求和
# equivalent to `added = tf.keras.layers.add([x1, x2])`
added = tf.keras.layers.Add()([x1, x2])
avg = tf.keras.layers.Average()([x1, x2])
out = tf.keras.layers.Dense(4)(added)
model = tf.keras.models.Model(inputs=[input1, input2], outputs=out)
4.2 张量的concatenate、flatten、reshape操作
tf.keras.layers.Concatenate
对张量进行串联运算:concatenated = keras.layers.concatenate([x1, x2])
tf.keras.layers.Flatten
用于将输入层的数据压成一维的数据。一般用在卷积层和全连接层之间。tf.keras.layers.Reshape
:tf.keras.layers.Reshape((6, 2))
4.3 Lambda层
# NOTE(review): excerpt from a larger class — `Lambda`, `origin_input` and
# `all_features` are defined outside this snippet; `Lambda` is presumably
# tf.keras.layers.Lambda — confirm against the original source.
self.feature_1 = list(range(0, 5000))
# Select the columns listed in `idx` from the input tensor along axis 1 via
# tf.gather; the extra `idx` argument is supplied through the Lambda layer's
# `arguments` dict, and the layer is named after the first selected index.
cate_1 = Lambda(lambda x, idx: tf.gather(x, idx, axis=1), arguments={"idx": self.feature_1},
name="input_%s" % (self.feature_1[0]))(origin_input)
all_features.append(cate_1)