TensorFlow 2.1 Self-Study (Part 1)

Git link to the original author's repo
I learned a lot from that code; this post summarizes the key points.

Basic Elements

tf.constant
Creates a constant op and adds it to the default graph as a node; the result is a TensorFlow tensor.
helloworld = tf.constant("hello, tensorflow")
print("value :", helloworld.numpy())

tf.Variable
Creates a variable, e.g. variable = tf.Variable(1). You can add to it with the '+' operator, or update it in place with assign_add.
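A minimal sketch of both update styles, with assumed example values:

import tensorflow as tf

variable = tf.Variable(1)
print((variable + 2).numpy())   # '+' builds a new tensor (3); the variable itself stays 1
variable.assign_add(2)          # assign_add updates the variable in place
print(variable.numpy())         # 3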

Basic Operations

Add two tensors
tf.add(a, b)
Element-wise multiplication
tf.multiply(a, b)
Matrix multiplication
tf.matmul(matrix1, matrix2)

tf.cast() casts a tensor to a different dtype
tf.shape(a) returns the shape of tensor a as a 1-D tensor
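A minimal sketch exercising these operations on small assumed tensors:

import tensorflow as tf

a = tf.constant([1, 2])
b = tf.constant([3, 4])
print(tf.add(a, b).numpy())        # [4 6]
print(tf.multiply(a, b).numpy())   # [3 8]

matrix1 = tf.constant([[1., 2.], [3., 4.]])
matrix2 = tf.constant([[5., 6.], [7., 8.]])
print(tf.matmul(matrix1, matrix2).numpy())   # [[19. 22.] [43. 50.]]

print(tf.cast(a, tf.float32).numpy())   # [1. 2.]
print(tf.shape(matrix1).numpy())        # [2 2]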

Activation Functions

tf.nn.relu
tf.nn.sigmoid
tf.nn.tanh
tf.nn.softplus
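A minimal sketch applying each activation to an assumed sample tensor:

import tensorflow as tf

x = tf.constant([-1.0, 0.0, 1.0])
print(tf.nn.relu(x).numpy())       # [0. 0. 1.]
print(tf.nn.sigmoid(x).numpy())
print(tf.nn.tanh(x).numpy())
print(tf.nn.softplus(x).numpy())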

tf.GradientTape
A handy tool for computing gradients: it records operations in eager mode and then differentiates through them.

x = tf.constant(3.0)
with tf.GradientTape(persistent=True) as t:
    t.watch(x)       # Ensures that `tensor` is being traced by this tape.
    y = x * x
    z = y * y
dz_dx = t.gradient(z, x)  # 108.0 (4*x^3 at x = 3)
dy_dx = t.gradient(y, x)  # 6.0
print("dz/dx=", dz_dx.numpy())
print("dy/dx=", dy_dx.numpy())
del t  # Drop the reference to the tape

Here watch is used to register the tensor we want to differentiate with respect to; by default GradientTape only tracks variables with trainable=True, so a constant such as x must be watched explicitly.
Also, by default the tape's resources are released as soon as gradient() is called, and it cannot be called a second time. To compute several gradients from the same tape, create it with persistent=True, as in the example above.

Linear Regression

Design a simple model to perform linear regression:

import tensorflow as tf

learning_rate = 0.1  # assumed value

class Model(object):
    def __init__(self):
        self.W = tf.Variable(10.0)
        self.b = tf.Variable(-5.0)

    def __call__(self, inputs):
        return self.W * inputs + self.b

def compute_loss(y_true, y_pred):
    return tf.reduce_mean(tf.square(y_true-y_pred))

model = Model()

TRUE_W = 3.0
TRUE_b = 2.0

NUM_EXAMPLES = 1000
inputs  = tf.random.normal(shape=[NUM_EXAMPLES])
noise   = tf.random.normal(shape=[NUM_EXAMPLES])
outputs = inputs * TRUE_W + TRUE_b + noise

for epoch in range(30):
    with tf.GradientTape() as tape:
        loss = compute_loss(outputs, model(inputs))

    dW, db = tape.gradient(loss, [model.W, model.b])
    model.W.assign_sub(learning_rate * dW)
    model.b.assign_sub(learning_rate * db)

The tape records the operations on the model's W and b inside the with block; calling tape.gradient outside it yields dloss/dW and dloss/db. Each step moves the parameters along the negative gradient, so the model's W and b gradually converge to the true values TRUE_W and TRUE_b.
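After the training loop above, a quick sanity check (a minimal sketch reusing the names defined above) is to compare the learned parameters with the true ones:

print("W: %.3f (true %.1f), b: %.3f (true %.1f)"
      % (model.W.numpy(), TRUE_W, model.b.numpy(), TRUE_b))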

Logistic Regression on MNIST

import numpy as np
import tensorflow as tf

lr = 1e-3
epoch = 10
batch_size = 600

def load_MnistData():
    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()

    # flatten each 28x28 image to a 784-dim vector, then batch and shuffle
    train_data = (
        tf.data.Dataset.from_tensor_slices((tf.reshape(x_train, [-1, 784]), y_train))
        .batch(batch_size).shuffle(1000)
    )

    # scale pixel values to [0, 1] and one-hot encode the labels
    train_data = (
        train_data.map(lambda x, y:
                       (tf.divide(tf.cast(x, tf.float32), 255.0),
                        tf.reshape(tf.one_hot(y, 10), (-1, 10))))
    )

    test_data = (
        tf.data.Dataset.from_tensor_slices((tf.reshape(x_test, [-1, 784]), y_test))
        .batch(len(x_test)).shuffle(1000)
    )

    test_data = (
        test_data.map(lambda x, y:
                       (tf.divide(tf.cast(x, tf.float32), 255.0),
                        tf.reshape(tf.one_hot(y, 10), (-1, 10))))
    )

    return train_data, test_data

def loadWeight():
    return tf.Variable(tf.zeros([784,10])), tf.Variable(tf.zeros([10]))

def train():
    trainset, testset = load_MnistData()
    w, b = loadWeight()
    model = lambda x: tf.nn.softmax(tf.matmul(x, w) + b)  # single-layer softmax classifier
    compute_loss = lambda true, pred: tf.reduce_mean(tf.reduce_sum(tf.losses.binary_crossentropy(true, pred), axis=-1))
    compute_acc = lambda true, pred: tf.reduce_mean(tf.keras.metrics.categorical_accuracy(true, pred))
    optimizer = tf.optimizers.Adam(lr)

    for ep in range(epoch):
        for i, (x_, y_) in enumerate(trainset):
            with tf.GradientTape() as tape:
                pred = model(x_)
                loss = compute_loss(y_, pred)
            acc = compute_acc(y_, pred)
            grad = tape.gradient(loss, [w, b])
            optimizer.apply_gradients(zip(grad, [w, b]))
            print("=> loss %.2f acc %.2f" % (loss.numpy(), acc.numpy()))

    for i, (x_, y_) in enumerate(testset):
        pred_test = model(x_)
        acc_test = compute_acc(y_, pred_test)
        print("test acc is : %f" % acc_test)

if __name__ == '__main__':
    train()

A Basic CNN on MNIST

import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Conv2D, Dense, Flatten
from tensorflow.keras import Model

class CModel(Model):
    def __init__(self):
        super(CModel, self).__init__()
        self.conv1 = Conv2D(32, 3, activation='relu')
        self.conv2 = Conv2D(32, 3, activation='relu')
        self.flatten = Flatten()
        self.d1 = Dense(128, activation='relu')
        self.d2 = Dense(10, activation='softmax')

    def call(self, x):  # subclassed Keras models should override call(), not __call__()
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.flatten(x)
        x = self.d1(x)
        return self.d2(x)

def train():
    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
    x_train, x_test = x_train / 255.0, x_test / 255.0
    x_train = x_train[..., tf.newaxis]  # add a channel dimension: (N, 28, 28, 1)
    x_test = x_test[..., tf.newaxis]

    trainset = tf.data.Dataset.from_tensor_slices((x_train, y_train)).shuffle(10000).batch(32)
    testset = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(32)

    model = CModel()

    optimizer = tf.keras.optimizers.Adam()
    loss_obj = tf.keras.losses.SparseCategoricalCrossentropy()

    train_loss = tf.keras.metrics.Mean()
    train_acc = tf.keras.metrics.SparseCategoricalAccuracy()

    test_loss = tf.keras.metrics.Mean()
    test_acc = tf.keras.metrics.SparseCategoricalAccuracy()

    def train_step(images, labels):
        with tf.GradientTape() as tape:
            pred = model(images)
            loss = loss_obj(labels, pred)
        gradients = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))
        train_loss(loss)
        train_acc(labels, pred)

    def test_step(images, labels):
        pred = model(images)
        t_loss = loss_obj(labels, pred)
        test_loss(t_loss)
        test_acc(labels, pred)

    for i in range(5):
        for images, labels in trainset:
            train_step(images, labels)
        for test_imgs, test_labels in testset:
            test_step(test_imgs, test_labels)
        template = 'Epoch {}, Loss: {}, Accuracy: {}, Test Loss: {}, Test Accuracy: {}'
        print(template.format(i + 1,
                              train_loss.result(),
                              train_acc.result() * 100,
                              test_loss.result(),
                              test_acc.result() * 100))
        # Reset the metrics for the next epoch
        train_loss.reset_states()
        train_acc.reset_states()
        test_loss.reset_states()
        test_acc.reset_states()

if __name__ == '__main__':
    train()