Learning TensorFlow (notes on 莫烦/Morvan's tutorials)

This post walks through the basics of TensorFlow, including variables, sessions, and saving models, then builds up to multilayer perceptrons, convolutional neural networks, and recurrent neural networks, and also touches on dropout, batch normalization, autoencoders, transfer learning, and visualizing gradient descent.


1. Basic structure

import tensorflow as tf
import numpy as np

x_data = np.random.rand(100).astype(np.float32)  # generate the input data x_data
y_data = x_data * 0.1 + 0.3  # generate the labels y_data

Weights = tf.Variable(tf.random_uniform([1], -1.0, 1.0))  # random init, shape [1], values in [-1, 1)
biases = tf.Variable(tf.zeros([1]))  # initialized to 0
y = Weights * x_data + biases

loss = tf.reduce_mean(tf.square(y - y_data))
optimizer = tf.train.GradientDescentOptimizer(0.5)  # learning rate 0.5
train = optimizer.minimize(loss)
init = tf.global_variables_initializer()  # replaces the deprecated tf.initialize_all_variables()

with tf.Session() as sess:
    sess.run(init)  # run the initializer; easy to forget but essential
    for step in range(240):
        sess.run(train)
        if step % 20 == 0:
            print(step, sess.run(Weights), sess.run(biases))
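
Within the 240 steps, Weights converges to about 0.1 and biases to about 0.3, recovering the parameters used to generate y_data.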

2. Variables

import tensorflow as tf

state = tf.Variable(0, name='counter')
print(state.name)
one = tf.constant(1)
new_value = tf.add(state, one)
update = tf.assign(state, new_value)  # assign new_value to state
init = tf.global_variables_initializer()  # variables must be initialized before use

with tf.Session() as sess:
    sess.run(init)
    for _ in range(3):
        sess.run(update)
        print(sess.run(state))
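
Each sess.run(update) increments state by one, so the loop prints 1, 2, 3.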

3. Session

import tensorflow as tf

matrix1 = tf.constant([[3, 3]])
matrix2 = tf.constant([[2], [2]])
product = tf.matmul(matrix1, matrix2)  # matrix product, equivalent to np.dot(matrix1, matrix2)

# method 1: create the session explicitly and close it manually
# sess = tf.Session()
# result = sess.run(product)
# print(result)
# sess.close()

# method 2: a with-block closes the session automatically
with tf.Session() as sess:
    result2 = sess.run(product)
    print(result2)
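
Both methods print the same result: [[3, 3]] times [[2], [2]] is [[12]].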

4. Saver

import tensorflow as tf
import numpy as np

# The block below was run once to create the checkpoint, then commented out:
# W = tf.Variable([[1, 2, 3], [3, 4, 5]], dtype=tf.float32, name='weights')
# b = tf.Variable([[1, 2, 3]], dtype=tf.float32, name='biases')
#
# init = tf.initialize_all_variables()
# saver = tf.train.Saver()
#
# with tf.Session() as sess:
#     sess.run(init)
#     save_path = saver.save(sess, "my_net/save_net.ckpt")
#     print("Save to path:", save_path)


W = tf.Variable(np.arange(6).reshape((2, 3)), dtype=tf.float32, name="weights")  # must match the saved name, shape and dtype
b = tf.Variable(np.arange(3).reshape((1, 3)), dtype=tf.float32, name="biases")
saver = tf.train.Saver()    # the Saver saves and restores variables
with tf.Session() as sess:
    saver.restore(sess, "my_net/save_net.ckpt")
    print("weights:", sess.run(W))
    print("biases:", sess.run(b))

5. MLP

import tensorflow as tf
import numpy as np


def add_layer(inputs, in_size, out_size, activation_function=None):
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))  # weight matrix of shape [in_size, out_size], randomly initialized
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)  # biases of shape [1, out_size], initialized to 0.1
    Wx_plus_b = tf.matmul(inputs, Weights) + biases
    if activation_function is None:
        outputs = Wx_plus_b
    else:
        outputs = activation_function(Wx_plus_b)  # apply the activation function
    return outputs


x_data = np.linspace(-1, 1, 300)[:, np.newaxis]  # 300 points in [-1, 1], shaped (300, 1)
noise = np.random.normal(0, 0.05, x_data.shape)  # Gaussian noise, std 0.05
y_data = np.square(x_data) - 0.5 + noise

xs = tf.placeholder(tf.float32, [None, 1])
ys = tf.placeholder(tf.float32, [None, 1])

l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)  # hidden layer
prediction = add_layer(l1, 10, 1, activation_function=None)  # output layer

loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction), reduction_indices=[1]))  # sum the squared error over each row, then average (reduction_indices is the old name for axis)

train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)  # minimize the loss with learning rate 0.1

init = tf.global_variables_initializer()  # initialize all variables

with tf.Session() as sess:
    sess.run(init)
    for i in range(1000):
        sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
        if i % 50 == 0:
            print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))
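
The printed loss drops quickly and then flattens out near the noise floor: the noise has standard deviation 0.05, so the mean squared error cannot fall much below 0.05² = 0.0025.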

6. TensorBoard

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt


# Usage:
# 1. cd to the folder that contains logs (avoid Chinese characters in the path)
# 2. run: tensorboard --logdir=logs
# 3. open the address TensorBoard prints in a browser (typically http://localhost:6006)
def add_layer(inputs, in_size, out_size, n_layer, activation_function=None):
    layer_name = 'layer%s' % n_layer
    with tf.name_scope(layer_name):
        with tf.name_scope('weights'):
            Weights = tf.Variable(tf.random_normal([in_size, out_size]))
            tf.summary.histogram(layer_name + '/weight', Weights)
        with tf.name_scope('biases'):
            biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
            tf.summary.histogram(layer_name + '/biases', biases)
        with tf.name_scope('Wx_plus_b'):
            Wx_plus_b = tf.matmul(inputs, Weights) + biases
            tf.summary.histogram(layer_name + '/Wx_plus_b', Wx_plus_b)
        if activation_function is None:
            outputs = Wx_plus_b
        else:
            outputs = activation_function(Wx_plus_b)
        tf.summary.histogram(layer_name + '/outputs', outputs)  # record outputs for every layer, with or without activation
        return outputs


x_data = np.linspace(-1, 1, 300)[:, np.newaxis]
noise = np.random.normal(0, 0.05, x_data.shape)
y_data = np.square(x_data) - 0.5 + noise

with tf.name_scope('inputs'):
    xs = tf.placeholder(tf.float32, [None, 1], name='x_input')
    ys = tf.placeholder(tf.float32, [None, 1], name='y_input')

l1 = add_layer(xs, 1, 10, n_layer=1, activation_function=tf.nn.relu)
prediction = add_layer(l1, 10, 1, n_layer=2, activation_function=None)

with tf.name_scope('loss'):
    loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction), reduction_indices=[1]))
    tf.summary.scalar('loss', loss)

with tf.name_scope('train'):
    train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

with tf.Session() as sess:
    merged = tf.summary.merge_all()  # merge all summary ops into a single op
    writer = tf.summary.FileWriter("logs/", tf.get_default_graph())
    sess.run(tf.global_variables_initializer())

    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    ax.scatter(x_data, y_data)
    plt.ion()
    plt.show()

    for i in range(1000):
        sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
        if i % 50 == 0:
            result = sess.run(merged, feed_dict={xs: x_data, ys: y_data})
            print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))
            writer.add_summary(result, i)
            try:
                ax.lines.remove(lines[0])  # remove the previous fit line, if one exists
            except Exception:
                pass
            prediction_value = sess.run(prediction, feed_dict={xs: x_data})
            lines = ax.plot(x_data, prediction_value, 'r-', lw=5)
            plt.pause(0.1)

7. Scope

from __future__ import print_function
import tensorflow as tf

with tf.name_scope("a_name_scope"):
    initializer = tf.constant_initializer(value=1)
    var1 = tf.get_variable(name='var1', shape=[1], dtype=tf.float32, initializer=initializer)
    var2 = tf.Variable(name='var2', initial_value=[2], dtype=tf.float32)
    var21 = tf.Variable(name='var2', initial_value=[2.1], dtype=tf.float32)
    var22 = tf.Variable(name='var2', initial_value=[2.2], dtype=tf.float32)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(var1.name)  # var1:0
    print(sess.run(var1))  # [ 1.]
    print(var2.name)  # a_name_scope/var2:0
    print(sess.run(var2))  # [ 2.]
    print(var21.name)  # a_name_scope/var2_1:0
    print(sess.run(var21))  # [ 2.0999999]
    print(var22.name)  # a_name_scope/var2_2:0
    print(sess.run(var22))  # [ 2.20000005]

with tf.variable_scope("a_variable_scope") as scope:
    initializer = tf.constant_initializer(value=3)
    var3 = tf.get_variable(name='var3', shape=[1], dtype=tf.float32, initializer=initializer)
    var4 = tf.Variable(name='var4', initial_value=[4], dtype=tf.float32)
    var4_reuse = tf.Variable(name='var4', initial_value=[4], dtype=tf.float32)
    scope.reuse_variables()  # from here on, tf.get_variable reuses existing variables instead of creating new ones
    var3_reuse = tf.get_variable(name='var3')

with tf.Session() as sess:
    # tf.initialize_all_variables() is no longer valid as of
    # 2017-03-02 when using tensorflow >= 0.12
    if int((tf.__version__).split('.')[1]) < 12 and int((tf.__version__).split('.')[0]) < 1:
        init = tf.initialize_all_variables()
    else:
        init = tf.global_variables_initializer()
    sess.run(init)
    print(var3.name)  # a_variable_scope/var3:0
    print(sess.run(var3))  # [ 3.]
    print(var4.name)  # a_variable_scope/var4:0
    print(sess.run(var4))  # [ 4.]
    print(var4_reuse.name)  # a_variable_scope/var4_1:0
    print(sess.run(var4_reuse))  # [ 4.]
    print(var3_reuse.name)  # a_variable_scope/var3:0
    print(sess.run(var3_reuse))  # [ 3.]
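
The output shows the key difference between the two scopes: tf.name_scope prefixes variables created with tf.Variable but is ignored by tf.get_variable (var1 keeps its bare name), while tf.variable_scope prefixes both. Note also that calling tf.Variable repeatedly with the same name never shares anything; TensorFlow just uniquifies the names (var2_1, var2_2, var4_1). Actual sharing requires tf.get_variable combined with scope.reuse_variables().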

8. Classification

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets('MNIST_data', one_hot=True)


def add_layer(inputs, in_size, out_size, activation_function=None):
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    Wx_plus_b = tf.matmul(inputs, Weights) + biases
    if activation_function is None:
        outputs = Wx_plus_b
    else:
        outputs = activation_function(Wx_plus_b)
    return outputs


def compute_accuracy(v_xs, v_ys):
    global prediction
    y_pre = sess.run(prediction, feed_dict={xs: v_xs})  # run the network to get predictions
    correct_prediction = tf.equal(tf.argmax(y_pre, 1), tf.argmax(v_ys, 1))  # compare predicted and true classes
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))  # tf.cast converts the booleans to floats, so the mean is the accuracy
    result = sess.run(accuracy, feed_dict={xs: v_xs, ys: v_ys})  # evaluate the accuracy
    return result


xs = tf.placeholder(tf.float32, [None, 784])
ys = tf.placeholder(tf.float32, [None, 10])

prediction = add_layer(xs, 784, 10, activation_function=tf.nn.softmax)
cross_entropy = tf.reduce_mean(-tf.reduce_sum(ys * tf.log(prediction), reduction_indices=[1]))  # cross-entropy loss
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

sess = tf.Session()
sess.run(tf.global_variables_initializer())

for i in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={xs: batch_xs, ys: batch_ys})
    if i % 50 == 0:
        print(compute_accuracy(mnist.test.images, mnist.test.labels))
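
Beware that compute_accuracy builds fresh equal/cast/reduce_mean ops on every call, so the graph grows slowly during training; that is acceptable in a tutorial, but in real code those ops should be created once. The test accuracy of this single softmax layer typically ends up somewhere around 0.87 after the 1000 batches.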

9. CNN

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
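
The original post is cut off at this point. As a placeholder, the sketch below shows a typical TF 1.x MNIST CNN in the same style as the sections above (two 5x5 conv + 2x2 max-pool stages, a fully connected layer, and a softmax output trained with Adam). It is a reconstruction under those assumptions, not the author's original code.

def weight_variable(shape):
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))


def bias_variable(shape):
    return tf.Variable(tf.constant(0.1, shape=shape))


xs = tf.placeholder(tf.float32, [None, 784])
ys = tf.placeholder(tf.float32, [None, 10])
x_image = tf.reshape(xs, [-1, 28, 28, 1])  # flat 784 pixels -> 28x28 image with 1 channel

# conv stage 1: 5x5 kernels, 1 -> 32 channels, then 2x2 max pooling (28x28 -> 14x14)
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(tf.nn.conv2d(x_image, W_conv1, strides=[1, 1, 1, 1], padding='SAME') + b_conv1)
h_pool1 = tf.nn.max_pool(h_conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

# conv stage 2: 5x5 kernels, 32 -> 64 channels, then 2x2 max pooling (14x14 -> 7x7)
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(tf.nn.conv2d(h_pool1, W_conv2, strides=[1, 1, 1, 1], padding='SAME') + b_conv2)
h_pool2 = tf.nn.max_pool(h_conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

# fully connected layer on the flattened 7x7x64 features, then a softmax output layer
h_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
W_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])
h_fc1 = tf.nn.relu(tf.matmul(h_flat, W_fc1) + b_fc1)
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
prediction = tf.nn.softmax(tf.matmul(h_fc1, W_fc2) + b_fc2)

cross_entropy = tf.reduce_mean(-tf.reduce_sum(ys * tf.log(prediction), reduction_indices=[1]))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(1000):
        batch_xs, batch_ys = mnist.train.next_batch(100)
        sess.run(train_step, feed_dict={xs: batch_xs, ys: batch_ys})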