Linear regression with gradient descent
import tensorflow as tf
# Trainable parameters, randomly initialized
W = tf.Variable(tf.random_normal([1]), name='weight')
b = tf.Variable(tf.random_normal([1]), name='bias')
# Placeholders for the training data; shape=[None] allows any number of samples
X = tf.placeholder(tf.float32, shape=[None])
Y = tf.placeholder(tf.float32, shape=[None])
hypothesis = X * W + b  # linear model
cost = tf.reduce_mean(tf.square(hypothesis - Y))  # mean squared error
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
train = optimizer.minimize(cost)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for step in range(2001):
    # sess.run(train)  # simplest form: run only the train op
    # The trailing '_' below is required: it receives the (None) result of train
    cost_val, W_val, b_val, _ = sess.run([cost, W, b, train],
                                         feed_dict={X: [1, 2, 3], Y: [1, 2, 3]})
    if step % 20 == 0:
        print(step, cost_val)
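Since the training data lies exactly on y = x, W should converge toward 1 and b toward 0. A quick sketch of using the fitted model for prediction (the feed values [5, 2.5] are my own illustration), run after the loop in the same session:

# Predict y for unseen inputs with the learned parameters
print(sess.run(hypothesis, feed_dict={X: [5, 2.5]}))  # expect roughly [5.0, 2.5]
print(sess.run([W, b]))                               # expect W ~ [1.0], b ~ [0.0]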
Softmax classification
import tensorflow as tf
x_data = [[1, 2, 1, 1], [2, 1, 3, 2], [3, 1, 3, 4], [4, 1, 5, 5], [1, 7, 5, 5], [1, 2, 5, 6], [1, 6, 6, 6], [1, 7, 7, 7]]
y_data = [[0, 0, 1], [0, 0, 1], [0, 0, 1], [0, 1, 0], [0, 1, 0], [0, 1, 0], [1, 0, 0], [1, 0, 0]]
X = tf.placeholder("float", [None, 4])  # 2-D tensor (a matrix); None lets the number of samples vary
Y = tf.placeholder("float", [None, 3])
nb_classes = 3  # number of classes to recognize
W = tf.Variable(tf.random_normal([4, nb_classes]), name="weight")
b = tf.Variable(tf.random_normal([nb_classes]), name="bias")  # note: one bias per class
hypothesis = tf.nn.softmax(tf.matmul(X, W) + b)
cost = tf.reduce_mean(-tf.reduce_sum(Y * tf.log(hypothesis), axis=1))  # cross-entropy
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(cost)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(2001):
        sess.run(optimizer, feed_dict={X: x_data, Y: y_data})
        if step % 200 == 0:
            print(step, sess.run(cost, feed_dict={X: x_data, Y: y_data}))
    # Classify a new sample
    a = sess.run(hypothesis, feed_dict={X: [[1, 11, 7, 9]]})
    print(a, sess.run(tf.argmax(a, 1)))
Simply put, tf.argmax returns the index of the largest value along the given axis.
Output: [[ 1.38904958e-03 9.98601854e-01 9.06129117e-06]] [1]
Here 1 is the index of the largest element, i.e., the predicted class.
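One caveat: the hand-rolled cross-entropy above applies tf.log directly to the softmax output, which can produce -inf when a probability underflows to 0. A sketch of a numerically safer alternative (my substitution, not the original code), using TF 1.x's built-in tf.nn.softmax_cross_entropy_with_logits on the raw logits:

# Keep the raw logits separate; the op fuses softmax and log internally
logits = tf.matmul(X, W) + b
hypothesis = tf.nn.softmax(logits)
cost = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=Y, logits=logits))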
Softmax for MNIST handwritten digit recognition
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data/', one_hot=True)
x = tf.placeholder("float", [None, 784])
y_ = tf.placeholder("float", [None, 10])
nb_classes = 10
W = tf.Variable(tf.random_normal([784, nb_classes]), name="weight")
b = tf.Variable(tf.random_normal([nb_classes]), name="bias")
y = tf.nn.softmax(tf.matmul(x, W) + b)
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), axis=1))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.2).minimize(cross_entropy)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(2000):
        batch_xs, batch_ys = mnist.train.next_batch(100)  # mini-batch of 100 images
        sess.run(optimizer, feed_dict={x: batch_xs, y_: batch_ys})
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
    # Note: accuracy is evaluated on the held-out test set, not the training data
    print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
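To sanity-check individual predictions, something like the following could be added inside the same with tf.Session() block (a sketch; the random index r is my own addition):

import random
# Pick one random test image and compare its label with the model's prediction
r = random.randint(0, mnist.test.num_examples - 1)
print("label:", sess.run(tf.argmax(mnist.test.labels[r:r + 1], 1)))
print("prediction:", sess.run(tf.argmax(y, 1),
                              feed_dict={x: mnist.test.images[r:r + 1]}))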
The download and import of the MNIST dataset can be done with the code below.
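A minimal sketch, assuming TensorFlow 1.x, whose tutorials package bundles the input_data module:

from tensorflow.examples.tutorials.mnist import input_data

# On first run this downloads the four MNIST archives into MNIST_data/,
# then exposes them as DataSet objects with one-hot encoded labels
mnist = input_data.read_data_sets('MNIST_data/', one_hot=True)
print(mnist.train.num_examples, mnist.test.num_examples)  # 55000 10000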
See also a very good tutorial on building a CNN.