Training with Random Data
5.1 score2a
Program:
import tensorflow as tf
import random

random.seed()

# Placeholders for one sample's three scores and its target label
x = tf.placeholder(dtype=tf.float32)
yTrain = tf.placeholder(dtype=tf.float32)

# Three trainable weights, passed through softmax so they stay positive and sum to 1
w = tf.Variable(tf.zeros([3]), dtype=tf.float32)
wn = tf.nn.softmax(w)

# Weighted sum of the scores, squashed into (0, 1) by the sigmoid
n1 = wn * x
n2 = tf.reduce_sum(n1)
y = tf.nn.sigmoid(n2)

# Absolute error between the target label and the prediction
loss = tf.abs(yTrain - y)
optimizer = tf.train.RMSPropOptimizer(0.1)
train = optimizer.minimize(loss)

sess = tf.Session()
sess.run(tf.global_variables_initializer())
for i in range(5):
    # One sample with all three scores drawn from [93, 100]
    xData = [int(random.random() * 8 + 93), int(random.random() * 8 + 93), int(random.random() * 8 + 93)]
    xAll = xData[0] * 0.6 + xData[1] * 0.3 + xData[2] * 0.1
    if xAll >= 95:
        yTrainData = 1
    else:
        yTrainData = 0
    result = sess.run([train, x, yTrain, w, n2, y, loss], feed_dict={x: xData, yTrain: yTrainData})
    print(result)

    # A second sample with scores spread over [60, 100]
    xData = [int(random.random() * 41 + 60), int(random.random() * 41 + 60), int(random.random() * 41 + 60)]
    xAll = xData[0] * 0.6 + xData[1] * 0.3 + xData[2] * 0.1
    if xAll >= 95:
        yTrainData = 1
    else:
        yTrainData = 0
    result = sess.run([train, x, yTrain, w, n2, y, loss], feed_dict={x: xData, yTrain: yTrainData})
    print(result)
Results:
[None, array([94., 93., 96.], dtype=float32), array(0., dtype=float32), array([0., 0., 0.], dtype=float32), 94.333336, 1.0, 1.0]
[None, array([93., 60., 99.], dtype=float32), array(0., dtype=float32), array([0., 0., 0.], dtype=float32), 84.0, 1.0, 1.0]
[None, array([ 94., 100., 98.], dtype=float32), array(1., dtype=float32), array([0., 0., 0.], dtype=float32), 97.33334, 1.0, 0.0]
[None, array([68., 73., 83.], dtype=float32), array(0., dtype=float32), array([0., 0., 0.], dtype=float32), 74.66667, 1.0, 1.0]
[None, array([96., 95., 94.], dtype=float32), array(1., dtype=float32), array([0., 0., 0.], dtype=float32), 95.0, 1.0, 0.0]
[None, array([86., 94., 99.], dtype=float32), array(0., dtype=float32), array([0., 0., 0.], dtype=float32), 93.0, 1.0, 1.0]
[None, array([ 93., 100., 97.], dtype=float32), array(1., dtype=float32), array([0., 0., 0.], dtype=float32), 96.66667, 1.0, 0.0]
[None, array([91., 81., 96.], dtype=float32), array(0., dtype=float32), array([0., 0., 0.], dtype=float32), 89.333336, 1.0, 1.0]
[None, array([96., 97., 99.], dtype=float32), array(1., dtype=float32), array([0., 0., 0.], dtype=float32), 97.333336, 1.0, 0.0]
[None, array([92., 90., 88.], dtype=float32), array(0., dtype=float32), array([0., 0., 0.], dtype=float32), 90.0, 1.0, 1.0]
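As the output shows, y is pinned at 1.0 and w never moves away from [0, 0, 0]. With zero weights, softmax yields wn = [1/3, 1/3, 1/3], so n2 is simply the average of the three scores (roughly 75 to 100); at inputs that large the sigmoid is fully saturated and its gradient is essentially zero, leaving RMSProp with nothing to update. The following plain-NumPy sketch (not part of the original program) illustrates the saturation:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

for n2 in [75.0, 90.0, 97.0]:
    y = sigmoid(n2)
    grad = y * (1.0 - y)   # derivative of the sigmoid at n2
    print(n2, y, grad)     # y prints as 1.0 and grad as 0.0 for every value

This is why the next program subtracts a bias b from the weighted sum: it shifts n2 back toward 0, where the sigmoid still has a usable slope.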
Adding a bias b to speed up training
5.2 score2b
Program:
import tensorflow as tf
import random

random.seed()

x = tf.placeholder(dtype=tf.float32)
yTrain = tf.placeholder(dtype=tf.float32)

w = tf.Variable(tf.zeros([3]), dtype=tf.float32)
# New in score2b: a trainable bias, initialized near the score range,
# that is subtracted from the weighted sum so n2 stays close to 0
b = tf.Variable(80, dtype=tf.float32)

wn = tf.nn.softmax(w)
n1 = wn * x
n2 = tf.reduce_sum(n1) - b
y = tf.nn.sigmoid(n2)

loss = tf.abs(yTrain - y)
optimizer = tf.train.RMSPropOptimizer(0.1)
train = optimizer.minimize(loss)

sess = tf.Session()
sess.run(tf.global_variables_initializer())
for i in range(500):
    xData = [int(random.random() * 8 + 93), int(random.random() * 8 + 93), int(random.random() * 8 + 93)]
    xAll = xData[0] * 0.6 + xData[1] * 0.3 + xData[2] * 0.1
    if xAll >= 95:
        yTrainData = 1
    else:
        yTrainData = 0
    result = sess.run([train, x, yTrain, wn, b, n2, y, loss], feed_dict={x: xData, yTrain: yTrainData})
    print(result)

    xData = [int(random.random() * 41 + 60), int(random.random() * 41 + 60), int(random.random() * 41 + 60)]
    xAll = xData[0] * 0.6 + xData[1] * 0.3 + xData[2] * 0.1
    if xAll >= 95:
        yTrainData = 1
    else:
        yTrainData = 0
    result = sess.run([train, x, yTrain, wn, b, n2, y, loss], feed_dict={x: xData, yTrain: yTrainData})
    print(result)
Results:
...
[None, array([94., 62., 86.], dtype=float32), array(0., dtype=float32), array([0.6192946 , 0.25602525, 0.12468012], dtype=float32), 93.66678, -8.857033, 0.00014235664, 0.00014235664]
[None, array([ 99., 94., 100.], dtype=float32), array(1., dtype=float32), array([0.6185921 , 0.25666544, 0.12474249], dtype=float32), 93.666954, 4.1744614, 0.9848496, 0.015150428]
[None, array([65., 61., 67.], dtype=float32), array(0., dtype=float32), array([0.628845 , 0.24586184, 0.12529323], dtype=float32), 93.647736, -29.380592, 0.0, 0.0]
[None, array([100., 97., 93.], dtype=float32), array(1., dtype=float32), array([0.628845 , 0.24586184, 0.12529323], dtype=float32), 93.647736, 4.737625, 0.9913167, 0.008683324]
[None, array([84., 92., 81.], dtype=float32), array(0., dtype=float32), array([0.6364523 , 0.24166633, 0.12188137], dtype=float32), 93.63542, -8.067734, 0.00031339467, 0.00031339467]
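After 500 rounds the normalized weights wn have moved close to the true weights 0.6/0.3/0.1 used to label the data, the bias b has grown from 80 toward the pass threshold, and the loss has become small on both kinds of samples. If needed, the trained graph can also be queried directly; the sketch below is not in the original program, assumes sess, wn, b, x and y from score2b are still in scope, and uses a made-up input newScores:

# Read out the trained parameters
trainedW, trainedB = sess.run([wn, b])
print("learned weights:", trainedW, "learned bias:", trainedB)

# Classify one new set of scores; y above 0.5 means the model predicts a pass
newScores = [95, 96, 94]    # hypothetical input, not from the original text
prediction = sess.run(y, feed_dict={x: newScores})
print("predicted probability of passing:", prediction)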
Generating random training data in batches
5.3 score2c
Program:
import tensorflow as tf
import random
import numpy as np

random.seed()

rowCount = 5

# Pre-allocate the training data: rowCount rows of 3 scores each, plus the labels
xData = np.full(shape=(rowCount, 3), fill_value=0, dtype=np.float32)
yTrainData = np.full(shape=rowCount, fill_value=0, dtype=np.float32)
goodCount = 0

# Loop that generates the random training data
for i in range(rowCount):
    xData[i] = [int(random.random() * 11 + 90), int(random.random() * 11 + 90), int(random.random() * 11 + 90)]
    xAll = xData[i][0] * 0.6 + xData[i][1] * 0.3 + xData[i][2] * 0.1
    if xAll >= 95:
        yTrainData[i] = 1
        goodCount = goodCount + 1
    else:
        yTrainData[i] = 0

print("xData=%s" % xData)
print("yTrainData=%s" % yTrainData)
print("goodCount=%d" % goodCount)
x = tf.placeholder(dtype=tf.float32)
yTrain = tf.placeholder(dtype=tf.float32)
w = tf.Variable(tf.zeros([3]), dtype=tf.float32)
b = tf.Variable(80, dtype=tf.float32)
wn = tf.nn.softmax(w)
n1 = wn * x
n2 = tf.reduce_sum(n1) - b
y = tf.nn.sigmoid(n2)
loss = tf.abs(yTrain - y)
optimizer = tf.train.RMSPropOptimizer(0.1)
train = optimizer.minimize(loss)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# Two passes (epochs) over the five pre-generated rows
for i in range(2):
    for j in range(rowCount):
        result = sess.run([train, x, yTrain, wn, b, n2, y, loss], feed_dict={x: xData[j], yTrain: yTrainData[j]})
        print(result)
Results:
xData=[[94. 93. 97.]
[96. 92. 91.]
[94. 95. 98.]
[96. 97. 93.]
[91. 97. 91.]]
yTrainData=[0. 0. 0. 1. 0.]
goodCount=1
[None, array([94., 93., 97.], dtype=float32), array(0., dtype=float32), array([0.33333334, 0.33333334, 0.33333334], dtype=float32), 80.0, 14.666672, 0.9999995, 0.9999995]
[None, array([96., 92., 91.], dtype=float32), array(0., dtype=float32), array([0.33333334, 0.33333334, 0.33333334], dtype=float32), 80.0, 13.0, 0.99999774, 0.99999774]
[None, array([94., 95., 98.], dtype=float32), array(0., dtype=float32), array([0.33333325, 0.33333337, 0.33333337], dtype=float32), 80.0, 15.666672, 0.9999999, 0.9999999]
[None, array([96., 97., 93.], dtype=float32), array(1., dtype=float32), array([0.33333325, 0.33333337, 0.33333337], dtype=float32), 80.0, 15.333328, 0.99999976, 2.3841858e-07]
[None, array([91., 97., 91.], dtype=float32), array(0., dtype=float32), array([0.33333325, 0.33333337, 0.33333337], dtype=float32), 80.0, 13.0, 0.99999774, 0.99999774]
[None, array([94., 93., 97.], dtype=float32), array(0., dtype=float32), array([0.33333334, 0.33333325, 0.33333343], dtype=float32), 80.0, 14.666672, 0.9999995, 0.9999995]
[None, array([96., 92., 91.], dtype=float32), array(0., dtype=float32), array([0.3333333 , 0.33333325, 0.3333334 ], dtype=float32), 80.0, 13.0, 0.99999774, 0.99999774]
[None, array([94., 95., 98.], dtype=float32), array(0., dtype=float32), array([0.33333322, 0.3333333 , 0.3333335 ], dtype=float32), 80.0, 15.666672, 0.9999999, 0.9999999]
[None, array([96., 97., 93.], dtype=float32), array(1., dtype=float32), array([0.33333322, 0.3333333 , 0.3333335 ], dtype=float32), 80.0, 15.333336, 0.99999976, 2.3841858e-07]
[None, array([91., 97., 91.], dtype=float32), array(0., dtype=float32), array([0.33333322, 0.33333328, 0.33333346], dtype=float32), 80.0, 12.999992, 0.99999774, 0.99999774]
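The per-row Python loop in score2c is easy to read, but the same kind of batch could also be produced in one step with NumPy's vectorized random functions. The sketch below is illustrative only; the helper name makeRandomData and its parameters are assumptions, not part of the original program:

import numpy as np

def makeRandomData(rowCount, low=90, high=100):
    # Integer scores drawn uniformly from [low, high], shape (rowCount, 3)
    xData = np.random.randint(low, high + 1, size=(rowCount, 3)).astype(np.float32)
    # Weighted total with the "true" weights 0.6 / 0.3 / 0.1 used throughout this chapter
    xAll = xData @ np.array([0.6, 0.3, 0.1], dtype=np.float32)
    # Label is 1 when the weighted total reaches the pass mark of 95
    yTrainData = (xAll >= 95).astype(np.float32)
    return xData, yTrainData

xData, yTrainData = makeRandomData(5)
print(xData)
print(yTrainData)
print("goodCount=%d" % int(yTrainData.sum()))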