TensorFlow

Translated from Chapter 14 of Python Machine Learning. The author generates a random toy dataset and trains a linear regression model with TensorFlow; along the way, the example covers building the model graph, setting the model parameters, saving the trained model, and visualizing the corresponding steps.

# create a simple regression model

>>> import tensorflow as tf
>>> import numpy as np
>>>
>>> g = tf.Graph()
>>>
>>> with g.as_default():
...     tf.set_random_seed(123)
...     ## placeholders
...     tf_x = tf.placeholder(shape=(None),
...                           dtype=tf.float32,
...                           name='tf_x')
...     tf_y = tf.placeholder(shape=(None),
...                           dtype=tf.float32,
...                           name='tf_y')
...
...     ## define the variables (model parameters)
...     weight = tf.Variable(
...         tf.random_normal(
...             shape=(1, 1),
...             stddev=0.25),
...         name='weight')
...     bias = tf.Variable(0.0, name='bias')
...
...     ## build the model
...     y_hat = tf.add(weight * tf_x, bias,
...                    name='y_hat')
...
...     ## compute the cost (MSE)
...     cost = tf.reduce_mean(tf.square(tf_y - y_hat),
...                           name='cost')
...
...     ## train the model
...     optim = tf.train.GradientDescentOptimizer(
...         learning_rate=0.001)
...     train_op = optim.minimize(cost, name='train_op')
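
The placeholders, the output, and the cost are all given explicit names ('tf_x', 'tf_y', 'y_hat', 'cost') because the model is later restored and run purely by those names. As an optional sanity check (a minimal sketch, not part of the book's listing), the named operations can be listed straight from the graph object:

>>> ## verify the op names that are later referenced as
>>> ## 'tf_x:0', 'y_hat:0', and 'cost:0'
>>> print([op.name for op in g.get_operations()
...        if op.name in ('tf_x', 'tf_y', 'y_hat', 'cost')])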

## create a random toy dataset for regression
>>>
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> np.random.seed(0)
>>>
>>> def make_random_data():
...     x = np.random.uniform(low=-2, high=4, size=200)
...     y = []
...     for t in x:
...         ## heteroscedastic noise: the std grows with t**2
...         r = np.random.normal(loc=0.0,
...                              scale=(0.5 + t*t/3),
...                              size=None)
...         y.append(r)
...     return x, 1.726*x - 0.84 + np.array(y)
>>>
>>>
>>> x, y = make_random_data()
>>>
>>> plt.plot(x, y, 'o')
>>> plt.show()

## train/test splits
>>> x_train, y_train = x[:100], y[:100]
>>> x_test, y_test = x[100:], y[100:]
>>> n_epochs = 500
>>> training_costs = []
>>> with tf.Session(graph=g) as sess:
...     sess.run(tf.global_variables_initializer())
...
...     ## train the model for n_epochs
...     for e in range(n_epochs):
...         c, _ = sess.run([cost, train_op],
...                         feed_dict={tf_x: x_train,
...                                    tf_y: y_train})
...         training_costs.append(c)
...         if not e % 50:
...             print('Epoch %4d: %.4f' % (e, c))


>>> plt.plot(training_costs)
>>> plt.show()

# add a saver node to graph g to save the model

>>> with g.as_default():
...     saver = tf.train.Saver()

# retrain the model to store the variables

>>> n_epochs = 500
>>> training_costs = []
>>> with tf.Session(graph=g) as sess:
...     sess.run(tf.global_variables_initializer())
...
...     ## train the model for n_epochs
...     for e in range(n_epochs):
...         c, _ = sess.run([cost, train_op],
...                         feed_dict={tf_x: x_train,
...                                    tf_y: y_train})
...         training_costs.append(c)
...         if not e % 50:
...             print('Epoch %4d: %.4f' % (e, c))
...
...     saver.save(sess, './trained-model')
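
With the default settings, saver.save stores the variable values and the graph structure as a small set of files sharing the './trained-model' prefix (including the .meta file used below), and it updates a checkpoint file in the same directory. A quick way to confirm what was written (a small sketch, not from the book) is to list the matching files:

>>> ## list the files produced by saver.save under the 'trained-model' prefix
>>> import os
>>> sorted(f for f in os.listdir('.')
...        if f.startswith('trained-model') or f == 'checkpoint')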

# rebuild the graph by importing it from the meta file, then use the new_saver object to restore the model parameters

>>> import tensorflow as tf
>>> import numpy as np
>>>
>>> g2 = tf.Graph()
>>> with tf.Session(graph=g2) as sess:
...     new_saver = tf.train.import_meta_graph(
...         './trained-model.meta')
...     new_saver.restore(sess, './trained-model')
...
...     y_pred = sess.run('y_hat:0',
...                       feed_dict={'tf_x:0': x_test})
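
The restored graph also exposes the trained parameters under the names given when the variables were created. A minimal sketch following the same restore pattern (it assumes the tensor names 'weight:0' and 'bias:0' produced by the name='weight' and name='bias' arguments above) reads back the fitted slope and intercept:

>>> g3 = tf.Graph()
>>> with tf.Session(graph=g3) as sess:
...     new_saver = tf.train.import_meta_graph(
...         './trained-model.meta')
...     new_saver.restore(sess, './trained-model')
...
...     ## fetch the learned parameters by tensor name
...     w_val, b_val = sess.run(['weight:0', 'bias:0'])
...     print('weight: %.4f  bias: %.4f'
...           % (w_val.item(), b_val))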

# visualize the prediction

>>> import matplotlib.pyplot as plt
>>>
>>> x_arr = np.arange(-2, 4, 0.1)
>>>
>>> g2 = tf.Graph()
>>> with tf.Session(graph=g2) as sess:
...     new_saver = tf.train.import_meta_graph(
...         './trained-model.meta')
...     new_saver.restore(sess, './trained-model')
...
...     y_arr = sess.run('y_hat:0',
...                      feed_dict={'tf_x:0': x_arr})
>>>
>>> plt.figure()
>>> plt.plot(x_train, y_train, 'bo')
>>> plt.plot(x_test, y_test, 'bo', alpha=0.3)
>>> plt.plot(x_arr, y_arr.T[:, 0], '-r', lw=3)
>>> plt.show()
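
Because the cost node was also saved with a name, the same restored graph can report the mean squared error on the held-out split. This is a small sketch that reuses the graph g2 and saver imported above and simply feeds the test data into the 'cost:0' tensor:

>>> with tf.Session(graph=g2) as sess:
...     new_saver.restore(sess, './trained-model')
...     test_mse = sess.run('cost:0',
...                         feed_dict={'tf_x:0': x_test,
...                                    'tf_y:0': y_test})
...     print('Test MSE: %.4f' % test_mse)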
