供给数据(Feeding): 在TensorFlow程序运行的每一步, 让Python代码来供给数据。
def extract_data(filename, num_images, image_size=None, pixel_depth=None):
    """Extract images from a gzipped MNIST-style (IDX) file into a 4D tensor.

    Returns a float32 numpy array of shape
    [image index, y, x, channels], with pixel values rescaled from
    [0, pixel_depth] down to [-0.5, 0.5].

    Args:
        filename: path to the gzip file; the payload is assumed to be a
            16-byte IDX header followed by one unsigned byte per pixel.
        num_images: number of images to read from the stream.
        image_size: side length of each square, single-channel image.
            Defaults to the module-level IMAGE_SIZE (backward compatible).
        pixel_depth: maximum pixel value (e.g. 255). Defaults to the
            module-level PIXEL_DEPTH (backward compatible).
    """
    if image_size is None:
        image_size = IMAGE_SIZE
    if pixel_depth is None:
        pixel_depth = PIXEL_DEPTH
    print('Extracting', filename)
    with gzip.open(filename) as bytestream:
        # Skip the 16-byte IDX header (magic number, count, rows, cols).
        bytestream.read(16)
        buf = bytestream.read(image_size * image_size * num_images)
    data = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.float32)
    # Center around zero: [0, pixel_depth] -> [-0.5, 0.5].
    data = (data - (pixel_depth / 2.0)) / pixel_depth
    # Reshape flat pixels to (N, height, width, channels);
    # grayscale images, so the channel dimension is 1.
    return data.reshape(num_images, image_size, image_size, 1)
# Index of the first sample of this step's minibatch; the modulo wraps
# around the training set so training can run for arbitrarily many steps.
# NOTE(review): assumes train_size > BATCH_SIZE, otherwise the modulus
# is <= 0 — confirm against where train_size is set.
offset = (step * BATCH_SIZE) % (train_size - BATCH_SIZE)
# Slice one minibatch out of the 4D training tensor for this step.
batch_data = train_data[offset:(offset + BATCH_SIZE), :, :, :]
# 设计 placeholder: placeholder 节点的唯一意图就是提供数据供给 (feeding) 的方法。
# placeholder 节点被声明的时候是未初始化的, 也不包含数据。
# shape: fixes the structure of the tensor this placeholder will accept —
# here a minibatch of images: (batch, height, width, channels).
train_data_node = tf.placeholder( tf.float32, shape=(BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS))
# Map each placeholder to this step's concrete numpy data.
# NOTE(review): train_labels_node / batch_labels are defined elsewhere in
# the file — presumably the label placeholder and the matching label slice.
feed_dict = {train_data_node: batch_data, train_labels_node: batch_labels}
# Run the graph and fetch some of the nodes: one optimizer step plus the
# tensors we want to inspect (loss, learning rate, predictions).
# Use a context manager so the Session and its resources are released;
# the bare `tf.Session().run(...)` form leaks the session.
with tf.Session() as sess:
    sess.run(
        [optimizer, loss, learning_rate, train_prediction],  # nodes to fetch
        feed_dict=feed_dict)  # feed the placeholders with this step's data
# Its value must be fed using the feed_dict optional argument to
# Session.run(), Tensor.eval(), or Operation.run().