处理单张图片:
import glob
import os

import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
def read_single_image(image_path):
    """Read one JPEG file from disk and return it as a decoded image tensor.

    Args:
        image_path: path string pointing at a JPEG file.

    Returns:
        A uint8 image tensor of shape [height, width, channels].
    """
    raw_bytes = tf.read_file(image_path)
    print('image_file类型:', type(raw_bytes))
    # Decode the JPEG bytes into a uint8 [h, w, c] tensor.
    decoded_image = tf.image.decode_jpeg(raw_bytes)
    print('image_data类型:', type(decoded_image))
    return decoded_image
def main():
    """Decode a single JPEG inside a session and display it with matplotlib."""
    with tf.name_scope('read_single_image'):
        image = read_single_image('./images/image.jpg')
    with tf.Session() as sess:
        # Evaluate the graph to get a concrete numpy array.
        image_numpy = sess.run(image)
        print(image_numpy.shape)
        plt.imshow(image_numpy)
        plt.show()


if __name__ == '__main__':
    main()
批量处理:
# Collect the paths of all JPEG images under the image directory.
img_dir = './images/'
image_path = glob.glob(img_dir + '*.jpg')
# BUG FIX: the original pattern '*jpeg' (no dot) also matched file names that
# merely end in "jpeg" (e.g. 'xjpeg'); require the '.jpeg' extension.
image_path += glob.glob(img_dir + '*.jpeg')
'''
Pipeline plan:
1. Build a queue of image files
2. Build a file reader
3. Read the file data
4. Preprocess the image data
5. Batch the images
'''
def read_images(filelist):
    """Build a TF 1.x queue-based pipeline that reads, resizes and batches JPEGs.

    Args:
        filelist: list of paths to JPEG files.

    Returns:
        A float32 tensor of shape [4, 100, 100, 3] holding one batch of
        resized images.
    """
    # 1. Build a queue of the image file names.
    filename_queue = tf.train.string_input_producer(filelist)
    # 2. WholeFileReader reads an entire file per step; read() returns a
    #    (filename, contents) pair.
    reader = tf.WholeFileReader()
    key, value = reader.read(filename_queue)
    # 3. Decode the raw bytes.
    # BUG FIX: force 3 channels so the static shape declared below is always
    # true; without it a grayscale JPEG decodes to 1 channel and breaks
    # tf.train.batch at runtime.
    images = tf.image.decode_jpeg(value, channels=3)
    # 4. Resize to a fixed size (resize_images returns float32).
    images_resize = tf.image.resize_images(images, [100, 100])
    # tf.train.batch requires a fully-defined static shape.
    images_resize.set_shape([100, 100, 3])
    # 5. Group single images into batches of 4.
    images_resize_batch = tf.train.batch(
        [images_resize], batch_size=4, num_threads=1, capacity=4)
    return images_resize_batch
def main():
    """Run the queue-based pipeline and print one batch of images."""
    # Turn the relative paths into absolute ones.
    filelist = [os.path.join(os.getcwd(), file) for file in image_path]
    image_batch = read_images(filelist)
    print(image_batch.shape)
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        # Queue runners must be started explicitly, otherwise sess.run on
        # the batch op blocks forever waiting for the queue to fill.
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess, coord)
        img_data = sess.run(image_batch)
        print(img_data)
        coord.request_stop()
        coord.join(threads)


# BUG FIX: the original guard line was missing its trailing colon
# (a SyntaxError that prevented the whole script from running).
if __name__ == '__main__':
    main()
还有一种简单的方法:使用tf.data这个API,关于API的详细内容参考博客(https://blog.youkuaiyun.com/qq_34914551/article/details/96834647)
# Collect the paths of all JPEG images under the image directory.
img_dir = './images/'
image_path = glob.glob(img_dir + '*.jpg')
# BUG FIX: the original pattern '*jpeg' (no dot) also matched file names that
# merely end in "jpeg" (e.g. 'xjpeg'); require the '.jpeg' extension.
image_path += glob.glob(img_dir + '*.jpeg')
'''
Pipeline plan:
1. Gather all image paths
2. Write the preprocessing code
3. Define the data generator
'''
def pre_image(filename):
    """Load the JPEG at `filename`, decode it to 3 channels, resize to 100x100.

    Args:
        filename: scalar string tensor with the image path.

    Returns:
        A float32 image tensor of shape [100, 100, 3].
    """
    raw = tf.read_file(filename)
    decoded = tf.image.decode_jpeg(raw, channels=3)
    return tf.image.resize_images(decoded, [100, 100])
def data_generator(batchsize, shuffle=True):
    """Iterate over all collected images with the tf.data API, printing batches.

    Args:
        batchsize: number of images per batch.
        shuffle: whether to shuffle the sample order before batching.
    """
    with tf.Session() as sess:
        # BUG FIX: from_tensor_slices is a classmethod; tf.data.Dataset()
        # cannot be instantiated directly (Dataset is abstract).
        dataset = tf.data.Dataset.from_tensor_slices(image_path)
        # BUG FIX: shuffle BEFORE map/batch so individual samples are
        # shuffled; the original shuffled after batch(), which only
        # reordered whole batches.
        if shuffle:
            dataset = dataset.shuffle(buffer_size=4)
        dataset = dataset.map(pre_image)
        dataset = dataset.batch(batchsize)
        # One-shot iterator needs no explicit initialization.
        iterator = dataset.make_one_shot_iterator()
        batch_image = iterator.get_next()
        while True:
            try:
                batch_data = sess.run(batch_image)
                print(batch_data)
                print(batch_data.shape)
            except tf.errors.OutOfRangeError:
                # The dataset is exhausted after one pass.
                print("iterator done")
                break


if __name__ == '__main__':
    data_generator(4)