从图像转换到txt和从txt读取图片,并使用tensorflow实现epoch无重复迭代

本文介绍了一种从图像文件生成数据集的方法,并详细解释了如何通过TensorFlow从文本文件加载图像数据进行模型训练的过程。包括图像名称的乱序处理、数据集的批量加载以及损失函数的计算。

摘要生成于 C知道 ,由 DeepSeek-R1 满血版支持, 前往体验 >

   闲话少说,从图像转换成txt非常简单,直接附代码,如下所示:

import os
import random

def generate(dir):
    """Write the shuffled image file names under *dir* to dataset.txt.

    Args:
        dir: directory containing the image files to index.

    Side effects:
        Creates/overwrites 'dataset.txt' in the current working directory,
        one file name per line, in random order. Files with a '.txt'
        extension are skipped so an index file is never listed.
    """
    files = os.listdir(dir)
    print('****************')
    print('input :',dir)
    print('start...')
    random.shuffle(files)  # shuffle list so names are written in random order
    # 'with' guarantees the file is closed even if a write fails.
    with open('dataset.txt', 'w') as list_text:
        for fname in files:
            # BUG FIX: os.path.split() returns (head, tail) of a path, so the
            # original comparison fileType[1] == '.txt' compared the whole file
            # name against '.txt' and never skipped anything. os.path.splitext()
            # is the call that yields the extension.
            if os.path.splitext(fname)[1] == '.txt':
                continue
            list_text.write(fname + '\n')
    print('down!')
    print('****************')

if __name__ == '__main__':
    # Build the shuffled file-name index for the Canon ground-truth images.
    # NOTE(review): output path is hard-coded to './dataset.txt' inside
    # generate(), not placed under test_data/canon/.
    generate('test_data/canon/')

上面代码中使用random.shuffle实现文件的乱序输出。代码主要是将文件里的图像名称以乱序的方式存入dataset.txt文件里。
   从txt文件中读取图像数据,并且按照epoch方式进行训练,代码如下:

import os
import tensorflow as tf
import argparse

class DataPipeLine(object):
    """TF1 queue-based input pipeline driven by a txt file of image names.

    The txt file (one image file name per line) is expected to live next to
    two sibling directories, 'iphone' and 'canon', each containing an image
    with every listed name — TODO confirm this layout against the dataset.
    """

    def __init__(self, path):
        # Path of the dataset txt file (e.g. 'test_data/dataset.txt').
        self.path = path

    def produce_one_samples(self):
        """Build graph ops yielding one (input, output) image pair.

        Returns:
            dict with keys 'input' and 'output', each a float32 tensor of
            shape [100, 100, 3] with pixel values scaled to [0, 1].
        """
        dirname = os.path.dirname(self.path)
        # BUG FIX: file.xreadlines() was removed in Python 3; iterating the
        # file object directly is equivalent and works on both 2 and 3.
        with open(self.path, 'r') as fid:
            flist = [line.strip() for line in fid]
        input_files = [os.path.join(dirname, 'iphone', f) for f in flist]
        output_files = [os.path.join(dirname, 'canon', f) for f in flist]
        # shuffle=True with a fixed seed: each epoch visits every element
        # exactly once, in a reproducible random order.
        input_queue, output_queue = tf.train.slice_input_producer(
            [input_files, output_files], shuffle=True,
            seed=1234, num_epochs=None)
        input_file = tf.read_file(input_queue)
        output_file = tf.read_file(output_queue)
        im_input = tf.image.decode_jpeg(input_file, channels=3)
        im_output = tf.image.decode_jpeg(output_file, channels=3)
        sample = {}
        with tf.name_scope('normalize_images'):
            # Scale uint8 pixel values into [0, 1].
            im_input = tf.to_float(im_input) / 255.0
            im_output = tf.to_float(im_output) / 255.0
        # Stack the pair along channels so a single resize handles both and
        # any spatial transform stays aligned between input and output.
        inout = tf.concat([im_input, im_output], axis=2)
        inout.set_shape([None, None, 6])
        inout = tf.image.resize_images(inout, [100, 100])

        sample['input'] = inout[:, :, :3]
        sample['output'] = inout[:, :, 3:]
        return sample


def main(args):
    """Batch image pairs from the txt-driven pipeline and print per-step MSE.

    Runs an endless loop (num_epochs=None upstream), printing the loss each
    step and the accumulated total every `total_batch` steps (one epoch).
    """
    sample = DataPipeLine(args.data_dir).produce_one_samples()
    # tf.train.batch (not shuffle_batch) so each datum is used exactly once
    # per epoch; shuffling already happens in slice_input_producer.
    samples = tf.train.batch(sample,batch_size=args.batch_size,
                                     num_threads=2,
                                     capacity=32)
    # Mean squared error between the paired images, averaged over the batch.
    loss = tf.reduce_sum(tf.pow(samples['input'] - samples['output'], 2)) / (2 * args.batch_size)
    global_step = tf.contrib.framework.get_or_create_global_step()

    # Steps per epoch; 400 is the hard-coded dataset size — TODO confirm.
    total_batch = int(400 / args.batch_size)
    # Supervisor starts the queue runners and manages the session.
    sv = tf.train.Supervisor()
    total_loss = 0
    with sv.managed_session() as sess:
        step = 0
        while True:
            if sv.should_stop():
                print("stopping supervisor")
                break
            try:
                loss_ = sess.run( loss)
                total_loss += loss_
                step += 1
                print("step:%d,loss:%.2f" %(step,loss_))
                # Epoch boundary: report and reset the accumulated loss.
                if step%total_batch == 0:
                    print("%d epochs,total loss:%.2f" %((step/total_batch),total_loss))
                    total_loss = 0
            except tf.errors.AbortedError:
                print("Aborted")
                break
            except KeyboardInterrupt:
                break
        sv.request_stop()



if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--data_dir", default='test_data/dataset.txt',
                        help="The path of input txt")
    # BUG FIX: without type=int, values supplied on the command line arrive
    # as strings and break the arithmetic in main() (e.g. 400 / batch_size
    # and 2 * batch_size). Defaults are unchanged.
    parser.add_argument("--batch_size", type=int, default=40,
                        help="Number Images of each batch")
    parser.add_argument("--epochs", type=int, default=30,
                        help="The number of epochs")
    args = parser.parse_args()
    main(args)

   这里先从txt文件中读取图像名称,再与目录路径拼接得到每张图像的完整路径。DataPipeLine中使用tf.train.slice_input_producer获取图像,其shuffle参数为True,即每次从文件列表中随机取出一张图像作为sample;为了将sample组成samples,main中使用了tf.train.batch。这里不使用tf.train.shuffle_batch,是为了保证每条数据在一个epoch内只被使用一次。sess.run计算的是两幅图像之间的MSE;每个epoch结束时输出total loss,各epoch的total loss相同,说明每个epoch使用的数据集合确实一致(一个epoch的步数按总数据量除以batch_size计算)。

图片转化为字符画。需要先安装PIL。推荐下载PIL包来实现。 * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * - - - - - - - - - - - - - - - * * * * * * * * * * * * * * * * + + + + + + + + + + + + + + + + + + + + * - - - - - - - - - - - - - - - * + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + * * * - - - - - - - - - - - - - - - * * * + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + / / / - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + / / / - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + / / / / / / - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + / / / / / / - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + / / / / / / - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + / / / / / / / / / / / / / / / / / / + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + / / / / / / / / / / / / / / / / / / + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + # # # # # / / / / / / / / / / / / / # # + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + # # # # # / / / / / / / / / / / / / # # + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + # # # # # / / / / / / / / / / / / / # # + + + + + + + + + + + + + + + + + + + + + + + + + + + # # # # # # # # # # # # + / / - - - - - - - / # # # # # # # + + + + + + + + + + + + + + + + + + + + + + # # # # # # # # # # # # + / / - - - - - - - / # # # # # # # + + + + + + + + + + + + + + + + + + + # # # # # # # # # # # # # # # + / / / / / - - / / / # # # # # # # # # # + + + + + + + + + + + + + + + + # # # # # # # # # # # # # # # + / / / / / - - / / / # # # # # # # # # # + + + + + + + + + + + + + + + + # # # # # # # # # # # # # # # + / / / / / - - / / / # # # # # # # # # # + + + + + + + + + + + + + + # # # # # # # # # # # # # # # # # + / / / / / / / / / / # # # # # # # # # # # # * + + + + + + + + + / + # # # # # # # # # # # # # # # # # + / / / / / / / / / / # # # # # # # # # # # # * + + + + + / / / / / + # # # # # # # # # # # # # # # # # + / / / / / / / / / / # # # # # # # # # # # # * / / / / / / / / / / + # # # # # # # # # # # # # # # # # + / / / / / / / / / / # # # # # # # # # # # # * / / / / / / / / / / + # # # # # # # # # # # # # # # # # + / / / / / / / / / / # # # # # # # # # # # # * / / / / / / / / / / + # # # # # # # # # # # # # # # # # + / / / / / / / / / / # # # # # # # # # # # # * / / / / / / / / / / / # # # # # # # # # # # # # # # # # + / / / / / / / / / / # # # # # # # # # # # # * / / / / / / / / / / / # # # # # # # # # # # # # # # # # + / / / / / / / / / / # # # # # # # # # # # # * / / / / /
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值