PSP笔记(1)

本文介绍了一种使用TensorFlow加载MIT场景解析数据集的方法,包括数据下载、解压、预处理及创建图像列表等步骤,适用于场景理解与图像分割任务。

摘要生成于 C知道 ,由 DeepSeek-R1 满血版支持, 前往体验 >

from __future__ import print_function
import tensorflow as tf
import numpy as np
import os
import TensorflowUtils as utils
import read_MITSceneParsingData as scene_parsing
import datetime
import BatchDatsetReader as dataset

import random
from six.moves import cPickle as pickle
from tensorflow.python.platform import gfile
import glob


from six.moves import xrange






# TF 1.x command-line flags (tf.flags). NOTE(review): both flags are labelled
# "path to dataset" — image_dir points at the extracted ADE20K folder, data_dir
# at its parent; consider distinct help strings. Windows-style separators
# ("\\") are hard-coded — presumably this was developed on Windows; verify
# before running on POSIX.
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_string("data_dir", "Data_zoo\\MIT_SceneParsing\\", "path to dataset")
tf.flags.DEFINE_string("image_dir", "Data_zoo\\MIT_SceneParsing\\ADEChallengeData2016", "path to dataset")


# Download URL for the MIT ADE20K Scene Parsing challenge dataset (zip).
DATA_URL = 'http://data.csail.mit.edu/places/ADEchallenge/ADEChallengeData2016.zip'


def read_dataset(data_dir):
    """Return (image_paths, mask_paths) for the ADE20K scene-parsing data.

    Downloads and extracts the dataset into ``data_dir`` if needed, then
    lists the training/validation .png files. The listing is cached in a
    pickle file so later calls skip the directory scan.

    Args:
        data_dir: directory that holds (or will hold) the extracted dataset.

    Returns:
        Tuple ``(images, masks)`` of file-path lists as produced by
        ``create_image_lists`` (training paths, validation paths).
    """
    pickle_filename = "MITSceneParsing.pickle"
    pickle_filepath = os.path.join(data_dir, pickle_filename)
    # Folder name derived from the download URL, e.g. "ADEChallengeData2016".
    scene_parsing_folder = os.path.splitext(DATA_URL.split("/")[-1])[0]
    if not os.path.exists(pickle_filepath):
        utils.maybe_download_and_extract(data_dir, DATA_URL, is_zipfile=True)
        # BUGFIX: create_image_lists returns exactly two values; the original
        # code unpacked three (result, images, masks) and raised ValueError.
        images, masks = create_image_lists(os.path.join(data_dir, scene_parsing_folder))
        print("Pickling ...")
        with open(pickle_filepath, 'wb') as f:
            pickle.dump((images, masks), f, pickle.HIGHEST_PROTOCOL)
    else:
        print("Found pickle file!")
        # BUGFIX: the original wrote the pickle but never read it back and
        # always re-scanned the directories, defeating the cache.
        with open(pickle_filepath, 'rb') as f:
            images, masks = pickle.load(f)

    return images, masks



def create_image_lists(image_dir):
    """Collect .png image paths from the ADE20K directory layout.

    Expects ``image_dir`` to contain ``images/{training,validation}`` and
    ``annotations/{training,validation}`` subdirectories. For each image a
    matching annotation file (same basename) is looked up; unmatched images
    are reported and skipped from the per-split record list.

    Args:
        image_dir: root of the extracted dataset
            (e.g. ``.../ADEChallengeData2016``).

    Returns:
        Tuple ``(images, masks)`` where ``images`` is the list of training
        image paths and ``masks`` is the list of validation image paths
        (original naming kept for caller compatibility), or ``None`` when
        ``image_dir`` does not exist.
    """
    if not os.path.exists(image_dir):
        print("Image directory '" + image_dir + "' not found.")
        return None
    image_list = {}
    images = []  # training image paths
    masks = []   # validation image paths
    for directory in ('training', 'validation'):
        file_glob = os.path.join(image_dir, "images", directory, '*.png')
        file_list = glob.glob(file_glob)
        # BUGFIX: images/masks are now initialized up front instead of inside
        # only one branch each, so neither can be referenced before binding.
        if directory == 'training':
            images.extend(file_list)
        else:
            masks.extend(file_list)

        image_list[directory] = []
        if not file_list:
            print('No files found')
        else:
            for f in file_list:
                # BUGFIX: use os.path.basename instead of splitting on "\\",
                # which only worked with Windows path separators.
                filename = os.path.splitext(os.path.basename(f))[0]
                annotation_file = os.path.join(image_dir, "annotations", directory, filename + '.png')
                if os.path.exists(annotation_file):
                    record = {'image': f, 'annotation': annotation_file, 'filename': filename}
                    image_list[directory].append(record)
                else:
                    print("Annotation file not found for %s - Skipping" % filename)

        random.shuffle(image_list[directory])
        no_of_images = len(image_list[directory])
        print('No. of %s files: %d' % (directory, no_of_images))

    return images, masks

images, masks=create_image_lists(FLAGS.image_dir)

在 GitHub 原始代码基础上做了简单修改,完整代码见:

https://github.com/Ncier/Tensorflow-psp

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值