When the dataset is too large to load into memory all at once, train with Keras's fit_generator and feed it batches produced by a multi-threaded generator. The core code is as follows:
import random
import threading
import time

import numpy as np
from PIL import Image
from keras.utils import np_utils

# class_num (the total number of label classes) is assumed to be defined elsewhere.

def GetImgMat(imgpath):
    # Load an image, resize it to 224x224, scale pixels to [0, 1]
    # and flatten it into a single column vector.
    img = Image.open(imgpath).convert("RGB")
    img = img.resize((224, 224))
    img = np.array(img) / 255.
    return img.reshape((224 * 224 * 3, 1))

def process_line(line):
    # Each line of traindata.txt is "<image path>\t<label index>".
    filename, label = line.split("\t")[:2]
    x = GetImgMat(filename)
    y = np_utils.to_categorical(int(label), class_num)
    return x, y

def GetFileData():
    # Endlessly yield shuffled lines from the sample list.
    linedata = open('utils/traindata.txt', 'r', encoding='utf-8').read().split('\n')
    linedata = [line for line in linedata if line.strip()]
    while 1:
        random.shuffle(linedata)
        for line in linedata:
            yield line

globalList = []                 # shared queue of prepared batches
globalLock = threading.Lock()   # single lock shared by all threads

def child_threading(batch_size):
    # Worker thread: keep the shared queue topped up with ready-made batches.
    global globalList
    getfiledata = GetFileData()
    while 1:
        if len(globalList) > 50:
            time.sleep(1)
        else:
            X = []
            Y = []
            for i in range(batch_size):
                linedata = next(getfiledata)
                x, y = process_line(linedata)
                X.append(x)
                Y.append(y)
            with globalLock:
                globalList.append((np.array(X), np.array(Y)))

def generate_arrays_from_file(batch_size):
    # Generator handed to fit_generator: start the worker threads once,
    # then keep yielding batches taken from the shared queue.
    global globalList
    for i in range(160):    # spawn the worker threads (160 in the original)
        threading.Thread(target=child_threading, args=(batch_size,), daemon=True).start()
    while 1:
        if len(globalList) > 0:
            with globalLock:
                returnX, returnY = globalList.pop()
            returnX = returnX.reshape(-1, 224, 224, 3)
            yield (returnX, returnY)
        else:
            time.sleep(0.01)
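As a rough usage sketch, the generator is simply passed to fit_generator; the model, batch_size, steps_per_epoch and epochs values below are placeholders rather than values from the original code:

# Hypothetical usage sketch: `model` is any compiled Keras model; the
# numbers are illustrative, not taken from the original post.
batch_size = 32
model.fit_generator(
    generate_arrays_from_file(batch_size),
    steps_per_epoch=1000,   # roughly number_of_samples // batch_size
    epochs=10)

steps_per_epoch tells Keras how many batches to draw from the generator before it counts one epoch as finished, since the generator itself loops forever.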