Generate CAPTCHA images, script 1 (digits only):
from captcha.image import ImageCaptcha
import numpy as np
from PIL import Image
import random
import sys

# Character set: the digits 0-9.
number = []

def produce_number(char_set=number):
    assert isinstance(char_set, list)
    for i in range(0, 10):
        char_set.append(str(i))

def random_catcha_text(char_set=number, captcha_size=4):
    # Pick captcha_size random characters from the character set.
    assert isinstance(char_set, list)
    captcha_text = []
    for i in range(captcha_size):
        c = random.choice(char_set)
        captcha_text.append(c)
    return captcha_text

def gen_captha_text_and_image():
    # Render the text as a CAPTCHA image and save it, named after its text.
    image = ImageCaptcha()
    captcha_text = random_catcha_text()
    captcha_text = ''.join(captcha_text)
    capcha = image.generate(captcha_text)
    image.write(captcha_text, 'captcha/images/' + captcha_text + '.jpg')

num = 10000

if __name__ == '__main__':
    produce_number()
    for i in range(num):
        gen_captha_text_and_image()
        sys.stdout.write('\r>> Creating image %d/%d' % (i + 1, num))
        sys.stdout.flush()
    sys.stdout.write('\n')
    sys.stdout.flush()
    print('over')
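A caveat worth adding here (my note, not from the original): each image is saved under its own text, so repeated 4-digit strings overwrite the same file and far fewer than 10000 distinct files end up on disk. Also, the TFRecord step below reads five label characters from the file name, so raise captcha_size to 5 (or use the second generator) if you go this route. A quick, hypothetical check of the actual file count:

import os
# Count the distinct captcha images that actually landed on disk
# (assumes the default output directory captcha/images/ used above).
print(len(os.listdir('captcha/images/')))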
Generate CAPTCHA images, script 2 (digits and uppercase letters):
from captcha.image import ImageCaptcha
import numpy as np
from PIL import Image
import random
import sys

def v_code(code_num=5):
    # Build a random code of digits and uppercase letters, code_num characters long.
    code = ''
    if not isinstance(code_num, int):
        code_num = 5
    for i in range(code_num):
        num = random.randint(0, 9)
        alf = chr(random.randint(65, 90))   # a random uppercase letter A-Z
        add = random.choice([num, alf])
        code = ''.join([code, str(add)])
    return code

def gen_captha_text_and_image(codenum):
    # Render the text as a CAPTCHA image and save it, named after its text.
    image = ImageCaptcha()
    captcha_text = v_code(codenum)
    capcha = image.generate(captcha_text)
    image.write(captcha_text, 'captcha/images/' + captcha_text + '.jpg')

num = 10000

if __name__ == '__main__':
    for i in range(num):
        gen_captha_text_and_image(5)
        sys.stdout.write('\r>> Creating image %d/%d' % (i + 1, num))
        sys.stdout.flush()
    sys.stdout.write('\n')
    sys.stdout.flush()
    print('over')
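The TFRecord and training code below works with CHAR_SET_LEN = 36 classes, which implies encoding digits as indices 0-9 and uppercase letters 'A'-'Z' as 10-35. A minimal sketch of that mapping (the helper names are mine, not part of the original scripts):

# Hypothetical helpers showing the 36-class label encoding used below:
# digits '0'-'9' -> 0-9, uppercase letters 'A'-'Z' -> 10-35.
def char_to_index(c):
    return int(c) if c.isdigit() else ord(c) - ord('A') + 10

def index_to_char(i):
    return str(i) if i < 10 else chr(i - 10 + ord('A'))

print([char_to_index(c) for c in 'A1C3Z'])                      # [10, 1, 12, 3, 35]
print(''.join(index_to_char(i) for i in [10, 1, 12, 3, 35]))    # 'A1C3Z'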
Generate the TFRecord files
import os
import tensorflow as tf
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import random, math, sys
from PIL import Image
import numpy as np

gpu_options = tf.compat.v1.GPUOptions(per_process_gpu_memory_fraction=0.333)

_NUM_TEST = 500            # number of images held out for the test split
_RANDOM_SEED = 0
_NUM_SHARDS = 5
DATASET_DIR = r'captcha/images/'
TFRECORD_DIR = 'captcha/'

def _dataset_exists(dataset_dir):
    # Return True only if both train.tfrecords and test.tfrecords already exist.
    for split_name in ['train', 'test']:
        output_filename = os.path.join(dataset_dir, split_name + '.tfrecords')
        if not tf.io.gfile.exists(output_filename):
            return False
    return True

def _get_filename_and_classes(dataset_dir):
    # Collect the full path of every captcha image.
    photo_filenames = []
    for filename in os.listdir(dataset_dir):
        path = os.path.join(dataset_dir, filename)
        photo_filenames.append(path)
    return photo_filenames

def int64_feature(values):
    if not isinstance(values, (tuple, list)):
        values = [values]
    return tf.train.Feature(int64_list=tf.train.Int64List(value=values))

def bytes_feature(values):
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[values]))

def image_to_tfexample(image_data, label0, label1, label2, label3, label4):
    # Pack the raw image bytes and the five per-position labels into one Example.
    return tf.train.Example(features=tf.train.Features(feature={
        'image': bytes_feature(image_data),
        'label0': int64_feature(label0),
        'label1': int64_feature(label1),
        'label2': int64_feature(label2),
        'label3': int64_feature(label3),
        'label4': int64_feature(label4),
    }))

def _covert_dataset(split_name, filenames, dataset_dir):
    # Write one TFRecord file per split ('train' or 'test').
    assert split_name in ['train', 'test']
    with tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(gpu_options=gpu_options)) as sess:
        output_filename = os.path.join(TFRECORD_DIR, split_name + '.tfrecords')
        with tf.io.TFRecordWriter(output_filename) as tfrecord_writer:
            for i, filename in enumerate(filenames):
                try:
                    sys.stdout.write('\r>> Converting image %d/%d %s' % (i + 1, len(filenames), filename))
                    sys.stdout.flush()
                    # Resize to 224x224, convert to grayscale and serialize the raw bytes.
                    image_data = Image.open(filename)
                    image_data = image_data.resize((224, 224))
                    image_data = np.array(image_data.convert('L'))
                    image_data = image_data.tobytes()
                    # The label is the first five characters of the file name.
                    labels = os.path.basename(filename)[0:5]
                    num_labels = []
                    for j in range(5):
                        c = labels[j]
                        if c.isdigit():
                            num_labels.append(int(c))
                        elif c.isalpha():
                            # Map 'A'-'Z' to 10-35 so every label fits the
                            # 36-class set (CHAR_SET_LEN) used during training.
                            num_labels.append(ord(c) - ord('A') + 10)
                    example = image_to_tfexample(image_data, num_labels[0], num_labels[1],
                                                 num_labels[2], num_labels[3], num_labels[4])
                    tfrecord_writer.write(example.SerializeToString())
                except IOError as err:
                    print("Could not read:", filenames[i])
                    print("Error:", err)
                    print("Skipping it\n")
    sys.stdout.write('\n')
    sys.stdout.flush()

if __name__ == '__main__':
    # Skip conversion if the TFRecord files have already been produced.
    if _dataset_exists(TFRECORD_DIR):
        print('tfrecord files already exist')
    else:
        photo_filenames = _get_filename_and_classes(DATASET_DIR)
        # Shuffle reproducibly, then split into training and test sets.
        random.seed(_RANDOM_SEED)
        random.shuffle(photo_filenames)
        training_filenames = photo_filenames[_NUM_TEST:]
        testing_filenames = photo_filenames[:_NUM_TEST]
        _covert_dataset('train', training_filenames, DATASET_DIR)
        _covert_dataset('test', testing_filenames, DATASET_DIR)
        print('produced tfrecord files successfully')
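After conversion it can be worth reading one record back to confirm the image size and that every label index falls inside [0, 36). A minimal sketch, assuming TensorFlow with eager execution (the default in 2.x); the tf.data / tf.io calls used here are not part of the original script:

import tensorflow as tf

feature_spec = {
    'image':  tf.io.FixedLenFeature([], tf.string),
    'label0': tf.io.FixedLenFeature([], tf.int64),
    'label1': tf.io.FixedLenFeature([], tf.int64),
    'label2': tf.io.FixedLenFeature([], tf.int64),
    'label3': tf.io.FixedLenFeature([], tf.int64),
    'label4': tf.io.FixedLenFeature([], tf.int64),
}

# Parse the first example from the freshly written training split.
for record in tf.data.TFRecordDataset('captcha/train.tfrecords').take(1):
    example = tf.io.parse_single_example(record, feature_spec)
    image = tf.io.decode_raw(example['image'], tf.uint8)
    print(image.shape)                                       # (50176,) = 224*224 grayscale bytes
    print([int(example['label%d' % k]) for k in range(5)])   # five class indices in [0, 36)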
Training with AlexNet
Train on the images that were converted into train.tfrecords and test.tfrecords in the previous step.
import os
import tensorflow as tf
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import random, math, sys
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
from src.nets import nets_factory

gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.666)

CHAR_SET_LEN = 36          # 10 digits + 26 uppercase letters
IMAGE_HEIGHT = 60
IMAGE_WIDTH = 160
BATCH_SIZE = 25
TFRECORD_FILE = 'captcha/train.tfrecords'

# Placeholders: the grayscale image batch and one label per captcha position.
x = tf.placeholder(tf.float32, [None, 224, 224])
y0 = tf.placeholder(tf.float32, [None])
y1 = tf.placeholder(tf.float32, [None])
y2 = tf.placeholder(tf.float32, [None])
y3 = tf.placeholder(tf.float32, [None])
y4 = tf.placeholder(tf.float32, [None])

# Learning rate as a variable so it can be decayed during training.
lr = tf.Variable(0.003, dtype=tf.float32)

def read_and_decode(filename):
    # Read one example from the TFRecord file and decode the image and five labels.
    filename_queue = tf.train.string_input_producer([filename])
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(serialized_example,
                                       features={
                                           'image': tf.FixedLenFeature([], tf.string),
                                           'label0': tf.FixedLenFeature([], tf.int64),
                                           'label1': tf.FixedLenFeature([], tf.int64),
                                           'label2': tf.FixedLenFeature([], tf.int64),
                                           'label3': tf.FixedLenFeature([], tf.int64),
                                           'label4': tf.FixedLenFeature([], tf.int64),
                                       })
    image = tf.decode_raw(features['image'], tf.uint8)
    image = tf.reshape(image, [224, 224])
    # Normalize pixel values from [0, 255] to [-1, 1].
    image = tf.cast(image, tf.float32) / 255.0
    image = tf.subtract(image, 0.5)
    image = tf.multiply(image, 2.0)
    label0 = tf.cast(features['label0'], tf.int64)
    label1 = tf.cast(features['label1'], tf.int64)
    label2 = tf.cast(features['label2'], tf.int64)
    label3 = tf.cast(features['label3'], tf.int64)
    label4 = tf.cast(features['label4'], tf.int64)
    return image, label0, label1, label2, label3, label4

# Build the input pipeline: shuffled batches of images and labels.
image, label0, label1, label2, label3, label4 = read_and_decode(TFRECORD_FILE)
image_batch, label0_batch, label1_batch, label2_batch, label3_batch, label4_batch = tf.train.shuffle_batch(
    [image, label0, label1, label2, label3, label4],
    batch_size=BATCH_SIZE, capacity=5000, min_after_dequeue=1000, num_threads=1)

# AlexNet v2 from the slim nets factory, modified to emit five classifier heads.
train_network_fn = nets_factory.get_network_fn(
    'alexnet_v2',
    num_classes=CHAR_SET_LEN,
    weight_decay=0.0005,
    is_training=True)

with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
    X = tf.reshape(x, [BATCH_SIZE, 224, 224, 1])
    # One set of logits per captcha position.
    logits0, logits1, logits2, logits3, logits4, end_points = train_network_fn(X)

    # One-hot encode the ground-truth labels for the softmax cross-entropy losses.
    one_hot_label0 = tf.one_hot(indices=tf.cast(y0, tf.int64), depth=CHAR_SET_LEN)
    one_hot_label1 = tf.one_hot(indices=tf.cast(y1, tf.int64), depth=CHAR_SET_LEN)
    one_hot_label2 = tf.one_hot(indices=tf.cast(y2, tf.int64), depth=CHAR_SET_LEN)
    one_hot_label3 = tf.one_hot(indices=tf.cast(y3, tf.int64), depth=CHAR_SET_LEN)
    one_hot_label4 = tf.one_hot(indices=tf.cast(y4, tf.int64), depth=CHAR_SET_LEN)

    loss0 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits0, labels=one_hot_label0))
    loss1 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits1, labels=one_hot_label1))
    loss2 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits2, labels=one_hot_label2))
    loss3 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits3, labels=one_hot_label3))
    loss4 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits4, labels=one_hot_label4))
    # Average the five per-position losses and optimize them jointly.
    total_loss = (loss0 + loss1 + loss2 + loss3 + loss4) / 5.0
    optimizer = tf.train.AdamOptimizer(lr).minimize(total_loss)

    # Per-position accuracy.
    correct_prediction0 = tf.equal(tf.argmax(one_hot_label0, 1), tf.argmax(logits0, 1))
    accuracy0 = tf.reduce_mean(tf.cast(correct_prediction0, tf.float32))
    correct_prediction1 = tf.equal(tf.argmax(one_hot_label1, 1), tf.argmax(logits1, 1))
    accuracy1 = tf.reduce_mean(tf.cast(correct_prediction1, tf.float32))
    correct_prediction2 = tf.equal(tf.argmax(one_hot_label2, 1), tf.argmax(logits2, 1))
    accuracy2 = tf.reduce_mean(tf.cast(correct_prediction2, tf.float32))
    correct_prediction3 = tf.equal(tf.argmax(one_hot_label3, 1), tf.argmax(logits3, 1))
    accuracy3 = tf.reduce_mean(tf.cast(correct_prediction3, tf.float32))
    correct_prediction4 = tf.equal(tf.argmax(one_hot_label4, 1), tf.argmax(logits4, 1))
    accuracy4 = tf.reduce_mean(tf.cast(correct_prediction4, tf.float32))

    saver = tf.train.Saver()
    sess.run(tf.global_variables_initializer())

    # Start the queue runners that feed tf.train.shuffle_batch.
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    for i in range(6001):
        b_image, b_label0, b_label1, b_label2, b_label3, b_label4 = sess.run(
            [image_batch, label0_batch, label1_batch, label2_batch, label3_batch, label4_batch])
        sess.run(optimizer, feed_dict={x: b_image, y0: b_label0, y1: b_label1,
                                       y2: b_label2, y3: b_label3, y4: b_label4})
        if i % 200 == 0:
            # Decay the learning rate every 2000 iterations.
            if i % 2000 == 0:
                sess.run(tf.assign(lr, lr / 3))
            acc0, acc1, acc2, acc3, acc4, loss_ = sess.run(
                [accuracy0, accuracy1, accuracy2, accuracy3, accuracy4, total_loss],
                feed_dict={x: b_image, y0: b_label0, y1: b_label1, y2: b_label2, y3: b_label3, y4: b_label4})
            learning_rate = sess.run(lr)
            print('iter:%d, loss:%.4f, accuracy:%.2f,%.2f,%.2f,%.2f,%.2f, learn_rate:%.6f'
                  % (i, loss_, acc0, acc1, acc2, acc3, acc4, learning_rate))
            # Save a checkpoint and stop once every position exceeds 90% accuracy.
            if acc0 > 0.90 and acc1 > 0.90 and acc2 > 0.90 and acc3 > 0.90 and acc4 > 0.90:
                saver.save(sess, r'captcha/model/crack_captcha.model', global_step=i)
                break

    coord.request_stop()
    coord.join(threads)
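One note on the learning-rate schedule in the loop above: i % 2000 == 0 is already true at i = 0, so the first decay happens before the first logged step; after iteration 0 the effective rate is 0.001, dropping to roughly 3.3e-4, 1.1e-4 and 3.7e-5 at iterations 2000, 4000 and 6000. A quick check of that arithmetic:

# Reproduce the decay applied by sess.run(tf.assign(lr, lr / 3)) at i = 0, 2000, 4000, 6000.
lr = 0.003
for step in range(0, 6001, 2000):
    lr = lr / 3
    print(step, lr)   # 0.001, 0.000333..., 0.000111..., 3.7e-05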