15. Captcha Recognition

This article walks through generating captcha images and training an AlexNet to recognize them. First, a Python script batch-generates captcha images containing digits and letters; these images are then converted to TFRecord format for training; finally, an AlexNet-style model is trained on the TFRecord data to recognize the captchas automatically, covering the complete flow from data preparation to model training.

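For orientation, the scripts below assume a project layout roughly like the following (this is inferred from the paths used in the code, not stated explicitly; src/nets is taken to be the TF-Slim nets_factory package, with alexnet_v2 adapted to return the five sets of logits that the training script expects):

    captcha/
        images/            # generated captcha images, named after their text
        train.tfrecords    # written by the conversion script
        test.tfrecords
        model/             # checkpoints saved during training
    src/
        nets/
            nets_factory.py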

Generating captchas, version 1 (digits only):

from captcha.image import ImageCaptcha  # pip install captcha
import os
import random
import sys

number = []

def produce_number(char_set=number):
    # Fill the character set with the digits 0-9
    assert isinstance(char_set, list)
    for i in range(0, 10):
        char_set.append(str(i))

def random_captcha_text(char_set=number, captcha_size=4):
    # Pick captcha_size random characters from the character set
    assert isinstance(char_set, list)
    captcha_text = []
    for i in range(captcha_size):
        c = random.choice(char_set)
        captcha_text.append(c)
    return captcha_text

def gen_captcha_text_and_image():
    # Render the random text as a captcha image and save it, named after its text
    image = ImageCaptcha()
    captcha_text = ''.join(random_captcha_text())
    image.write(captcha_text, 'captcha/images/' + captcha_text + '.jpg')

num = 10000
if __name__ == '__main__':
    os.makedirs('captcha/images', exist_ok=True)
    produce_number()
    for i in range(num):
        gen_captcha_text_and_image()
        sys.stdout.write('\r>> Creating image %d/%d' % (i + 1, num))
        sys.stdout.flush()
    sys.stdout.write('\n')
    sys.stdout.flush()
    print('over')
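
Because each image is saved under its own text, two captchas with the same text overwrite each other, and with 10,000 draws from only 10,000 possible 4-digit strings the directory ends up with roughly 6,300 unique files rather than 10,000. A quick count after generation (a minimal sketch, assuming the output directory above):

import os

# Count how many distinct captcha images survived the filename collisions
files = [f for f in os.listdir('captcha/images') if f.endswith('.jpg')]
print('unique captcha images:', len(files))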


Generating captchas, version 2 (digits and uppercase letters):

from captcha.image import ImageCaptcha  # pip install captcha
import os
import random
import sys

def v_code(code_num=5):
    # Build a random string of code_num characters, each a digit or an uppercase letter
    code = ''
    if not isinstance(code_num, int):
        code_num = 5
    for i in range(code_num):
        num = random.randint(0, 9)
        alf = chr(random.randint(65, 90))  # 'A'-'Z'
        add = random.choice([num, alf])
        code = ''.join([code, str(add)])
    return code

def gen_captcha_text_and_image(code_num):
    # Render the random text as a captcha image and save it, named after its text
    image = ImageCaptcha()
    captcha_text = v_code(code_num)
    image.write(captcha_text, 'captcha/images/' + captcha_text + '.jpg')

num = 10000
if __name__ == '__main__':
    os.makedirs('captcha/images', exist_ok=True)
    for i in range(num):
        gen_captcha_text_and_image(5)
        sys.stdout.write('\r>> Creating image %d/%d' % (i + 1, num))
        sys.stdout.flush()
    sys.stdout.write('\n')
    sys.stdout.flush()
    print('over')
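
Version 2 draws each character from the 10 digits plus the 26 uppercase letters, i.e. 36 classes in total, which matches the CHAR_SET_LEN = 36 used during training. The conversion script below therefore maps digits to classes 0-9 and 'A'-'Z' to classes 10-35; a minimal sketch of that mapping and its inverse (these helper names are illustrative, not part of the original code):

def char_to_class(ch):
    # Digits map to 0-9, uppercase letters to 10-35: 36 classes in total
    if ch.isdigit():
        return int(ch)
    return ord(ch) - ord('A') + 10

def class_to_char(idx):
    # Inverse mapping, handy for decoding the network's argmax predictions
    if idx < 10:
        return str(idx)
    return chr(idx - 10 + ord('A'))

assert all(class_to_char(char_to_class(c)) == c for c in '0123456789ABCXYZ')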


Generating the TFRecord files

import os
import tensorflow as tf
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import random, sys
from PIL import Image
import numpy as np

gpu_options = tf.compat.v1.GPUOptions(per_process_gpu_memory_fraction=0.333)

# Number of images held out for the test split
_NUM_TEST = 500
_RANDOM_SEED = 0

# Dataset path (the generated captcha images)
DATASET_DIR = r'captcha/images/'
# Where to save the tfrecord files
TFRECORD_DIR = 'captcha/'

# Check whether the tfrecord files already exist
def _dataset_exists(dataset_dir):
    for split_name in ['train', 'test']:
        output_filename = os.path.join(dataset_dir, split_name + '.tfrecords')
        if not tf.compat.v1.gfile.Exists(output_filename):
            return False
    return True

# Collect the paths of all generated captcha images
def _get_filename_and_classes(dataset_dir):
    photo_filenames = []
    for filename in os.listdir(dataset_dir):
        path = os.path.join(dataset_dir, filename)
        photo_filenames.append(path)
    return photo_filenames

def int64_feature(values):
    if not isinstance(values, (tuple, list)):
        values = [values]
    return tf.train.Feature(int64_list=tf.train.Int64List(value=values))

def bytes_feature(values):
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[values]))

def image_to_tfexample(image_data, label0, label1, label2, label3, label4):
    return tf.train.Example(features=tf.train.Features(feature={
        'image': bytes_feature(image_data),  # a Feature can hold bytes, int64 or float values
        'label0': int64_feature(label0),
        'label1': int64_feature(label1),
        'label2': int64_feature(label2),
        'label3': int64_feature(label3),
        'label4': int64_feature(label4),
    }))
# Why split the label into five fields instead of one? So the model can be trained
# in a multi-task fashion, with one classification head per character.

def _convert_dataset(split_name, filenames, dataset_dir):
    assert split_name in ['train', 'test']
    # A session is opened here, although the conversion itself runs no TensorFlow ops
    with tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(gpu_options=gpu_options)) as sess:
        output_filename = os.path.join(TFRECORD_DIR, split_name + '.tfrecords')
        with tf.compat.v1.python_io.TFRecordWriter(output_filename) as tfrecord_writer:
            for i, filename in enumerate(filenames):
                try:
                    sys.stdout.write('\r>> Converting image %d/%d %s' % (i + 1, len(filenames), filename))
                    sys.stdout.flush()

                    image_data = Image.open(filename)
                    image_data = image_data.resize((224, 224))      # the raw captchas are 160x60
                    image_data = np.array(image_data.convert('L'))  # convert to grayscale
                    image_data = image_data.tobytes()               # serialize to raw bytes

                    # The label is the first 5 characters of the file name:
                    # digits map to classes 0-9, uppercase letters 'A'-'Z' to classes 10-35
                    labels = os.path.basename(filename)[0:5]
                    num_labels = []
                    for ch in labels:
                        if ch.isdigit():
                            num_labels.append(int(ch))
                        else:
                            num_labels.append(ord(ch) - ord('A') + 10)

                    # Build the protocol-buffer Example and write it out
                    example = image_to_tfexample(image_data, num_labels[0], num_labels[1],
                                                 num_labels[2], num_labels[3], num_labels[4])
                    tfrecord_writer.write(example.SerializeToString())
                except IOError as err:
                    print("Could not read:", filenames[i])
                    print("Error:", err)
                    print("Skipping it\n")

    sys.stdout.write('\n')
    sys.stdout.flush()


if __name__ == '__main__':
    if _dataset_exists(TFRECORD_DIR):
        print('tfrecord files already exist')
    else:
        photo_filenames = _get_filename_and_classes(DATASET_DIR)

        # Shuffle the file list and split it into training and test sets
        random.seed(_RANDOM_SEED)
        random.shuffle(photo_filenames)
        training_filenames = photo_filenames[_NUM_TEST:]
        testing_filenames = photo_filenames[:_NUM_TEST]

        _convert_dataset('train', training_filenames, DATASET_DIR)
        _convert_dataset('test', testing_filenames, DATASET_DIR)

    print('TFRecord generation finished')
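
Before training, it is worth confirming that the records were written as expected. A minimal read-back check (a sketch assuming the paths and feature keys above; tf.train.Example is used here only as a protobuf parser):

import tensorflow as tf

# Inspect the first record of the training split and print its label fields
for record in tf.compat.v1.python_io.tf_record_iterator('captcha/train.tfrecords'):
    example = tf.train.Example.FromString(record)
    feat = example.features.feature
    labels = [feat['label%d' % k].int64_list.value[0] for k in range(5)]
    image_bytes = feat['image'].bytes_list.value[0]
    print('labels:', labels, 'image bytes:', len(image_bytes))  # expect 224*224 = 50176 bytes
    break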

Training with AlexNet

With the data converted into train.tfrecords and test.tfrecords, read the training split back from the TFRecord file and train the network:
# -*- coding: utf-8 -*-

import os
import tensorflow as tf
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
from src.nets import nets_factory

gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.666)

# Number of distinct characters: 10 digits + 26 uppercase letters
CHAR_SET_LEN = 36
# Original captcha size (the images themselves are resized to 224x224)
IMAGE_HEIGHT = 60
IMAGE_WIDTH = 160
BATCH_SIZE = 25
TFRECORD_FILE = 'captcha/train.tfrecords'

# Placeholders for the image batch and the five per-character labels
x = tf.placeholder(tf.float32, [None, 224, 224])
y0 = tf.placeholder(tf.int64, [None])
y1 = tf.placeholder(tf.int64, [None])
y2 = tf.placeholder(tf.int64, [None])
y3 = tf.placeholder(tf.int64, [None])
y4 = tf.placeholder(tf.int64, [None])

# Learning rate, kept in a variable so it can be decayed during training
lr = tf.Variable(0.003, dtype=tf.float32)

# Read and decode examples from the tfrecord file
def read_and_decode(filename):
    # Build a queue from the list of input file names
    filename_queue = tf.train.string_input_producer([filename])
    reader = tf.TFRecordReader()
    # Returns the file name and the serialized example
    _, serialized_example = reader.read(filename_queue)
    # Parse the example with the same feature keys used when writing
    features = tf.parse_single_example(serialized_example,
                                       features={
                                           'image':  tf.FixedLenFeature([], tf.string),
                                           'label0': tf.FixedLenFeature([], tf.int64),
                                           'label1': tf.FixedLenFeature([], tf.int64),
                                           'label2': tf.FixedLenFeature([], tf.int64),
                                           'label3': tf.FixedLenFeature([], tf.int64),
                                           'label4': tf.FixedLenFeature([], tf.int64),
                                       })
    # Decode the raw image bytes
    image = tf.decode_raw(features['image'], tf.uint8)
    # tf.train.shuffle_batch needs a fixed shape
    image = tf.reshape(image, [224, 224])
    # Preprocess: map pixel values from 0-255 to the range [-1, 1]
    image = tf.cast(image, tf.float32) / 255.0
    image = tf.subtract(image, 0.5)
    image = tf.multiply(image, 2.0)

    # Read the five labels (one per character)
    label0 = tf.cast(features['label0'], tf.int64)
    label1 = tf.cast(features['label1'], tf.int64)
    label2 = tf.cast(features['label2'], tf.int64)
    label3 = tf.cast(features['label3'], tf.int64)
    label4 = tf.cast(features['label4'], tf.int64)

    return image, label0, label1, label2, label3, label4

# Get image data and labels
image, label0, label1, label2, label3, label4 = read_and_decode(TFRECORD_FILE)

# shuffle_batch randomly shuffles the examples while batching:
# one thread reads BATCH_SIZE examples at a time from the queue
image_batch, label0_batch, label1_batch, label2_batch, label3_batch, label4_batch = tf.train.shuffle_batch(
    [image, label0, label1, label2, label3, label4],
    batch_size=BATCH_SIZE, capacity=5000, min_after_dequeue=1000, num_threads=1)

# Define the network: alexnet_v2 from nets_factory, adapted to expose five
# classification heads (one per captcha character)
train_network_fn = nets_factory.get_network_fn(
    'alexnet_v2',
    num_classes=CHAR_SET_LEN,
    weight_decay=0.0005,
    is_training=True
)

with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
    # inputs: a tensor of size [batch_size, height, width, channels];
    # the feed is [BATCH_SIZE, 224, 224], so add the channel dimension
    X = tf.reshape(x, [BATCH_SIZE, 224, 224, 1])
    # Run the input through the network to obtain the five sets of logits
    logits0, logits1, logits2, logits3, logits4, end_points = train_network_fn(X)

    # Convert the labels to one-hot form: the true class is 1, all others are 0
    one_hot_label0 = tf.one_hot(indices=tf.cast(y0, tf.int64), depth=CHAR_SET_LEN)
    one_hot_label1 = tf.one_hot(indices=tf.cast(y1, tf.int64), depth=CHAR_SET_LEN)
    one_hot_label2 = tf.one_hot(indices=tf.cast(y2, tf.int64), depth=CHAR_SET_LEN)
    one_hot_label3 = tf.one_hot(indices=tf.cast(y3, tf.int64), depth=CHAR_SET_LEN)
    one_hot_label4 = tf.one_hot(indices=tf.cast(y4, tf.int64), depth=CHAR_SET_LEN)

    # Per-character cross-entropy loss: logits are the network outputs, labels the ground truth
    loss0 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits0, labels=one_hot_label0))
    loss1 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits1, labels=one_hot_label1))
    loss2 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits2, labels=one_hot_label2))
    loss3 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits3, labels=one_hot_label3))
    loss4 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits4, labels=one_hot_label4))

    # Total loss: the average of the five per-character losses
    total_loss = (loss0 + loss1 + loss2 + loss3 + loss4) / 5.0
    # Minimize the total loss with Adam
    optimizer = tf.train.AdamOptimizer(lr).minimize(total_loss)

    # Per-character accuracy
    correct_prediction0 = tf.equal(tf.argmax(one_hot_label0, 1), tf.argmax(logits0, 1))
    accuracy0 = tf.reduce_mean(tf.cast(correct_prediction0, tf.float32))

    correct_prediction1 = tf.equal(tf.argmax(one_hot_label1, 1), tf.argmax(logits1, 1))
    accuracy1 = tf.reduce_mean(tf.cast(correct_prediction1, tf.float32))

    correct_prediction2 = tf.equal(tf.argmax(one_hot_label2, 1), tf.argmax(logits2, 1))
    accuracy2 = tf.reduce_mean(tf.cast(correct_prediction2, tf.float32))

    correct_prediction3 = tf.equal(tf.argmax(one_hot_label3, 1), tf.argmax(logits3, 1))
    accuracy3 = tf.reduce_mean(tf.cast(correct_prediction3, tf.float32))

    correct_prediction4 = tf.equal(tf.argmax(one_hot_label4, 1), tf.argmax(logits4, 1))
    accuracy4 = tf.reduce_mean(tf.cast(correct_prediction4, tf.float32))

    # Saver for checkpointing the model (make sure the output directory exists)
    os.makedirs('captcha/model', exist_ok=True)
    saver = tf.train.Saver()
    # Initialize all variables
    sess.run(tf.global_variables_initializer())

    # Create a coordinator to manage the input threads
    coord = tf.train.Coordinator()
    # Start the queue runners; the filename queue begins filling here
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    for i in range(6001):
        # Fetch one batch of images and labels
        b_image, b_label0, b_label1, b_label2, b_label3, b_label4 = sess.run(
            [image_batch, label0_batch, label1_batch, label2_batch, label3_batch, label4_batch])
        # One optimization step
        sess.run(optimizer, feed_dict={x: b_image, y0: b_label0, y1: b_label1, y2: b_label2, y3: b_label3, y4: b_label4})

        # Report the loss and accuracies every 200 iterations
        if i % 200 == 0:
            # Decay the learning rate every 2000 iterations
            if i % 2000 == 0:
                sess.run(tf.assign(lr, lr / 3))

            acc0, acc1, acc2, acc3, acc4, loss_ = sess.run(
                    [accuracy0, accuracy1, accuracy2, accuracy3, accuracy4, total_loss],
                    feed_dict={x: b_image, y0: b_label0, y1: b_label1, y2: b_label2, y3: b_label3, y4: b_label4})

            learning_rate = sess.run(lr)
            print('iter:%d, loss:%.3f, accuracy:%.2f,%.2f,%.2f,%.2f,%.2f, learning_rate:%.4f'
                  % (i, loss_, acc0, acc1, acc2, acc3, acc4, learning_rate))

            # Save a checkpoint once every per-character accuracy exceeds 0.90
            if acc0 > 0.90 and acc1 > 0.90 and acc2 > 0.90 and acc3 > 0.90 and acc4 > 0.90:
                saver.save(sess, 'captcha/model/crack_captcha.model', global_step=i)

    # Ask the input threads to stop
    coord.request_stop()
    # Wait until all of them have finished
    coord.join(threads)
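
Once a checkpoint has been saved, it can be restored into the same graph for evaluation or inference. A minimal sketch, assuming it runs inside the same session block where saver was created (for example before a separate evaluation loop):

    # Restore the latest checkpoint written by the training loop above
    ckpt = tf.train.latest_checkpoint('captcha/model')
    if ckpt is not None:
        saver.restore(sess, ckpt)
        print('restored from', ckpt)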
