如何微调人脸模型:如下是一段已经过验证的个人示例。

Python微调人脸模型实践
部署运行你感兴趣的模型镜像
import os
from datetime import datetime
from itertools import product

import numpy as np
import tensorflow as tf
from deepface import DeepFace
from tensorflow.keras import layers, models
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.preprocessing.image import ImageDataGenerator

# ===== Path configuration =====
# Root of the training images; expected layout is one sub-directory per emotion class.
dataset_dir = r"E:\python project 1\人脸分析调用\data6"
# Destination for fine-tuned .h5 models and the sorted results text file.
output_dir = r"E:\python project 1\人脸分析调用\tunedmodel"
# Pretrained DeepFace emotion-model weights (default DeepFace download location).
weights_path = r"C:\Users\Lenovo\.deepface\weights\facial_expression_model_weights.h5"

# Fail fast if the dataset is missing; create the output folder if needed.
if not os.path.exists(dataset_dir):
    raise FileNotFoundError(f"数据集路径不存在: {dataset_dir}")
os.makedirs(output_dir, exist_ok=True)

# Per-run summaries appended by train_model():
# (best_val_acc, cfg_name, batch_size, lr, epochs, dropout_rate, model_filename)
results = []

# ===== Data augmentation =====
# A single generator handles both augmentation and the 80/20
# train/validation split (selected later via the `subset` argument).
_augmentation_config = dict(
    rescale=1. / 255,
    validation_split=0.2,
    rotation_range=15,
    width_shift_range=0.1,
    height_shift_range=0.1,
    zoom_range=0.1,
    horizontal_flip=True,
)
datagen = ImageDataGenerator(**_augmentation_config)

def build_emotion_model(open_layers=None, dropout_rate=0.5, emotion_labels=None):
    """Build an emotion classifier from DeepFace's pretrained Emotion network.

    The pretrained network's final layer is replaced with a fresh
    Dropout + Dense softmax head sized to ``len(emotion_labels)``, and the
    requested number of trailing layers is unfrozen for fine-tuning.

    Args:
        open_layers: Number of trailing layers to leave trainable;
            ``None`` unfreezes the entire model.
        dropout_rate: Dropout rate applied just before the new head.
        emotion_labels: Ordered, non-empty list of class labels; its
            length determines the output dimension. Required.

    Returns:
        An uncompiled ``tf.keras.Model`` with the new classification head.

    Raises:
        ValueError: If ``emotion_labels`` is missing or empty (previously
            this surfaced as an obscure ``TypeError`` on ``len(None)``).
    """
    if not emotion_labels:
        raise ValueError("emotion_labels must be a non-empty list of class names")

    # Load the pretrained DeepFace Emotion network and its weights.
    emotion_client = DeepFace.build_model(model_name="Emotion", task="facial_attribute")
    base_model = emotion_client.model
    base_model.load_weights(weights_path)

    # One dummy forward pass so the graph is fully built before we
    # splice a new head onto an intermediate layer's output.
    dummy_x = np.random.rand(1, 48, 48, 1).astype(np.float32)
    base_model.predict(dummy_x)

    # ===== Replace the final layer with a len(emotion_labels)-way head =====
    x = base_model.layers[-2].output
    x = layers.Dropout(dropout_rate)(x)
    output = layers.Dense(len(emotion_labels), activation='softmax', name='emotion_output')(x)
    model = models.Model(inputs=base_model.inputs, outputs=output)

    # Freeze/unfreeze: None => everything trainable, else only the last N layers.
    if open_layers is None:
        for layer in model.layers:
            layer.trainable = True
    else:
        for layer in model.layers[:-open_layers]:
            layer.trainable = False
        for layer in model.layers[-open_layers:]:
            layer.trainable = True

    return model

def train_model(open_cfg, batch_size, lr, epochs, dropout_rate):
    """Fine-tune one model for a single hyper-parameter combination.

    Builds train/validation generators from the module-level ``datagen``,
    fine-tunes the emotion model, saves the resulting .h5 file into
    ``output_dir``, and appends a summary tuple to the global ``results``.
    Returns the best validation accuracy observed during training.
    """
    def _flow(subset):
        # Both subsets share the exact same directory/augmentation settings.
        return datagen.flow_from_directory(
            dataset_dir,
            target_size=(48, 48),
            color_mode='grayscale',
            batch_size=batch_size,
            class_mode='categorical',
            subset=subset,
        )

    train_gen = _flow('training')
    val_gen = _flow('validation')

    # Class labels ordered by the index the generator assigned to each class.
    emotion_labels = sorted(train_gen.class_indices, key=train_gen.class_indices.get)
    print("📊 训练集类别顺序:", emotion_labels)

    model = build_emotion_model(
        open_layers=open_cfg["open_layers"],
        dropout_rate=dropout_rate,
        emotion_labels=emotion_labels,
    )
    model.compile(
        optimizer=Adam(learning_rate=lr),
        loss='categorical_crossentropy',
        metrics=['accuracy'],
    )

    # Stop early on validation-accuracy plateau, restoring the best weights.
    early_stop = EarlyStopping(monitor='val_accuracy', patience=5, restore_best_weights=True)
    history = model.fit(
        train_gen,
        validation_data=val_gen,
        epochs=epochs,
        callbacks=[early_stop],
        verbose=1,
    )
    best_val_acc = max(history.history['val_accuracy'])

    # Encode the full hyper-parameter combination into the file name.
    model_name = f"{open_cfg['name']}_bs{batch_size}_lr{lr}_ep{epochs}_do{dropout_rate}_acc{best_val_acc:.4f}.h5"
    model_path = os.path.join(output_dir, model_name)
    model.save(model_path)

    results.append((best_val_acc, open_cfg['name'], batch_size, lr, epochs, dropout_rate, model_name))
    print(f"✅ 完成: {model_name} | 验证准确率: {best_val_acc:.4f}")
    return best_val_acc

# ===== Brute-force hyper-parameter search (dropout fixed at 0.3) =====
dropout_rate_fixed = 0.3
open_layer_configs = [
    {"name": "open_last_3", "open_layers": 3},
    {"name": "open_last_6", "open_layers": 6},
    {"name": "open_all", "open_layers": None}
]
batch_sizes = [4, 8]
learning_rates = [5e-5, 1e-4]
epochs_list = [30, 50]

# itertools.product flattens the four nested loops into a single one
# while preserving the original iteration order (3*2*2*2 = 24 runs).
for open_cfg, bs, lr, ep in product(open_layer_configs, batch_sizes, learning_rates, epochs_list):
    train_model(open_cfg, batch_size=bs, lr=lr, epochs=ep, dropout_rate=dropout_rate_fixed)

# ===== Sort runs by validation accuracy and persist the ranking =====
results.sort(key=lambda run: run[0], reverse=True)
txt_path = os.path.join(output_dir, f"results_sorted_{datetime.now().strftime('%Y%m%d_%H%M%S')}.txt")
with open(txt_path, "w", encoding="utf-8") as f:
    for acc, open_name, bs, lr, ep, dr, name in results:
        f.write(f"模型文件: {name} | 验证准确率: {acc:.4f} | 开放层数: {open_name} | 批次大小: {bs} | 学习率: {lr} | 迭代轮次: {ep} | Dropout: {dr}\n")
print(f"📊 排序结果已保存到 {txt_path}")

# ===== Keep only the 5 best models, delete every other .h5 =====
# The model file name is the last element of each result tuple;
# a set comprehension replaces the redundant set([list-comp]) wrapper.
top5 = {run[-1] for run in results[:5]}
for file in os.listdir(output_dir):
    if file.endswith(".h5") and file not in top5:
        os.remove(os.path.join(output_dir, file))
print("✅ 已保留前5个模型,其他已删除")

Python 目前在模型训练与微调方面的生态还是很强大的。如上示例已经过验证,各位使用时要确保相关路径和文件存在,有问题可以在评论区沟通。

您可能感兴趣的与本文相关的镜像

Llama Factory

Llama Factory

模型微调
LLama-Factory

LLaMA Factory 是一个简单易用且高效的大型语言模型(Large Language Model)训练与微调平台。通过 LLaMA Factory,可以在无需编写任何代码的前提下,在本地完成上百种预训练模型的微调

评论
成就一亿技术人!
拼手气红包6.0元
还能输入1000个字符
 
红包 添加红包
表情包 插入表情
 条评论被折叠 查看
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值