How to fix the "from sklearn.externals import joblib" import error

This post fixes an error that occurs when importing joblib from sklearn.externals: changing from sklearn.externals import joblib to import joblib resolves the problem. The copy of joblib bundled under sklearn.externals was deprecated in scikit-learn 0.21 and removed in 0.23, so joblib now has to be imported as a standalone package.

Problem:

In the line from sklearn.externals import joblib, the IDE marks joblib with a squiggly underline and the import is reported as an error.

Solution:

Change the line above to:

import joblib

With that change the error goes away. The standalone joblib package exposes the same dump/load API and is installed together with scikit-learn (or separately via pip install joblib).
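To confirm that the replacement import behaves the same way, here is a minimal, self-contained sketch (the model and the file name "model.pkl" are only examples) that saves and reloads a fitted scikit-learn estimator with the standalone joblib package:

import joblib
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression

# Fit a small example model
X, y = load_iris(return_X_y=True)
clf = LogisticRegression(max_iter=1000).fit(X, y)

# Persist it and load it back with the standalone joblib package
joblib.dump(clf, "model.pkl")         # example file name
restored = joblib.load("model.pkl")
print(restored.predict(X[:5]))        # the reloaded model predicts as before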

Related question (06-12): "Why can't I save my model after training finishes?" The poster's training script:

import numpy as np
import cv2
import os
from skimage.feature import hog
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score, classification_report
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import joblib

# 1. Data preparation and feature extraction
def load_dataset_and_extract_features(data_dir):
    """
    Load the vehicle image dataset and extract HOG features
    """
    features = []
    labels = []
    categories = ['0', '1', '2', '3']  # vehicle type classes

    for label, category in enumerate(categories):
        category_dir = os.path.join(data_dir, category)
        if not os.path.exists(category_dir):
            print(f"Warning: category directory {category_dir} does not exist, skipping")
            continue

        for img_name in os.listdir(category_dir):
            if img_name.endswith('.jpg'):
                # Read the image as grayscale
                img_path = os.path.join(category_dir, img_name)
                try:
                    img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
                    if img is None:
                        print(f"Warning: could not read image {img_path}, skipping")
                        continue
                    # Resize the image to a standard size for HOG extraction
                    img = cv2.resize(img, (128, 64))
                    # Extract HOG features
                    hog_features = hog(img, orientations=9, pixels_per_cell=(8, 8),
                                       cells_per_block=(2, 2), transform_sqrt=True,
                                       block_norm='L2-Hys')
                    features.append(hog_features)
                    labels.append(label)
                except Exception as e:
                    print(f"Error while processing image {img_path}: {e}")
                    continue

    if not features:
        raise ValueError("No valid image data found; check the dataset path and file format")

    return np.array(features), np.array(labels)

# 2. Data preprocessing
def preprocess_data(features, labels):
    """
    Standardize the features and split into train/test sets
    """
    # Feature standardization
    scaler = StandardScaler()
    features_scaled = scaler.fit_transform(features)

    # Split into training and test sets
    X_train, X_test, y_train, y_test = train_test_split(
        features_scaled, labels, test_size=0.2, random_state=42
    )
    return X_train, X_test, y_train, y_test, scaler

# 3. SVM model training
def train_svm_classifier(X_train, y_train):
    """
    Train the SVM classifier
    """
    svm = SVC(kernel='rbf', C=1.0, gamma='scale', probability=True)
    svm.fit(X_train, y_train)
    return svm

# 4. Model evaluation
def evaluate_model(model, X_test, y_test):
    """
    Evaluate model performance
    """
    y_pred = model.predict(X_test)
    accuracy = accuracy_score(y_test, y_pred)
    report = classification_report(y_test, y_pred, target_names=['0', '1', '2', '3'])

    print(f"Model accuracy: {accuracy:.2f}")
    print("Classification report:\n", report)

    # Visualize the confusion matrix
    from sklearn.metrics import ConfusionMatrixDisplay
    ConfusionMatrixDisplay.from_predictions(y_test, y_pred, display_labels=['0', '1', '2', '3'])
    plt.title('Vehicle classification confusion matrix')
    plt.show()

# Main function
def main():
    try:
        # Dataset path - replace with the actual path
        DATA_DIR = "C:/Users/Administrator/Desktop/jinwan"
        if not os.path.exists(DATA_DIR):
            raise FileNotFoundError(f"Dataset directory {DATA_DIR} does not exist")

        # Pipeline
        print("Loading dataset and extracting features...")
        features, labels = load_dataset_and_extract_features(DATA_DIR)

        print("Preprocessing data...")
        X_train, X_test, y_train, y_test, scaler = preprocess_data(features, labels)

        print("Training SVM model...")
        model = train_svm_classifier(X_train, y_train)

        print("Evaluating model performance...")
        evaluate_model(model, X_test, y_test)

        # Save the model and scaler to the given path
        SAVE_DIR = "C:/Users/Administrator/Desktop/ultralytics-main"
        os.makedirs(SAVE_DIR, exist_ok=True)  # create the directory automatically

        model_path = os.path.join(SAVE_DIR, "vehicle_classifier_svm.pkl")
        scaler_path = os.path.join(SAVE_DIR, "standard_scaler.pkl")
        joblib.dump(model, model_path)
        joblib.dump(scaler, scaler_path)
        print(f"Model saved to: {model_path}")
        print(f"Scaler saved to: {scaler_path}")

    except Exception as e:
        print(f"Program error: {e}")
        import traceback
        traceback.print_exc()

if __name__ == "__main__":
    main()
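One way to narrow this down (a hedged sketch that only reuses the SAVE_DIR and file names from the script above): the joblib.dump calls are the last steps inside the try block, so they only run if every earlier step in main() succeeds; if an exception is caught first, the .pkl files are never written. The check below simply tests whether the files exist and can be loaded back:

import os
import joblib

# Paths copied from the training script above
SAVE_DIR = "C:/Users/Administrator/Desktop/ultralytics-main"
paths = [os.path.join(SAVE_DIR, "vehicle_classifier_svm.pkl"),
         os.path.join(SAVE_DIR, "standard_scaler.pkl")]

for path in paths:
    if os.path.exists(path):
        obj = joblib.load(path)                       # reload the pickled object
        print(f"OK: {path} -> {type(obj).__name__}")
    else:
        # A missing file means joblib.dump never ran; check the console output
        # of the training run for the exception that aborted main().
        print(f"Missing: {path}")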
Related question (06-15): "Write the code that loads and uses the model trained by this script." The poster's training script:

import cv2
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score, classification_report
import os
import joblib

# Image preprocessing adjusted so the output size matches the HOG requirements
def preprocess_image(image):
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # The target size here must match the winSize used when creating the HOGDescriptor
    resized_image = cv2.resize(gray_image, (64, 128))  # example size, adjust as needed
    # normalized_image = resized_image / 255.0  # HOG computation usually does not need normalization
    return resized_image  # return the resized grayscale image directly

def extract_hog_features(images):
    # Create a HOG descriptor whose parameters match the preprocessing size
    hog = cv2.HOGDescriptor(_winSize=(64, 128),
                            _blockSize=(16, 16),
                            _blockStride=(8, 8),
                            _cellSize=(8, 8),
                            _nbins=9)
    hog_features = []
    for img in images:
        # Preprocess the image first
        preprocessed_img = preprocess_image(img)
        # Make sure the image size is correct
        if preprocessed_img.shape[0] < 128 or preprocessed_img.shape[1] < 64:
            print(f"Warning: image size {preprocessed_img.shape} is smaller than the HOG window (64, 128)")
            # Resize to the minimum size
            preprocessed_img = cv2.resize(preprocessed_img, (64, 128))
        # Compute HOG features
        features = hog.compute(preprocessed_img)
        if features is not None:
            hog_features.append(features.ravel())
        else:
            print("HOG feature computation failed")
    return np.array(hog_features)

def load_images_from_folder(folder):
    images = []
    labels = []
    class_names = []

    # Check that the folder exists
    if not os.path.exists(folder):
        raise FileNotFoundError(f"Dataset directory does not exist: {folder}")

    # Each subdirectory represents one class
    for class_name in os.listdir(folder):
        class_dir = os.path.join(folder, class_name)
        if os.path.isdir(class_dir):
            class_names.append(class_name)
            class_idx = len(class_names) - 1  # use the index as the label
            print(f"Loading class: {class_name} (index: {class_idx})")

            image_count = 0
            # Iterate over the files in the class directory
            for filename in os.listdir(class_dir):
                if filename.lower().endswith(('.jpg', '.jpeg', '.png', '.bmp')):
                    file_path = os.path.join(class_dir, filename)
                    try:
                        img = cv2.imread(file_path)
                        if img is not None:
                            images.append(img)
                            labels.append(class_idx)
                            image_count += 1
                        else:
                            print(f"Could not read image: {file_path}")
                    except Exception as e:
                        print(f"Error loading image {file_path}: {e}")
            print(f"  Loaded {image_count} images")

    return images, labels, class_names

if __name__ == "__main__":
    # Fix: the dataset directory must point to the folder containing the class subdirectories, not to a single image file
    dataset_folder = r"E:\flowers"  # replace with the actual path containing class subdirectories
    try:
        images, labels, class_names = load_images_from_folder(dataset_folder)
        if len(images) == 0:
            raise ValueError("No images were loaded! Check the dataset path and contents")
        print(f"Loaded {len(images)} images in total, {len(class_names)} classes")

        # Extract HOG features
        hog_features = extract_hog_features(images)
        if len(hog_features) == 0:
            raise ValueError("No HOG features were extracted")

        # Split the dataset
        X_train, X_test, y_train, y_test = train_test_split(
            hog_features, labels, test_size=0.3, random_state=42
        )

        # Train the SVM model
        svm_model = SVC(kernel='rbf', C=1.0, gamma='scale')
        svm_model.fit(X_train, y_train)

        # Evaluate the model
        y_pred = svm_model.predict(X_test)
        accuracy = accuracy_score(y_test, y_pred)
        print(f"Accuracy: {accuracy * 100:.2f}%")
        print(classification_report(y_test, y_pred, target_names=class_names))

        # Save the model
        joblib.dump(svm_model, 'svm_model.pkl')
        print("Model saved as svm_model.pkl")
    except Exception as e:
        print(f"Program error: {e}")
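A hedged sketch of the requested calling code, under these assumptions: svm_model.pkl sits in the working directory, "new_image.jpg" is a placeholder path, and the HOG parameters are copied verbatim from the training script above. The model returns only the numeric class index, because class_names is not stored in the .pkl file.

import cv2
import joblib

# HOG descriptor configured exactly as in the training script
hog = cv2.HOGDescriptor(_winSize=(64, 128), _blockSize=(16, 16),
                        _blockStride=(8, 8), _cellSize=(8, 8), _nbins=9)

# Load the model saved by joblib.dump(svm_model, 'svm_model.pkl')
model = joblib.load('svm_model.pkl')

# Placeholder image path: replace with the image you want to classify
img = cv2.imread('new_image.jpg')
if img is None:
    raise FileNotFoundError("new_image.jpg could not be read")

# Same preprocessing as preprocess_image(): grayscale + resize to the HOG window
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv2.resize(gray, (64, 128))

# Compute the HOG feature vector and reshape it into a single-sample batch
features = hog.compute(gray).ravel().reshape(1, -1)

pred = model.predict(features)[0]
print(f"Predicted class index: {pred}")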