A summary of several solutions to the digit-recognizer problem

This post describes how to fix a TensorFlow import error under Anaconda on Windows 10, covering the upgrade from Python 2.7 to Python 3.5 and installing the CPU build of TensorFlow with the conda command. It also walks through classical approaches to the digit-recognizer problem, including random forests, and a first CNN model with its training results, although the large data set makes training slow.


First, the ideal accuracy curve:
[Image: ideal curve]

Code for the various methods:

#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Created on 2018-08-09
Author: Gunther17

Editor shortcuts:
  Ctrl + 1: comment/uncomment
  Ctrl + 4/5: block comment/block uncomment
  Ctrl + L: go to line
  Tab / Shift + Tab: indent/unindent
  Ctrl + I: show help
'''
import os.path
import csv
import time
import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC


data_dir='D:/data Competition/digit-recognizer/'  # forward slashes avoid backslash-escape issues on Windows

# Load the data
def opencsv():
    data_train=pd.read_csv(os.path.join(data_dir,'input/train.csv'))
    data_test=pd.read_csv(os.path.join(data_dir,'input/test.csv'))

    train_data=data_train.values[0:,1:]  # all training rows, pixel columns only  [rows, columns]
    train_label=data_train.values[0:,0]  # the first column holds the labels
    test_data=data_test.values[0:,0:]    # all test rows

    return train_data,train_label,test_data


def saveResults(result,csvName):
    with open(csvName,'w',newline='') as myfile:
        '''
        Create the file that records the output ('w' vs. 'wb' matters here).
        Python 3 strictly separates str and bytes, unlike Python 2 where some
        functions accepted either. So when calling writerow under Python 3,
        open the file in 'w' mode (not 'wb') and pass newline='' to avoid blank lines.
        '''
        mywrite=csv.writer(myfile)
        mywrite.writerow(["ImageId","Label"])
        index=0
        for r in result:
            index+=1
            mywrite.writerow([index,int(r)])
        print('Saved successfully....')


def knnClassify(traindata,trainlabel):
    print('Train knn...')
    knnClf=KNeighborsClassifier() # default:k = 5,defined by yourself:KNeighborsClassifier(n_neighbors=10)
    knnClf.fit(traindata,np.ravel(trainlabel))  # ravel/flatten both return a 1-D array; flatten returns a copy, ravel a view when possible
    return knnClf

def dtClassify(traindata,trainlabel):
    print('Train decision tree...')
    dtClf=DecisionTreeClassifier()
    dtClf.fit(traindata,np.ravel(trainlabel))
    return dtClf

def rfClassify(traindata,trainlabel):
    print('Train Random forest...')
    rfClf=RandomForestClassifier()
    rfClf.fit(traindata,np.ravel(trainlabel))
    return rfClf

def svmClassify(traindata,trainlabel):
    print('Train svm...')
    svmClf=SVC(C=4,kernel='rbf')
    svmClf.fit(traindata,np.ravel(trainlabel))
    return svmClf



def dpPCA(x_train,x_test,Com):
    print('dimension reduction....')
    trainData=np.array(x_train)
    testData=np.array(x_test)
    '''
    n_components >= 1
      n_components=NUM   keep a fixed number of components
    0 < n_components < 1
      n_components=0.99  keep enough components to explain this fraction of the total variance
    '''
    pca=PCA(n_components=Com,whiten=False)
    pca.fit(trainData)  # fit the model with X
    pcaTrainData=pca.transform(trainData)  # project the training data onto the principal components
    pcaTestData=pca.transform(testData)
    # explained variance, explained variance ratio, and components of the fitted PCA
    # print(pca.explained_variance_,'\n',pca.explained_variance_ratio_,'\n',pca.components_)
    return pcaTrainData,pcaTestData

def dRecognition_knn():  # despite the name, this trains and evaluates kNN, decision tree, random forest and SVM
    start_time=time.time()
    #load data
    trainData,trainLabel,testData=opencsv()
    print('load data finish...')
    stop_time1=time.time()
    print('load data took: %f' %(stop_time1-start_time))

    #dimension reduction
    trainData,testData=dpPCA(trainData,testData,0.8)

    knnClf=knnClassify(trainData,trainLabel)
    dtClf=dtClassify(trainData,trainLabel)
    rfClf=rfClassify(trainData,trainLabel)
    svmClf=svmClassify(trainData,trainLabel)


    trainlabel_knn=knnClf.predict(trainData)
    trainlabel_dt=dtClf.predict(trainData)
    trainlabel_rf=rfClf.predict(trainData)
    trainlabel_svm=svmClf.predict(trainData)

    knn_acc=accuracy_score(trainLabel,trainlabel_knn)
    print('knn train accscore:%f'%(knn_acc))

    dt_acc=accuracy_score(trainLabel,trainlabel_dt)
    print('dt train accscore:%f'%(dt_acc))

    rf_acc=accuracy_score(trainLabel,trainlabel_rf)
    print('rf train accscore:%f'%(rf_acc))

    svm_acc=accuracy_score(trainLabel,trainlabel_svm)
    print('svm train accscore:%f'%(svm_acc))

    testLabel_knn=knnClf.predict(testData)
    testLabel_dt=dtClf.predict(testData)
    testLabel_rf=rfClf.predict(testData)
    testLabel_svm=svmClf.predict(testData)

    saveResults(testLabel_knn,os.path.join(data_dir,'output/Result_knn.csv'))
    saveResults(testLabel_dt,os.path.join(data_dir,'output/Result_dt.csv'))
    saveResults(testLabel_rf,os.path.join(data_dir,'output/Result_rf.csv'))
    saveResults(testLabel_svm,os.path.join(data_dir,'output/Result_svm.csv'))
    print('knn dt rf svm process finished ....')
    stop_time2=time.time()
    print('knn/dt/rf/svm classification took: %f' %(stop_time2-start_time))

if __name__=='__main__':
    dRecognition_knn()
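
Note that the script above reports accuracy on the same data the models were fitted on, which is optimistic. A held-out estimate can be obtained with the same helpers, for example (an illustrative sketch; the 10% split fraction is arbitrary):

# Optional: estimate accuracy on a held-out split instead of the training data.
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

def heldOutAccuracy():
    trainData, trainLabel, _ = opencsv()
    x_tr, x_val, y_tr, y_val = train_test_split(trainData, trainLabel,
                                                test_size=0.1, random_state=2)
    x_tr, x_val = dpPCA(x_tr, x_val, 0.8)   # fit PCA on the training split only
    rfClf = rfClassify(x_tr, y_tr)
    print('rf validation accscore: %f' % accuracy_score(y_val, rfClf.predict(x_val)))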







Finally, the kNN figure:
[Image: kNN results]
Random forest figure:
[Image: random forest results]


Method 2: CNN (to be updated):

Environment: Windows 10, 64-bit. Python reports the error:
ImportError: No module named tensorflow.python
So this comes down to installing TensorFlow with Anaconda on Windows 10.

Solution: at the time of writing, TensorFlow on Windows only supports Python 3.5.

Install TensorFlow through Anaconda with the conda command; that gives you the CPU build of TensorFlow. I also uninstalled the old Python 2.7.
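
To confirm the install worked, a minimal check along these lines can be run inside the new conda environment (a sketch assuming the TensorFlow 1.x CPU build that conda installed at the time):

# Sanity check for the Anaconda-installed TensorFlow CPU build (TF 1.x style).
import tensorflow as tf

hello = tf.constant('Hello, TensorFlow!')
sess = tf.Session()         # TF 1.x session API
print(sess.run(hello))      # should print b'Hello, TensorFlow!'
print(tf.__version__)       # version of the installed build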

  • Test successful!
    [Image: TensorFlow import test]

  • CNN code:

#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 11 11:03:20 2018

@author: Gunther17
"""


import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import os

from keras.callbacks import ReduceLROnPlateau
from keras.layers import Conv2D, Dense, Dropout, Flatten, MaxPool2D
from keras.models import Sequential
from keras.optimizers import RMSprop
from keras.preprocessing.image import ImageDataGenerator
from keras.utils.np_utils import to_categorical  # convert to one-hot-encoding


np.random.seed(2)

# Data path
data_dir = 'D:/data Competition/digit-recognizer/'  # forward slashes avoid backslash-escape issues on Windows

# Load the data
train = pd.read_csv(os.path.join(data_dir, 'input/train.csv'))
test = pd.read_csv(os.path.join(data_dir, 'input/test.csv'))

X_train = train.values[:, 1:]
Y_train = train.values[:, 0]
test = test.values

# Normalization
X_train = X_train / 255.0
test = test / 255.0

# Reshape image in 3 dimensions (height = 28px, width = 28px , canal = 1)
X_train = X_train.reshape(-1, 28, 28, 1)
test = test.reshape(-1, 28, 28, 1)

# Encode labels to one hot vectors (ex : 2 -> [0,0,1,0,0,0,0,0,0,0])
Y_train = to_categorical(Y_train, num_classes=10)

# Set the random seed
random_seed = 2

# Split the train and the validation set for the fitting
X_train, X_val, Y_train, Y_val = train_test_split(
    X_train, Y_train, test_size=0.1, random_state=random_seed)

# Set the CNN model
# my CNN architecture is In -> [[Conv2D->relu]*2 -> MaxPool2D -> Dropout]*2 -> Flatten -> Dense -> Dropout -> Out

model = Sequential()

model.add(
    Conv2D(
        filters=32,
        kernel_size=(5, 5),
        padding='Same',
        activation='relu',
        input_shape=(28, 28, 1)))
model.add(
    Conv2D(
        filters=32, kernel_size=(5, 5), padding='Same', activation='relu'))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(
    Conv2D(
        filters=64, kernel_size=(3, 3), padding='Same', activation='relu'))
model.add(
    Conv2D(
        filters=64, kernel_size=(3, 3), padding='Same', activation='relu'))
model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Dropout(0.25))

model.add(Flatten())
model.add(Dense(256, activation="relu"))
model.add(Dropout(0.5))
model.add(Dense(10, activation="softmax"))

# Define the optimizer
optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)

# Compile the model
model.compile(
    optimizer=optimizer, loss="categorical_crossentropy", metrics=["accuracy"])

epochs = 30
batch_size = 86

# Set a learning rate annealer
learning_rate_reduction = ReduceLROnPlateau(
    monitor='val_acc', patience=3, verbose=1, factor=0.5, min_lr=0.00001)

datagen = ImageDataGenerator(
    featurewise_center=False,  # set input mean to 0 over the dataset
    samplewise_center=False,  # set each sample mean to 0
    featurewise_std_normalization=False,  # divide inputs by std of the dataset
    samplewise_std_normalization=False,  # divide each input by its std
    zca_whitening=False,  # apply ZCA whitening
    rotation_range=10,  # randomly rotate images in the range (degrees, 0 to 180)
    zoom_range=0.1,  # Randomly zoom image 
    width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
    height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
    horizontal_flip=False,  # randomly flip images
    vertical_flip=False)  # randomly flip images

datagen.fit(X_train)

history = model.fit_generator(
    datagen.flow(
        X_train, Y_train, batch_size=batch_size),
    epochs=epochs,
    validation_data=(X_val, Y_val),
    verbose=2,
    steps_per_epoch=X_train.shape[0] // batch_size,
    callbacks=[learning_rate_reduction])

# predict results
results = model.predict(test)

# select the index with the maximum probability
results = np.argmax(results, axis=1)

results = pd.Series(results, name="Label")

submission = pd.concat(
    [pd.Series(
        range(1, 28001), name="ImageId"), results], axis=1)

submission.to_csv(os.path.join(data_dir, 'output/Result_keras_CNN.csv'),index=False)
print('finished')
  • Results:
    [Image: CNN training results]

  • The trick itself is meaningless: it simply trains on the already-known MNIST dataset, so treat what follows as playing around.
    Train on train.csv, then predict on the test set.
    The data set is large, so the run takes quite a long time.
    [Image: training output]


  • If the CNN is trained on all of the data, the trick scores 0.99942; see Reference 2. A minimal sketch of the trick is given below.
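
A sketch of what the trick amounts to, assuming keras.datasets.mnist is available; since the Kaggle test images are drawn from MNIST, training on all 70,000 known images effectively leaks the test labels, which is why the resulting score says nothing about generalization:

# Sketch of the "trick": train on the full, publicly known MNIST dataset.
# Illustrative only; reuses the CNN ("model") and preprocessing defined above.
import numpy as np
from keras.datasets import mnist
from keras.utils.np_utils import to_categorical

(x1, y1), (x2, y2) = mnist.load_data()   # 60,000 train + 10,000 test images
X_all = np.concatenate([x1, x2], axis=0).reshape(-1, 28, 28, 1) / 255.0
Y_all = to_categorical(np.concatenate([y1, y2], axis=0), num_classes=10)

# model.fit(X_all, Y_all, batch_size=86, epochs=30)  # then predict on the Kaggle test set as before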





Reference: apachecn

Reference 2
