Keras multilayer perceptron examples

A multilayer perceptron is generally used for classification and regression problems, mapping data from multiple input dimensions onto a single output dimension.

Example 1: binary classification, predicting whether a patient has diabetes

The network has three layers: the first Dense layer has 12 neurons, takes 8 input features, and uses the ReLU activation; the hidden layer has 8 neurons (also ReLU); the output layer has a single neuron with a sigmoid activation.

The model uses Adam, an efficient gradient-descent algorithm, as the optimizer, binary cross-entropy as the loss function, and accuracy as the evaluation metric. The data is the Pima Indians diabetes dataset, which can be downloaded online.

Just put the diabetes.csv file in the same directory as the script.

from keras.models import Sequential
from keras.layers import Dense
import numpy as np
import tensorflow as tf
import keras.backend.tensorflow_backend as KTF
# Run on the CPU only (TensorFlow 1.x / standalone Keras session setup)
KTF.set_session(tf.Session(config=tf.ConfigProto(device_count={'GPU': 0})))
# Multilayer perceptron
np.random.seed(7)
dataset = np.loadtxt('diabetes.csv', delimiter=',')
x = dataset[:, 0:8]  # first 8 columns are the input features
y = dataset[:, 8]    # last column is the 0/1 label
# Build the model
model = Sequential()
model.add(Dense(12, input_dim=8, activation='relu'))
model.add(Dense(8, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
# Compile the model
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# Train the model
model.fit(x=x, y=y, epochs=150, batch_size=10, validation_split=0.2)
# Evaluate the model
scores = model.evaluate(x=x, y=y)
print('\n%s : %.2f%%' % (model.metrics_names[1], scores[1] * 100))
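
The trained model can then be used to score new samples; the sigmoid output is a probability that can be thresholded at 0.5. A minimal sketch, using two hypothetical feature vectors chosen only for illustration:

# Predict on two hypothetical samples (8 feature values each)
new_samples = np.array([
    [6, 148, 72, 35, 0, 33.6, 0.627, 50],
    [1, 85, 66, 29, 0, 26.6, 0.351, 31],
])
probabilities = model.predict(new_samples)
# Threshold the sigmoid output at 0.5 to get a 0/1 class
classes = (probabilities > 0.5).astype('int32')
print(probabilities)
print(classes)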

With epochs=150 the accuracy is about 79%; with epochs=250 it is about 80%, so the difference is small.
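
Rather than comparing fixed epoch counts by hand, an EarlyStopping callback can end training once the validation loss stops improving. A minimal sketch that replaces the model.fit call above (the patience value of 10 is an arbitrary choice):

from keras.callbacks import EarlyStopping

# Stop training once the validation loss has not improved for 10 consecutive epochs
early_stop = EarlyStopping(monitor='val_loss', patience=10)
model.fit(x=x, y=y, epochs=250, batch_size=10,
          validation_split=0.2, callbacks=[early_stop])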

Evaluating the model with K-fold cross-validation
from keras.models import Sequential
from keras.layers import Dense
import numpy as np
from sklearn.model_selection import StratifiedKFold

# Multilayer perceptron
np.random.seed(7)
dataset = np.loadtxt('diabetes.csv', delimiter=',')
x = dataset[:, 0:8]
y = dataset[:, 8]
# 10-fold stratified cross-validation
kfold = StratifiedKFold(n_splits=10, random_state=7, shuffle=True)
cv_scores = []
for train, validation in kfold.split(x, y):
    # Build the model
    model = Sequential()
    model.add(Dense(12, input_dim=8, activation='relu'))
    model.add(Dense(8, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    # Compile the model
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    # Train the model
    model.fit(x[train], y[train], epochs=150, batch_size=10, validation_split=0.2)
    # Evaluate the model on the held-out fold
    scores = model.evaluate(x=x[validation], y=y[validation], verbose=0)
    print('\n%s : %.2f%%' % (model.metrics_names[1], scores[1] * 100))
    cv_scores.append(scores[1] * 100)
# Print the mean and standard deviation across the folds
print('%.2f%%(+/-%.2f%%)' % (np.mean(cv_scores), np.std(cv_scores)))
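
The cross-validation loop only estimates how well this architecture generalizes; each fold's model is discarded. A common follow-up, sketched here under the same settings, is to train one final model on the full dataset once the estimate looks acceptable:

# Train a single final model on all of the data after cross-validation
final_model = Sequential()
final_model.add(Dense(12, input_dim=8, activation='relu'))
final_model.add(Dense(8, activation='relu'))
final_model.add(Dense(1, activation='sigmoid'))
final_model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
final_model.fit(x, y, epochs=150, batch_size=10, verbose=0)
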
Example 2: handwritten digit recognition
import numpy as np
import pandas as pd
from paint import show_train_history
from paint import plot_image_labels_prediction

from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.utils import np_utils
from keras.datasets import mnist

np.random.seed(10)
# (X_train_image, y_train_label), (X_test_image, y_test_label) = mnist.load_data()
f = np.load('./mnist.npz')
X_train_image, y_train_label = f['x_train'], f['y_train']
X_test_image, y_test_label = f['x_test'], f['y_test']
print('train data = ', len(X_train_image))
print('test data = ', len(X_test_image))


print(X_train_image.shape)
# plot_image_labels_prediction(X_train_image, y_train_label, [], 0, 10)
# Reshape each 28x28 two-dimensional image into a vector of 784 float values
x_Train = X_train_image.reshape(60000, 784).astype('float32')
x_Test = X_test_image.reshape(10000, 784).astype('float32')
# Normalize the pixel values, which originally range from 0 to 255
x_Train_normalize = x_Train / 255
x_Test_normalize = x_Test / 255

# The labels are digits 0-9; one-hot encode them into ten 0/1 values,
# matching the 10 neurons of the output layer
y_train_label_Onehot = np_utils.to_categorical(y_train_label)
y_test_label_Onehot = np_utils.to_categorical(y_test_label)
# Build the model
model = Sequential()
# Initialize the weights from a normal distribution, ReLU activation; this defines the input and hidden layer
model.add(Dense(units=256, input_dim=784, kernel_initializer='normal', activation='relu'))

model.add(Dropout(0.5))
# Output layer with 10 neurons
model.add(Dense(units=10, kernel_initializer='normal', activation='softmax'))
# Show the model summary
print(model.summary())

# Compile the model: categorical cross-entropy loss, Adam optimizer, accuracy metric
model.compile(loss='categorical_crossentropy',
              optimizer='adam', metrics=['accuracy'])

# Train the model: 80% of the data for training, 20% for validation
# 10 epochs, 200 samples per batch, verbose=2 prints the training progress
train_history = model.fit(x=x_Train_normalize,
                          y=y_train_label_Onehot,
                          validation_split=0.2,
                          epochs=10, batch_size=200, verbose=2)

show_train_history(train_history, 'acc', 'val_acc')
# Evaluate the model; accuracy is about 0.97
scores = model.evaluate(x_Test_normalize, y_test_label_Onehot)
print(scores)
# Predict on the normalized test images (the model was trained on normalized inputs)
predictions = model.predict_classes(x_Test_normalize)
# Confusion matrix
print(pd.crosstab(y_test_label, predictions, rownames=['label'], colnames=['predict']))

df = pd.DataFrame({'label': y_test_label, 'predict': predictions})
print(df[(df.label == 5) & (df.predict == 3)])
# plot_image_labels_prediction(X_test_image, y_test_label, predictions, idx=340)
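
Once trained, the model can also be saved to disk and reloaded later instead of retraining. A small sketch; the filename mnist_mlp.h5 is just an illustrative choice:

from keras.models import load_model

# Save the trained network (architecture + weights) to an HDF5 file
model.save('mnist_mlp.h5')
# ... later, restore it without retraining
restored = load_model('mnist_mlp.h5')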

# paint.py: the plotting helpers imported by the script above
import matplotlib.pyplot as plt

def plot_image_labels_prediction(image, labels, prediction,
                                 idx, num=10):
    fig = plt.gcf()
    fig.set_size_inches(12, 14)
    if num > 25: num = 25
    for i in range(0, num):
        ax = plt.subplot(5, 5, 1 + i)  # 5x5 grid of subplots
        ax.imshow(image[idx], cmap='binary')
        title = "label=" + str(labels[idx])
        if len(prediction) > 0:
            title += ",predict=" + str(prediction[idx])
        ax.set_title(title, fontsize=10)
        ax.set_xticks([])  # hide the axis ticks
        ax.set_yticks([])
        idx += 1
    plt.show()

def show_train_history(train_history, train, validation):
    plt.plot(train_history.history[train])
    plt.plot(train_history.history[validation])

    plt.title('train history')
    plt.ylabel(train)
    plt.xlabel('Epoch')
    plt.legend(['train', 'validation'], loc='upper left')
    plt.show()
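
show_train_history simply plots two keys of train_history.history, so besides the accuracy curves it can also be called with the loss keys recorded by Keras:

# Plot the training and validation loss curves from the same History object
show_train_history(train_history, 'loss', 'val_loss')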


Example 3: Iris flower classification

The iris data has 4 feature values, so the input layer has 4 neurons; there are 3 classes, so the output layer has 3 neurons; two hidden layers are added in between.

from sklearn import datasets
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold

dataset = datasets.load_iris()

x = dataset.data
Y = dataset.target

seed = 7
np.random.seed(seed)

def create_model(optimizer='adam', init='glorot_uniform'):
    model = Sequential()
    model.add(Dense(units=4, activation='relu', input_dim=4, kernel_initializer=init))
    model.add(Dense(units=6, activation='relu', kernel_initializer=init))
    model.add(Dense(units=3, activation='softmax', kernel_initializer=init))

    model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])

    return model

# Wrap the Keras model so it can be used with scikit-learn
model = KerasClassifier(build_fn=create_model, epochs=299, batch_size=5, verbose=0)

# 10-fold cross-validation
kfold = KFold(n_splits=10, shuffle=True, random_state=seed)
results = cross_val_score(model, x, Y, cv=kfold)

print('Accuracy: %.2f%% (%.2f)' % (results.mean() * 100, results.std()))
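
Because create_model exposes optimizer and init as arguments, the same KerasClassifier wrapper can also be combined with scikit-learn's GridSearchCV to search over hyperparameters. A minimal sketch, where the candidate values are arbitrary examples:

from sklearn.model_selection import GridSearchCV

# Candidate hyperparameter values to try (example choices)
param_grid = {
    'optimizer': ['adam', 'rmsprop'],
    'init': ['glorot_uniform', 'normal'],
    'epochs': [100, 200],
    'batch_size': [5, 10],
}
model = KerasClassifier(build_fn=create_model, verbose=0)
grid = GridSearchCV(estimator=model, param_grid=param_grid)
grid_result = grid.fit(x, Y)
print('Best: %f using %s' % (grid_result.best_score_, grid_result.best_params_))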