Implementing a Linear Classifier and a Two-Layer Neural Network in Python

This post implements a simple two-dimensional, three-class classification task in Python using NumPy and Matplotlib, and visualizes the resulting decision boundaries. A softmax cross-entropy loss is defined to evaluate the classifier, and the parameters are improved through iterative gradient-descent training.

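For reference, the loss implemented in DataLoss below is the standard softmax cross-entropy. For the score vector $s$ of a single example with true class $y$:

$$p_k = \frac{e^{s_k}}{\sum_j e^{s_j}}, \qquad L = -\log p_y, \qquad \frac{\partial L}{\partial s_k} = p_k - \mathbb{1}[k = y]$$

Averaging over all examples gives the data loss; the gradient formula is why the code subtracts 1 from the probability of the correct class and divides by num_examples to obtain dscores.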

import matplotlib.pyplot as plt
import numpy as np

N = 100 # number of points per class
D = 2 # dimensionality
K = 3 # number of classes
X = np.zeros((N*K,D)) # data matrix (each row = single example)
y = np.zeros(N*K, dtype='uint8') # class labels

step_size = 1e-0 # learning rate for gradient descent
reg = 1e-3 # L2 regularization strength
num_examples = X.shape[0]

# generate the data: three interleaved spiral arms, one per class
for j in range(K):
    ix = range(N*j,N*(j+1))
    r = np.linspace(0.0,1,N) # radius
    t = np.linspace(j*4,(j+1)*4,N) + np.random.randn(N)*0.2 # theta
    X[ix] = np.c_[r*np.sin(t), r*np.cos(t)]
    y[ix] = j
# let's visualize the data:
#plt.scatter(X[:, 0], X[:, 1], c=y, s=10, cmap=plt.cm.Spectral)
#plt.show()

# softmax cross-entropy loss: returns the mean data loss and the gradient of the
# loss with respect to the scores (the last layer); reads the module-level
# y and num_examples
def DataLoss(scores):
    # shift scores so the largest is 0, for numerical stability before exponentiating
    exp_scores = np.exp(scores - np.max(scores, axis=1, keepdims=True))
    probs = exp_scores/np.sum(exp_scores, axis=1, keepdims=True)
    correct_logprobs = -np.log(probs[range(num_examples), y])
    data_loss = np.sum(correct_logprobs)/num_examples

    # gradient on the scores: p_k - 1 for the correct class, p_k otherwise
    dscores = probs
    dscores[range(num_examples), y] -= 1
    dscores /= num_examples

    return data_loss, dscores
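
# Sanity check (a minimal sketch): with all-zero scores the softmax is uniform,
# so the loss should be close to -log(1/K) = log(3) ≈ 1.10. Uncomment to verify:
#print(DataLoss(np.zeros((num_examples, K)))[0], np.log(K))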

# visualize the decision boundary
# idea: score a dense grid of points, then let contourf draw filled contours,
# which trace out the class regions, i.e. the decision boundary
def visualize_ans(f):
    h=0.02
    x_min,x_max=X[:,0].min()-1,X[:,0].max()+1
    y_min,y_max=X[:,1].min()-1,X[:,1].max()+1
    xx,yy=np.meshgrid(np.arange(x_min,x_max,h),np.arange(y_min,y_max,h))  # build the grid of sample points
    Z=f(xx,yy)
    Z=np.argmax(Z,axis=1)
    Z=Z.reshape(xx.shape)
    fig=plt.figure()
    plt.contourf(xx,yy,Z,cmap=plt.cm.Spectral,alpha=0.8)    # filled contours trace the decision regions
    plt.scatter(X[:,0],X[:,1],c=y,s=10,cmap=plt.cm.Spectral)
    plt.xlim(x_min,x_max)
    plt.ylim(y_min,y_max)
    plt.show()
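
# Example usage (a minimal sketch; W0 and b0 are throwaway names, not used
# elsewhere in this script): f must map the grid to an (N, K) score matrix.
#W0, b0 = 0.01*np.random.randn(D, K), np.zeros((1, K))
#visualize_ans(lambda xx, yy: np.dot(np.c_[xx.ravel(), yy.ravel()], W0) + b0)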

def linear_classifier():
    # initialize parameters: small random weights, zero biases
    W = 0.01*np.random.randn(D, K)
    b = np.zeros((1, K))

    for i in range(200):
        scores=np.dot(X,W)+b

        data_loss,dscores=DataLoss(scores)
        reg_loss=0.5*reg*np.sum(W*W)
        loss=data_loss+reg_loss

        if i % 10 == 0:
            print ("iteration %d: loss %f" % (i, loss))

        # backpropagate into the parameters (adding the regularization gradient to dW)
        dW = np.dot(X.T, dscores) + reg*W
        db = np.sum(dscores, axis=0, keepdims=True)

        # parameter update
        W += -step_size*dW
        b += -step_size*db

    # evaluate training set accuracy once, after the loop
    scores = np.dot(X, W) + b
    predicted_class = np.argmax(scores, axis=1)
    print('training accuracy: %.2f' % (np.mean(predicted_class == y)))

    visualize_ans(lambda xx,yy:np.dot(np.c_[xx.ravel(),yy.ravel()],W)+b)
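
# Note: the spiral classes are not linearly separable, so the linear model above
# typically plateaus around ~50% training accuracy; the two-layer ReLU network
# below can bend its decision boundary around each arm and fit the data almost
# perfectly.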
    

def nn_classifier():
    h = 100 # size of the hidden layer
    W = 0.01*np.random.randn(D, h)
    b = np.zeros((1, h))
    W2 = 0.01*np.random.randn(h, K)
    b2 = np.zeros((1, K))


    for i in range(10000):

        hidden_layer = np.maximum(0, np.dot(X, W) + b) #  ReLU activation
        scores = np.dot(hidden_layer, W2) + b2
        data_loss ,dscores= DataLoss(scores)
        reg_loss = 0.5*reg*np.sum(W*W) + 0.5*reg*np.sum(W2*W2)
        loss = data_loss + reg_loss

        if i % 1000 == 0:
            print ("iteration %d: loss %f" % (i, loss))

        
        # backpropagate first into the second layer's parameters
        dW2 = np.dot(hidden_layer.T, dscores)
        db2 = np.sum(dscores, axis=0, keepdims=True)

        # then through the hidden layer, zeroing gradients where the ReLU was inactive
        dhidden = np.dot(dscores, W2.T)
        dhidden[hidden_layer <= 0] = 0

        # and finally into the first layer's parameters
        dW = np.dot(X.T, dhidden)
        db = np.sum(dhidden, axis=0, keepdims=True)

        dW2 += reg * W2
        dW += reg * W

        W += -step_size * dW
        b += -step_size * db
        W2 += -step_size * dW2
        b2 += -step_size * db2                                                         

    # evaluate training set accuracy after training
    hidden_layer = np.maximum(0, np.dot(X, W) + b)
    scores = np.dot(hidden_layer, W2) + b2
    predicted_class = np.argmax(scores, axis=1)
    print('training accuracy: %.2f' % (np.mean(predicted_class == y)))

    visualize_ans(lambda xx,yy:np.dot(np.maximum(0,np.dot(np.c_[xx.ravel(),yy.ravel()],W)+b),W2)+b2)
    
    

if __name__ == '__main__':
    #linear_classifier() # uncomment to train the linear model instead
    nn_classifier()
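
Running the script as-is trains the two-layer network for 10,000 iterations, printing the loss every 1,000 iterations, and finally reports the training accuracy and plots the learned decision regions. Switching the call to linear_classifier() makes the contrast visible: the linear model's straight decision boundaries cannot separate the interleaved spirals, while the hidden ReLU layer lets the network carve a curved region around each arm.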



