Reproducing Logistic Regression and Linear Regression

This post walks through reproducing linear regression and logistic regression from scratch in Python: first a linear regression class fitted with the normal equation, then a quick look at the sigmoid function, and finally the logistic regression implementation trained with gradient descent.

Linear regression code:

# -*- coding: utf-8 -*-
"""
Created on Tue Nov 19 23:19:49 2019

@author: 86135
"""

import numpy as np
import matplotlib.pyplot as plt
class LinearRegression():
    def __init__(self):
        self.coef_ = None
        self.intercept_ = None
        self._theta = None

    def fit_normal(self, x_train, y_train):
        assert x_train.shape[0] == y_train.shape[0], \
            "the size of x_train must be equal to the size of y_train"
        # normal equation: theta = (X_b^T * X_b)^(-1) * X_b^T * y
        x_b = np.hstack([np.ones((len(x_train), 1)), x_train])
        self._theta = np.linalg.inv(x_b.T.dot(x_b)).dot(x_b.T).dot(y_train)
        self.intercept_ = self._theta[0]
        self.coef_ = self._theta[1:]
        return self

    def predict(self, x_predict):
        assert self.intercept_ is not None and self.coef_ is not None, \
            "must fit before predict!"
        assert x_predict.shape[1] == len(self.coef_), \
            "the feature number of x_predict must be equal to x_train"
        x_b = np.hstack([np.ones((len(x_predict), 1)), x_predict])
        return x_b.dot(self._theta)

    def __repr__(self):
        return "LinearRegression()"

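To check that the class above works, here is a minimal usage sketch on made-up synthetic data (the data, seed, and expected values are my own illustration, assuming the LinearRegression class above is already defined in the same session):

# hypothetical quick test of LinearRegression on synthetic data
np.random.seed(0)
x_train = np.random.rand(100, 2)
# true relationship: y = 2 + 3*x1 + 5*x2 plus a little noise
y_train = 2 + 3 * x_train[:, 0] + 5 * x_train[:, 1] + 0.01 * np.random.randn(100)

reg = LinearRegression()
reg.fit_normal(x_train, y_train)
print(reg.intercept_)          # should be close to 2
print(reg.coef_)               # should be close to [3, 5]
print(reg.predict(x_train[:5]))
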
Let's first take a look at the sigmoid function:

# -*- coding: utf-8 -*-
"""
Created on Mon Nov 18 22:48:04 2019

@author: 86135
"""

import numpy as np
import matplotlib.pyplot as plt
def sigmoid(t):
    return 1 / (1 + np.exp(-t))

# plot the sigmoid curve over [-10, 10]
x = np.linspace(-10, 10, 500)
y = sigmoid(x)

plt.plot(x, y)
plt.show()

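The sigmoid squashes any real input into the range (0, 1), and sigmoid(0) = 0.5; that 0.5 value is exactly the threshold used by predict in the logistic regression class below. A quick sanity check (my own addition, reusing the sigmoid defined above):

print(sigmoid(0))                  # 0.5
print(sigmoid(-10), sigmoid(10))   # close to 0 and close to 1
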
Logistic regression code:

# -*- coding: utf-8 -*-
"""
Created on Mon Nov 18 22:48:35 2019

@author: 86135
"""

import numpy as np
import matplotlib.pyplot as plt
class LogisticRegression():
    def __init__(self):
        self.coef_ = None
        self.intercept_ = None
        self._theta = None

    def _sigmoid(self, t):
        return 1. / (1. + np.exp(-t))

    def fit(self, x_train, y_train, eta=0.01, n_iters=1e4):
        assert x_train.shape[0] == y_train.shape[0], \
            "the size of x_train must be equal to the size of y_train"

        def j(theta, x_b, y):
            # cross-entropy loss
            y_hat = self._sigmoid(x_b.dot(theta))
            try:
                return -np.sum(y * np.log(y_hat) + (1 - y) * np.log(1 - y_hat)) / len(y)
            except:
                return float('inf')

        def dj(theta, x_b, y):
            # vectorized gradient of the cross-entropy loss
            return x_b.T.dot(self._sigmoid(x_b.dot(theta)) - y) / len(x_b)

        def gradient_descent(x_b, y, initial_theta, eta, n_iters=1e4, epsilon=1e-8):
            theta = initial_theta
            cur_iter = 0
            while cur_iter < n_iters:
                gradient = dj(theta, x_b, y)
                last_theta = theta
                theta = theta - eta * gradient
                # stop early once the loss no longer changes noticeably
                if abs(j(theta, x_b, y) - j(last_theta, x_b, y)) < epsilon:
                    break
                cur_iter += 1
            return theta

        x_b = np.hstack([np.ones((len(x_train), 1)), x_train])
        initial_theta = np.zeros(x_b.shape[1])
        self._theta = gradient_descent(x_b, y_train, initial_theta, eta, n_iters)
        self.intercept_ = self._theta[0]
        self.coef_ = self._theta[1:]
        return self

    def predict_proba(self, x_predict):
        assert self.intercept_ is not None and self.coef_ is not None, \
            "must fit before predict!"
        assert x_predict.shape[1] == len(self.coef_), \
            "the feature number of x_predict must be equal to x_train"
        x_b = np.hstack([np.ones((len(x_predict), 1)), x_predict])
        return self._sigmoid(x_b.dot(self._theta))

    def predict(self, x_predict):
        assert self.intercept_ is not None and self.coef_ is not None, \
            "must fit before predict!"
        proba = self.predict_proba(x_predict)
        return np.array(proba >= 0.5, dtype='int')

    def __repr__(self):
        return "LogisticRegression()"
            
    
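As with the linear regression class, here is a minimal usage sketch on a tiny made-up one-feature dataset (the data and the expected output are my own illustration, assuming the LogisticRegression class above is already defined in the same session):

# hypothetical quick test of LogisticRegression on a toy separable dataset
x_train = np.array([[0.5], [1.0], [1.5], [3.0], [3.5], [4.0]])
y_train = np.array([0, 0, 0, 1, 1, 1])

clf = LogisticRegression()
clf.fit(x_train, y_train, eta=0.1, n_iters=1e4)
print(clf.intercept_, clf.coef_)
print(clf.predict_proba(np.array([[1.0], [3.0]])))  # low probability, then high probability
print(clf.predict(np.array([[1.0], [3.0]])))        # expected: [0 1]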

 
