机器学习实战(8) ——预测数值型数据回归(python实现)

本文通过实战代码深入解析机器学习中的关键算法,包括线性回归、局部加权线性回归、岭回归及前向逐步回归等。通过亲手实现这些算法,帮助读者更好地理解其工作原理与应用场景。

这是学习机器学习算法实战这本书时,写的代码实战。让自己对各个算法有更直观的了解,不能一直不写啊。不管简单还是不简单都亲自一行一行的敲一遍啊。

具体的源码和数据链接:https://pan.baidu.com/s/1G2S2pb5gfBnxGNNTFgTkEA 密码:fov0

这是第八章的代码和自己做的测试 regression.py,运行结果与书中基本一致。

# -*- coding: utf-8 -*-
# author: Yufeng Song
from numpy import*
import matplotlib.pyplot as plt
def loadDataSet(fileName):
    """Load a tab-delimited data file.

    Every column except the last is a feature; the last column is the label.
    Returns (dataMat, labelMat) as plain Python lists of floats.
    """
    dataMat = []
    labelMat = []
    # fix: original opened the file twice and never closed either handle
    with open(fileName) as fr:
        numFeat = len(fr.readline().split('\t')) - 1
        fr.seek(0)  # rewind so the first line is also parsed as data
        for line in fr:
            curLine = line.strip().split('\t')
            dataMat.append([float(curLine[i]) for i in range(numFeat)])
            labelMat.append(float(curLine[-1]))
    return dataMat, labelMat

def standRegres(xArr, yArr):
    """Ordinary least squares via the normal equations.

    Returns the weight vector ws (n x 1 matrix), or None when X^T*X
    is singular and cannot be inverted.
    """
    xMat = mat(xArr)
    yMat = mat(yArr).T
    gram = xMat.T * xMat  # Gram matrix X^T * X
    if linalg.det(gram) == 0.0:
        print("This matrix is singular,cannot do inverse")
        return
    return gram.I * (xMat.T * yMat)

def lwlr(testPoint, xArr, yArr, k=1.0):
    """Locally weighted linear regression prediction for a single point.

    Each training sample is weighted by a Gaussian kernel of its distance
    to testPoint; k controls how fast the weight decays.  Returns the
    predicted value (1x1 matrix), or None if the weighted X^T*X is singular.
    """
    xMat = mat(xArr)
    yMat = mat(yArr).T
    m = shape(xMat)[0]
    weights = mat(eye((m)))
    denom = -2.0 * k ** 2  # hoisted kernel denominator
    for row in range(m):
        diff = testPoint - xMat[row, :]
        weights[row, row] = exp(diff * diff.T / denom)
    xTx = xMat.T * (weights * xMat)
    if linalg.det(xTx) == 0.0:
        print("This matrix is singular, cannot do inverse")
        return
    ws = xTx.I * (xMat.T * (weights * yMat))
    return testPoint * ws

def lwlrTest(testArr, xArr, yArr, k=1.0):
    """Run locally weighted regression at every point of testArr.

    Returns a 1-D array of predictions, one per test point.
    """
    numPoints = shape(testArr)[0]
    predictions = zeros(numPoints)
    for idx in range(numPoints):
        predictions[idx] = lwlr(testArr[idx], xArr, yArr, k)
    return predictions

def rssError(yArr, yHatArr):
    """Residual sum of squares between actual and predicted values."""
    residuals = yArr - yHatArr
    return (residuals ** 2).sum()

def ridgeRegres(xMat, yMat, lam=0.2):
    """Ridge regression: solve (X^T*X + lam*I) ws = X^T*y.

    Returns ws, or None when the regularized matrix is still singular
    (only possible with lam == 0).
    """
    shrunk = xMat.T * xMat + eye(shape(xMat)[1]) * lam
    if linalg.det(shrunk) == 0.0:
        print ("This matrix is singular, cannot do inverse")
        return
    return shrunk.I * (xMat.T * yMat)

def ridgeTest(xArr, yArr):
    """Sweep ridge regression over 30 values of lambda (exp(-10)..exp(19)).

    Centers y and standardizes each column of X (zero mean, unit variance)
    before fitting.  Returns a 30 x n matrix, one weight vector per row.
    """
    xMat = mat(xArr)
    yMat = mat(yArr).T
    yMat = yMat - mean(yMat, 0)  # center y; removes the intercept term
    # standardize X: subtract column means, divide by column variances
    xMat = (xMat - mean(xMat, 0)) / var(xMat, 0)
    numTestPts = 30
    wMat = zeros((numTestPts, shape(xMat)[1]))
    for i in range(numTestPts):
        wMat[i, :] = ridgeRegres(xMat, yMat, exp(i - 10)).T
    return wMat

def stageWise(xArr, yArr, eps=0.01, numIt=100):
    """Forward stagewise linear regression (greedy coordinate steps).

    On each of numIt iterations, tries nudging every weight by +/-eps and
    keeps the single change that most reduces the residual sum of squares.
    Returns a numIt x n matrix recording ws after each iteration.
    """
    xMat = mat(xArr)
    yMat = mat(yArr).T
    yMat = yMat - mean(yMat, 0)  # center y (ys could also be standardized)
    m, n = shape(xMat)
    history = zeros((numIt, n))
    ws = zeros((n, 1))
    best = ws.copy()
    for it in range(numIt):
        lowestError = inf
        for j in range(n):
            for sign in (-1, 1):
                candidate = ws.copy()
                candidate[j] += eps * sign
                fitted = xMat * candidate
                err = ((yMat.A - fitted.A) ** 2).sum()  # RSS of this candidate
                if err < lowestError:
                    lowestError = err
                    best = candidate
        ws = best.copy()
        history[it, :] = ws.T
    return history


from time import sleep
import json
# import urllib2
import urllib.request
# response = urllib.request.urlopen('http://python.org/')
def searchForSet(retX, retY, setNum, yr, numPce, origPrc):
    """Query the (long-deprecated) Google Shopping API for one LEGO set.

    Appends a feature row [yr, numPce, newFlag, origPrc] to retX and the
    selling price to retY for every listing priced above half of origPrc.
    NOTE(review): this API endpoint was shut down by Google; kept for the
    book's historical example only.
    """
    sleep(10)  # throttle so repeated calls do not hammer the API
    myAPIstr = 'AIzaSyD2cR2KFyx12hXu6PFU-wrWot3NXvko8vY'
    searchURL = 'https://www.googleapis.com/shopping/search/v1/public/products?key=%s&country=US&q=lego+%d&alt=json' % (myAPIstr, setNum)
    pg = urllib.request.urlopen(searchURL)
    try:
        retDict = json.loads(pg.read())
    finally:
        pg.close()  # fix: the response object was never closed
    for i in range(len(retDict['items'])):
        # fix: bare `except:` also swallowed KeyboardInterrupt/SystemExit
        try:
            currItem = retDict['items'][i]
            if currItem['product']['condition'] == 'new':
                newFlag = 1
            else:
                newFlag = 0
            listOfInv = currItem['product']['inventories']
            for item in listOfInv:
                sellingPrice = item['price']
                # discard suspiciously cheap listings (likely incomplete sets)
                if sellingPrice > origPrc * 0.5:
                    print ("%d\t%d\t%d\t%f\t%f" % (yr,numPce,newFlag,origPrc, sellingPrice))
                    retX.append([yr, numPce, newFlag, origPrc])
                    retY.append(sellingPrice)
        except Exception:
            print ('problem with item %d' % i)



def scrapePage(inFile,outFile,yr,numPce,origPrc):
    """Parse one saved eBay results page of LEGO auctions.

    Appends one tab-separated row per sold item to outFile:
    yr, numPce, newFlag, origPrc, soldPrice.
    """
    from bs4 import BeautifulSoup
    # fix: use context managers -- the input handle `fr` was never closed
    with open(inFile) as fr, open(outFile, 'a') as fw:  # 'a' is append mode
        soup = BeautifulSoup(fr.read())
        i = 1
        currentRow = soup.findAll('table', r="%d" % i)
        # fix: dropped the redundant duplicate findAll that re-ran the same
        # query at the top of every iteration
        while(len(currentRow)!=0):
            title = currentRow[0].findAll('a')[1].text
            lwrTitle = title.lower()
            # "new"/"nisb" in the title marks a new-in-sealed-box item
            if (lwrTitle.find('new') > -1) or (lwrTitle.find('nisb') > -1):
                newFlag = 1.0
            else:
                newFlag = 0.0
            soldUnicde = currentRow[0].findAll('td')[3].findAll('span')
            if len(soldUnicde)==0:
                print ("item #%d did not sell" % i)
            else:
                soldPrice = currentRow[0].findAll('td')[4]
                priceStr = soldPrice.text
                priceStr = priceStr.replace('$','') #strips out $
                priceStr = priceStr.replace(',','') #strips out ,
                if len(soldPrice)>1:
                    priceStr = priceStr.replace('Free shipping', '') #strips out Free Shipping
                print ("%s\t%d\t%s" % (priceStr,newFlag,title))
                fw.write("%d\t%d\t%d\t%f\t%s\n" % (yr,numPce,newFlag,origPrc,priceStr))
            i += 1
            currentRow = soup.findAll('table', r="%d" % i)

# def setDataCollect(retX, retY):
#     searchForSet(retX, retY, 8288, 2006, 800, 49.99)
#     searchForSet(retX, retY, 10030, 2002, 3096, 269.99)
#     searchForSet(retX, retY, 10179, 2007, 5195, 499.99)
#     searchForSet(retX, retY, 10181, 2007, 3428, 199.99)
#     searchForSet(retX, retY, 10189, 2008, 5922, 299.99)
#     searchForSet(retX, retY, 10196, 2009, 3263, 249.99)

def setDataCollect():
    """Scrape the six saved LEGO-set HTML pages into out.txt."""
    pages = [
        ('setHtml/lego8288.html', 2006, 800, 49.99),
        ('setHtml/lego10030.html', 2002, 3096, 269.99),
        ('setHtml/lego10179.html', 2007, 5195, 499.99),
        ('setHtml/lego10181.html', 2007, 3428, 199.99),
        ('setHtml/lego10189.html', 2008, 5922, 299.99),
        ('setHtml/lego10196.html', 2009, 3263, 249.99),
    ]
    for inFile, yr, numPce, origPrc in pages:
        scrapePage(inFile, 'out.txt', yr, numPce, origPrc)

def crossValidation(xArr,yArr,numVal=10):
    """Cross-validate ridge regression over numVal random 90/10 splits.

    Tries the 30 lambda values produced by ridgeTest, picks the one with
    the lowest mean test error, then prints the best weights mapped back
    to the original (un-standardized) feature scale plus the constant term.
    """
    m = len(yArr)
    # fix: range() is immutable in Python 3 -- shuffle needs a mutable list
    indexList = list(range(m))
    errorMat = zeros((numVal,30))#create error mat 30columns numVal rows
    for i in range(numVal):
        trainX=[]; trainY=[]
        testX = []; testY = []
        random.shuffle(indexList)
        for j in range(m):#create training set based on first 90% of values in indexList
            if j < m*0.9:
                trainX.append(xArr[indexList[j]])
                trainY.append(yArr[indexList[j]])
            else:
                testX.append(xArr[indexList[j]])
                testY.append(yArr[indexList[j]])
        wMat = ridgeTest(trainX,trainY)    #get 30 weight vectors from ridge
        for k in range(30):#loop over all of the ridge estimates
            matTestX = mat(testX); matTrainX=mat(trainX)
            meanTrain = mean(matTrainX,0)
            varTrain = var(matTrainX,0)
            # standardize the test set with the TRAINING statistics
            matTestX = (matTestX-meanTrain)/varTrain #regularize test with training params
            yEst = matTestX * mat(wMat[k,:]).T + mean(trainY)#test ridge results and store
            errorMat[i,k]=rssError(yEst.T.A,array(testY))
    meanErrors = mean(errorMat,0)#calc avg performance of the different ridge weight vectors
    minMean = float(min(meanErrors))
    # NOTE(review): wMat here holds weights from the LAST trial only --
    # this matches the book's original code
    bestWeights = wMat[nonzero(meanErrors==minMean)]
    #can unregularize to get model
    #when we regularized we wrote Xreg = (x-meanX)/var(x)
    #we can now write in terms of x not Xreg:  x*w/var(x) - meanX/var(x) +meanY
    xMat = mat(xArr); yMat=mat(yArr).T
    meanX = mean(xMat,0); varX = var(xMat,0)
    unReg = bestWeights/varX
    print ("the best model from Ridge Regression is:\n",unReg)
    print ("with constant term: ",-1*sum(multiply(meanX,unReg)) + mean(yMat))


if __name__ == '__main__':
    # Test: ordinary least squares fit and plot (book p. 139)
    # xArr,yArr = loadDataSet('ex0.txt')
    # print(xArr[0:2])
    # ws = standRegres(xArr,yArr)
    # # print(ws)
    # xMat = mat(xArr)
    # yMat = mat(yArr)
    # yHat = xMat*ws
    # fig = plt.figure()
    # ax = fig.add_subplot(111)
    # ax.scatter(xMat[:,1].flatten().A[0],yMat.T[:,0].flatten().A[0])  # flatten to a 1-D array
    # xCopy = xMat.copy()
    # # print(xCopy)
    # xCopy.sort(0)  # sort each column
    # # print(xCopy)
    # yHat = xCopy*ws
    # ax.plot(xCopy[:,1],yHat)
    # plt.show()
    # cor = corrcoef(yHat.T,yMat)
    # print(cor)

    # Test from page 143: locally weighted linear regression
    # xArr,yArr = loadDataSet('ex0.txt')
    # # print(yArr[0])
    # # print(lwlr(xArr[0],xArr,yArr,1.0))
    # yHat = lwlrTest(xArr,xArr,yArr,0.003)
    # xMat = mat(xArr)
    # print('#'*40)
    # print(xMat)
    # print('*'*40)
    # print(xMat[151])
    # print(xMat[:,1])  # take column 1
    # srtInd = xMat[:,1].argsort(0)
    #
    # print('#'*40)
    # print(srtInd)
    # xSort = xMat[srtInd][:,0,:]
    # print('#'*40)
    # print(xSort)
    # fig = plt.figure()
    # ax = fig.add_subplot(111)
    # ax.plot(xSort[:,1],yHat[srtInd])
    # ax.scatter(xMat[:,1].flatten().A[0],mat(yArr).T[:,0].flatten().A[0],s=2,c='red')  # flatten to a 1-D array
    # plt.show()

    # Test from page 145: abalone data, compare kernel widths
    # abX,abY = loadDataSet('abalone.txt')
    # # yHat01 = lwlrTest(abX[0:99],abX[0:99],abY[0:99],0.1)
    # # yHat1 = lwlrTest(abX[0:99],abX[0:99],abY[0:99],1)
    # # yHat10 = lwlrTest(abX[0:99],abX[0:99],abY[0:99],10)
    # #
    # # print(rssError(abY[0:99],yHat01.T))
    # # print(rssError(abY[0:99],yHat1.T))
    # # print(rssError(abY[0:99],yHat10.T))
    #
    # yHat01 = lwlrTest(abX[100:199],abX[0:99],abY[0:99],0.1)
    # print(rssError(abY[100:199],yHat01))
    #
    # yHat1 = lwlrTest(abX[100:199],abX[0:99],abY[0:99],1)
    # print(rssError(abY[100:199],yHat1))
    #
    # yHat10 = lwlrTest(abX[100:199],abX[0:99],abY[0:99],10)
    # print(rssError(abY[100:199],yHat10))
    #
    # ws = standRegres(abX[0:99],abY[0:99])
    # yHat = mat(abX[100:199])*ws
    # print(rssError(abY[100:199],yHat.T.A))

    # Test from page 147: ridge regression coefficient paths
    # abX,abY = loadDataSet('abalone.txt')
    # ridgeWeights = ridgeTest(abX,abY)
    # fig = plt.figure()
    # ax = fig.add_subplot(111)
    # print(ridgeWeights)
    # print(ridgeWeights[-1,:])
    # ax.plot(ridgeWeights[0,:])
    # plt.show()

    # Test from page 150: forward stagewise regression
    # xArr,yArr = loadDataSet('abalone.txt')
    # stageWiseWeights = stageWise(xArr,yArr,0.01,200)
    # fig = plt.figure()
    # ax = fig.add_subplot(111)
    # print(stageWiseWeights)
    # print(stageWiseWeights)
    # ax.plot(stageWiseWeights)
    # plt.show()

    # lgX/lgY would collect features/prices for the (dead) API path above
    lgX=[];lgY=[]
    # fix: setDataCollect() returns None, so wrapping it in print() only
    # printed "None" -- just call it for its side effects
    setDataCollect()



机器学习项目实战中,利用逻辑回归算法进行糖尿病预测是一个常见的入门案例。逻辑回归通常用于二分类问题,如疾病诊断或用户行为预测。这里我们假设有一个数据集包含患者的各项指标,目标是预测某人是否患有糖尿病。 以下是简单的Python代码示例,使用scikit-learn库中的LogisticRegression类: ```python # 导入所需库 import pandas as pd from sklearn.model_selection import train_test_split from sklearn.linear_model import LogisticRegression from sklearn.preprocessing import StandardScaler from sklearn.metrics import accuracy_score, confusion_matrix # 加载数据 data = pd.read_csv('diabetes.csv') # 假设数据文件名 # 数据预处理 X = data.drop('Outcome', axis=1) # X是特征,'Outcome'是目标变量 y = data['Outcome'] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) # 特征缩放 scaler = StandardScaler() X_train = scaler.fit_transform(X_train) X_test = scaler.transform(X_test) # 创建并训练模型 model = LogisticRegression() model.fit(X_train, y_train) # 预测 y_pred = model.predict(X_test) # 评估模型性能 accuracy = accuracy_score(y_test, y_pred) cm = confusion_matrix(y_test, y_pred) print(f"Accuracy: {accuracy}") print("Confusion Matrix:\n", cm) ``` 这个过程包括数据加载、划分训练集和测试集、特征缩放(对数值型特征进行标准化)、模型训练、预测以及最后评估模型的准确性和混淆矩阵。
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值