ML Examples

# #!/usr/bin/env python
# # -*- coding: utf-8 -*-
#
# # from StudyML.LogicRegression import LogicRegression
# #
# # logic = LogicRegression()
# # logic.excute('test.txt')
#
# # import matplotlib.pyplot as plt
# # from mpl_toolkits.mplot3d import Axes3D
# # fig = plt.figure()
# # ax = fig.add_subplot(111, projection='3d')
# # X = [1, 1, 2, 2]
# # Y = [3, 4, 4, 3]
# # Z = [1, 2, 1, 1]
# # ax.scatter(X, Y, Z)
# # plt.show()
#
# # from sklearn import linear_model
# # clf = linear_model.LinearRegression()
# # clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
# # print clf.coef_
# # print clf.predict([[1, 2]])
# #
#
# import matplotlib.pyplot as plt
# import numpy as np
# from sklearn import datasets, linear_model
#
# # Load the diabetes dataset
# from sklearn.datasets import make_regression
#
#
# def MSE(predicted, target):
#     return np.mean(np.power(predicted - target, 2))
#
#
# def MAD(predicted, target):
#     return np.mean(np.abs(predicted - target))
# # diabetes = datasets.load_diabetes()
# diabetes = make_regression(100, 6, 1, noise=10, random_state=10)
#
# x = np.array(([1, 1], [-1, -1]))
# y = np.array(([1, -1]))
# regr3 = linear_model.LinearRegression()
# regr3.fit(x, y)
# print regr3
# # Use only one feature
# # diabetes_X = diabetes.data[:, np.newaxis, 2]
# diabetes_X = diabetes[0]
# # Split the data into training/testing sets
# diabetes_X_train = diabetes_X[:-20]
# diabetes_X_test = diabetes_X[-20:]
#
# # Split the targets into training/testing sets
# diabetes_y = diabetes[1]
# # diabetes_y_train = diabetes_y.target[:-20]
# # diabetes_y_test = diabetes_y.target[-20:]
# diabetes_y_train = diabetes_y[:-20]
# diabetes_y_test = diabetes_y[-20:]
# # Create linear regression object
# regr = linear_model.LinearRegression(normalize=True)
# regr1 = linear_model.RidgeCV(alphas=[0.1, 1.0, 10.0])
# regr2 = linear_model.Lasso(alpha=0.1)
# # Train the model using the training sets
# regr.fit(diabetes_X_train, diabetes_y_train)
# regr1.fit(diabetes_X_train, diabetes_y_train)
# regr2.fit(diabetes_X_train, diabetes_y_train)
#
# # The coefficients
# # print regr
# # print(regr.intercept_, regr.coef_)
# # print regr1
# # print(regr1.intercept_, regr1.coef_)
# # print regr2
# # print(regr2.intercept_, regr2.coef_)
# # The mean square error
#
# print("Residual sum of squares: %.2f"
#       % np.mean((regr.predict(diabetes_X_test) - diabetes_y_test) ** 2))
# print MSE(regr.predict(diabetes_X_test), diabetes_y_test)
# print MAD(regr.predict(diabetes_X_test), diabetes_y_test)
#
# # print('Variance score: %.2f' % regr.score(diabetes_X_test, diabetes_y_test))
# # print("Residual sum of squares: %.2f"
# #       % np.mean((regr1.predict(diabetes_X_test) - diabetes_y_test) ** 2))
# # print('Variance score: %.2f' % regr1.score(diabetes_X_test, diabetes_y_test))
# # print("Residual sum of squares: %.2f"
# #       % np.mean((regr2.predict(diabetes_X_test) - diabetes_y_test) ** 2))
# # print('Variance score: %.2f' % regr2.score(diabetes_X_test, diabetes_y_test))
#
# # Plot outputs
# # plt.scatter(diabetes_X_test, diabetes_y_test,  color='black')
# # plt.plot(diabetes_X_test, regr.predict(diabetes_X_test), '-', color='r', linewidth=2)
# # plt.plot(diabetes_X_test, regr1.predict(diabetes_X_test), '-', color='g', linewidth=2)
# # plt.plot(diabetes_X_test, regr2.predict(diabetes_X_test), '-', color='b', linewidth=2)
#
#
# from regression import LinearRegression as lr
# reg = lr()
# diabetes_X_train = np.mat(np.c_[np.ones(diabetes_X_train.shape[0]), diabetes_X_train])
# diabetes_X_test = np.mat(np.c_[np.ones(diabetes_X_test.shape[0]), diabetes_X_test])
# # weight = reg.regression(np.mat(diabetes_X_train), np.mat(diabetes_y_train))
# weight = reg.gradient_descent(np.mat(diabetes_X_train), np.mat(diabetes_y_train), 38)
# print("Residual sum of squares: %.2f"
#               % np.mean(((diabetes_X_test * weight)[:, 0].flatten().A[0] - diabetes_y_test) ** 2))
# # i = 1
# # j = 0
# # while True:
# #     weight = reg.gradient_descent(np.mat(diabetes_X_train), np.mat(diabetes_y_train), i)
# #     # print weight
# #     test = (diabetes_X_test * weight)[:, 0].flatten().A[0]
# #     mean = np.mean((test - diabetes_y_test) ** 2)
# #     i += 1
# #     if mean <= 88:
# #         j += 1
# #         print i
# #         print("Residual sum of squares: %.2f"
# #               % mean)
# #         if j > 5:
# #             break
# # print('Variance score: %.2f' % regr2.score(diabetes_X_test, diabetes_y_test))
#
# # plt.plot(diabetes_X_test[:, 1], diabetes_X_test * weight, 'o', color='b', linewidth=2)
# # plt.xticks(())
# # plt.yticks(())
# # plt.show()
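# The regression module used above is not included in this post; as a rough sketch of what its
# gradient_descent(X, y, n_iter) helper might look like (the name, signature and learning rate are
# assumptions, not the author's actual implementation), here is batch gradient descent for
# least-squares linear regression:
import numpy as np


def gradient_descent(X, y, n_iter, lr=0.01):
    # X: (n_samples, n_features) with a leading column of ones for the intercept
    # y: (n_samples,) targets; returns an (n_features, 1) weight column vector
    X = np.asarray(X, dtype=float)
    y = np.asarray(y, dtype=float).reshape(-1, 1)
    w = np.zeros((X.shape[1], 1))
    for _ in range(n_iter):
        grad = X.T.dot(X.dot(w) - y) / X.shape[0]  # gradient of the mean squared error (up to a factor of 2)
        w -= lr * grad
    return w

# tiny check on y = 1 + 2*x: the weights should approach [[1.], [2.]]
X_demo = np.c_[np.ones(5), np.arange(5)]
y_demo = 1 + 2 * np.arange(5)
print(gradient_descent(X_demo, y_demo, 5000))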


# from sklearn.datasets import load_iris
# from sklearn.cross_validation import train_test_split
# from sklearn import preprocessing
#
# # Load the data
# iris = load_iris()
#
# # Pick out the features and labels
# X_iris, y_iris = iris.data, iris.target
#
# # Use only the first two columns as features
# X, y = X_iris[:, :2], y_iris
#
# # Hold out a portion, 25% of the data, as a test set
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=33)

# Standardize the original feature data; this step actually matters quite a bit, but is often overlooked
# scaler = preprocessing.StandardScaler()
# X_train = scaler.fit_transform(X_train)
# X_test = scaler.transform(X_test)

from sklearn.linear_model import SGDClassifier

# Use the SGD classifier: it suits large-scale data and estimates the parameters by stochastic gradient descent
# clf = SGDClassifier()

# clf.fit(X_train, y_train)
#
# # Import the evaluation module
# from sklearn import metrics
#
# y_train_predict = clf.predict(X_train)
#
# # In-sample check: evaluate accuracy on the training samples
# print metrics.accuracy_score(y_train, y_train_predict)
#
# # Proper out-of-sample check: evaluate accuracy on the test samples
# y_predict = clf.predict(X_test)
# print metrics.accuracy_score(y_test, y_predict)
#
# print metrics.classification_report(y_test, y_predict, target_names=iris.target_names)
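# The SGDClassifier usage above is left commented out; a minimal self-contained sketch of the same
# recipe (standardize, fit by stochastic gradient descent, check accuracy) on synthetic data rather
# than the iris split used above:
from sklearn.datasets import make_classification
from sklearn.preprocessing import StandardScaler
from sklearn import metrics

X_demo, y_demo = make_classification(n_samples=200, n_features=4, random_state=33)
X_demo = StandardScaler().fit_transform(X_demo)
sgd_demo = SGDClassifier(random_state=33)
sgd_demo.fit(X_demo, y_demo)
print(metrics.accuracy_score(y_demo, sgd_demo.predict(X_demo)))  # in-sample accuracy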

# # To examine the SGDClassifier's performance in more detail, we want to make full use of the data, so we split it into N folds and use each fold once to test the model.
#
# from sklearn.cross_validation import cross_val_score, KFold
# from sklearn.pipeline import Pipeline
# from sklearn.preprocessing import StandardScaler
# # A Pipeline keeps the model-building steps compact; in general, before fit, the data needs feature extraction, preprocessing and other necessary steps.
# # We use the default parameter settings here
# clf = Pipeline([('scaler', StandardScaler()), ('sgd_classifier', SGDClassifier())])
#
# # K-fold cross-validation over the whole dataset (10 folds here)
# cv = KFold(X.shape[0], 10, shuffle=True, random_state=33)
#
# scores = cross_val_score(clf, X, y, cv=cv)
# print scores
#
# # Summarize overall model performance: mean accuracy and its standard deviation
# print scores.mean(), scores.std()
#
# from scipy.stats import sem
# import numpy as np
#
# # The spread is measured slightly differently here: the standard error of the mean rather than the plain standard deviation (illustrated just below)
# print np.mean(scores), sem(scores)
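# To see what sem does: scipy's sem is the sample standard deviation (ddof=1) divided by sqrt(n),
# i.e. the standard error of the mean, not the plain standard deviation used earlier.
from scipy.stats import sem
import numpy as np

demo_scores = np.array([0.70, 0.75, 0.80, 0.72, 0.78])
print(sem(demo_scores))
print(np.std(demo_scores, ddof=1) / np.sqrt(len(demo_scores)))  # same value, computed by hand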

# Load the pixel data of the handwritten-digit images. For image data, besides the various heuristic feature-extraction schemes that could follow,
# the most direct and commonly used representation is the raw pixels: each pixel is a number reflecting its intensity.
from sklearn.datasets import load_digits
# digits = load_digits()
# # These classic datasets share a very uniform storage format. That is good practice: a consistent interface makes them quick to use.
#
# X_digits, y_digits = digits.data, digits.target
#
# from sklearn.decomposition import PCA
# from matplotlib import pyplot as plt
# # The key parameter is n_components = 2 principal components
#
# estimator = PCA(n_components=2)
#
# X_pca = estimator.fit_transform(X_digits)
# # scikit-learn's interface design is very consistent.
# #
# # Clustering problems often call for a visual look at the data, and that is one direct purpose of dimensionality reduction; so we plot the projected points here to build some intuition.
#
#
# def plot_pca_scatter():
#     colors = ['black', 'blue', 'purple', 'yellow', 'white', 'red', 'lime', 'cyan', 'orange', 'gray']
#     for i in xrange(len(colors)):
#         px = X_pca[:, 0][y_digits == i]
#         py = X_pca[:, 1][y_digits == i]
#         plt.scatter(px, py, c=colors[i])
#     plt.legend(digits.target_names)
#     plt.xlabel('First Principal Component')
#     plt.ylabel('Second Principal Component')
#     plt.show()
#
# plot_pca_scatter()


# from sklearn.datasets import fetch_20newsgroups
# import numpy as np
# news = fetch_20newsgroups(subset='all')
# # We first use the single-core version of grid_search
# from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
# X_train, X_test, y_train, y_test = train_test_split(news.data[:3000], news.target[:3000], test_size=0.25, random_state=33)
#
#
# from sklearn.svm import SVC
# from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import Pipeline
#
# clf = Pipeline([('vect', TfidfVectorizer(stop_words='english', analyzer='word')), ('svc', SVC())])
#
# # The two hyperparameters to search take 4 and 3 values respectively: svc__gamma ranges over 10^-2, 10^-1, ..., 10^1 and svc__C over 10^-1, 10^0, 10^1.
# # That gives 12 hyperparameter combinations in total, i.e. 12 models with different settings.
# parameters = {'svc__gamma': np.logspace(-2, 1, 4), 'svc__C': np.logspace(-1, 1, 3)}
#
# # With 3-fold cross-validation per combination, 36 model fits are needed in total; judging from the output below, each fit takes roughly 5 seconds on a single thread.
# gs = GridSearchCV(clf, parameters, verbose=2, refit=True, cv=3)
#
# time_ = gs.fit(X_train, y_train)
# print gs.best_params_, gs.best_score_, time_
#
# print gs.score(X_test, y_test)
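# To make the grid sizes above concrete, these are the values np.logspace generates for the two
# hyperparameter ranges (4 gamma values x 3 C values = 12 combinations, times 3 CV folds = 36 fits):
import numpy as np

print(np.logspace(-2, 1, 4))  # [ 0.01  0.1   1.   10. ]
print(np.logspace(-1, 1, 3))  # [ 0.1  1.  10. ]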

# First, load the house-price data
from sklearn.datasets import load_boston
import numpy as np
from sklearn.cross_validation import train_test_split
# As before, split the data
boston = load_boston()
X_train, X_test, y_train, y_test = train_test_split(boston.data, boston.target, test_size=0.25, random_state=33)

from sklearn.preprocessing import StandardScaler

# Standardization keeps the raw feature values on comparable scales; otherwise large differences between them would skew the learned parameter weights
scalerX = StandardScaler().fit(X_train)
X_train = scalerX.transform(X_train)
X_test = scalerX.transform(X_test)

# StandardScaler expects 2-D input, so reshape the 1-D targets to a column and flatten them back afterwards
scalery = StandardScaler().fit(y_train.reshape(-1, 1))
y_train = scalery.transform(y_train.reshape(-1, 1)).ravel()
y_test = scalery.transform(y_test.reshape(-1, 1)).ravel()
# Write the evaluation helper first: still 5-fold cross-validation by default, but the score is no longer accuracy; it is the R^2 coefficient of determination,
# which roughly says what fraction of the variance in the targets the fitted regressor can cover and explain (a small numeric illustration follows the helper below).
from sklearn.cross_validation import *


def train_and_evaluate(clf, X_train, y_train):
    cv = KFold(X_train.shape[0], 5, shuffle=True, random_state=33)
    scores = cross_val_score(clf, X_train, y_train, cv=cv)
    print 'Average coefficient of determination using 5-fold cross validation:', np.mean(scores)
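
# To make the R^2 score concrete: R^2 = 1 - SS_res / SS_tot, i.e. one minus the ratio of the residual
# sum of squares to the total variance around the mean. A tiny hand-computed check against sklearn's
# r2_score (toy numbers, not the Boston data):
from sklearn.metrics import r2_score
import numpy as np

y_true_demo = np.array([3.0, -0.5, 2.0, 7.0])
y_pred_demo = np.array([2.5, 0.0, 2.0, 8.0])
ss_res = np.sum((y_true_demo - y_pred_demo) ** 2)
ss_tot = np.sum((y_true_demo - y_true_demo.mean()) ** 2)
print(1 - ss_res / ss_tot)                 # ~0.949
print(r2_score(y_true_demo, y_pred_demo))  # same value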

# Finally, let's see how many kinds of regressors can be used (there are in fact more).
# Three representative ones are tried here.
# Start with a linear model: SGDRegressor.
from sklearn import linear_model
# There is a regularization option, penalty; with only 13 input features here it probably makes little difference
# clf_sgd = linear_model.SGDRegressor(loss='squared_loss', penalty=None, random_state=42)
# train_and_evaluate(clf_sgd, X_train, y_train)

# Now try an SGDRegressor with the penalty parameter set to 'l2'; the result barely changes, since with so few features regularization adds little
clf_sgd_l2 = linear_model.SGDRegressor(loss='squared_loss', penalty='l2', random_state=42)
train_and_evaluate(clf_sgd_l2, X_train, y_train)
# Grid-search the penalty type for the SGDRegressor, wrapped in a Pipeline
clf = Pipeline([('sgd', linear_model.SGDRegressor())])
parameters = {'sgd__penalty': ['l1', 'l2']}
gs = GridSearchCV(clf, parameters, verbose=2, refit=True, cv=3)
time_ = gs.fit(X_train, y_train)
print gs.best_params_, gs.best_score_
print gs.score(X_test, y_test)
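
# Because the targets were standardized above, these models predict on the standardized scale;
# a short sketch (reusing the scalery fitted earlier) to map predictions back to the original units:
y_pred_scaled = gs.predict(X_test)
y_pred_boston = scalery.inverse_transform(y_pred_scaled.reshape(-1, 1)).ravel()
print(y_pred_boston[:5])  # predicted prices back on the original scale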

# ---------------------------------------------------------------------------
# Second example: predicting listing prices from listings.csv
# ---------------------------------------------------------------------------
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from sklearn import ensemble
from sklearn import linear_model
from sklearn.grid_search import GridSearchCV
from sklearn import preprocessing
from sklearn.cross_validation import train_test_split
import sklearn.metrics as metrics
from collections import Counter

LISTINGSFILE = 'listings.csv'

cols = ['price',
        'accommodates',
        'bedrooms',
        'beds',
        'neighbourhood_cleansed',
        'room_type',
        'cancellation_policy',
        'instant_bookable',
        'reviews_per_month',
        'number_of_reviews',
        'availability_30',
        'review_scores_rating'
        ]

# read the file into a dataframe
df = pd.read_csv(LISTINGSFILE, usecols=cols)
# nb_counts = Counter(df.neighbourhood_cleansed)
# tdf = pd.DataFrame.from_dict(nb_counts, orient='index').sort_values(by=0)
# tdf.plot(kind='bar')

# first fixup 'reviews_per_month' where there are no reviews
df['reviews_per_month'].fillna(0, inplace=True)

# just drop rows with bad/weird values
# (we could do more here)
df = df[df.bedrooms != 0]
df = df[df.beds != 0]
df = df[df.price != 0]
df = df.dropna(axis=0)

df = df[df.bedrooms == 1]

# remove the $ from the price and convert to float
df['price'] = df['price'].replace('[\$,)]', '', regex=True).replace('[(]', '-', regex=True).astype(float)
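
# A quick illustration of what the cleanup above does (hypothetical sample strings, not rows from listings.csv):
price_demo = pd.Series(['$1,250.00', '($50.00)'])
print(price_demo.replace('[\$,)]', '', regex=True).replace('[(]', '-', regex=True).astype(float))
# '$' and ',' are stripped, and an accounting-style '(...)' amount becomes a negative float: 1250.0 and -50.0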

# get feature encoding for categorical variables
n_dummies = pd.get_dummies(df.neighbourhood_cleansed)
rt_dummies = pd.get_dummies(df.room_type)
xcl_dummies = pd.get_dummies(df.cancellation_policy)

# encode the instant_bookable 't'/'f' column as a single 0/1 indicator
# (keep 'instant_t' and drop the redundant 'instant_f')
ib_dummies = pd.get_dummies(df.instant_bookable, prefix="instant")
ib_dummies = ib_dummies.drop('instant_f', axis=1)
# print ib_dummies.head()
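
# For reference, what the dummy encoding above produces (hypothetical 't'/'f' values like those in instant_bookable):
demo_ib = pd.Series(['t', 'f', 't'])
print(pd.get_dummies(demo_ib, prefix='instant'))
# two indicator columns, instant_f and instant_t; dropping instant_f leaves a single flag (cast to int further down)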

# replace the old columns with our new one-hot encoded ones
alldata = pd.concat((df.drop(['neighbourhood_cleansed', 'room_type',
                              'cancellation_policy', 'instant_bookable'], axis=1),
                     n_dummies.astype(int), rt_dummies.astype(int),
                     xcl_dummies.astype(int), ib_dummies.astype(int)),
                    axis=1)
# allcols = alldata.columns

# scattercols = ['price', 'accommodates', 'number_of_reviews', 'reviews_per_month', 'beds', 'availability_30', 'review_scores_rating']
# axs = pd.scatter_matrix(alldata[scattercols], figsize=(12, 12), c='red')
# plt.show()

rs = 1
ests = [ linear_model.LinearRegression(), linear_model.Ridge(),
        linear_model.Lasso(), linear_model.ElasticNet(),
        linear_model.BayesianRidge(), linear_model.OrthogonalMatchingPursuit() ]
ests_labels = np.array(['Linear', 'Ridge', 'Lasso', 'ElasticNet', 'BayesRidge', 'OMP'])
errvals = np.array([])

X_train, X_test, y_train, y_test = train_test_split(alldata.drop(['price'], axis=1),
                                                    alldata.price, test_size=0.2, random_state=20)

# for e in ests:
#     e.fit(X_train, y_train)
#     this_err = metrics.median_absolute_error(y_test, e.predict(X_test))
#     #print "got error %0.2f" % this_err
#     errvals = np.append(errvals, this_err)
#
# pos = np.arange(errvals.shape[0])
# srt = np.argsort(errvals)
# plt.figure(figsize=(7,5))
# plt.bar(pos, errvals[srt], align='center')
# plt.xticks(pos, ests_labels[srt])
# plt.xlabel('Estimator')
# plt.ylabel('Median Absolute Error')
# plt.show()

n_est = 500

tuned_parameters = {
    "n_estimators": [n_est],
    "max_depth": [4],
    "learning_rate": [0.01],
    "min_samples_split": [1],
    "loss": ['ls', 'lad']
}

gbr = ensemble.GradientBoostingRegressor()
clf = GridSearchCV(gbr, cv=3, param_grid=tuned_parameters, scoring='median_absolute_error')
clf.fit(X_train, y_train)
best = clf.best_estimator_

# plot error for each round of boosting
test_score = np.zeros(n_est, dtype=np.float64)

train_score = best.train_score_
# per-iteration error on the test set, using the fitted loss function of the best estimator
for i, y_pred in enumerate(best.staged_predict(X_test)):
    test_score[i] = best.loss_(y_test, y_pred)

plt.figure(figsize=(10, 6))
# plt.subplot(1, 2, 1)
plt.plot(np.arange(n_est), train_score, 'darkblue', label='Training Set Error')
plt.plot(np.arange(n_est), test_score, 'red', label='Test Set Error')
plt.legend(loc='upper right')
plt.xlabel('Boosting Iterations')
plt.ylabel('Least Absolute Deviation')
plt.show()
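
# A possible follow-up: inspect which features the boosted model leans on most
# (a sketch using the fitted best estimator and the feature columns from above):
feature_names = np.array(alldata.drop(['price'], axis=1).columns)
importances = best.feature_importances_
order = np.argsort(importances)[::-1][:10]  # ten most important features first
for name, score in zip(feature_names[order], importances[order]):
    print('%s: %.3f' % (name, score))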







