Data Science Libraries (HM), Day 6: pandas Time Series

This article shows how to handle time series data with Python's pandas library: reading the 911 emergency-call dataset, classifying and counting records by timestamp and category, and using the .resample method to show how call volume changes from month to month. It also introduces PeriodIndex and downsampling.

I. Time Series

Time series are an important form of data: many statistics, and many of the patterns hidden in data, are closely tied to the time dimension.

  • Example: given roughly 250,000 records of 911 emergency calls from 2015 to 2017, count how many calls of each emergency type there are
    # coding=utf-8
    import pandas as pd
    import numpy as np
    from matplotlib import pyplot as plt

    df = pd.read_csv("./911.csv")
    print(df.head(5))

    # extract the category: the part of "title" before ": "
    temp_list = df["title"].str.split(": ").tolist()
    cate_list = list(set([i[0] for i in temp_list]))
    print(cate_list)

    # build an all-zeros DataFrame with one column per category
    zeros_df = pd.DataFrame(np.zeros((df.shape[0], len(cate_list))), columns=cate_list)

    # set the matching column to 1 for each row (use .loc to avoid chained assignment)
    for cate in cate_list:
        zeros_df.loc[df["title"].str.contains(cate), cate] = 1
    # print(zeros_df)

    # the column-wise sum gives the number of calls per category
    sum_ret = zeros_df.sum(axis=0)
    print(sum_ret)

    Sample output (per-category call counts):

    [5 rows x 9 columns]
    cate
    EMS        124840
    Fire        37432
    Traffic     87465
    Name: title, dtype: int64

    1. Generating a range of dates

    pd.date_range(start=, end=, periods=, freq='D')

    start, end and freq together generate a set of timestamps between start and end at frequency freq;
    start, periods and freq together generate periods timestamps starting from start at frequency freq.
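    A minimal sketch of the two parameter combinations (the dates below are arbitrary examples, not from the 911 data):

    import pandas as pd

    # start + end + freq: every day from 2017-01-01 to 2017-01-05
    print(pd.date_range(start="20170101", end="20170105", freq="D"))

    # start + periods + freq: 4 timestamps, 10 days apart, starting at 2017-01-01
    print(pd.date_range(start="20170101", periods=4, freq="10D"))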

    2. Using a time series in a DataFrame

    index = pd.date_range("20170101", periods=10)
    df = pd.DataFrame(np.random.rand(10), index=index)

    Use pd.to_datetime to convert a column of time strings into a datetime series:

    df["timeStamp"] = pd.to_datetime(df["timeStamp"])

II. Resampling with .resample

Resampling is the process of converting a time series from one frequency to another.

  • Converting high-frequency data to a lower frequency is downsampling;
  • converting low-frequency data to a higher frequency is upsampling (a minimal sketch of both follows the example output below).
  • Example: plot how the number of 911 calls of each type changes from month to month:
    # coding=utf-8
    # 911 data: how the number of calls of each type changes from month to month
    import pandas as pd
    import numpy as np
    from matplotlib import pyplot as plt

    # convert the time strings to datetime and use them as the index
    df = pd.read_csv("./911.csv")
    df["timeStamp"] = pd.to_datetime(df["timeStamp"])

    # add a column holding the call category
    temp_list = df["title"].str.split(": ").tolist()
    cate_list = [i[0] for i in temp_list]
    df["cate"] = pd.DataFrame(np.array(cate_list).reshape((df.shape[0], 1)))

    df.set_index("timeStamp", inplace=True)

    print(df.head(1))

    plt.figure(figsize=(20, 8), dpi=80)

    # group by category and plot one line per category
    for group_name, group_data in df.groupby(by="cate"):

        # resample each group to monthly frequency and count the calls
        count_by_month = group_data.resample("M").count()["title"]

        _x = count_by_month.index
        print(_x)
        _y = count_by_month.values

        _x = [i.strftime("%Y%m%d") for i in _x]

        plt.plot(range(len(_x)), _y, label=group_name)

    plt.xticks(range(len(_x)), _x, rotation=45)
    plt.legend(loc="best")
    plt.show()
    
    [1 rows x 9 columns]
    DatetimeIndex(['2015-12-31', '2016-01-31', '2016-02-29', '2016-03-31',
                   '2016-04-30', '2016-05-31', '2016-06-30', '2016-07-31',
                   '2016-08-31', '2016-09-30', '2016-10-31', '2016-11-30',
                   '2016-12-31', '2017-01-31', '2017-02-28', '2017-03-31',
                   '2017-04-30', '2017-05-31', '2017-06-30', '2017-07-31',
                   '2017-08-31', '2017-09-30'],
                  dtype='datetime64[ns]', name='timeStamp', freq='M')
    (the same DatetimeIndex is printed once per category: EMS, Fire and Traffic)
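    A minimal down/upsampling sketch on synthetic data (the series and frequencies are arbitrary examples):

    import numpy as np
    import pandas as pd

    ts = pd.Series(np.arange(10), index=pd.date_range("20170101", periods=10, freq="D"))

    # downsampling: daily -> 5-day buckets, aggregated with sum()
    print(ts.resample("5D").sum())

    # upsampling: daily -> 12-hourly; the new slots are NaN unless filled, e.g. with ffill()
    print(ts.resample("12H").ffill().head())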

III. PeriodIndex (time periods)

A DatetimeIndex marks points in time, while a PeriodIndex marks time spans (an hour, a day, ...). Separate year/month/day/hour columns can be combined into an hourly PeriodIndex:

periods = pd.PeriodIndex(year=data["year"], month=data["month"], day=data["day"], hour=data["hour"], freq="H")

Downsample this period index, e.g. to 10-day means:

data = df.set_index(periods).resample("10D").mean()

Example: plot how PM2.5 changes over time:

# coding=utf-8
import pandas as pd
from matplotlib import pyplot as plt

file_path = "./PM2.5/BeijingPM20100101_20151231.csv"

df = pd.read_csv(file_path)

# combine the separate year/month/day/hour columns into a pandas PeriodIndex
period = pd.PeriodIndex(year=df["year"], month=df["month"], day=df["day"], hour=df["hour"], freq="H")
df["datetime"] = period
# print(df.head(10))

# set datetime as the index
df.set_index("datetime", inplace=True)

# downsample: aggregate to 7-day means
df = df.resample("7D").mean()
print(df.head())

# missing data: NaN values are kept here and simply show up as gaps in the plot
# print(df["PM_US Post"])

data = df["PM_US Post"]
data_china = df["PM_Nongzhanguan"]

print(data_china.head(100))

# plot both series
_x = data.index
_x = [i.strftime("%Y%m%d") for i in _x]
_x_china = [i.strftime("%Y%m%d") for i in data_china.index]
print(len(_x), len(_x_china))
_y = data.values
_y_china = data_china.values

plt.figure(figsize=(20, 8), dpi=80)

plt.plot(range(len(_x)), _y, label="US_POST", alpha=0.7)
plt.plot(range(len(_x_china)), _y_china, label="CN_POST", alpha=0.7)

plt.xticks(range(0, len(_x_china), 10), list(_x_china)[::10], rotation=45)

plt.legend(loc="best")

plt.show()
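A minimal sketch of what the PeriodIndex construction does, on a two-row toy frame (the column values below are made up):

import pandas as pd

toy = pd.DataFrame({"year": [2010, 2010], "month": [1, 1],
                    "day": [1, 1], "hour": [0, 1], "value": [129.0, 148.0]})

# the same constructor as in the script above: one hourly period per row
periods = pd.PeriodIndex(year=toy["year"], month=toy["month"],
                         day=toy["day"], hour=toy["hour"], freq="H")
toy = toy.set_index(periods)
print(toy.index)

# a PeriodIndex represents spans; convert it to plain timestamps with to_timestamp()
print(toy.index.to_timestamp())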
