
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 13 11:46:54 2016


@author: ZENGQIAN905
"""


from __future__ import absolute_import
from __future__ import print_function
 
import numpy as np
import pandas as pd
import re
import sklearn
from sklearn.preprocessing import LabelEncoder, StandardScaler, OneHotEncoder
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn import preprocessing
from scipy import stats
from sklearn.svm import SVC
import matplotlib.pyplot as plt
from sklearn.model_selection import cross_val_score
from sklearn.metrics import roc_curve, auc
from sklearn.ensemble import RandomForestClassifier
from bayes_opt import BayesianOptimization as BO
from sklearn.ensemble import GradientBoostingClassifier
import xgboost as xgb


np.random.seed(17411)


def is_nominal(A):
    '''Heuristic: treat a column as nominal (categorical) if it has
    fewer than 30 distinct values; otherwise treat it as numeric.'''
    kind = set()
    for value in np.asarray(A):
        kind.add(value)
        if len(kind) >= 30:
            return False
    return True
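## Quick sanity check of the heuristic (hypothetical toy columns, not taken
## from the original data set):
#assert is_nominal(pd.Series(['A', 'B', 'A', 'C']))    # 3 distinct values -> nominal
#assert not is_nominal(pd.Series(range(100)))          # 100 distinct values -> numeric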
    
##  Encode nominal columns
def build_encoder(X):
    '''Build the encoder from the training data only.'''
    # LabelEncoder: map each column's categories to integer codes.
    # NOTE: the per-column label encoders are not returned, so reusing the
    # one-hot encoder on new data assumes the same integer coding upstream.
    for i in range(0, X.shape[1]):
        label_enc = LabelEncoder()
        label_enc.fit(X.iloc[:, i])
        X.iloc[:, i] = label_enc.transform(X.iloc[:, i])

    X = np.asarray(X)
    ##  Ignore categories that cannot be transformed (unseen at fit time)
    one_enc = OneHotEncoder(handle_unknown='ignore')
    one_enc.fit(X)
    return one_enc, one_enc.transform(X).toarray()
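## Illustrative usage on a tiny made-up frame (hypothetical data):
#toy = pd.DataFrame({'city': ['bj', 'sh', 'bj'], 'grade': ['A', 'B', 'A']})
#enc, onehot = build_encoder(toy.copy())
#print(onehot.shape)    # (3, 4): two city codes + two grade codes, one-hot encoded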


   
def load_data(path, encoder = None):
    reader = pd.read_csv(path, iterator=True, low_memory=False)
    chunkSize = 150000
    chunks = []
    # Read the file chunk by chunk until the iterator is exhausted.
    while True:
        try:
            chunks.append(reader.get_chunk(chunkSize))
        except StopIteration:
            print("Iteration is stopped.")
            break

    df_D = pd.concat(chunks, ignore_index=True)
    dim = df_D.shape[1]

    ## Drop rows where more than half of the fields are missing
    df = df_D[df_D.count(axis=1) > 0.5 * dim].copy()


    ## 0. Convert the birth-date column (column 4) into an age
    for i in range(len(df)):
        if not pd.isnull(list(df.iloc[i:i+1, 4])[0]):
            df.iloc[i:i+1, 4] = np.array([float(2016 - float(list(df.iloc[i:i+1, 4])[0][0:4]))])

    ## Extract the bare number from a column that mixes digits with Chinese text
    for i in range(len(df)):
        if not pd.isnull(list(df.iloc[i:i+1, 41])[0]):
            num = re.findall(r'\d+\.?\d*', list(df.iloc[i:i+1, 41])[0])[0]
            df.iloc[i:i+1, 41] = np.array([float(num)])
    
    
    data_nominal = []
    data_numeric = []

    for i in range(2, dim):
        if df.iloc[:, i].count() > 0.4 * len(df.iloc[:, i]):

            if is_nominal(df.iloc[:, i]):
                data_nominal.append(i)  # column indices of nominal variables
                j = 0
                for value in df.iloc[:, i]:
                    # nominal data: replace missing values with 'U'
                    if pd.isnull(value):
                        df.iloc[j, i] = 'U'
                    j += 1

            else:
                data_numeric.append(i)  # column indices of numeric variables
                ##  pandas' median() skips NaN; np.median would not
                median = df.iloc[:, i].median()

                ##  NaN-aware percentiles, otherwise any NaN poisons the result
                percentile1 = np.nanpercentile(df.iloc[:, i], q=1)
                percentile99 = np.nanpercentile(df.iloc[:, i], q=99)
                j = 0
                for value in df.iloc[:, i]:
                    # numeric data: replace missing values with the median
                    if pd.isnull(value):
                        df.iloc[j, i] = median
                    # winsorize outliers to the 1st and 99th percentiles
                    if value < percentile1:
                        df.iloc[j, i] = percentile1
                    if value > percentile99:
                        df.iloc[j, i] = percentile99
                    j += 1
 
    nominal = df.iloc[:, data_nominal]

    if not encoder:
        encoder, nominal = build_encoder(nominal)
    else:
        # Reuse the fitted one-hot layout on new data; this assumes the same
        # integer label coding as at fit time (see the note in build_encoder).
        nominal = encoder.transform(np.asarray(nominal)).toarray()

    nominal = list(nominal)

    # standardize numeric variables
    # df.iloc[:, data_numeric] = standardize_data(df.iloc[:, data_numeric])
    numeric = df.iloc[:, data_numeric]
    numeric = np.asarray(numeric)
    numeric = list(numeric)


    # combine numeric and nominal variables into one design matrix
    X = nominal
    for i in range(np.shape(numeric)[0]):
        X[i] = np.append(X[i], numeric[i])
    ## label convention: 0 = good, 1 = bad
    y = df.loc[:, ["flag"]]
    X = np.array(X)
    y = np.array(y)


    return X, y, encoder
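## Hypothetical usage: fit the encoder on the training file, then reuse it on
## a second file so both share the same one-hot layout (paths are placeholders):
#Xtr, ytr, enc = load_data('train.csv')
#Xte, yte, _ = load_data('test.csv', encoder=enc)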


#X, y, encoder = load_data('D:/Users/zengqian905/Desktop/qianz_agent_2015_0_1_180.csv')


#X, y, encoder = load_data('D:/Users/zengqian905/Desktop/qianz_agent_2015_0_1_part.csv')


## 1. Split the samples by label (0 vs 1)
'''
label1=[]
label0=[]




for i in range(len(y)):
    if y[i][0]== 0:
        label0.append(i)
    else:
        label1.append(i)


x0 = np.array([X[label0[0]]])
y0 = np.array([y[label0[0]]])
x1 = np.array([X[label1[0]]])
y1 = np.array([y[label1[0]]])


for i in range(1,len(label0)):
    x0 = np.concatenate((x0,np.array([X[label0[i]]])))
    y0 = np.concatenate((y0,np.array([y[label0[i]]])))


for i in range(1,len(label1)):
    x1 = np.concatenate((x1,np.array([X[label1[i]]])))
    y1 = np.concatenate((y1,np.array([y[label1[i]]])))
        
## NOTE: shuffling x0/x1 alone breaks their pairing with y0/y1; shuffle a
## shared index permutation and apply it to both instead.
np.random.shuffle(x0)
np.random.shuffle(x1)
'''
'''
X = np.load('D:/Users/zengqian905/Desktop/qianli_data_change/X.npy')
y = np.load('D:/Users/zengqian905/Desktop/qianli_data_change/y.npy')


'''
'''
x0 = np.load('D:/Users/zengqian905/Desktop/qianli_data_change/x0.npy')
x1 = np.load('D:/Users/zengqian905/Desktop/qianli_data_change/x1.npy')
y0 = np.load('D:/Users/zengqian905/Desktop/qianli_data_change/y0.npy')
y1 = np.load('D:/Users/zengqian905/Desktop/qianli_data_change/y1.npy')
'''
'''
## Split the data into train_data and test_data (about 50k rows in total)


Xtrain = np.concatenate((X[:420],X[630:31418]))
ytrain = np.concatenate((y[:420],y[630:31418]))


Xtest = np.concatenate((X[420:630],X[31418:]))
ytest = np.concatenate((y[420:630],y[31418:]))




Xtrain = np.concatenate((X[:86000],X[129794:132194]))
ytrain = np.concatenate((y[:86000],y[129794:132194]))


Xtest = np.concatenate((X[86000:129794],X[132194:]))
ytest = np.concatenate((y[86000:129794],y[132194:]))
'''
Xtrain1 = np.load('D:/Users/zengqian905/Desktop/qianli_data_change_180/Xtrain.npy')
Xtest1 = np.load('D:/Users/zengqian905/Desktop/qianli_data_change_180/Xtest.npy')
ytrain1 = np.load('D:/Users/zengqian905/Desktop/qianli_data_change_180/ytrain.npy')
ytest1 = np.load('D:/Users/zengqian905/Desktop/qianli_data_change_180/ytest.npy')


Xtrain = Xtrain1[:4200]
ytrain = ytrain1[:4200]
Xtest = Xtest1[:1740]
ytest = ytest1[:1740]


'''
## After shuffling the data, pick train_data and test_data
Xtrain = np.concatenate((x0[:86000],x1[:2400]))
ytrain = np.concatenate((y0[:86000],y1[:2400]))


Xtest = np.concatenate((x0[86000:],x1[2400:]))
ytest = np.concatenate((y0[86000:],y1[2400:]))
'''
'''
## Missing-value imputation


from sklearn.preprocessing import Imputer


Xtrain = Imputer().fit_transform(Xtrain)
ytrain = Imputer().fit_transform(ytrain)
Xtest = Imputer().fit_transform(Xtest)
ytest = Imputer().fit_transform(ytest)
'''


# Train a random forest model and return its AUC
def rfc_auc(n_estimators, min_samples_split, max_features):
    rf = RandomForestClassifier(n_estimators = int(n_estimators),
                               min_samples_split = int(min_samples_split), 
                               max_features = min(max_features, 0.999), 
                               random_state = 2)
    rf.fit(Xtrain, np.ravel(ytrain))
    yprob = rf.predict_proba(Xtest)[:, 1]
    fpr, tpr, thresholds = roc_curve(ytest, yprob)
    plt.plot(fpr, tpr)
    return auc(fpr, tpr)
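## Example call with arbitrary mid-range values from paramRFC below:
#print(rfc_auc(n_estimators=100, min_samples_split=5, max_features=0.5))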
    
# Train an AdaBoostClassifier and return its AUC
def ada_auc(max_depth, n_estimators):
    bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=int(max_depth)),
                         algorithm="SAMME",
                         n_estimators=int(n_estimators))
    bdt = bdt.fit(Xtrain, np.ravel(ytrain))
    yprob_ada = bdt.predict_proba(Xtest)[:, 1]  
    # ks_value = KSmetrics(ytest,yprob_ada)
    # print(ks_value) 
    fpr, tpr, thresholds = roc_curve(ytest, yprob_ada)
    plt.plot(fpr, tpr)
    return auc(fpr, tpr)
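## Example call with arbitrary mid-range values from paramADA below:
#print(ada_auc(max_depth=50, n_estimators=200))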


# Train a GradientBoostingClassifier and return its AUC
def GBDT_auc(n_estimators,min_samples_split,max_features,max_depth,min_impurity_split):
    GBDT = GradientBoostingClassifier(learning_rate=0.1, 
                                      n_estimators=int(n_estimators),  
                                      min_samples_split=int(min_samples_split),
                                      max_depth=int(max_depth), 
                                      random_state=19943, 
                                      max_features=min(max_features,0.999),
                                      min_impurity_split=min(min_impurity_split,1e-03))
    
    GBDT.fit(Xtrain, np.ravel(ytrain))
    yprob = GBDT.predict_proba(Xtest)[:, 1]
    fpr, tpr, thresholds = roc_curve(ytest, yprob)
    plt.plot(fpr, tpr)
    return auc(fpr, tpr)
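## Example call with arbitrary mid-range values from paramGBDT below:
#print(GBDT_auc(n_estimators=100, min_samples_split=5, max_features=0.5,
#               max_depth=5, min_impurity_split=1e-05))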


## Train an xgboost classifier and return its AUC
data_train = xgb.DMatrix(Xtrain,label = ytrain)
data_test = xgb.DMatrix(Xtest)    


def xgboost_auc(max_depth, lambda_, subsample, colsample_bytree, min_child_weight, eta):
    xgboost = xgb.train({
        'booster': 'gbtree',
        'objective': 'binary:logistic',
        'eval_metric': 'auc',
        'gamma': 0.1,
        'max_depth': int(max_depth),
        'lambda': min(lambda_, 1000),
        'alpha': 1,
        'subsample': min(subsample, 1),
        'colsample_bytree': min(colsample_bytree, 1),
        'min_child_weight': int(min_child_weight),
        'eta': min(eta, 0.2),
        'seed': 19943,
    }, data_train, num_boost_round=1500)

    # no early stopping is configured, so predict over all boosted rounds
    xgb_prob = xgboost.predict(data_test)
    fpr, tpr, thresholds = roc_curve(ytest, xgb_prob)
    plt.plot(fpr, tpr)
    return auc(fpr, tpr)
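## Example call with arbitrary mid-range values from paramxgb below:
#print(xgboost_auc(max_depth=6, lambda_=800, subsample=0.8,
#                  colsample_bytree=0.8, min_child_weight=3, eta=0.1))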


## Train an SVM classifier and return its AUC
def SVM_auc(max_iter, tol):
    SVM = SVC(C=1.0,
              tol=min(tol, 0.01),
              max_iter=int(max_iter),
              decision_function_shape='ovr',
              random_state=19943)

    SVM.fit(Xtrain, np.ravel(ytrain))

    # NOTE: predict() yields hard 0/1 labels, so the ROC curve collapses to a
    # single point; a continuous score is preferable (see the sketch below).
    yprob = SVM.predict(Xtest)
    fpr, tpr, thresholds = roc_curve(ytest, yprob)
    plt.plot(fpr, tpr)
    return auc(fpr, tpr)
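## A sketch of a score-based variant (assumption: the SVM's continuous
## decision_function margin replaces the hard labels when building the ROC):
#def SVM_auc_scores(max_iter, tol):
#    svm = SVC(C=1.0, tol=min(tol, 0.01), max_iter=int(max_iter),
#              decision_function_shape='ovr', random_state=19943)
#    svm.fit(Xtrain, np.ravel(ytrain))
#    scores = svm.decision_function(Xtest)
#    fpr, tpr, _ = roc_curve(ytest, scores)
#    return auc(fpr, tpr)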


  
# Parameter search ranges for RFC
paramRFC = {'n_estimators': (20, 250), 
              'min_samples_split': (2, 25), 
              'max_features': (0.1, 0.999)}
# Parameter search ranges for AdaBoost
paramADA = {'max_depth': (20, 500), 
           'n_estimators': (50, 500)}
        
# Parameter search ranges for GBDT
paramGBDT = {'n_estimators':(20,200),
            'min_samples_split':(2,15),
            'max_features':(0.1,0.999),
            'max_depth':(2,10),
            'min_impurity_split':(1e-07,1e-03)}


# Parameter search ranges for xgboost
paramxgb = {
'max_depth':(5,10),
'lambda_':(500,1000),
'subsample':(0.5,1),
'colsample_bytree':(0.5,1),
'min_child_weight':(1,5),
'eta':(0.01,0.2)
}  


# Parameter search ranges for SVM
paramSVM = {'max_iter':(20,200),
            'tol':(0.001,0.01)}
'''            
if __name__ == "__main__":


# Tune the RFC model's parameters, maximizing the model's AUC
    rfcBO = BO(rfc_auc, paramRFC)
    print('-' * 53)
    rfcBO.maximize()
    print('-' * 53)
    print('Final Results (AUC)')
    print('RFC: %f' % rfcBO.res['max']['max_val'])
    print('-' * 53)
    print('RFC best param:')
    print('n_estimators:')
    print(int(rfcBO.res['max']['max_params']['n_estimators']))
    print('min_samples_split:')
    print(int(rfcBO.res['max']['max_params']['min_samples_split']))
    print('max_features:')
    print(rfcBO.res['max']['max_params']['max_features'])
    print(rfcBO.res)
'''    
'''
RFC: 0.765784
-----------------------------------------------------
RFC best param:
n_estimators:
228
min_samples_split:
24
max_features:
0.1
'''
'''
if __name__ == "__main__"
   
#使用AdaBoost模型的参数调优,根据模型的auc值进行参数调优
    adaBO = BO(ada_auc, paramADA)
    print('-' * 53)
    adaBO.maximize()
    print('-' * 53)
    print('Final Results (AUC)')
    print('ADA: %f' % adaBO.res['max']['max_val'])
    print('-' * 53)
    print('ADA best param:')
    print('max_depth:')
    print(adaBO.res['max']['max_params']['max_depth'])
    print('n_estimators:')
    print(int(adaBO.res['max']['max_params']['n_estimators']))
'''    
    
'''
BO log (iteration 14): 00m12s | AUC 0.52604 | params: 140.4451, 419.9754
'''




#if __name__ == "__main__":
## Tune the GBDT model's parameters, maximizing the model's AUC
#    gbdtBO = BO(GBDT_auc, paramGBDT)
#    print('-' * 53)
#    gbdtBO.maximize()
#    print('-' * 53)
#    print('Final Results (AUC)')
#    print('GBDT: %f' % gbdtBO.res['max']['max_val'])
#    print('-' * 53)
#    print('GBDT best param:')
#    print('n_estimators:')
#    print(gbdtBO.res['max']['max_params']['n_estimators'])
#    print('min_samples_split:')
#    print(int(gbdtBO.res['max']['max_params']['min_samples_split']))
#    print('max_features:')
#    print(gbdtBO.res['max']['max_params']['max_features'])
#    print('max_depth:')
#    print(int(gbdtBO.res['max']['max_params']['max_depth']))
#    print('min_impurity_split:')
#    print(gbdtBO.res['max']['max_params']['min_impurity_split'])




if __name__ == "__main__":
# Tune the xgboost model's parameters, maximizing the model's AUC
    xgbBO = BO(xgboost_auc, paramxgb)
    print('-' * 53)
    xgbBO.maximize()
    print('-' * 53)
    print('Final Results (AUC)')
    print('XGBOOST: %f' % xgbBO.res['max']['max_val'])
    print('-' * 53)
    print('xgb best param:')
    print('max_depth:')
    print(int(xgbBO.res['max']['max_params']['max_depth']))
    print('lambda_:')
    print(xgbBO.res['max']['max_params']['lambda_'])
    print('subsample:')
    print(xgbBO.res['max']['max_params']['subsample'])
    print('colsample_bytree:')
    print(xgbBO.res['max']['max_params']['colsample_bytree'])
    print('min_child_weight:')
    print(xgbBO.res['max']['max_params']['min_child_weight'])
    print('eta:')
    print(xgbBO.res['max']['max_params']['eta'])




#if __name__ == "__main__": 
#  
## Tune the SVM model's parameters, maximizing the model's AUC
#    svmBO = BO(SVM_auc, paramSVM)
#    print('-' * 53)
#    svmBO.maximize()
#    print('-' * 53)
#    print('Final Results (AUC)')
#    print('SVM: %f' % svmBO.res['max']['max_val'])
#    print('-' * 53)
#    print('SVM best param:')
#    print('max_iter:')
#    print(svmBO.res['max']['max_params']['max_iter'])
#    print('tol:')
#    print(svmBO.res['max']['max_params']['tol'])




# After tuning, plug the best parameters back into each model
'''
## rfm model
rf = RandomForestClassifier(n_estimators = 123,
                           min_samples_split = 13, 
                           max_features = 0.5, 
                           random_state = 2)
                           
## np.ravel flattens a multi-dimensional array to 1-D
rf.fit(Xtrain, np.ravel(ytrain))
yprob = rf.predict_proba(Xtest)[:, 1]
fpr, tpr, thresholds = roc_curve(ytest, yprob)
plt.plot(fpr, tpr)
auc(fpr, tpr)


##adaboost model
bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=100),
                     algorithm="SAMME",
                     n_estimators=250)
                     
bdt = bdt.fit(Xtrain, np.ravel(ytrain))
yprob_ada = bdt.predict_proba(Xtest)[:, 1]  
# ks_value = KSmetrics(ytest,yprob_ada)
# print(ks_value) 
fpr, tpr, thresholds = roc_curve(ytest, yprob_ada)
plt.plot(fpr, tpr)
auc(fpr, tpr)


## SVM model
SVM = SVC(C=1.0, kernel='rbf',gamma='auto', shrinking=True,
          probability=False, tol=0.001, cache_size=300, class_weight=None, 
          verbose=False, max_iter=10000, decision_function_shape='ovr', random_state=19943)


## np.ravel flattens a multi-dimensional array to 1-D
SVM.fit(Xtrain,np.ravel(ytrain))
yprob = SVM.predict(Xtest)
fpr, tpr, thresholds = roc_curve(ytest, yprob)
plt.plot(fpr, tpr)
auc(fpr, tpr)
'''
##  GBDT model
#GBDT = GradientBoostingClassifier(loss='deviance', learning_rate=0.1, 
#                                  n_estimators=163, subsample=1.0, criterion='friedman_mse', min_samples_split=10,
#                                  min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_depth=3, min_impurity_split=1e-07, 
#                                  init=None,random_state=19943, max_features=0.8, verbose=0, max_leaf_nodes=None, 
#                                  warm_start=False, presort='auto')


GBDT = GradientBoostingClassifier(learning_rate=0.1, 
                                      n_estimators=200,  
                                      min_samples_split=3,
                                      max_depth=3, 
                                      random_state=19943, 
                                      max_features=0.8,
                                      min_impurity_split=1e-07)


GBDT.fit(Xtrain, np.ravel(ytrain))
yprob = GBDT.predict_proba(Xtest)[:, 1]
yprob0_1 = GBDT.predict(Xtest)
fpr, tpr, thresholds = roc_curve(ytest, yprob)
plt.plot(fpr, tpr)
auc(fpr, tpr)




'''
Final Results (AUC)
GBDT: 0.778913
-----------------------------------------------------
GBDT best param:
n_estimators:
200.0
min_samples_split:
3
max_features:
0
max_depth:
3
min_impurity_split:
0


'''
### xgboost model
#data_train = xgb.DMatrix(Xtrain,label = ytrain)
#data_test = xgb.DMatrix(Xtest)
#params = {
#'booster':'gbtree',
#'objective':'binary:logistic',
#'early_stopping_rounds':None,
#'eval_metric':'auc',
#'gamma':'0.1',
#'max_depth':6,
#'lambda':949.9995,
#'subsample':0.6173,
#'colsample_bytree':0.5675,
#'min_child_weight':4.1858,
#'eta':0.0584,
#'seed':19943
#}
#
#'''
# BO log (one iteration): 26m14s | AUC 0.78068 | colsample_bytree 0.8082 | eta 0.0263 |
# lambda_ 580.1747 | max_depth 8.1469 | min_child_weight 2.0870 | subsample 0.5000
#'''
#'''
#Final Results (AUC)
#XGBOOST: 0.768946
#-----------------------------------------------------
#xgb best param:
#max_depth:
#6
#lambda_:
#949.999490689
#subsample:
#0.61726290151
#colsample_bytree:
#0.567464234831
#min_child_weight:
#4.18578692738
#eta:
#0.0584290094348
#'''
#
#xgboost = xgb.train(params,data_train,num_boost_round = 2000)
#yprob = xgboost.predict(data_test,ntree_limit = xgboost.best_ntree_limit)
#fpr, tpr, thresholds = roc_curve(ytest, yprob)
#plt.plot(fpr, tpr)
#print(auc(fpr, tpr))


####  Tabulate recall/precision at different thresholds (method 1)
#door = []
#for i in range(500):
#    door.append(i*0.001)
#array_total = np.array([['thresholds','recall','precision']])
#for i in range(len(door)):
#    count_TP = 0
#    count_FN = 0
#    count_FP = 0
#    for j in range(45923,47299):
#        if yprob[j]>=door[i]:
#            count_TP += 1
#        else:
#            count_FN += 1
#    for k in range(0,45923):
#        if yprob[k]>=door[i]:
#            count_FP += 1
#    recall = float(count_TP)/float((count_TP+count_FN))
#    precision = float(count_TP)/float((count_TP+count_FP))
#    array_total_new = np.array([[door[i],recall,precision]])
#    array_total = np.concatenate((array_total,array_total_new))


###  Tabulate recall/precision at different thresholds (method 2)
door = []
array_total = np.array([['thresholds','recall','precision']])


for i in range(20):
    door.append(i*0.03)
for i in range(len(door)):
    yprob_0_1 = []
    for j in range(len(ytest)):
        if yprob[j] >= door[i]:
            yprob_0_1.append(1)
        else:
            yprob_0_1.append(0)
    '''        
    ## print(sklearn.metrics.classification_report(ytest, yprob_0_1))
    
    ##precision = sklearn.metrics.precision_recall_fscore_support(ytest, yprob_0_1,
              #                                  warn_for=('precision'))


    # recall = sklearn.metrics.precision_recall_fscore_support(ytest, yprob_0_1,
                   #                             warn_for=('recall'))
    '''
    precision = sklearn.metrics.precision_score(ytest, yprob_0_1)
    recall = sklearn.metrics.recall_score(ytest, yprob_0_1)


    array_total_new = np.array([[door[i],recall,precision]])
    array_total = np.concatenate((array_total,array_total_new))    
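## A vectorized alternative (a sketch): precision_recall_curve lets sklearn
## pick the thresholds instead of the fixed 0.03 grid used above.
#from sklearn.metrics import precision_recall_curve
#prec, rec, thr = precision_recall_curve(ytest, yprob)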


##  Tabulate fpr/tpr at each threshold
## fpr, tpr, thresholds
thres_total = np.array([['thresholds','fpr','tpr']])


for i in range(len(fpr)):
    thres_total_new = np.array([[thresholds[i],fpr[i],tpr[i]]])
    thres_total = np.concatenate((thres_total,thres_total_new))


##  Save the results
np.savetxt('D:/Users/zengqian905/Desktop/recall_precision_0.03_xgboost_180_1216_1.csv', array_total,fmt='%s',delimiter = ',')  
np.savetxt('D:/Users/zengqian905/Desktop/tpr_fpr_1216_1.csv', thres_total, fmt='%s',delimiter = ',')          


##  Compute the KS statistic
def KSmetrics(ytest, yprob):
    '''Two-sample Kolmogorov-Smirnov test between the score distributions
    of the negative (0) and positive (1) classes.'''
    prob1 = []
    prob2 = []
    for i in range(len(ytest)):
        if int(ytest[i]) == 0:
            prob1.append(yprob[i])
        else:
            prob2.append(yprob[i])

    ksvalue = stats.ks_2samp(prob1, prob2)
    return ksvalue
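## Minimal sanity check on synthetic scores (purely illustrative data):
#rng = np.random.RandomState(0)
#fake_y = np.array([0] * 500 + [1] * 500)
#fake_p = np.concatenate((rng.beta(2, 5, 500), rng.beta(5, 2, 500)))
#print(KSmetrics(fake_y, fake_p))    # well-separated classes -> large KS statistic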


ksvalue_test = KSmetrics(np.array(ytest), np.array(yprob))
print(ksvalue_test)




###  Inspect feature importances
#importance_GBDT = GBDT.feature_importances_
#
### Persist the model
### compress controls the compression level: 0 = none, 9 = maximum
#from sklearn.externals import joblib
#joblib.dump(GBDT, 'GBDT_classifier.pkl', compress=3)
    