A brief explanation of np.mean(y_test == y_predict)

This article looks at array comparison in Python: how NumPy compares two arrays element by element, and how taking the mean of the comparison result measures the fraction of matching elements, which can be used as a simple similarity (or accuracy) score between two arrays.
import numpy as np  # NumPy is needed for Examples 2 and 3

# Example 1: comparing plain Python lists
x = [1, 0, 1, 1, 1, 1]
y = [0, 0, 0, 0, 0, 1]
print(x == y)
Result:
False

# Example 2: comparing NumPy arrays (element-wise)
x = np.array([1, 0, 1, 1, 1, 1])
y = np.array([0, 0, 0, 0, 0, 1])
print(x == y)
Result:
[False  True False False False  True]

# Example 3: mean of the element-wise comparison
x = np.array([1, 0, 1, 1, 1, 1])
y = np.array([0, 0, 0, 0, 0, 1])
print("{:.2f}".format(np.mean(x == y)))
Result:
0.33

Notes:
1. For NumPy arrays, x == y compares the two arrays element by element, producing True where the values match and False otherwise. (For plain Python lists, as in Example 1, == compares the lists as a whole and returns a single bool.)
2. Example 3 takes the mean of the boolean array from Example 2, counting True as 1 and False as 0; with 2 matches out of 6 elements, the result is 0.33.
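
Applied to the expression in the title: y_test holds the true labels and y_predict the model's predictions, so np.mean(y_test == y_predict) is the fraction of predictions that match the true labels, i.e. the classification accuracy. Below is a minimal sketch; the values of y_test and y_predict are made up purely for illustration.

import numpy as np

# Hypothetical true labels and model predictions (illustrative values only)
y_test = np.array([1, 0, 1, 1, 0, 1, 0, 1])
y_predict = np.array([1, 0, 0, 1, 0, 1, 1, 1])

# The element-wise comparison gives a boolean array; its mean is the
# fraction of positions where prediction and truth agree.
accuracy = np.mean(y_test == y_predict)
print("{:.2f}".format(accuracy))  # 6 matches out of 8 -> 0.75

For 1-D label arrays this gives the same value as sklearn.metrics.accuracy_score(y_test, y_predict), which is why the expression is often used as a quick accuracy check.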
