Fixing a failed git push

This post describes how to resolve the rejection error that appears when pushing code to a Linux server over SSH: edit the repository's .git config file on the server and add a setting that permits pushes to the checked-out branch, after which the client can push normally again.

You've painstakingly set up an SSH server on Linux, finished assigning permissions to the accounts, and think the job is all done.

Then you log in from your client Linux machine and run:

      $ git clone yourAccount@serverName:/project/.git   # clone the repository from the server

      $ echo My name is sb > haha.txt   # write "My name is sb" into haha.txt (created if it doesn't exist)

      $ git add .   # stage the files in the current directory (here, haha.txt)

      $ git commit -a -m "first blood"   # commit the changes to the local repository

      $ git push origin   # push to the remote repository, i.e. the one on the server

But then, damn it, this message gets thrown back at you (the familiar "refusing to update checked out branch" complaint):

remote: error: refuse .....

remote: error: blablabla

remote: error: blablabla

remote: error: blablabla

This I could not stand. At first I figured I must not have washed my face that morning, but even after washing up I still couldn't push. I kept hunting for the cause on the client side, convinced I was pushing the wrong way,

before it finally occurred to me to look at the server.

So, on the Linux server, go straight into the repository's .git directory and edit this file:

      ~/.git/config

Append the following at the end of the file:

[receive]
        denyCurrentBranch = ignore

By default Git denies pushes to the branch that is currently checked out on the server; this setting tells it to ignore that check, so your push is accepted and the branch lands on the server~
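
If you'd rather not open the file in an editor, the same setting can be applied from the command line with git config (run inside the repository on the server); this is just the one-liner equivalent of the edit above:

      $ git config receive.denyCurrentBranch ignore   # same as adding the [receive] section by hand

      $ git config --get receive.denyCurrentBranch   # verify: should print "ignore"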

Finally, on the server, you can run:

$ git reset --hard

and check that the files really were changed~ (With denyCurrentBranch = ignore, the push updates the branch but not the server's checked-out files; the hard reset is what brings the working tree up to date.)
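
As an aside, the cleaner long-term setup is to push to a bare repository, one with no checked-out branch for Git to protect, so the refusal never happens in the first place. A minimal sketch, assuming a hypothetical path /srv/project.git on the server:

      $ git init --bare /srv/project.git   # on the server: bare repo, no working tree to refuse updates to

      $ git remote set-url origin yourAccount@serverName:/srv/project.git   # on the client: repoint the remote

      $ git push origin master   # pushes are now accepted without any config tweak

And on Git 2.3 or newer, setting denyCurrentBranch = updateInstead (rather than ignore) makes Git update the server's working tree automatically on each push, so the manual git reset --hard step above is no longer needed.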

