Below is an example of Chinese text classification based on a support vector machine (SVM). It covers loading the dataset, preprocessing, feature extraction, model training, and model evaluation, and can be adapted directly for a project:
import os
import jieba
import numpy as np
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
# Load the dataset: expects one sub-directory per category, one document per file
def load_dataset(path):
    texts, labels = [], []
    for label_name in os.listdir(path):
        label_path = os.path.join(path, label_name)
        # Use the part of the directory name before the first '.' as the category label
        label = label_name.split('.')[0]
        for file_name in os.listdir(label_path):
            file_path = os.path.join(label_path, file_name)
            with open(file_path, 'r', encoding='utf-8') as f:
                text = f.read()
            texts.append(text)
            labels.append(label)
    return texts, labels
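# Hypothetical input layout (directory and category names below are examples only):
#   data/
#     sports/      0001.txt, 0002.txt, ...
#     finance/     0001.txt, 0002.txt, ...
#     technology/  0001.txt, ...
# Example call (path is illustrative): texts, labels = load_dataset('data')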
# Preprocessing and word segmentation: Chinese text has no word boundaries, so segment it with jieba
def preprocess(text):
    words = jieba.cut(text)
    return ' '.join(words)
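# Example (assumed usage): preprocess('机器学习') returns the jieba tokens joined by
# single spaces, so the TfidfVectorizer used later can treat each segmented word as a token.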