BERT tokenizer and text restoration

The following script reads news articles from an Excel file, merges the title ('标题') and content ('内容') columns, strips HTML entities, stop words and punctuation, maps the tokens through the BERT vocabulary, and finally uses the BERT tokenizer to encode and decode (restore) the cleaned text before saving the results to a new Excel file.

import re
import html

import pandas as pd
from transformers import BertTokenizer
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize

# First run may require: import nltk; nltk.download('punkt'); nltk.download('stopwords')

# Load the BERT tokenizer
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')

# Read the text data from the Excel file (assumed to be on the first sheet)
file_path = 'Indonesia news overall.xlsx'
df = pd.read_excel(file_path, header=0)  # use the first row as column names

# Merge the title and content columns into a single column
df['合并文本'] = df['标题'].astype(str) + ' ' + df['内容'].astype(str)

# Tokenize the merged text
tokenized_texts = []
original_texts = []  # holds the original (unescaped) text
stop_words = set(stopwords.words('english'))  # build the stop-word set once

for text in df['合并文本'].values.tolist():
    # Unescape HTML entities
    text = html.unescape(text)
    # Word-level tokenization
    tokens = word_tokenize(text)
    # Remove stop words
    tokens = [token for token in tokens if token.lower() not in stop_words]
    # Map tokens to vocabulary IDs and back to token strings;
    # words missing from the BERT vocabulary come back as [UNK]
    input_ids = tokenizer.convert_tokens_to_ids(tokens)
    original_tokens = tokenizer.convert_ids_to_tokens(input_ids)
    # Drop special characters and punctuation (this also discards [UNK])
    cleaned_tokens = [token for token in original_tokens if re.match(r'^\w+$', token)]
    # Join the tokens back into a single string
    merged_text = " ".join(cleaned_tokens)
    tokenized_texts.append(merged_text)
    original_texts.append(text)  # keep the original text

# Add the tokenized result as a new column
df['Tokenized_Content'] = tokenized_texts
# Add the original text as a new column
df['Original_Content'] = original_texts

# Reverse the tokenization (decode) and add the result as a new column
reconstructed_texts = []
for input_ids in tokenizer(df['Tokenized_Content'].tolist(), return_tensors='pt',
                           padding=True, truncation=True)['input_ids']:
    reconstructed_text = tokenizer.decode(input_ids, skip_special_tokens=True)
    reconstructed_texts.append(reconstructed_text)

df['Reconstructed_Content'] = reconstructed_texts

# Write the DataFrame with the tokenized columns to a new Excel file
output_file_path = 'Merged_Tokenized_News.xlsx'
df.to_excel(output_file_path, index=False)
print("Data with merged tokenization results saved to Merged_Tokenized_News.xlsx")
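For reference, below is a minimal sketch (assuming the same 'bert-base-uncased' checkpoint; the example sentence is illustrative only) of the round-trip the script relies on: BERT's WordPiece tokenizer splits unfamiliar words into '##'-prefixed subword pieces, and convert_tokens_to_string / decode merge those pieces back into readable text.

from transformers import BertTokenizer

tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')

sentence = "Tokenization splits unfamiliar words into subword pieces."

# WordPiece tokenization: rare words are split into pieces prefixed with "##"
tokens = tokenizer.tokenize(sentence)
print(tokens)  # e.g. ['token', '##ization', 'splits', ...]

# Tokens -> vocabulary IDs -> tokens is lossless for in-vocabulary pieces
input_ids = tokenizer.convert_tokens_to_ids(tokens)
restored_tokens = tokenizer.convert_ids_to_tokens(input_ids)

# convert_tokens_to_string merges "##" pieces back into whole words;
# decode() does the same from IDs and can skip [CLS]/[SEP]/[PAD]
print(tokenizer.convert_tokens_to_string(restored_tokens))
encoded = tokenizer(sentence)['input_ids']
print(tokenizer.decode(encoded, skip_special_tokens=True))

This is also why the main script decodes Tokenized_Content with skip_special_tokens=True: the [CLS], [SEP] and [PAD] tokens added during encoding are dropped, leaving only the restored words.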