LangChain提供了丰富的RAG功能实现包,开发者可以基于这些工具包快速构建企业级Word文档RAG系统或个人智能助理。本项目采用Docx2txtLoader加载Word文档,而没有采用UnstructuredWordDocumentLoader,因为后者依赖较多,更适合英文或结构复杂的文档,国内使用较少。
import os
from langchain_openai import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import Chroma
from langchain_community.document_loaders import TextLoader
from langchain_core.runnables import RunnablePassthrough
from langchain_core.output_parsers import StrOutputParser
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_community.document_loaders import Docx2txtLoader
# --- Model configuration ---
# Local HuggingFace sentence-transformers model used for text embeddings.
EMBEDDING_MODEL_NAME = "./models/st_paraphrase-multilingual-MiniLM-L12-v2"
# Chat model served through DashScope's OpenAI-compatible endpoint.
LLM_MODEL = "qwen3-max"
DASHSCOPE_API_KEY = os.getenv("DASHSCOPE_API_KEY")
# Fail fast with a clear message instead of an opaque HTTP 401 at first call.
if not DASHSCOPE_API_KEY:
    raise RuntimeError(
        "DASHSCOPE_API_KEY environment variable is not set; "
        "export it before running this script."
    )
llm = ChatOpenAI(
    api_key=DASHSCOPE_API_KEY,  # current kwarg; `openai_api_key` is a deprecated alias
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
    model=LLM_MODEL,
)
# --- 1. Load and split the document ---
# Docx2txtLoader is lightweight (only needs docx2txt) and handles Chinese
# .docx files well; UnstructuredWordDocumentLoader pulls in many extra deps.
DOCX_PATH = "your_file.docx"
# Fail fast with a readable error instead of a deep loader stack trace.
if not os.path.exists(DOCX_PATH):
    raise FileNotFoundError(f"Document not found: {DOCX_PATH}")
loader = Docx2txtLoader(DOCX_PATH)
docs = loader.load()
# Overlapping character chunks preserve context across chunk boundaries.
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
splits = text_splitter.split_documents(docs)
# --- 2. Build the vector store and retriever ---
embeddings = HuggingFaceEmbeddings(model_name=EMBEDDING_MODEL_NAME)
vectorstore = Chroma.from_documents(documents=splits, embedding=embeddings)
retriever = vectorstore.as_retriever()
# --- 3. Assemble the RAG chain ---
# The prompt restricts the model to answer strictly from retrieved context.
template = """仅根据以下上下文来回答问题:
{context}
问题: {question}
"""
prompt = ChatPromptTemplate.from_template(template)
# Step 1 of the pipeline: fan the incoming question out into the two
# prompt variables — the retriever fills {context}, the raw question
# passes through unchanged as {question}.
retrieval_step = {"context": retriever, "question": RunnablePassthrough()}
# Pipe: retrieve -> fill prompt -> call LLM -> reduce reply to a plain string.
rag_chain = retrieval_step | prompt | llm | StrOutputParser()
# --- 4. Interactive question loop ---
# Reads questions from stdin and prints the RAG answer; "quit"/"exit",
# Ctrl-D, or Ctrl-C all terminate cleanly.
while True:
    try:
        question = input("请输入您的问题(输入 quit 或 exit 退出):")
    except (EOFError, KeyboardInterrupt):
        # Ctrl-D / Ctrl-C: exit gracefully instead of dumping a traceback.
        print("程序已退出。")
        break
    if question.strip().lower() in ("quit", "exit"):
        print("程序已退出。")
        break
    response = rag_chain.invoke(question)
    print(response)