功能特性:流式响应(SSE 逐字输出)、多轮对话(按用户保留最近 10 轮上下文)
1.文件结构
├── app.py
│
├── 📁 templates/ # 前端模板目录
│ ├── index.html # 主页面HTML文件
│ ├── marked.min.js
2.python依赖
Flask==2.3.3
Flask-CORS==4.0.0
openai==1.3.0
langchain==0.1.0
langchain-community==0.0.10
langchain-core==0.1.0
langchain-openai==0.0.5
python-dotenv==1.0.0
3.app.py
import json
import os
from typing import Generator
from dotenv import load_dotenv
from flask import Flask, request, jsonify, render_template, send_file
from flask_cors import CORS
from langchain.memory import ConversationBufferWindowMemory
from langchain_core.messages import HumanMessage, AIMessage
from langchain_openai import ChatOpenAI

# Load environment variables (expects DASHSCOPE_API_KEY in config.env).
load_dotenv('config.env')

app = Flask(__name__)
CORS(app)  # allow cross-origin requests so a separately hosted front-end can call the API

# Configuration: DashScope API key, falling back to a placeholder string.
DASHSCOPE_API_KEY = os.getenv('DASHSCOPE_API_KEY', 'your-dashscope-api-key')

# Initialize the Qwen-Plus chat model through DashScope's OpenAI-compatible endpoint.
llm = ChatOpenAI(
    model="qwen-plus",
    openai_api_key=DASHSCOPE_API_KEY,
    openai_api_base="https://dashscope.aliyuncs.com/compatible-mode/v1",
    temperature=0.7,
    streaming=True  # enables token-by-token output via llm.stream()
)

# Memory management - sliding window keeping the most recent 10 turns.
# NOTE(review): this module-level `memory` is never referenced in the visible
# code (per-user memories live in `user_sessions` below) — likely dead; confirm
# against the rest of the file before removing.
memory = ConversationBufferWindowMemory(k=10, return_messages=True)

# Per-user session store: user_id -> ConversationBufferWindowMemory
user_sessions = {}
def get_user_memory(user_id: str):
    """Return the conversation memory for *user_id*, creating it on first use.

    Every user gets an independent sliding-window memory holding the last
    10 turns, stored in the module-level ``user_sessions`` dict.
    """
    memory_for_user = user_sessions.get(user_id)
    if memory_for_user is None:
        memory_for_user = ConversationBufferWindowMemory(k=10, return_messages=True)
        user_sessions[user_id] = memory_for_user
    return memory_for_user
def generate_streaming_response(user_id: str, question: str) -> Generator[str, None, None]:
    """Yield SSE-formatted frames answering *question* for *user_id*.

    Each model token is forwarded as a ``data: {...}`` frame terminated by a
    blank line. After a successful stream, the full exchange is saved into
    the user's memory and a final frame with ``done: True`` (optionally
    carrying demo source links) is emitted. On failure, a single error frame
    is emitted instead.
    """
    try:
        user_memory = get_user_memory(user_id)

        # Replay prior turns so the model sees the full conversation context.
        messages = []
        for past in user_memory.chat_memory.messages:
            if isinstance(past, HumanMessage):
                messages.append({"role": "user", "content": past.content})
            elif isinstance(past, AIMessage):
                messages.append({"role": "assistant", "content": past.content})
        messages.append({"role": "user", "content": question})

        # Stream tokens from Qwen-Plus, forwarding each as an SSE data frame.
        answer_parts = []
        for chunk in llm.stream(messages):
            if hasattr(chunk, 'content') and chunk.content:
                piece = chunk.content
                answer_parts.append(piece)
                yield f"data: {json.dumps({'content': piece, 'done': False}, ensure_ascii=False)}\n\n"
        full_response = "".join(answer_parts)

        # Persist the finished exchange only after the stream completed cleanly.
        user_memory.chat_memory.add_user_message(question)
        user_memory.chat_memory.add_ai_message(full_response)

        # Final frame; demo source links attach only for the hard-coded keyword.
        sources = [
            {"id": 1, "title": "通义千问Plus", "url": "https://dashscope.aliyuncs.com/"},
            {"id": 2, "title": "LangChain上下文管理", "url": "https://python.langchain.com/"}
        ]
        if '吃饭' not in question:
            sources = None
        yield f"data: {json.dumps({'content': '', 'done': True, 'sources': sources}, ensure_ascii=False)}\n\n"
    except Exception as e:
        error_msg = f"生成回答时出错: {str(e)}"
        yield f"data: {json.dumps({'content': error_msg, 'done': True, 'error': True}, ensure_ascii=False)}\n\n"
    # Runs when the consumer exhausts the generator, success or error.
    print('问题回答完成')
@app.route('/')
def index():
    """Serve the chat front-end page."""
    page = render_template('index.html')
    return page
@app.route('/marked.min.js')
def markedjs():
    """Serve the bundled marked.js library as a static file.

    Fix: the original used render_template, which pushes the minified
    JavaScript through Jinja — any ``{{`` / ``{%`` sequences in the file
    would be interpreted as template syntax and corrupt the output or raise.
    send_file returns the file bytes untouched, with an explicit JS mimetype.
    """
    return send_file(
        os.path.join(app.root_path, 'templates', 'marked.min.js'),
        mimetype='application/javascript',
    )
@app.route('/api/chat', methods=['POST'])
def chat():
    """Handle a chat POST and stream the answer back as server-sent events.

    Expects a JSON body with ``question`` and optional ``user_id``; returns
    400 on an empty question and 500 on unexpected failure.
    """
    try:
        payload = request.get_json()
        question = payload.get('question', '').strip()
        user_id = payload.get('user_id', 'default_user')

        # Reject empty questions up front.
        if not question:
            return jsonify({'error': '问题不能为空'}), 400

        # Stream the generator straight into the response body (the explicit
        # Content-Type header marks the stream as SSE).
        sse_headers = {
            'Cache-Control': 'no-cache',
            'Connection': 'keep-alive',
            'Content-Type': 'text/event-stream',
        }
        return app.response_class(
            generate_streaming_response(user_id, question),
            mimetype='text/plain',
            headers=sse_headers,
        )
    except Exception as e:
        return jsonify({'error': f'服务器错误: {str(e)}'}), 500
@app.route('/api/clear_history', methods=['POST'])
def clear_history():
    """Clear the stored conversation history for the requesting user."""
    try:
        payload = request.get_json()
        user_id = payload.get('user_id', 'default_user')
        session_memory = user_sessions.get(user_id)
        if session_memory is not None:
            session_memory.clear()
        return jsonify({'message': '历史记录已清除'})
    except Exception as e:
        return jsonify({'error': f'清除历史记录失败: {str(e)}'}), 500
@app.route('/api/history', methods=['GET'])
def get_history():
"""获取对话历史"""
try:
user_id = request.args.get('user_id', 'default_user')
user_memory = get_user_memory(user_id)
history = []
for msg in user_memory.chat_memory.messages:
if isinstance(msg, HumanMessage):
history.append({'type': 'user', 'content': msg.content})
elif isinsta
