Taking SiliconFlow (硅基流动) as an example (other cloud providers are configured similarly):
from langchain.chat_models import init_chat_model

# SiliconFlow exposes an OpenAI-compatible API, so we use the "openai" provider
# and point base_url at the SiliconFlow endpoint.
model = init_chat_model(
    model_provider="openai",
    model="deepseek-ai/DeepSeek-R1-Distill-Qwen-7B",
    base_url="https://api.siliconflow.cn/v1/",
    api_key="sk-n********************ywocwd",
    temperature=0,
)
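Once the model is initialized, you can sanity-check the connection with a direct call. A minimal sketch (assuming the API key above is valid; the prompt is just an example):

# One-shot call: returns an AIMessage whose .content holds the reply text.
response = model.invoke("Introduce yourself in one sentence.")
print(response.content)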
Taking local Ollama as an example:
from langchain.chat_models import init_chat_model

# The "ollama" provider talks to a locally running Ollama instance
# (requires the langchain-ollama package).
model = init_chat_model(
    model_provider="ollama",
    model="qwen2.5:3b",
    temperature=0,
)
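Note that Ollama only serves models that have already been pulled locally (e.g. via ollama pull qwen2.5:3b). The local model streams tokens the same way the cloud one does; a minimal sketch:

# Stream tokens from the local model as they are generated.
for chunk in model.stream("Why is the sky blue?"):
    print(chunk.content, end="", flush=True)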
Connecting to MCP services with streaming output
Whatever service you need, you can search for it here: MCP Servers
The MCP adapter package; just install it and you're set:
pip install -U langchain-mcp-adapters
import asyncio

from langchain_mcp_adapters.client import MultiServerMCPClient
from langgraph.prebuilt import create_react_agent


async def main():
    # Initialize the MCP client: tavily-mcp is launched via npx as a stdio server,
    # and the Tavily API key is passed through its environment.
    async with MultiServerMCPClient(
        {
            "tavily-mcp": {
                "command": "npx",
                "args": ["-y", "tavily-mcp"],
                "env": {"TAVILY_API_KEY": "tvly-dev-a**********xAQ"},
                "disabled": False,
                "autoApprove": []
            }
        }
    ) as client:
        # Initialize the agent with the tools exposed by the MCP server
        agent = create_react_agent(
            model=model,
            tools=client.get_tools(),
        )
        # Call the agent
        stream = agent.astream({"messages": [
            {"role": "system", "content": "You are a helpful assistant. Please use your tools to solve the problem."},
            {"role": "user", "content": "Please check the weather in Guangzhou for me."}
        ]})
        # Stream the results: each chunk is a state update from one agent step
        async for chunk in stream:
            print(chunk)


asyncio.run(main())
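The default stream yields one state update per agent step, which is fairly verbose. If you only want the model's tokens, LangGraph also supports stream_mode="messages". A minimal sketch of what the loop above could look like in that mode (still inside the async with block; the exact metadata fields depend on your LangGraph version):

# Token-level streaming: each item is a (message_chunk, metadata) pair.
async for token, metadata in agent.astream(
    {"messages": [{"role": "user", "content": "Please check the weather in Guangzhou for me."}]},
    stream_mode="messages",
):
    # Only print non-empty content; tool outputs also flow through this stream.
    if token.content:
        print(token.content, end="", flush=True)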