MCP Client Development: -32000 Error

While developing an MCP client, a -32000 error occurred. The source code is as follows:

import json
from typing import Optional
from contextlib import AsyncExitStack

from openai import OpenAI

from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client

class MCPClient:
    def __init__(self):
        """ 初始化 MCP 客户端 """

        self.exit_stack = AsyncExitStack()
        self.openai_api_key = "sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxx", # 替换成自己的 API_KEY
        self.base_url = "https://dashscope.aliyuncs.com/compatible-mode/v1"
        self.model = "qwen-turbo"
 
        self.client = OpenAI(api_key=self.openai_api_key, base_url=self.base_url) # 创建OpenAI client
        self.session: Optional[ClientSession] = None
        self.exit_stack = AsyncExitStack()        

    async def connect_to_server(self, server_script_path: str):
        """ 连接到 MCP 服务器

        Args:
            服务脚本的文件路径: Path to the server script (.py or .js)
        """
        is_python = server_script_path.endswith('.py')
        is_js = server_script_path.endswith('.js')
        if not (is_python or is_js):
            raise ValueError("Server script must be a .py or .js file")
        
        command = "python" if is_python else "node"
        server_params = StdioServerParameters(
            command = command,
            args = [server_script_path],
            env = None
        )

        # Start the MCP server subprocess and establish stdio communication
        stdio_transport = await self.exit_stack.enter_async_context(stdio_client(server_params))
        self.stdio, self.write = stdio_transport
        self.session = await self.exit_stack.enter_async_context(ClientSession(self.stdio, self.write))

        await self.session.initialize()

        # List the tools exposed by the MCP server
        response = await self.session.list_tools()
        tools = response.tools
        print("\nConnected to server with tools:", [tool.name for tool in tools])     
        
    async def process_query(self, query: str) -> str:
        """
        使用大模型处理查询并调用可用的 MCP 工具 (Function Calling)
        """
        messages = [{"role": "user", "content": query}]
        
        response = await self.session.list_tools()
        
        available_tools = [{
            "type": "function",
            "function": {
                "name": tool.name,
                "description": tool.description,
                "parameters":  tool.inputSchema
            }
        } for tool in response.tools]
        # print(available_tools)
        
        response = self.client.chat.completions.create(
            model=self.model,            
            messages=messages,
            tools=available_tools     
        )
        
        content = response.choices[0]
        if content.finish_reason == "tool_calls":
            # If the model asks for a tool call, parse it
            tool_call = content.message.tool_calls[0]
            tool_name = tool_call.function.name
            tool_args = json.loads(tool_call.function.arguments)
            
            # Execute the tool on the MCP server
            result = await self.session.call_tool(tool_name, tool_args)
            print(f"\n\n[Calling tool {tool_name} with args {tool_args}]\n\n")
            
            # Append both the model's tool-call message and the tool result to messages
            messages.append(content.message.model_dump())
            messages.append({
                "role": "tool",
                "content": result.content[0].text,
                "tool_call_id": tool_call.id,
            })
            
            # Send the result back to the LLM to generate the final answer
            response = self.client.chat.completions.create(
                model=self.model,
                messages=messages,
            )
            return response.choices[0].message.content
            
        return content.message.content
    
    async def chat_loop(self):
        """运行交互式聊天循环"""
        print("\n🤖 MCP 客户端已启动!输入 'quit' 退出")

        while True:
            try:
                query = input("\n你: ").strip()
                if query.lower() == 'quit':
                    break
                
                response = await self.process_query(query)  # Send the user input to the OpenAI-compatible API
                print(f"\n🤖 OpenAI: {response}")

            except Exception as e:
                print(f"\n⚠️ 发生错误: {str(e)}")

    async def cleanup(self):
        """清理资源"""
        await self.exit_stack.aclose()

async def main():

    client = MCPClient()
    try:
        # Replace the path below with the path to your own MCP server script
        await client.connect_to_server(r"C:\Users\User\Desktop\mcp\mcp_server.py")
        await client.chat_loop()
    finally:
        await client.cleanup()

if __name__ == "__main__":
    import asyncio
    asyncio.run(main())
 

The call to await self.session.initialize() in the code above raises the following error:

Traceback (most recent call last):
 line 46, in connect_to_server
    await self.session.initialize()
  File "C:\ProgramData\anaconda3\Lib\site-packages\mcp\client\session.py", line 123, in initialize
    result = await self.send_request(
             ^^^^^^^^^^^^^^^^^^^^^^^^
  File "C:\ProgramData\anaconda3\Lib\site-packages\mcp\shared\session.py", line 286, in send_request
    raise McpError(response_or_error.error)
mcp.shared.exceptions.McpError: Connection closed

After some debugging, the following log line was printed:

Received response for request 0: jsonrpc='2.0' id=0 error=ErrorData(code=-32000, message='Connection closed', data=None)
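
For reference, one common way to surface JSON-RPC level messages like the one above is to raise the logging level before creating the client. This is only a minimal sketch and assumes the mcp package routes its diagnostics through Python's standard logging module:

import logging

# Print DEBUG-level output from all loggers, including those inside the mcp SDK,
# so request/response traffic and error payloads show up on the console.
logging.basicConfig(level=logging.DEBUG)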

Final resolution:

This error generally means the MCP server failed to initialize: the server process exits (or never starts properly) before the initialize handshake completes, so the client only ever receives the generic -32000 "Connection closed" response. First, make sure the path to mcp_server.py is correct; second, mcp_server.py must be able to run on its own. In my case, mcp_server.py imported a locally developed package whose directory had never been added to sys.path, so the server script could not start, which produced the error shown above.
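
A minimal sketch of the fix at the top of mcp_server.py, assuming the locally developed package sits next to the server script; both the directory layout and the my_local_pkg import below are placeholders rather than the actual package from my project:

import os
import sys

# Put the directory containing mcp_server.py (and the local package next to it) on the
# module search path before importing the local package, so the script also starts
# correctly when the MCP client launches it as a subprocess from another working directory.
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))

from my_local_pkg import some_helper  # placeholder for the local import that previously failed

It is also worth running the server on its own first, e.g. python mcp_server.py, to confirm it starts without import errors; if the script cannot run standalone, the client will only ever report the generic -32000 "Connection closed" error.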
