Deep Learning Series 81: A Quick Start with MCP

MCP (Model Context Protocol) is an open protocol that lets AI models interact safely with local and remote resources through standardized server implementations, making it a convenient foundation for building agents and multi-step workflows on top of LLMs. MCP follows a client-server architecture, and a single host application can connect to multiple servers.
The demo below shows how to use MCP end to end. First install the Python SDK with pip install mcp.
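Both scripts in this demo read their credentials from a .env file via python-dotenv. A minimal sketch of that file, using the variable names the code actually reads (BEARER, MODEL_NAME, MODEL_SERVER, MODEL_KEY) with placeholder values:

# .env — loaded by load_dotenv() in both the server and the client
BEARER=Bearer <your-api-token>
MODEL_NAME=<your-model-name>
MODEL_SERVER=<your-openai-compatible-endpoint>
MODEL_KEY=<your-model-api-key>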

1. Server

On the server side, each tool is just a plain function registered with the @mcp.tool decorator:

import os, sys, logging, requests
import warnings
from mcp.server.fastmcp import FastMCP
from qwen_agent.llm import get_chat_model
from dotenv import load_dotenv

logger = logging.getLogger(__name__)
load_dotenv()
warnings.filterwarnings("ignore")

mcp = FastMCP("Server")
bearer = os.getenv('BEARER')
llm = get_chat_model({
    "model": os.getenv('MODEL_NAME'),
    "model_server": os.getenv('MODEL_SERVER'),
    "api_key": os.getenv('MODEL_KEY')
})

@mcp.tool()
def query_hour(question: str, start_at: str = "2025-04-01", end_at: str = "2025-06-01") -> str:
    """
    Answer a question about a given time range and return the query result.
    :param question: the question about the time range
    :param start_at: query start date, formatted like "2025-01-01"
    :param end_at: query end date, formatted like "2025-06-01"
    :return: the answer to the question
    """
    # Fetch the raw data for the requested time range (endpoint URL elided)
    res = requests.post("***",
                        json={"start_at": start_at, "end_at": end_at},
                        verify=False,
                        headers={"Authorization": bearer}).json()
    # qwen_agent streams partial results; drain the generator so that
    # `response` ends up holding the final, complete message list
    for response in llm.chat([
        {"role": "system", "content": f"The query returned: {str(res)}. Answer the question as concisely as possible."},
        {"role": "user", "content": question},
    ]):
        pass
    return response[0]['content']
 
if __name__ == "__main__":
    # Run the MCP server over standard I/O
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)
    logger.info('start server')
    mcp.run(transport='stdio')
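Because FastMCP derives each tool's name, description, and JSON schema from the function signature, type hints, and docstring, exposing another capability is just another decorated function. A minimal sketch (the add tool is a hypothetical example, not part of the demo):

@mcp.tool()
def add(a: int, b: int) -> int:
    """Add two integers and return their sum."""
    return a + b

The server can also be exercised before any client exists: the mcp CLI ships a development inspector, typically launched with mcp dev server.py.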

2. Client

Any MCP-capable frontend can act as the client; here we hand-roll a small one:

import asyncio, logging, os, json, sys
from typing import Optional
from contextlib import AsyncExitStack
from qwen_agent.llm import get_chat_model
from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client
from dotenv import load_dotenv
load_dotenv()

class MCPClient:
    def __init__(self):
        """Initialize the MCP client"""
        self.llm = get_chat_model({
            "model": os.getenv('MODEL_NAME'),
            "model_server": os.getenv('MODEL_SERVER'),
            "api_key": os.getenv('MODEL_KEY')
        })
        self.session: Optional[ClientSession] = None
        self.exit_stack = AsyncExitStack()
 
    async def connect_to_server(self, server_script_path: str):
        """Connect to the MCP server and list its available tools"""
        is_python = server_script_path.endswith('.py')
        is_js = server_script_path.endswith('.js')
        if not (is_python or is_js):
            raise ValueError("The server script must be a .py or .js file")
        command = "python" if is_python else "node"
        server_params = StdioServerParameters(
            command=command,
            args=[server_script_path],
            env=None
        )

        # Spawn the MCP server as a subprocess and open the stdio transport
        stdio_transport = await self.exit_stack.enter_async_context(stdio_client(server_params))
        self.stdio, self.write = stdio_transport
        self.session = await self.exit_stack.enter_async_context(ClientSession(self.stdio, self.write))
        await self.session.initialize()

        # List the tools exposed by the MCP server
        response = await self.session.list_tools()
        print("\nConnected to server. Available tools:", [tool.name for tool in response.tools])
        
    async def process_query(self, query: str) -> str:
        """
        Process a query with the LLM, invoking MCP tools via function calling.
        """
        messages = [{"role": "user", "content": query}]
        # Advertise every MCP tool to the model as a callable function
        tool_response = await self.session.list_tools()
        functions = [{"name": tool.name,
                      "description": tool.description,
                      "parameters": tool.inputSchema} for tool in tool_response.tools]
        # qwen_agent streams partial results; drain the generator so that
        # `responses` ends up holding the final message list
        for responses in self.llm.chat(messages=messages, functions=functions):
            pass
        # Record the assistant turn (including any function_call) in the history
        messages.extend(responses)
        for message in responses:
            if fn_call := message.get("function_call", None):
                fn_name: str = fn_call['name']
                fn_args: dict = json.loads(fn_call["arguments"])
                result = await self.session.call_tool(fn_name, fn_args)
                fn_res: str = result.content[0].text
                messages.append({
                    "role": "function",
                    "name": fn_name,
                    "content": fn_res,
                })
        # Feed the tool results back to the model to generate the final answer
        for responses in self.llm.chat(messages=messages, functions=functions):
            pass
        return responses[0]['content']
    
    async def chat_loop(self):
        """Run the interactive chat loop"""
        print("\n🤖 MCP client started! Type 'quit' to exit")
        while True:
            try:
                query = input("\nYou: ").strip()
                if query.lower() == 'quit':
                    break

                response = await self.process_query(query)
                # Crude retry: if the reply mentions an error, resend it
                # together with the original query
                if 'error' in response.lower() or '错误' in response:
                    response = await self.process_query(response + query)
                print(f"\n🤖 LighthouseAI: {response}")

            except Exception as e:
                print(f"\n⚠️ Error: {str(e)}")
 
    async def cleanup(self):
        """Release every resource held by the exit stack"""
        await self.exit_stack.aclose()
 
async def main():
    if len(sys.argv) < 2:
        print("Usage: python client.py <path_to_server_script>")
        sys.exit(1)
    client = MCPClient()
    try:
        await client.connect_to_server(sys.argv[1])
        await client.chat_loop()
    finally:
        await client.cleanup()
 
if __name__ == "__main__":
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)
    asyncio.run(main())
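For orientation, here is a hedged sketch of the message shapes process_query passes around, following qwen_agent's function-calling convention (all field values are illustrative, not real model output):

# 1) The first llm.chat() round ends with an assistant turn such as:
assistant_turn = {
    "role": "assistant",
    "content": "",
    "function_call": {
        "name": "query_hour",
        "arguments": '{"question": "...", "start_at": "2025-04-01", "end_at": "2025-06-01"}',
    },
}
# 2) After session.call_tool() runs, its text result is appended as:
function_turn = {
    "role": "function",
    "name": "query_hour",
    "content": "...tool output text...",
}
# 3) The second llm.chat() then sees [user, assistant, function] and
#    produces the final natural-language answer.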

3. Usage

Start the MCP client: uv run client.py server.py
An interactive chat prompt appears; type a question, and the MCP server dispatches it to the matching tool and returns the answer.
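uv run resolves the script's dependencies from the project environment; with plain pip, installing them would look roughly like this (package names as published on PyPI):

pip install mcp qwen-agent python-dotenv requests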
