DataWhale “动手学 Agent 应用开发” Task04: MCP Principles and a Minimal Hands-On Example

Learning Notes

For this task I wrapped a tool with the MCP protocol and plugged it into the modified LangGraph project from Task03. I had often heard about LangChain, LangGraph, MCP, and the like; now, through the DataWhale course, I have actually implemented them myself, which feels great!

MCP Code

from mcp.server.fastmcp import FastMCP
from ddgs.ddgs import DDGS

# Create an MCP server
mcp = FastMCP("AgentMCP")

@mcp.tool()
def search_internet(query: str) -> str:
    """
    Search the internet with the DuckDuckGo search engine.
    Use this tool when you need to answer questions about current events,
    facts, or anything you do not already know.
    :param query: str, the keyword or question to search for.
    :return: str, a summary of the search results.
    """
    try:
        with DDGS() as ddgs:
            # max_results=3 caps the number of results to avoid information overload
            results = list(ddgs.text(query, region="cn-zh", max_results=3))
            if not results:
                return "No relevant results found."
            # Format the results into an easy-to-read string
            return "\n".join([f"[{i+1}] {r['title']}: {r['body']}" for i, r in enumerate(results)])
    except Exception as e:
        return f"Error during search: {e}"

LangGraph Code

tool.py

import json, os
import requests
import sys
from pathlib import Path

# Make the project root importable so the MCP tool function can be reused directly
sys.path.append(str(Path(__file__).resolve().parents[1]))

from langchain_core.tools import tool
from mcp_proj.mvp import search_internet

@tool
def google_search(search_query: str) -> str:
    """
    谷歌搜索。当你需要访问互联网、查询百科知识、了解时事新闻时使用。
    输入应该是一个搜索查询字符串。
    """
    api_key = os.getenv("SERPER_API_KEY")
    if not api_key:
        return "错误:环境变量 SERPER_API_KEY 未设置。"
        
    url = "https://google.serper.dev/search"
    payload = json.dumps({"q": search_query})
    headers = {
        'X-API-KEY': api_key,
        'Content-Type': 'application/json'
    }

    try:
        response = requests.post(url, headers=headers, data=payload).json()
        organic_results = response.get('organic', [])
        
        if not organic_results:
            return "No relevant results found."

        # Format the search results
        formatted_results = []
        for result in organic_results[:5]:
            title = result.get('title', 'No title')
            snippet = result.get('snippet', 'No description')
            link = result.get('link', '')
            formatted_results.append(f"Title: {title}\nSnippet: {snippet}\nLink: {link}")

        return "\n\n".join(formatted_results)

    except Exception as e:
        return f"Error during search: {str(e)}"

# Put all tools into one list
tools = [google_search, search_internet]

if __name__ == "__main__":
    # Test the search_internet tool directly
    print(search_internet("美国最近一次阅兵原因 2025"))
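
Importing search_internet directly, as above, bypasses the MCP protocol entirely. For comparison, here is a hedged sketch of loading the same tool through the protocol with the langchain-mcp-adapters package; the MultiServerMCPClient class and its async get_tools() method follow that package's documented usage, and the server name and path are assumptions based on this project's layout (mvp.py must start its own server with mcp.run(), as sketched in the MCP section above):

import asyncio
from langchain_mcp_adapters.client import MultiServerMCPClient

async def load_mcp_tools():
    # Spawn the FastMCP server from mcp_proj/mvp.py as a stdio subprocess
    # and convert the tools it exposes into LangChain-compatible tool objects.
    client = MultiServerMCPClient(
        {
            "agent_mcp": {
                "command": "python",
                "args": ["mcp_proj/mvp.py"],
                "transport": "stdio",
            }
        }
    )
    return await client.get_tools()

if __name__ == "__main__":
    mcp_tools = asyncio.run(load_mcp_tools())
    print([t.name for t in mcp_tools])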

agent.py

import os
from typing import TypedDict, Annotated
import operator

from langchain_core.messages import AnyMessage, SystemMessage, HumanMessage, ToolMessage
from langchain_openai import ChatOpenAI

from langgraph.graph import StateGraph, END

from tool import google_search, search_internet


class AgentState(TypedDict):
    """
    Defines the state of our agent. It's a dictionary that will be passed
    between the nodes of our graph.
    
    Attributes:
        messages: A list of messages that represents the conversation history.
                  The `operator.add` annotation means that new messages are
                  appended to this list rather than replacing it.
    """
    messages: Annotated[list[AnyMessage], operator.add]


class LangGraphAgent:
    def __init__(self):
        
        # We use ChatOpenAI wrapper here as it's compatible with the OpenAI API format
        # that DeepSeek uses. This simplifies tool binding.
        self.model = ChatOpenAI(
            api_key=os.getenv('DEEPSEEK_API_KEY'),
            base_url="https://api.deepseek.com",
            model="deepseek-chat",
            temperature=0.6,
            max_tokens=2000,
        )

        # Bind the tools to the model. This lets the model "know" which tools
        # it can call and how to format the arguments for them; both
        # google_search and search_internet are bound here.
        self.model_with_tools = self.model.bind_tools([google_search, search_internet])

        # Build the graph
        self.graph = self._build_graph()

    def _build_graph(self) -> StateGraph:
        """
        Builds the LangGraph agent graph.
        """
        # Initialize a new graph with our defined state
        graph = StateGraph(AgentState)

        # Add the two primary nodes: the model caller and the tool executor
        graph.add_node("call_model", self.call_model)
        graph.add_node("call_tool", self.call_tool)

        # The entry point for the graph is the `call_model` node
        graph.set_entry_point("call_model")

        # Define the conditional logic for routing after the model is called
        graph.add_conditional_edges(
            "call_model",
            self.should_continue,
            {
                "continue": "call_tool",  # If tool call, go to call_tool
                "end": END               # If no tool call, end the graph
            }
        )

        # After a tool is called, we always route back to the model to process the results
        graph.add_edge("call_tool", "call_model")
        
        # Compile the graph into a runnable application
        return graph.compile()

    def should_continue(self, state: AgentState) -> str:
        """
        Conditional Edge: Determines the next step after the model's response.
        
        Checks the last message in the state. If it contains tool calls,
        it signals to continue to the `call_tool` node. Otherwise, it signals to end.
        """
        last_message = state['messages'][-1]
        
        # If the last message has tool calls, we need to execute them.
        if last_message.tool_calls:
            return "continue"
        # Otherwise, the model has provided a final answer, so we end.
        return "end"

    def call_model(self, state: AgentState) -> dict:
        """
        Node: Invokes the language model to decide the next action or give a final answer.
        
        Appends the model's response to the message history in the state.
        """
        print("[Agent] Thinking...")
        response = self.model_with_tools.invoke(state['messages'])
        return {"messages": [response]}

    def call_tool(self, state: AgentState) -> dict:
        """
        Node: Executes the tool called by the model.
        
        It checks the last message for tool calls, executes them, and appends
        the tool's output to the message history as a ToolMessage.
        """
        last_message = state['messages'][-1]
        tool_messages = []
        for tool_call in last_message.tool_calls:
            tool_name = tool_call['name']
            print(f"[Agent] Executing tool: {tool_name} with args {tool_call['args']}")

            # google_search is a LangChain tool, so it is invoked via .invoke();
            # search_internet is a plain function imported from the MCP module.
            if tool_name == "google_search":
                response = google_search.invoke(tool_call['args'])
            elif tool_name == "search_internet":
                response = search_internet(**tool_call['args'])
            else:
                response = f"Unknown tool: {tool_name}"

            tool_messages.append(ToolMessage(content=str(response), tool_call_id=tool_call['id']))

        return {"messages": tool_messages}
        
    def run(self, query: str) -> str:
        """
        Runs the agent with a given user query.
        """
        print(f"[Agent] Starting process for query: {query}")

        # The initial state includes a system prompt and the user's query
        initial_state: AgentState = {
            "messages": [
                SystemMessage(content="You are a helpful assistant."),
                HumanMessage(content=query)
            ]
        }
        
        # `invoke` runs the graph to completion and returns the final state;
        # we are interested in the final list of messages.
        final_state = self.graph.invoke(initial_state)

        # The final answer is the content of the last message from the assistant.
        final_answer = final_state['messages'][-1].content
        print(f"[Agent] Final Answer: {final_answer}")
        return final_answer

if __name__ == '__main__':
    agent = LangGraphAgent()
    response = agent.run("怎么学好 Agent 相关技术?")
    # You can also try a query that requires a tool:
    # response = agent.run("What is the current capital of Brazil and what was the previous one?")
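
If you want to watch the intermediate steps (model call, tool call, model call again) rather than only the final answer, the compiled graph can also be driven with stream(). A small sketch, assuming it is run next to agent.py; stream_mode="updates" follows LangGraph's documented API and yields one update per executed node:

from langchain_core.messages import HumanMessage, SystemMessage
from agent import LangGraphAgent

agent = LangGraphAgent()
demo_state = {
    "messages": [
        SystemMessage(content="You are a helpful assistant."),
        HumanMessage(content="What is the current capital of Brazil and what was the previous one?"),
    ]
}
# Each update is a dict keyed by the node that just ran ("call_model" / "call_tool").
for update in agent.graph.stream(demo_state, stream_mode="updates"):
    for node_name, node_output in update.items():
        print(f"--- {node_name} ---")
        for msg in node_output["messages"]:
            print(type(msg).__name__, ":", str(msg.content)[:200])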