Creating conditional branches
from typing import TypedDict
from langgraph.constants import START, END
from langgraph.graph import StateGraph

class State(TypedDict):
    message: str
    log: int

def node1(state: State):
    return {"message": "node1"}

def node2(state: State):
    print("entering node2")
    return {"message": "node2"}

def node3(state: State):
    print("entering node3")
    return {"message": "node3"}

buildgraph = StateGraph(State)
buildgraph.add_node(node1)
buildgraph.add_node(node2)
buildgraph.add_node(node3)

# router for the conditional edge
def router(state: State):
    print(state["log"])
    if state["log"] == 1:
        return "node2"
    return "node3"

buildgraph.add_edge(START, "node1")
buildgraph.add_conditional_edges("node1", router, path_map=["node2", "node3"])
buildgraph.add_edge("node2", END)
buildgraph.add_edge("node3", END)

graph = buildgraph.compile()
# when log is 1 the graph goes to node2, otherwise to node3
print(graph.invoke({"message": "1", "log": 2}))
Map-reduce branches
The difference from traditional branching is that map-reduce is meant for processing different pieces of information with the same function: it fans out into parallel branches and finally aggregates all the results, somewhat like an upgraded version of a normal branch.
import operator
from typing import TypedDict, Annotated
from langchain_openai import ChatOpenAI
from langgraph.constants import START, END
from langgraph.graph import StateGraph
from langgraph.types import Send
from pydantic import BaseModel

sub_prompt = """Generate three example subtopics for the topic {topic}; return them as a list"""
joke_prompt = """Write one joke about {sub}"""
best_prompt = """Pick the funniest joke from {jokes}"""

model = ChatOpenAI(
    api_key="your-api-key",
    base_url="your-base-url",
    model="model-name",  # use the correct model name
)

class AllState(TypedDict):
    topic: str
    subs: list
    jokes: Annotated[list, operator.add]
    best: str

# per-branch input state: each Send delivers one subtopic
class Sub(TypedDict):
    sub: str

# structured-output schemas
class subState(BaseModel):
    subs: list[str]

class jokeState(BaseModel):
    joke: str

class bestState(BaseModel):
    best: str

def genertsub(state: AllState):
    prompt = sub_prompt.format(topic=state["topic"])
    result = model.with_structured_output(subState).invoke(prompt)
    print(result)
    return {"subs": result.subs}

def genertjoke(state: Sub):
    prompt = joke_prompt.format(sub=state["sub"])
    result = model.with_structured_output(jokeState).invoke(prompt)
    print(result)
    return {"jokes": [result.joke]}

def genertbest(state: AllState):
    prompt = best_prompt.format(jokes=state["jokes"])
    result = model.with_structured_output(bestState).invoke(prompt)
    print(result)
    return {"best": result.best}

buildgraph = StateGraph(AllState)

# Unlike the router above, this one uses the Send class, which lets you send
# specific arguments to a node. This is the map step: the same node is run
# once for every element.
def router(state: AllState):
    return [Send("genertjoke", {"sub": f}) for f in state["subs"]]

buildgraph.add_node(genertsub)
buildgraph.add_node(genertjoke)
buildgraph.add_node(genertbest)
buildgraph.add_edge(START, "genertsub")
buildgraph.add_conditional_edges("genertsub", router, path_map=["genertjoke"])
buildgraph.add_edge("genertjoke", "genertbest")
buildgraph.add_edge("genertbest", END)
graph = buildgraph.compile()
print(graph.invoke({"topic": "animals"}))
Of course, langgraph also supports plain parallel branches: when we add several ordinary edges out of one node instead of a conditional edge, it performs a fan-out and runs the target nodes in the same step. Because the branches are scheduled concurrently rather than truly simultaneously, the order in which branches are processed can vary from run to run once there are many of them.
builder.add_edge(START, "a")
builder.add_edge("a", "b")
builder.add_edge("a", "c")
builder.add_edge("b", "d")
builder.add_edge("c", "d")
builder.add_edge("d", END)
Loops in the graph
Adding a termination condition to the router
For a graph that contains a cycle, we usually put a termination condition in the conditional edge.
import operator
from typing import TypedDict, Annotated
from langgraph.constants import START, END
from langgraph.graph import StateGraph

class State(TypedDict):
    message: Annotated[list, operator.add]

def node1(state: State):
    print(state["message"])
    return {"message": ["1"]}

def node2(state: State):
    print(state["message"])
    return {"message": ["2"]}

graphbuild = StateGraph(State)
graphbuild.add_node(node1)
graphbuild.add_node(node2)

# termination condition: go to END once the message list has 5 or more entries
def router(state: State):
    if len(state["message"]) < 5:
        return "node2"
    return END

graphbuild.add_edge(START, "node1")
graphbuild.add_conditional_edges("node1", router)
graphbuild.add_edge("node2", "node1")
graph = graphbuild.compile()
result = graph.invoke({"message": []})
print(result)
recursion_limit: limiting the number of steps
A graph has a default recursion limit of 25 steps; going past it raises an error. You can change the limit by passing recursion_limit in the config when you invoke:
result = graph.invoke({"message": []}, {"recursion_limit": 25})
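When the limit is exceeded the graph raises a GraphRecursionError, which can be caught; a small sketch against the looping graph above:
from langgraph.errors import GraphRecursionError

try:
    graph.invoke({"message": []}, {"recursion_limit": 3})
except GraphRecursionError:
    print("hit the recursion limit")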
remaining_steps: RemainingSteps, returning state before the limit is hit
If you want the graph to return its state before it hits the recursion limit, add a remaining_steps: RemainingSteps field to the state.
from langgraph.managed.is_last_step import RemainingSteps

class State(TypedDict):
    value: str
    action_result: str
    remaining_steps: RemainingSteps
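langgraph populates this field at runtime with the number of steps left before the limit, so a router can end the run cleanly instead of erroring out. A minimal sketch, assuming the looping graph above with remaining_steps added to its state:
def router(state: State):
    # bail out to END shortly before the recursion limit would be hit
    if state["remaining_steps"] <= 2:
        return END
    return "node2"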
The Command object
If a node should update the state and decide the next node at the same time, it can do so by returning a Command object.
import operator
from typing import TypedDict, Annotated, Literal
from langgraph.constants import START, END
from langgraph.graph import StateGraph
from langgraph.types import Command

class State(TypedDict):
    message: Annotated[list, operator.add]

# update is the state update; goto names the next node; graph defaults to None,
# meaning the target is in the current graph (Command.PARENT targets the parent
# graph); resume is used when resuming from an interrupt (covered later).
# The return type annotation tells langgraph which nodes this Command can go to.
def node1(state: State) -> Command[Literal["node2"]]:
    print(state["message"])
    return Command(
        update={"message": ["1"]},
        goto="node2",
    )

def node2(state: State):
    print(state["message"])
    return {"message": ["2"]}

graphbuild = StateGraph(State)
graphbuild.add_node(node1)
graphbuild.add_node(node2)
graphbuild.add_edge(START, "node1")
graph = graphbuild.compile()
result = graph.invoke({"message": []})
print(result)
Adding configuration when running the graph
We can define a custom config dict with fields such as the model name or a SystemMessage, pass it via config= at invoke time, and receive it in a node through a RunnableConfig parameter. This lets us configure things at run time: a frontend can, for example, set the system prompt or switch models by sending config data through the API.
from typing import TypedDict
from langchain_core.messages import SystemMessage, HumanMessage
from langchain_core.runnables.config import RunnableConfig
from langchain_openai import ChatOpenAI
from langgraph.constants import START, END
from langgraph.graph import StateGraph

openai_model = ChatOpenAI(
    api_key="your-api-key",
    base_url="your-base-url",
    model="model-name",
)
anthropic_model = ChatOpenAI(
    api_key="your-api-key",
    base_url="your-base-url",
    model="model-name",
)
models = {
    "anthropic": anthropic_model,
    "openai": openai_model,
}

class State(TypedDict):
    message: list

graph = StateGraph(State)

def call(state: State, config: RunnableConfig):
    model_name = config["configurable"].get("model")
    llm = models[model_name]
    message = state["message"]
    if "SystemMessage" in config["configurable"]:
        message = [SystemMessage(content=config["configurable"].get("SystemMessage"))] + message
    response = llm.invoke(message)
    print(response.content)
    return {"message": [response.content]}

graph.add_node(call)
graph.add_edge(START, "call")
graph.add_edge("call", END)
graph_compile = graph.compile()

config = {"configurable": {"model": "anthropic"}}
graph_compile.invoke({"message": [HumanMessage(content="who are you")]}, config=config)

config = {"configurable": {"model": "openai", "SystemMessage": "Answer in Chinese"}}
graph_compile.invoke({"message": [HumanMessage(content="who are you")]}, config=config)
Adding a retry policy to a node
We can add a retry policy to catch exceptions raised inside a node and re-run it. Retry policies are usually attached to nodes whose outcome can vary: fetching data through an API, complex logic, or nodes that are critical to the flow. Pure data-processing nodes normally don't get one, because they are deterministic: they either work or they don't.
import random
from typing import TypedDict
from langgraph.graph import StateGraph
from langgraph.types import RetryPolicy

class State(TypedDict):
    message: str
    result: str

def api_call_function(state: State):
    """Simulate an API call that may fail."""
    if random.random() < 0.3:  # 30% failure rate
        print("api_call failed, retrying")
        raise ConnectionError("API connection failed")
    return {"result": "success"}

def api_call_function2(state: State):
    """Simulate a timeout."""
    if random.random() < 0.7:  # 70% failure rate
        print("api_call2 failed, retrying")
        raise TimeoutError("timed out")
    return {"result": "success"}

# build the workflow
builder = StateGraph(State)
builder.add_node("api_call", api_call_function,
                 retry=RetryPolicy(max_attempts=3, retry_on=(ConnectionError, TimeoutError)))
builder.add_node("api_call2", api_call_function2,
                 retry=RetryPolicy(max_attempts=3, retry_on=TimeoutError))
builder.set_entry_point("api_call")
builder.add_edge("api_call", "api_call2")
builder.set_finish_point("api_call2")
workflow = builder.compile()
workflow.invoke({"message": "1111"})
Adding persistence
For a real product, we want the llm to remember the conversation within the same session, i.e. memory. We can do this through the checkpoint mechanism by adding a MemorySaver (it doesn't have to be in-memory storage; redis and others work too; here we cover the most basic in-memory option).
import operator
from typing import TypedDict, Annotated
from langchain_openai import ChatOpenAI
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import StateGraph

model = ChatOpenAI(
    api_key="your-api-key",
    base_url="your-base-url",
    model="model-name",
)

class State(TypedDict):
    message: Annotated[list, operator.add]

def call(state: State):
    response = model.invoke(state["message"])
    print(response.content)
    return {"message": [response]}

# build the workflow
builder = StateGraph(State)
builder.add_node("api_call", call)
builder.set_entry_point("api_call")
builder.set_finish_point("api_call")

# create the memory object
saver = MemorySaver()
# set the checkpointer when compiling the graph
workflow = builder.compile(checkpointer=saver)
workflow.invoke({"message": ["My name is Mochi"]}, {"configurable": {"thread_id": "1"}})
workflow.invoke({"message": ["Who am I?"]}, {"configurable": {"thread_id": "1"}})
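Because the checkpointer records every step under the thread_id, the persisted state can also be inspected afterwards; get_state returns a snapshot whose values field holds the current state:
snapshot = workflow.get_state({"configurable": {"thread_id": "1"}})
print(snapshot.values["message"])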
Human-in-the-loop
Some agents occasionally need human input before they can continue, so this kind of human-machine interaction matters. Calling interrupt inside a node pauses the graph; after the user submits something (or takes some other action), we resume the graph by invoking it with a Command.
from typing import TypedDict
from langgraph.checkpoint.memory import MemorySaver
from langgraph.constants import START, END
from langgraph.graph import StateGraph
from langgraph.types import interrupt, Command

class State(TypedDict):
    message: str
    it: str

def node_1(state: State):
    print("entering node_1")

def node_2(state: State):
    print("entering node_2")
    # pause here; the value passed to Command(resume=...) is what interrupt returns
    it = interrupt("interrupted in node_2")
    return {"it": it}

def node_3(state: State):
    print("entering node_3")

# build the workflow
builder = StateGraph(State)
builder.add_node(node_1)
builder.add_node(node_2)
builder.add_node(node_3)
builder.add_edge(START, "node_1")
builder.add_edge("node_1", "node_2")
builder.add_edge("node_2", "node_3")
builder.add_edge("node_3", END)

saver = MemorySaver()
thread = {"configurable": {"thread_id": "1"}}
workflow = builder.compile(checkpointer=saver)
workflow.invoke({"message": "1111"}, config=thread)
workflow.invoke(Command(resume="go to node_3"), config=thread)
Note that the checkpointer and the thread config are required, otherwise interrupt and resume will not work.
Using tools
Creating and binding tools
For an agent, calling tools is one of the most common steps, and langgraph makes it simple: decorate the tool function with @tool, then bind it when instantiating the model via .bind_tools, which takes a list of tool functions.
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI
from langgraph.prebuilt import ToolNode

@tool
def get_weather(location: str):
    """Call to get the current weather."""
    if location.lower() in ["sf", "san francisco"]:
        return "It's 60 degrees and foggy."
    else:
        return "It's 90 degrees and sunny."

@tool
def get_coolest_cities():
    """Get a list of coolest cities"""
    return "nyc, sf"

tools = [get_weather, get_coolest_cities]
tool_node = ToolNode(tools)

model_with_tools = ChatOpenAI(
    api_key="your-api-key",
    base_url="your-base-url",
    model="model-name",
).bind_tools(tools)

print(model_with_tools.invoke("what's the weather in sf?").tool_calls)
print("######")
print(tool_node.invoke({"messages": [model_with_tools.invoke("what's the weather in sf?")]}))
The above simulates two nodes: the first invoke has the model produce a message carrying tool_calls, which is then passed to tool_node; that node parses the tool_calls attribute and executes the tools the model decided to use. The tool node doesn't have to be the prebuilt one from langgraph.prebuilt; we can also write such a node ourselves, as sketched below.
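A minimal sketch of such a hand-written node (tools_by_name is a helper defined here; the prebuilt ToolNode additionally handles things like errors, parallel execution, and state injection):
from langchain_core.messages import ToolMessage

tools_by_name = {t.name: t for t in tools}

def my_tool_node(state: dict):
    """Execute every tool call on the last message and answer with ToolMessages."""
    results = []
    for call in state["messages"][-1].tool_calls:
        output = tools_by_name[call["name"]].invoke(call["args"])
        results.append(ToolMessage(content=str(output), tool_call_id=call["id"]))
    return {"messages": results}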
A simple ReAct implementation
A ReAct agent is one that thinks and acts in a loop. In langchain this is trivial: you instantiate a single object. But that gives poor control over granularity, which langgraph solves: the repeated think/act cycle becomes a loop in the graph, and all it takes is a router that checks whether the last message in the state carries tool_calls.
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI
from langgraph.graph import StateGraph, MessagesState, START, END
from langgraph.prebuilt import ToolNode

@tool
def get_weather(location: str):
    """Call to get the current weather."""
    if location.lower() in ["sf", "san francisco"]:
        return "It's 60 degrees and foggy."
    else:
        return "It's 90 degrees and sunny."

@tool
def get_coolest_cities():
    """Get a list of coolest cities"""
    return "nyc, sf"

tools = [get_weather, get_coolest_cities]
tool_node = ToolNode(tools)

model_with_tools = ChatOpenAI(
    api_key="your-api-key",
    base_url="your-base-url",
    model="model-name",
).bind_tools(tools)

def should_continue(state: MessagesState):
    messages = state["messages"]
    last_message = messages[-1]
    if last_message.tool_calls:
        return "tools"
    return END

def call_model(state: MessagesState):
    messages = state["messages"]
    response = model_with_tools.invoke(messages)
    return {"messages": [response]}

workflow = StateGraph(MessagesState)
# Define the two nodes we will cycle between
workflow.add_node("agent", call_model)
workflow.add_node("tools", tool_node)
workflow.add_edge(START, "agent")
workflow.add_conditional_edges("agent", should_continue, ["tools", END])
workflow.add_edge("tools", "agent")
app = workflow.compile()

for chunk in app.stream(
    {"messages": [("human", "what's the weather in the coldest cities?")]},
    stream_mode="values",
):
    chunk["messages"][-1].pretty_print()
InjectedState: injecting graph state into tools
Sometimes a tool needs an extra argument that we don't want to expose to the model. The InjectedState annotation is a good fit: used together with Annotated, it automatically injects the graph state when the tool is called, so the tool can use it directly without declaring it in args.
from typing import List
from typing_extensions import Annotated, TypedDict
from langchain_core.messages import BaseMessage, AIMessage
from langchain_core.tools import tool
from langgraph.prebuilt import InjectedState, ToolNode

class AgentState(TypedDict):
    messages: List[BaseMessage]
    foo: str

@tool
# the full graph state is injected here
def state_tool(x: int, state: Annotated[dict, InjectedState]) -> str:
    '''Do something with state.'''
    if len(state["messages"][0].content) > 2:
        return state["foo"] + str(x)
    else:
        return "not enough messages"

@tool
def foo_tool(x: int, foo: Annotated[str, InjectedState("foo")]) -> str:
    '''Do something else with state.'''
    return foo + str(x + 1)

node = ToolNode([state_tool, foo_tool])

tool_call1 = {"name": "state_tool", "args": {"x": 3}, "id": "1", "type": "tool_call"}
tool_call2 = {"name": "foo_tool", "args": {"x": 1}, "id": "2", "type": "tool_call"}
state = {
    "messages": [AIMessage("fasdkjflkadfjsk", tool_calls=[tool_call1, tool_call2])],
    "foo": "bar",
}
print(node.invoke(state))
We can also add memory persistence to share graph state within the same thread, for example when a sub-agent invoked as a tool should receive the message history, or when a tool's input needs the agent's earlier steps to validate its context.
Passing configuration into tools
Sometimes a tool's work needs some configuration, such as a user id. We can pass it in via config and have the tool accept a RunnableConfig parameter.
from typing import List
from langchain_core.runnables.config import RunnableConfig
from langchain_core.tools import tool

user_to_pets = {}  # simple in-memory store, keyed by user id

@tool(parse_docstring=True)  # mark this function as a tool and parse its docstring
def update_favorite_pets(
    pets: List[str],
    config: RunnableConfig,  # run config object, carrying the user id etc.
) -> None:
    """Update the user's favorite pets.

    Args:
        pets: List of favorite pets to set.
    """
    # a minimal sketch: store the pets under the user_id taken from the config
    user_to_pets[config["configurable"].get("user_id")] = pets

Then pass the user_id through the config when running the graph (inputs stands for your input messages):

for chunk in graph.stream(
    inputs, {"configurable": {"user_id": "123"}}, stream_mode="values"
):
    chunk["messages"][-1].pretty_print()  # print the last message
Updating graph state from a tool
This one is even simpler: the Command class we covered earlier does it. One thing to watch out for: don't forget to return messages as well, so the tool call gets a ToolMessage in the history.
return Command(
    update={
        # update the state keys
        "user_info": user_info,
        # update the message history
        "messages": [
            ToolMessage(
                "Successfully looked up user information", tool_call_id=tool_call_id
            )
        ],
    },
)
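Put together, such a tool might look like the sketch below; lookup_user_info and its user_info value are hypothetical, and InjectedToolCallId injects the id of the current tool call so the ToolMessage can answer it:
from typing import Annotated
from langchain_core.messages import ToolMessage
from langchain_core.tools import tool, InjectedToolCallId
from langgraph.types import Command

@tool
def lookup_user_info(tool_call_id: Annotated[str, InjectedToolCallId]) -> Command:
    """Look up user info and write it into the graph state."""
    user_info = {"name": "placeholder"}  # hypothetical lookup result
    return Command(
        update={
            "user_info": user_info,  # the graph state needs a user_info key
            "messages": [
                ToolMessage(
                    "Successfully looked up user information",
                    tool_call_id=tool_call_id,
                )
            ],
        },
    )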