Runnable - LCEL 核心构建块详解
Runnable 是构建 LCEL (LangChain Expression Language) 的核心构建块,用于构建可组合的 AI 工作流。
核心 Runnable 类型
1. RunnableMap (RunnableParallel)
核心功能: 并行执行多个任务,为每个任务提供相同的输入,以字典形式返回输出结果
语法:
# 方法1:直接使用字典
chain = RunnableMap({
"key1": runnable1,
"key2": runnable2,
"key3": runnable3
})
# 方法2:使用管道操作符
chain = runnable_base | RunnableMap({
"key1": runnable1,
"key2": runnable2,
"key3": runnable3
})
示例:
from langchain_core.runnables import RunnableMap, RunnableLambda
def add_one(x: int) -> int:
    """Return the input incremented by one."""
    return 1 + x


def mul_two(x: int) -> int:
    """Return the input multiplied by two."""
    return 2 * x


def mul_three(x: int) -> int:
    """Return the input multiplied by three."""
    return 3 * x
runnable_1 = RunnableLambda(add_one)
runnable_2 = RunnableLambda(mul_two)
runnable_3 = RunnableLambda(mul_three)
# 创建并行链
parallel_chain = runnable_1 | RunnableMap({
"mul_two": runnable_2,
"mul_three": runnable_3
})
# 执行
result = parallel_chain.invoke(5) # 输出: {'mul_two': 12, 'mul_three': 18}
2. RunnableSequence
核心功能: 线性链式执行,按顺序执行多个 Runnable,前一个的输出作为下一个的输入
语法:
# 方法1:使用管道操作符
chain = prompt | llm | output_parser
# 方法2:使用构造函数
chain = RunnableSequence(
    prompt,
    llm,
    output_parser
)
示例:
from langchain_core.runnables import RunnableSequence
from langchain_core.prompts import PromptTemplate
from langchain_openai import ChatOpenAI
from langchain_core.output_parsers import StrOutputParser
# 创建提示模板
prompt = PromptTemplate.from_template("Tell me a joke about {topic}")
# 创建 LLM
llm = ChatOpenAI(model="gpt-3.5-turbo")
# 创建输出解析器
output_parser = StrOutputParser()
# 创建序列
chain1 = RunnableSequence(prompt, llm, output_parser)
chain2 = prompt | llm | output_parser
# 执行
result = chain1.invoke({"topic": "cats"})
复杂示例:
# 步骤1:分析用户输入的餐厅偏好
gather_preferences_prompt = ChatPromptTemplate.from_template(
"用户输入了一些餐厅偏好:{preferences}\n请将用户的偏好总结为清晰的需求:"
)
# 步骤2:根据需求推荐餐厅
recommend_restaurants_prompt = ChatPromptTemplate.from_template(
"基于用户需求:{summarized_preferences}\n请推荐3家适合的餐厅,并说明推荐理由:"
)
# 步骤3:总结推荐内容
summarize_recommendations_prompt = ChatPromptTemplate.from_template(
"以下是餐厅推荐和推荐理由:{recommendations}\n请总结成2-3句话,供用户快速参考:"
)
# 创建各步骤链
gather_preferences_chain = gather_preferences_prompt | llm
recommend_restaurants_chain = recommend_restaurants_prompt | llm
summarize_recommendations_chain = summarize_recommendations_prompt | llm
# 创建 RunnableSequence
restaurant_chain = (
    gather_preferences_chain
    | (lambda msg: {"summarized_preferences": msg.content})
    | recommend_restaurants_chain
    | (lambda msg: {"recommendations": msg.content})
    | summarize_recommendations_chain
)
# 执行
result = restaurant_chain.invoke({
"preferences": "我喜欢安静的环境,最好有素食选项,并且价格适中。"
})
3. RunnableLambda
核心功能: 将 Python 函数包装为 Runnable 对象,使函数可以无缝集成到 LCEL 链式工作流中
语法:
from langchain_core.runnables import RunnableLambda
# 1. 简单 lambda 函数
lambda_chain = RunnableLambda(lambda x: x * 2)
# 2. 带有文档字符串的函数
def add_one(x: int) -> int:
    """Increment *x* by one and return the result."""
    result = x + 1
    return result
add_one_chain = RunnableLambda(add_one)
# 3. 处理字典输入
def extract_name(data: dict) -> str:
    """Return the value stored under the "name" key of *data*.

    Raises KeyError if the key is absent (same as a plain subscript).
    """
    name = data["name"]
    return name
extract_name_chain = RunnableLambda(extract_name)
4. RunnableBranch
核心功能: 根据条件选择不同的执行路径,是条件路由的核心组件
语法:
from langchain_core.runnables import RunnableBranch
# 基本语法
RunnableBranch(
(条件, 执行链路),
(条件, 执行链路),
...,
else_runnable
)
# 简洁语法
RunnableBranch(
(lambda x: x["topic"] == "langchain", langchain_chain),
(lambda x: x["topic"] == "anthropic", anthropic_chain),
general_chain
)
运行流程:
- 输入被传递给条件判断函数
- 从左到右检查每个条件
- 第一个匹配条件的分支将被触发
- 如果没有条件匹配,将使用 else_runnable
- 被触发分支的输出作为整个 RunnableBranch 的输出
示例:
from langchain_core.runnables import RunnableLambda, RunnableBranch
def classify_topic(data):
    """Classify the topic of an incoming question.

    Args:
        data: dict with a "topic" key holding the question/topic text.

    Returns:
        One of "langchain", "anthropic" or "general".
    """
    # Hoist the lowercased topic so it is computed once, not per branch.
    # Parameter renamed from `input` to avoid shadowing the builtin.
    topic = data["topic"].lower()
    if "langchain" in topic:
        return "langchain"
    if "anthropic" in topic:
        return "anthropic"
    return "general"
# 创建分类链
classify_chain = RunnableLambda(classify_topic)
# 创建条件路由
branch_chain = RunnableBranch(
    (lambda x: x["topic"] == "langchain", langchain_chain),
    (lambda x: x["topic"] == "anthropic", anthropic_chain),
    general_chain
)
# 创建完整链
full_chain = {"topic": classify_chain} | branch_chain
# 执行
result = full_chain.invoke({
"topic": "LangChain如何使用?",
"question": "如何使用LangChain的Runnable?"
})
智能路由系统示例:
class IntelligentRouter:
    """Keyword-based router that dispatches a question to a specialist LLM chain.

    Routing is a simple substring match against Chinese keyword lists; the
    first matching predicate wins, and unmatched questions fall through to a
    general-purpose chain.

    NOTE(review): relies on RunnableBranch, ChatPromptTemplate, ChatOpenAI and
    StrOutputParser being imported by the enclosing module — confirm imports.
    """

    def __init__(self):
        # Build the routing branch once; reused by every route_question() call.
        self.router_chain = self._build_router()

    def _build_router(self) -> RunnableBranch:
        """Build the routing branch; tuple order defines match priority."""
        return RunnableBranch(
            # Urgent questions are checked first.
            (self._is_urgent_question, self._create_urgent_chain()),
            # Technical questions.
            (self._is_technical_question, self._create_technical_chain()),
            # Business questions.
            (self._is_business_question, self._create_business_chain()),
            # Default route when no predicate matches.
            self._create_general_chain()
        )

    def _is_urgent_question(self, data: dict) -> bool:
        """Return True if data["question"] contains an urgency keyword."""
        urgent_keywords = ["紧急", "立刻", "马上", "故障", "错误"]
        return any(keyword in data["question"] for keyword in urgent_keywords)

    def _is_technical_question(self, data: dict) -> bool:
        """Return True if data["question"] contains a technical keyword."""
        tech_keywords = ["代码", "编程", "API", "部署", "技术"]
        return any(keyword in data["question"] for keyword in tech_keywords)

    def _is_business_question(self, data: dict) -> bool:
        """Return True if data["question"] contains a business keyword."""
        business_keywords = ["价格", "合同", "客户", "销售", "商务"]
        return any(keyword in data["question"] for keyword in business_keywords)

    def _create_urgent_chain(self):
        """Build the chain for urgent questions (low temperature for consistency)."""
        return (
            ChatPromptTemplate.from_template("""
[紧急处理] 请立即处理以下问题:
问题:{question}
请提供快速解决方案:
""")
            | ChatOpenAI(temperature=0.1)
            | StrOutputParser()
        )

    def _create_technical_chain(self):
        """Build the chain for technical questions (uses the stronger gpt-4 model)."""
        return (
            ChatPromptTemplate.from_template("""
作为技术专家,请解答以下技术问题:
问题:{question}
请提供详细的技术方案:
""")
            | ChatOpenAI(model="gpt-4")
            | StrOutputParser()
        )

    def _create_business_chain(self):
        """Build the chain for business questions."""
        return (
            ChatPromptTemplate.from_template("""
作为商务顾问,请回答以下商务问题:
问题:{question}
请提供专业的商务建议:
""")
            | ChatOpenAI(temperature=0.3)
            | StrOutputParser()
        )

    def _create_general_chain(self):
        """Build the default chain for questions no other route claims."""
        return (
            ChatPromptTemplate.from_template("回答以下问题:{question}")
            | ChatOpenAI()
            | StrOutputParser()
        )

    def route_question(self, question: str) -> str:
        """Route *question* through the branch and return the answer text."""
        return self.router_chain.invoke({"question": question})
# 使用智能路由
router = IntelligentRouter()
results = router.route_question("系统出现紧急故障,无法启动")
print("紧急问题处理:", results)
5. RunnableWithFallbacks
核心功能: 在主 Runnable 失败时自动切换到备用 Runnable,提高系统鲁棒性
语法:
from langchain_core.runnables import RunnableWithFallbacks
# 基本语法:首选.with_fallbacks(备选)
primary_runnable.with_fallbacks([fallback_runnable1, fallback_runnable2, ...])
# 构造函数
RunnableWithFallbacks(
primary_runnable,
fallbacks=[fallback_runnable1, fallback_runnable2, ...]
)
运行流程:
- 尝试调用 primary_runnable
- 如果 primary_runnable 成功执行,返回结果
- 如果 primary_runnable 抛出异常,尝试调用第一个 fallback_runnable
- 如果第一个 fallback_runnable 也失败,尝试调用第二个 fallback_runnable,依此类推
- 如果所有 fallback 都失败,抛出最后一个异常
示例:
from langchain_core.runnables import RunnableWithFallbacks
from langchain_openai import ChatOpenAI
from langchain_community.chat_models import ChatAnthropic
from openai import RateLimitError
# 创建主模型
openai_llm = ChatOpenAI(model="gpt-4o-mini", max_retries=0)
# 创建备用模型
anthropic_llm = ChatAnthropic(model="claude-3-sonnet-20240229")
# 创建带回退的链
llm = openai_llm.with_fallbacks([anthropic_llm])
# 创建提示
prompt = PromptTemplate.from_template("Tell me a joke about {topic}")
# 创建完整链
chain = prompt | llm | StrOutputParser()
# 执行(模拟 API 错误)
try:
from unittest.mock import patch
with patch("openai.resources.chat.completions.Completions.create", side_effect=RateLimitError("Rate limit", response=None, body="")):
result = chain.invoke({"topic": "cats"})
except RateLimitError as e:
print("Primary model failed, using fallback")
# 理论上这里会自动使用 anthropic_llm
result = chain.invoke({"topic": "cats"})
print(result)
6. RunnableRetry
核心功能: 对特定异常进行自动重试,使用指数退避策略
语法:
from langchain_core.runnables import RunnableRetry
from openai import RateLimitError
import requests.exceptions
# 推荐用法:通过 .with_retry() 创建 RunnableRetry
# (下列参数名属于 .with_retry();RunnableRetry 构造函数使用不同的参数名,
#  如 bound / retry_exception_types / max_attempt_number)
retry_chain = your_chain.with_retry(
    # 重试条件:仅对这些异常类型重试
    retry_if_exception_type=(
        RateLimitError,
        requests.exceptions.ConnectionError,
        requests.exceptions.Timeout
    ),
    # 等待策略:指数退避 + 抖动
    wait_exponential_jitter=True,
    # 停止条件:最大尝试次数(含首次尝试)
    stop_after_attempt=3
)
运行流程:
- 尝试调用 Runnable
- 如果成功,返回结果
- 如果失败,检查是否在重试异常列表中
- 如果是,按照指数退避策略等待一段时间后重试
- 重复步骤 1-4,直到达到最大重试次数
- 如果所有重试都失败,抛出最后一个异常
7. RunnablePassthrough
核心功能: 保持输入不变,不进行任何处理,通常用于在链中保留原始输入
语法:
from langchain_core.runnables import RunnablePassthrough
# 1. 简单使用
passthrough = RunnablePassthrough()
# 2. 与 RunnableParallel 结合
runnable = RunnableParallel(
original=RunnablePassthrough(),
processed=lambda x: x * 2
)
运行流程:
- 输入被传递给 RunnablePassthrough
- RunnablePassthrough 不做任何处理,直接返回输入
- 输出就是输入本身
示例:
from langchain_core.runnables import RunnablePassthrough
# 创建一个简单的链
chain = RunnablePassthrough()
# 执行
result = chain.invoke(5) # 输出: 5
# 与 RunnableParallel 结合
from langchain_core.runnables import RunnableParallel
runnable = RunnableParallel(
original=RunnablePassthrough(),
doubled=lambda x: x * 2
)
result = runnable.invoke(5) # 输出: {'original': 5, 'doubled': 10}
8. RunnableAssign
核心功能: 将多个 Runnable 的结果合并到输入字典中
示例:
from langchain_core.runnables import RunnablePassthrough
# RunnableAssign 通常通过 RunnablePassthrough.assign(...) 创建:
# 在保留输入字典原有键的基础上,追加新键
chain = RunnablePassthrough.assign(
    mult=lambda x: x["num"] * 3
) | RunnablePassthrough.assign(
    square=lambda x: x["num"] ** 2
)
# 执行
result = chain.invoke({"num": 5})
# 输出: {'num': 5, 'mult': 15, 'square': 25}
9. RunnablePick
核心功能: 从输入字典中提取特定键的值,通常用于从复杂输入中选择特定部分进行处理
语法:
from langchain_core.runnables import RunnablePick
# 1. 提取单个键
pick_chain = RunnablePick("key1")
# 2. 提取多个键
pick_chain = RunnablePick(["key1", "key2"])
运行流程:
- 输入被传递给
RunnablePick - 如果是单个键,提取该键的值
- 如果是多个键,提取这些键的值组成的字典
- 输出是提取的值或字典
示例:
from langchain_core.runnables import RunnableParallel, RunnablePick, RunnableLambda
# 创建一个复杂输入
input_data = {
"user": {
"name": "Alice",
"age": 30,
"email": "alice@example.com"
},
"product": {
"name": "Laptop",
"price": 1200
}
}
# 提取用户信息
user_info = RunnablePick("user") | RunnablePick(["name", "age"])
# 提取产品信息
product_info = RunnablePick("product") | RunnablePick(["name", "price"])
# 创建并行链(RunnableParallel 才会并行执行并合并结果;| 是顺序执行)
parallel_chain = RunnableParallel(user=user_info, product=product_info)
# 执行
result = parallel_chain.invoke(input_data)
# 输出: {'user': {'name': 'Alice', 'age': 30}, 'product': {'name': 'Laptop', 'price': 1200}}

被折叠的 条评论
为什么被折叠?



