Question 1 of the hands-on exercise in section 2.4.6:

- Extend message attributes: create a user message and add several metadata entries to its meta_dict (for example, user preferences or language settings). Pass the message to a ChatAgent and observe whether the system's answers change.

My problem is that I don't know where this metadata is supposed to be added.

At first I passed it in here: extend_sys_msg_meta_dicts=extend_sys_msg_meta_dicts
import os

from camel.models import ModelFactory
from camel.societies import RolePlaying
from camel.types import ModelPlatformType

# Get the API key
api_key = os.getenv('QWEN_API_KEY')

# Create the model backend
model = ModelFactory.create(
    model_platform=ModelPlatformType.OPENAI_COMPATIBLE_MODEL,
    model_type="Qwen/Qwen2.5-72B-Instruct",
    url='https://api-inference.modelscope.cn/v1/',
    api_key=api_key,
)

# Metadata for the assistant
assistant_meta_dict = {
    "assistant_role": "professor",
    "role": "teacher",
    "task": "explain calculus",
}

# Metadata for the user
user_meta_dict = {
    "user_role": "graduate_student",
    "role": "learner",
}

def main(model=model, chat_turn_limit=50) -> None:
    task_prompt = "泰勒级数表达"  # the task goal
    extend_sys_msg_meta_dicts = [assistant_meta_dict, user_meta_dict]
    role_play_session = RolePlaying(
        assistant_role_name="微积分数学专家",  # AI assistant role name
        assistant_agent_kwargs=dict(model=model),
        user_role_name="研一学生",  # user role name
        user_agent_kwargs=dict(model=model),
        task_prompt=task_prompt,
        with_task_specify=True,
        task_specify_agent_kwargs=dict(model=model),
        extend_sys_msg_meta_dicts=extend_sys_msg_meta_dicts,
        output_language='Chinese',  # output language
    )
Why did I think that? Because extend_sys_msg_meta_dicts is one of the arguments to RolePlaying's initialization, and camel.societies.role_playing.RolePlaying.__init__ contains this snippet:
(
    init_assistant_sys_msg,
    init_user_sys_msg,
    sys_msg_meta_dicts,
) = self._get_sys_message_info(
    assistant_role_name,
    user_role_name,
    sys_msg_generator,
    extend_sys_msg_meta_dicts=extend_sys_msg_meta_dicts,
)
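From reading the source, my understanding is that this processing boils down to merging each base meta dict (which carries the task prompt) with the caller-supplied extend dict at the same position, before the two system prompts are rendered. A simplified sketch of that merge as I understand it, not the verbatim library code:

# Simplified sketch: one base dict per agent (assistant first, then user),
# each updated with the extend dict supplied at the same index.
sys_msg_meta_dicts = [dict(task=task_prompt) for _ in range(2)]
if extend_sys_msg_meta_dicts is not None:
    sys_msg_meta_dicts = [
        {**base, **extend}
        for base, extend in zip(sys_msg_meta_dicts, extend_sys_msg_meta_dicts)
    ]

So with my assistant_meta_dict/user_meta_dict above, the user-side system message's meta dict should end up containing task, user_role and role.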
That is, camel.societies.role_playing.RolePlaying._get_sys_message_info processes extend_sys_msg_meta_dicts and returns: return init_assistant_sys_msg, init_user_sys_msg, sys_msg_meta_dicts. Among those return values, init_user_sys_msg is then used when the agents are initialized in camel.societies.role_playing.RolePlaying._init_agents:
self._init_agents(
    init_assistant_sys_msg,
    init_user_sys_msg,
    assistant_agent_kwargs=assistant_agent_kwargs,
    user_agent_kwargs=user_agent_kwargs,
    output_language=output_language,
)
In this way, init_user_sys_msg gets passed down through these calls to camel.messages.base.BaseMessage.make_assistant_message, so the meta_dict attribute naturally ends up filled in.
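If that chain holds, the user-side system message should come out looking roughly like the following. This is only a hypothetical illustration of the result; the actual content string is rendered from the role-playing prompt template:

from camel.messages import BaseMessage
from camel.types import RoleType

# Hypothetical shape of the generated user-side system message:
# meta_dict holds the merged task + user metadata from above.
init_user_sys_msg = BaseMessage(
    role_name="研一学生",
    role_type=RoleType.USER,
    meta_dict={
        "task": "泰勒级数表达",
        "user_role": "graduate_student",
        "role": "learner",
    },
    content="...",  # filled in from the rendered system prompt
)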
But now my doubt is about camel.agents.chat_agent.ChatAgent. When user_agent is initialized here:
self.assistant_agent = ChatAgent(
    init_assistant_sys_msg,
    output_language=output_language,
    **(assistant_agent_kwargs or {}),
)
self.assistant_sys_msg = self.assistant_agent.system_message

self.user_agent = ChatAgent(
    init_user_sys_msg,
    output_language=output_language,
    **(user_agent_kwargs or {}),
)
self.user_sys_msg = self.user_agent.system_message
the __init__ of the ChatAgent class contains no call that creates a user message, i.e. BaseMessage.make_user_message never appears:
def __init__(
    self,
    system_message: Optional[Union[BaseMessage, str]] = None,
    model: Optional[
        Union[BaseModelBackend, List[BaseModelBackend]]
    ] = None,
    memory: Optional[AgentMemory] = None,
    message_window_size: Optional[int] = None,
    token_limit: Optional[int] = None,
    output_language: Optional[str] = None,
    tools: Optional[List[Union[FunctionTool, Callable]]] = None,
    external_tools: Optional[List[Union[FunctionTool, Callable]]] = None,
    response_terminators: Optional[List[ResponseTerminator]] = None,
    scheduling_strategy: str = "round_robin",
    single_iteration: bool = False,  # ultimately controls single_step in _handle_step
) -> None:
    # Initialize the system message, converting string to BaseMessage if needed
    if isinstance(system_message, str):
        system_message = BaseMessage.make_assistant_message(
            role_name='Assistant', content=system_message
        )
    self.orig_sys_message: Optional[BaseMessage] = system_message
    self._system_message: Optional[BaseMessage] = system_message
    self.role_name: str = (
        getattr(system_message, 'role_name', None) or "assistant"
    )
    self.role_type: RoleType = (
        getattr(system_message, 'role_type', None) or RoleType.ASSISTANT
    )
    self.model_backend = ModelManager(
        model
        if model is not None
        else ModelFactory.create(
            model_platform=ModelPlatformType.DEFAULT,
            model_type=ModelType.DEFAULT,
        ),
        scheduling_strategy=scheduling_strategy,
    )
    self.model_type = self.model_backend.model_type
    # Initialize tools
    self.tools: List[FunctionTool] = (
        self._initialize_tools(tools) if tools else []
    )
    self.external_tools: List[FunctionTool] = (
        self._initialize_tools(external_tools) if external_tools else []
    )
    self.external_tool_names: List[str] = [
        tool.get_function_name() for tool in self.external_tools
    ]
    self.all_tools = self.tools + self.external_tools or []
    # Create tool dictionaries and configure backend tools if necessary
    self.tool_dict = {
        tool.get_function_name(): tool for tool in self.all_tools
    }
    # If the user set tools from `ChatAgent`, it will override the
    # configured tools in `BaseModelBackend`.
    if self.all_tools:
        logger.warning(
            "Overriding the configured tools in `BaseModelBackend` with the tools from `ChatAgent`."
        )
        tool_schema_list = [
            tool.get_openai_tool_schema() for tool in self.all_tools
        ]
        self.model_backend.model_config_dict['tools'] = tool_schema_list
    self.model_token_limit = token_limit or self.model_backend.token_limit
    context_creator = ScoreBasedContextCreator(
        self.model_backend.token_counter,
        self.model_token_limit,
    )
    self.memory: AgentMemory = memory or ChatHistoryMemory(
        context_creator, window_size=message_window_size
    )
    self.output_language: Optional[str] = output_language
    if self.output_language is not None:
        self.set_output_language(self.output_language)
    self.terminated: bool = False
    self.response_terminators = response_terminators or []
    self.init_messages()
    self.tool_prompt_added = False
    self.single_iteration = single_iteration
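So my current guess is that the exercise wants the meta_dict attached to the user message that is sent to the agent at step time, not to the system messages via extend_sys_msg_meta_dicts. This is what I would try next; a minimal sketch that reuses the model created above, with metadata keys that are just examples of my own:

from camel.agents import ChatAgent
from camel.messages import BaseMessage

agent = ChatAgent(
    system_message="你是一位微积分数学专家。",
    model=model,
    output_language='Chinese',
)

# Create a user message and attach metadata through meta_dict
user_msg = BaseMessage.make_user_message(
    role_name="graduate_student",
    content="请解释泰勒级数。",
    meta_dict={
        "preference": "prefers step-by-step derivations",  # example entry
        "language": "Chinese",  # example entry
    },
)

response = agent.step(user_msg)
print(response.msgs[0].content)

Is that the intended reading, or is extend_sys_msg_meta_dicts really the right place for it?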