-
chatglm3 工具调用流程如下
1.客户端将所有的工具信息放入tools字段,内容字段放自然提示语言
2.apiserver 收到请求后会根据自然语言和tools 工具生成对应的函数名和参数
3.客户端调用函数并获得执行结果(这一步也可以在apiserver做)
4.客户端根据函数执行结果重新调用chatglm 生成总结语言(如果apiserver 做直接再次调用chatglm函数)
5.chatglm 返回总结性语言
具体实现 tool_register.py:
import importlib
import inspect
import os
import subprocess
import traceback
from copy import deepcopy
from pprint import pformat
from types import GenericAlias
from typing import get_origin, Annotated
_TOOL_HOOKS = {}
_TOOL_DESCRIPTIONS = {}
def register_tool(func: callable):
tool_name = func.__name__
tool_description = inspect.getdoc(func).strip()
python_params = inspect.signature(func).parameters
tool_params = []
for name, param in python_params.items():
annotation = param.annotation
if annotation is inspect.Parameter.empty:
raise TypeError(f"Parameter `{name}` missing type annotation")
if get_origin(annotation) != Annotated:
raise TypeError(f"Annotation type for `{name}` must be typing.Annotated")
typ, (description, required) = annotation.__origin__, annotation.__metadata__
typ: str = str(typ) if isinstance(typ, GenericAlias) else typ.__name__
if not isinstance(description, str):
raise TypeError(f"Description for `{name}` must be a string")
if not isinstance(required, bool):
raise TypeError(f"Required for `{name}` must be a bool")
tool_params.append({
"name": name,
"description": description,
"type": typ,
"required": required
})
tool_def = {
"name": tool_name,
"description": tool_description,
"parameters": tool_params
}
_TOOL_HOOKS[tool_name] = func
_TOOL_DESCRIPTIONS[tool_name] = tool_def
return func
def dispatch_tool(tool_name: str, tool_params: dict) -> str:
    """Invoke the registered tool *tool_name* with keyword args *tool_params*.

    Always returns a string: the tool's result on success, a "not found"
    message for unknown tools, or the traceback text on failure — so the
    model receives something useful instead of the server crashing.
    """
    if tool_name not in _TOOL_HOOKS:
        return f"Tool `{tool_name}` not found. Please use a provided tool."
    tool_call = _TOOL_HOOKS[tool_name]
    try:
        ret = tool_call(**tool_params)
    # fix: a bare `except:` also swallowed SystemExit/KeyboardInterrupt;
    # Exception is the widest class that should be reported to the model.
    except Exception:
        ret = traceback.format_exc()
    return str(ret)
def get_tools() -> dict:
    """Return a deep copy of every registered tool description.

    Copying protects the registry from accidental mutation by callers.
    """
    return {name: deepcopy(spec) for name, spec in _TOOL_DESCRIPTIONS.items()}
def get_function_tools() -> list:
    """Return registered tools in the OpenAI function-calling ``tools`` format.

    Each entry has the shape ``{"type": "function", "function": {...}}`` with a
    JSON-Schema-style ``parameters`` object derived from the registry entries.
    """
    tools = []
    for tool_name, tool_description in _TOOL_DESCRIPTIONS.items():
        params = tool_description["parameters"]
        tools.append({
            "type": "function",
            "function": {
                "name": tool_name,
                "description": tool_description["description"],
                "parameters": {
                    "type": "object",
                    "properties": {
                        param["name"]: {
                            "type": param["type"],
                            "description": param["description"],
                        }
                        for param in params
                    },
                    # fix: the original put EVERY parameter in `required`,
                    # ignoring the per-parameter `required` flag recorded
                    # by register_tool.
                    "required": [p["name"] for p in params if p["required"]],
                },
            },
        })
    return tools
# Tool Definitions
@register_tool
def random_number_generator(
        seed: Annotated[int, 'The random seed used by the generator', True],
        range: Annotated[tuple[int, int], 'The range of the generated numbers', True],
) -> int:
    """
    Generates a random number x, s.t. range[0] <= x < range[1]
    """
    if not isinstance(seed, int):
        raise TypeError("Seed must be an integer")
    if not isinstance(range, tuple):
        raise TypeError("Range must be a tuple")
    if not isinstance(range[0], int) or not isinstance(range[1], int):
        raise TypeError("Range must be a tuple of integers")
    import random
    # fix: randint() is inclusive of the upper bound, contradicting the
    # documented half-open contract range[0] <= x < range[1]; randrange()
    # matches the docstring exactly.
    return random.Random(seed).randrange(*range)
@register_tool
def get_weather(
        city_name: Annotated[str, 'The name of the city to be queried', True],
) -> str:
    """
    Get the current weather for `city_name`
    """
    if not isinstance(city_name, str):
        raise TypeError("City name must be a string")
    # Only these fields from the wttr.in JSON payload are surfaced to the model.
    key_selection = {
        "current_condition": ["temp_C", "FeelsLikeC", "humidity", "weatherDesc", "observation_time"],
    }
    import requests
    try:
        # fix: added a timeout so a stalled HTTP request cannot hang the
        # tool call (and with it the whole chat turn) indefinitely.
        resp = requests.get(f"https://wttr.in/{city_name}?format=j1", timeout=10)
        resp.raise_for_status()
        payload = resp.json()
        ret = {k: {field: payload[k][0][field] for field in v} for k, v in key_selection.items()}
    # fix: the bare `except:` also swallowed SystemExit/KeyboardInterrupt.
    except Exception:
        import traceback
        ret = "Error encountered while fetching weather data!\n" + traceback.format_exc()
    return str(ret)
@register_tool
def get_shell(
        query: Annotated[str, 'The command should run in Linux shell', True],
) -> str:
    """
    Use shell to run command
    """
    if not isinstance(query, str):
        raise TypeError("Command must be a string")
    # SECURITY: this executes a model-supplied string with shell=True, i.e.
    # arbitrary command execution by design. Only expose this tool in a
    # sandboxed environment or behind a command whitelist.
    try:
        # capture_output=True is the idiomatic spelling of
        # stdout=PIPE, stderr=PIPE.
        result = subprocess.run(query, shell=True, check=True,
                                capture_output=True, text=True)
        return result.stdout
    except subprocess.CalledProcessError as e:
        # On a non-zero exit, surface the command's own error text.
        return e.stderr
if __name__ == "__main__":
    # Smoke test: dump every registered tool description.
    print(get_tools())
具体实现 openai_api_request.py:
"""
This script is an example of using the OpenAI API to create various interactions with a ChatGLM3 model.
It includes functions to:
1. Conduct a basic chat session, asking about weather conditions in multiple cities.
2. Initiate a simple chat in Chinese, asking the model to tell a short story.
3. Retrieve and print embeddings for a given text input.
Each function demonstrates a different aspect of the API's capabilities, showcasing how to make requests
and handle responses.
"""
import sys
import json
from openai import OpenAI
from tool_register import get_tools, dispatch_tool
# Address of the locally deployed ChatGLM3 OpenAI-compatible apiserver.
base_url = "http://192.168.1.152:8000/v1/"
# The local server does not validate credentials, hence the placeholder key.
client = OpenAI(api_key="EMPTY", base_url=base_url)
def function_chat(query: str):
    """Run one round of tool-assisted chat against the ChatGLM3 apiserver.

    Sends *query* together with the tool schemas. If the model replies with a
    function call, executes it locally via ``dispatch_tool`` and sends the
    result back so the model can produce a final summarized answer; otherwise
    prints the model's direct reply.
    """
    messages = [{"role": "user", "content": query}]
    tools = [
        {
            "type": "function",
            "function": {
                "name": "get_weather",
                "description": "Get the current weather in a given city_name",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "city_name": {
                            "type": "string",
                            "description": "The name of the city to be queried",
                        }
                    },
                    "required": ["city_name"],
                },
            },
        },
        {
            "type": "function",
            "function": {
                "name": "md5",
                "description": "encodestr the string to md5",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "encodestr": {
                            "type": "string",
                            "description": "The string is need md5",
                        }
                    },
                    "required": ["encodestr"],
                },
            },
        },
        {
            "type": "function",
            "function": {
                "name": "NmapIp",
                "description": "scan ip result",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "ip": {
                            "type": "string",
                            "description": "to scan the ip",
                        }
                    },
                    "required": ["ip"],
                },
            },
        }
    ]
    response = client.chat.completions.create(
        model="chatglm3-6b",
        messages=messages,
        tools=tools,
        tool_choice="auto",
    )
    # fix: this label misleadingly said "response2:" for the FIRST response.
    print("first response:")
    print(response)
    # NOTE(review): the ChatGLM3 apiserver appears to report tool calls via
    # the legacy `function_call` field rather than the newer `tool_calls`
    # list — confirm against the server implementation before migrating.
    if response.choices[0].message.function_call:
        function_call = response.choices[0].message.function_call
        print(f"Function Call Response: {function_call.model_dump()}")
        function_args = json.loads(function_call.arguments)
        tool_response = dispatch_tool(function_call.name, function_args)
        print(f"Tool Call Response: {tool_response}")
        # Echo the assistant turn back, then append the tool result so the
        # model can summarize it.
        messages.append(response.choices[0].message)
        messages.append(
            {
                "role": "function",
                "name": function_call.name,
                "content": tool_response,  # result of the local tool execution
            }
        )
        response = client.chat.completions.create(
            model="chatglm3-6b",
            messages=messages,
            tools=tools,
            tool_choice="auto",
        )
        # fix: label was misspelled and duplicated ("respones2").
        print("second response:")
        print(response.choices[0].message)
    else:
        reply = response.choices[0].message.content
        print(f"Final Reply: \n{reply}")
def simple_chat(use_stream=True):
    """Ask the model for a short story in Chinese.

    Streams the reply chunk by chunk when *use_stream* is True; otherwise
    prints the complete message at once.
    """
    messages = [
        {
            "role": "system",
            "content": "You are ChatGLM3, a large language model trained by Zhipu.AI. Follow the user's "
                       "instructions carefully. Respond using markdown.",
        },
        {
            "role": "user",
            "content": "你好,请你用生动的话语给我讲一个小故事吧"
        }
    ]
    response = client.chat.completions.create(
        model="chatglm3-6b",
        messages=messages,
        stream=use_stream,
        max_tokens=256,
        temperature=0.8,
        presence_penalty=1.1,
        top_p=0.8)
    if response:
        if use_stream:
            for chunk in response:
                # fix: the terminal stream chunk carries delta.content=None,
                # which the original printed as the literal string "None".
                content = chunk.choices[0].delta.content
                if content is not None:
                    print(content)
        else:
            content = response.choices[0].message.content
            print(content)
    else:
        # fix: ChatCompletion objects have no `.status_code`; the original
        # error branch would itself raise AttributeError.
        print("Error: empty response from server")
def embedding():
    """Request an embedding for a short Chinese prompt and print its dimension."""
    result = client.embeddings.create(
        model="bge-large-zh-1.5",
        input=["你好,给我讲一个故事,大概100字"],
    )
    vector = result.data[0].embedding
    print("嵌入完成,维度:", len(vector))
if __name__ == "__main__":
    # Usage: python openai_api_request.py <natural-language query>
    # fix: the original indexed sys.argv[1] unconditionally and crashed with
    # IndexError when run without arguments; also join all args so an
    # unquoted multi-word query still works.
    if len(sys.argv) < 2:
        print("Usage: python openai_api_request.py <query>")
        sys.exit(1)
    function_chat(" ".join(sys.argv[1:]))
运行示例(查询作为命令行参数传入):python openai_api_request.py 南京今天天气是什么
6万+

被折叠的 条评论
为什么被折叠?



