messages = [{'role': 'system', 'content': 'You are a helpful assistant.'}]

# You can customize the number of conversation rounds; here it is 3
for i in range(3):
    user_input = input("Please enter: ")
    # Append the user's question to the messages list
    messages.append({'role': 'user', 'content': user_input})
    assistant_output = call(messages).choices[0].message.content
    # Append the model's reply to the messages list
    messages.append({'role': 'assistant', 'content': assistant_output})
    print(f'User input: {user_input}')
    print(f'Model output: {assistant_output}')
    print('\n')
II. Using Ollama to call locally deployed models
1. Install openai
pip install openai
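Before the client below can reach a model, the Ollama server must be running locally and the model you intend to use must already be downloaded. Assuming a standard Ollama installation, this can be done from the command line (shown here as an assumption, not part of the original steps):

ollama pull gemma:2b
ollama serve    # only needed if the Ollama service is not already running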
2. Create the client
from openai import OpenAI
import os
client = OpenAI(
    # Ollama does not validate API keys, so any placeholder string works here
    api_key='ollama',
    # base_url points to the local Ollama server's OpenAI-compatible endpoint
    base_url="http://localhost:11434/v1",
)
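As an optional sanity check (an addition, not part of the original steps), you can ask the local Ollama server which models it exposes through its OpenAI-compatible API; the model you plan to use should appear in the output:

# Optional: confirm the connection and list the models available locally
for m in client.models.list():
    print(m.id)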
3. Define the interaction function
# model = 'deepseek-r1:7b'
model = 'gemma:2b'

def generate_responses(prompt):
    response = client.chat.completions.create(
        model=model,
        messages=[
            # {"role": "system", "content": "You are a helpful assistant who provides information to users."},
            {"role": "user", "content": prompt},
        ],
        temperature=0.7,
        top_p=0.8,
    )
    return response.choices[0].message.content
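A minimal usage example (the prompt string is illustrative only):

if __name__ == '__main__':
    answer = generate_responses("Briefly explain what Ollama is.")
    print(answer)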