import streamlit as st
from langchain.memory import ConversationBufferMemory
from langchain_openai import ChatOpenAI
from langchain.chains import ConversationChain
from PIL import Image
# ---- Page layout ----
st.title("AI大模型")
st.header('你的问答助手')

# Banner image; the file "大模型.jpeg" must sit next to this script.
img = Image.open("大模型.jpeg")
st.image(img, width=700)

# Persist conversation memory across Streamlit reruns: every user
# interaction re-executes the whole script, so the memory object must
# live in st.session_state to survive between runs.
if 'history' not in st.session_state:
    st.session_state['history'] = ConversationBufferMemory()

# ---- Sidebar: model sampling parameters ----
token = st.sidebar.select_slider("请输入最大token数:", options=[0, 256, 512, 1024, 2048])
temp = st.sidebar.select_slider("请输入temperature", options=[i / 10 for i in range(0, 11)])

# ---- Instantiate the chat model ----
# temperature == 0 is a valid (deterministic) setting, so only the token
# budget gates model creation. The original also required temp > 0, which
# wrongly rejected a legal value (and 0 is the slider's default, so the
# model could never be built at temperature 0).
conversation = None
if token > 0:
    chat_llm = ChatOpenAI(
        model='',            # TODO: fill in a real model name
        openai_api_key='',   # TODO: supply the API key (prefer an env var over hard-coding)
        temperature=temp,
        max_tokens=token,
        streaming=True,
    )
    conversation = ConversationChain(llm=chat_llm, memory=st.session_state['history'])
else:
    st.write("请输入大模型参数!")

# ---- Replay the stored conversation so history is visible after reruns ----
for msg in st.session_state['history'].chat_memory.messages:
    st.chat_message(msg.type).write(msg.content)

# ---- Handle new user input ----
if prompt := st.chat_input():
    st.chat_message("user").write(prompt)
    if conversation is None:
        # The original crashed here with a NameError whenever the model
        # had not been configured yet; show the hint instead.
        st.write("请输入大模型参数!")
    else:
        res = conversation.invoke(prompt)
        st.chat_message("ai").write(res["response"])
# streamlit搭建简单的大模型问答平台 (A simple LLM Q&A platform built with Streamlit)
# First published 2024-10-17 20:17:19