一、ChatGLM-6B Web 服务
from transformers import AutoModel, AutoTokenizer
import gradio as gr
# Path to the pretrained ChatGLM-6B weights; the commented-out line shows the
# Hugging Face Hub model id as an alternative to the local snapshot.
#model_name_or_path="THUDM/ChatGLM-6B"#"conf/snapshots"
model_name_or_path="../conf/snapshots"
# trust_remote_code=True is required because ChatGLM ships its own custom
# tokenizer/modeling code alongside the weights.
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
# half(): cast weights to fp16; quantize(4): 4-bit quantization to reduce GPU
# memory; cuda(): move the model to the GPU.
# NOTE(review): assumes a CUDA device is available — confirm deployment target.
model = AutoModel.from_pretrained(model_name_or_path, trust_remote_code=True).half().quantize(4).cuda()
# Inference mode: disables dropout and other training-only behavior.
model = model.eval()
# UI limits for the chat history: each turn renders two boxes
# (one user query + one model response).
MAX_TURNS = 20
MAX_BOXES = MAX_TURNS * 2
def predict(input, history=None):
if history is None:
history = []
response, history = model.chat(tokenizer, input, history)
updates = []
for query, response in history:
updates.append(gr.update(visible=True, value="用户:" + query))
updates.append(gr.update(visible=True, value="AI