1. Download the model from Hugging Face, for example as sketched below.

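A minimal download sketch using the huggingface_hub Python API (the repo ID Qwen/Qwen2-7B and the use of snapshot_download are assumptions; install the client first with pip install huggingface_hub):

# Download the full model snapshot from Hugging Face into the target directory.
# The repo ID Qwen/Qwen2-7B is an assumption; substitute the exact model you need.
from huggingface_hub import snapshot_download
snapshot_download(repo_id="Qwen/Qwen2-7B", local_dir="/usr/local/Qwen2-7B")
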
2. Place the downloaded model files under the /usr/local/Qwen2-7B directory.

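After copying, the directory should contain the usual Hugging Face files, typically config.json, generation_config.json, the tokenizer files (tokenizer.json, tokenizer_config.json, vocab.json, merges.txt), and the model-*.safetensors shards with model.safetensors.index.json; from_pretrained in the script below loads the model from this path.
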
3. Create a virtual environment and install the dependencies
1. Install the base packages
sudo yum update -y
sudo yum install -y python3 python3-pip git
2. Create and activate the virtual environment
python3 -m venv qwen2_env
source qwen2_env/bin/activate
pip install numpy transformers torch langchain sentence-transformers jieba requests streamlit
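An optional sanity check (a minimal sketch; it assumes only the packages installed above) that the core libraries import and a GPU is visible to PyTorch:

# Quick check that the key packages import and CUDA is usable.
import torch
import transformers
print("torch:", torch.__version__, "| CUDA available:", torch.cuda.is_available())
print("transformers:", transformers.__version__)
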
3. Place the following diet_ui.py file in the root directory
import streamlit as st
import requests
import re
import logging
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
from langchain.memory import ConversationBufferWindowMemory
from langchain.chains import LLMChain
from langchain.llms import HuggingFacePipeline
from langchain.prompts import PromptTemplate
from transformers import pipeline
from sentence_transformers import SentenceTransformer, models
import numpy as np
import jieba
# Configure logging
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')
# Load the Qwen2 model and tokenizer
try:
    tokenizer = AutoTokenizer.from_pretrained("/usr/local/Qwen2-7B", trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained("/usr/local/Qwen2-7B", device_map="auto", trust_remote_code=True).eval()
    logging.info("Tokenizer and model loaded successfully")
except Exception as e:
    logging.error(f"Error while loading the tokenizer or model: {e}")
    raise
# Company business API endpoint
PORTFOLIO_API_URL = "https://xxxxx/plugin/portfolio"
# Expanded keyword list (meal plans, recipes, calories, menus, etc.)
KEYWORDS = ["方案", "配餐", "计划", "饮食安排", "食谱", "卡路里", "餐单", "膳食"]
# Allowed topics (health, nutrition, diet, orders, coupons, etc.), plus the meal-plan keywords
ALLOWED_TOPICS = ["健康", "营养", "饮食", "订单", "优惠券", "吃饭", "运动", "养生", "客服"] + KEYWORDS
# Initialize conversation memory, keeping at most the last 10 turns
memory = ConversationBufferWindowMemory(k=10)
# Create the HuggingFacePipeline
pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=500,
    do_sample=True,
    top_p=0.85,
    temperature=0.35
)
llm = HuggingFacePipeline(pipeline=pipe)
# Create a simple prompt template
prompt_template = PromptTemplate(
    # The original article is truncated at this point; the variables and
    # template below are an assumed minimal completion for use with the
    # window memory above (memory_key "history") and an LLMChain.
    input_variables=["history", "input"],
    template="You are a diet-planning assistant.\n{history}\nUser: {input}\nAssistant:"
)

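4. Start the app
With the virtual environment active, a Streamlit script like this one is launched with:
streamlit run diet_ui.py
By default Streamlit serves the UI on port 8501.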