1. Environment setup
conda create -n lmdeploy python=3.10 -y
conda activate lmdeploy
conda install pytorch==2.1.2 torchvision==0.16.2 torchaudio==2.1.2 pytorch-cuda=12.1 -c pytorch -c nvidia -y
pip install timm==1.0.8 openai==1.40.3 'lmdeploy[all]==0.5.3'
pip install datasets==2.19.2
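To verify the installation, a quick sanity check can be run in Python (a minimal sketch; the expected values simply mirror the version pins above):
# Sanity check: confirm the pinned packages import and the GPU is visible.
import torch
import lmdeploy

print(torch.__version__)           # expect 2.1.2
print(torch.cuda.is_available())   # expect True with the CUDA 12.1 build
print(lmdeploy.__version__)        # expect 0.5.3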
2. Quantize internlm2_5-1_8b-chat with W4A16 (AWQ)
Run lmdeploy's auto_awq to quantize the weights to 4 bits with a group size of 128, calibrating on 128 samples of sequence length 2048 from the 'ptb' dataset:
lmdeploy lite auto_awq \
/root/models/internlm2_5-1_8b-chat \
--calib-dataset 'ptb' \
--calib-samples 128 \
--calib-seqlen 2048 \
--w-bits 4 \
--w-group-size 128 \
--batch-size 1 \
--search-scale False \
--work-dir /root/models/internlm2_5-1_8b-chat-w4a16-4bit
Quantization result (the 1.8B model takes about 45 minutes)
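Before serving, the quantized weights can be smoke-tested offline with lmdeploy's Python pipeline API (a minimal sketch; the prompt text is illustrative):
# Minimal sketch: load the W4A16 model with the TurboMind backend and run one prompt.
from lmdeploy import pipeline, TurbomindEngineConfig

pipe = pipeline(
    '/root/models/internlm2_5-1_8b-chat-w4a16-4bit',
    backend_config=TurbomindEngineConfig(model_format='awq'),
)
responses = pipe(['Hello, please introduce yourself.'])
print(responses[0].text)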
3. Enable KV cache quantization and serve the quantized model
Here --quant-policy 4 turns on online int4 quantization of the KV cache, and --cache-max-entry-count 0.4 caps the KV cache at 40% of free GPU memory:
conda activate lmdeploy
lmdeploy serve api_server \
/root/models/internlm2_5-1_8b-chat-w4a16-4bit \
--model-format awq \
--quant-policy 4 \
--cache-max-entry-count 0.4 \
--server-name 0.0.0.0 \
--server-port 23333 \
--tp 1
GPU memory usage after the server starts
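Because api_server exposes an OpenAI-compatible API under /v1, the service can also be called from Python with the openai package installed in step 1 (a minimal sketch; the api_key string is a placeholder, since the server above runs without authentication):
# Minimal sketch: query the OpenAI-compatible endpoint on port 23333.
from openai import OpenAI

client = OpenAI(api_key='YOUR_API_KEY', base_url='http://localhost:23333/v1')
model_name = client.models.list().data[0].id  # name of the served model
response = client.chat.completions.create(
    model=model_name,
    messages=[{'role': 'user', 'content': 'Hello!'}],
)
print(response.choices[0].message.content)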
Command-line interaction
conda activate lmdeploy
lmdeploy serve api_client http://localhost:23333
Local Gradio interaction
lmdeploy serve gradio http://localhost:23333 \
--server-name 0.0.0.0 \
--server-port 6006