How to write a Python program that calls a service started by vLLM Server


Command to start the vLLM server

#!/bin/bash

export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
export NCCL_P2P_DISABLE=0  # the L20 supports P2P, keep it enabled
export NCCL_IB_DISABLE=1   # disable InfiniBand (if the host has none)

python -m vllm.entrypoints.openai.api_server \
--model Qwen/Qwen3-Next-80B-A3B-Instruct \
--served-model-name Qwen3-Next-80B-A3B-Instruct \
--host 0.0.0.0 \
--port 8000 \
--tensor-parallel-size 8 \
--pipeline-parallel-size 1 \
--gpu-memory-utilization 0.90 \
--max-model-len 24576 \
--max-num-seqs 128 \
--max-num-batched-tokens 16384 \
--trust-remote-code \
--disable-log-requests \
--dtype bfloat16 \
--enable-prefix-caching \
--distributed-executor-backend ray \
--disable-custom-all-reduce  # the L20 may not support vLLM's custom all-reduce
# Note: --use-v2-block-manager, --worker-use-ray and --engine-use-ray were
# deprecated and have been removed in recent vLLM releases; the v2 block
# manager is now the default, and Ray-based execution is selected with
# --distributed-executor-backend ray.
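
Once the server is up, it is worth a quick sanity check before writing any client code. Below is a minimal sketch that queries the OpenAI-compatible /v1/models endpoint using only the Python standard library; it assumes the host/port configured above (localhost:8000):

import json
import urllib.request

# GET the model list; a 200 response that contains your --served-model-name
# means the server is ready to accept requests.
with urllib.request.urlopen("http://localhost:8000/v1/models") as resp:
    data = json.load(resp)
print([m["id"] for m in data["data"]])  # e.g. ['Qwen3-Next-80B-A3B-Instruct']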

Python code to call the server

#!/usr/bin/env python3
"""
Python program that calls a vLLM server directly.
Uses the OpenAI client library against the local OpenAI-compatible endpoint.
"""

from openai import OpenAI
from typing import List, Dict

# vLLM server configuration
VLLM_BASE_URL = "http://localhost:8000/v1"  # OpenAI-compatible API address of the vLLM server
MODEL_NAME = "Qwen3-Next-80B-A3B-Instruct"  # must match the --served-model-name used at startup

# Create an OpenAI client pointed at the local vLLM server
client = OpenAI(
    base_url=VLLM_BASE_URL,
    api_key="fake-key"  # vLLM does not check the key, but the OpenAI client requires a value
)

def simple_chat(prompt: str, system_message: str = "You are a helpful AI assistant.") -> str:
    """Simple one-shot chat helper."""
    try:
        response = client.chat.completions.create(
            model=MODEL_NAME,
            messages=[
                {"role": "system", "content": system_message},
                {"role": "user", "content": prompt}
            ],
            max_tokens=2048,
            temperature=0.7,
            top_p=0.9
        )
        return response.choices[0].message.content
    except Exception as e:
        return f"Error: {e}"

def chat_with_history(messages: List[Dict[str, str]], **kwargs) -> str:
    """Chat with a full message history."""
    try:
        response = client.chat.completions.create(
            model=MODEL_NAME,
            messages=messages,
            max_tokens=kwargs.get('max_tokens', 2048),
            temperature=kwargs.get('temperature', 0.7),
            top_p=kwargs.get('top_p', 0.9),
            stream=False
        )
        return response.choices[0].message.content
    except Exception as e:
        return f"Error: {e}"

def stream_chat(prompt: str, system_message: str = "You are a helpful AI assistant."):
    """Streaming chat; prints tokens as they arrive."""
    try:
        messages = [
            {"role": "system", "content": system_message},
            {"role": "user", "content": prompt}
        ]
        
        stream = client.chat.completions.create(
            model=MODEL_NAME,
            messages=messages,
            max_tokens=2048,
            temperature=0.7,
            stream=True  # enable streaming responses
        )
        
        print("AI reply: ", end="", flush=True)
        full_response = ""
        for chunk in stream:
            if chunk.choices and chunk.choices[0].delta.content:
                content = chunk.choices[0].delta.content
                print(content, end="", flush=True)
                full_response += content
        print("\n")
        return full_response
        
    except Exception as e:
        print(f"Streaming chat error: {e}")
        return ""

def text_completion(prompt: str, **kwargs) -> str:
    """Text completion (non-chat) helper."""
    try:
        response = client.completions.create(
            model=MODEL_NAME,
            prompt=prompt,
            max_tokens=kwargs.get('max_tokens', 1024),
            temperature=kwargs.get('temperature', 0.7),
            top_p=kwargs.get('top_p', 0.9)
        )
        return response.choices[0].text
    except Exception as e:
        return f"Error: {e}"

def get_models():
    """Return the list of models served by the server."""
    try:
        models = client.models.list()
        return [model.id for model in models.data]
    except Exception as e:
        print(f"Error fetching model list: {e}")
        return []

def interactive_chat():
    """Interactive chat loop."""
    print("=== vLLM Interactive Chat ===")
    print("Type 'exit' or 'quit' to leave")
    print("Type 'clear' to clear the history")
    print("Type 'stream' to toggle streaming mode")
    print("-" * 40)
    
    history = []
    stream_mode = False
    
    while True:
        try:
            user_input = input("\nYou: ").strip()
            
            if user_input.lower() in ['exit', 'quit']:
                print("Goodbye!")
                break
            elif user_input.lower() == 'clear':
                history = []
                print("History cleared")
                continue
            elif user_input.lower() == 'stream':
                stream_mode = not stream_mode
                print(f"Streaming mode: {'on' if stream_mode else 'off'}")
                continue
            elif not user_input:
                continue
            
            # Append the user message to the history
            history.append({"role": "user", "content": user_input})
            
            if stream_mode:
                # Streaming response
                response = stream_chat_with_history(history)
            else:
                # Regular response
                response = chat_with_history(history)
                print(f"\nAI: {response}")
            
            # Append the AI reply to the history
            history.append({"role": "assistant", "content": response})
            
            # Cap the history length so the context does not grow too long
            if len(history) > 20:
                history = history[-20:]
                
        except KeyboardInterrupt:
            print("\n\nGoodbye!")
            break
        except Exception as e:
            print(f"\nError: {e}")

def stream_chat_with_history(messages: List[Dict[str, str]]) -> str:
    """Streaming chat with a full message history."""
    try:
        stream = client.chat.completions.create(
            model=MODEL_NAME,
            messages=messages,
            max_tokens=2048,
            temperature=0.7,
            stream=True
        )
        
        print("\nAI: ", end="", flush=True)
        full_response = ""
        for chunk in stream:
            if chunk.choices and chunk.choices[0].delta.content:
                content = chunk.choices[0].delta.content
                print(content, end="", flush=True)
                full_response += content
        print()
        return full_response
        
    except Exception as e:
        print(f"Streaming chat error: {e}")
        return ""

def main():
    """Main entry point: demonstrates the different call styles."""
    print("=== vLLM Python Client Test ===")
    
    # 1. Check the server connection
    print("1. Checking server connection...")
    models = get_models()
    if models:
        print(f"✓ Connected! Available models: {models}")
    else:
        print("✗ Connection failed; check that the vLLM server is running")
        return
    
    # 2. Simple chat test
    print("\n2. Simple chat test...")
    response = simple_chat("Give a brief introduction to the Python programming language")
    print(f"Reply: {response}")
    
    # 3. Streaming chat test
    print("\n3. Streaming chat test...")
    stream_chat("Please write a quicksort algorithm in Python")
    
    # 4. Text completion test
    print("\n4. Text completion test...")
    completion = text_completion("def fibonacci(n):")
    print(f"Completion: {completion}")
    
    # 5. Chat with custom parameters
    print("\n5. Chat with custom parameters...")
    messages = [
        {"role": "system", "content": "You are a professional Python teacher."},
        {"role": "user", "content": "Please explain the concept of decorators in Python"}
    ]
    response = chat_with_history(messages, temperature=0.3, max_tokens=1000)
    print(f"Reply: {response}")
    
    # 6. Start the interactive chat
    print("\n6. Starting interactive chat...")
    interactive_chat()

if __name__ == "__main__":
    # Call main() to run the full demo
    main()

    # Or call individual functions directly:
    # print(simple_chat("Hello!"))
    # stream_chat("Please give a detailed introduction to machine learning")
    # interactive_chat()
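
The script above sends requests one at a time. vLLM batches concurrent requests on the server side (continuous batching), so issuing several requests at once is usually much faster than calling them sequentially. Below is a minimal sketch using the async client from the same openai package; the base URL and model name are assumed to match the server configuration above:

import asyncio
from openai import AsyncOpenAI

async_client = AsyncOpenAI(base_url="http://localhost:8000/v1", api_key="fake-key")

async def ask(prompt: str) -> str:
    """Send one chat request through the async client."""
    response = await async_client.chat.completions.create(
        model="Qwen3-Next-80B-A3B-Instruct",
        messages=[{"role": "user", "content": prompt}],
        max_tokens=256,
    )
    return response.choices[0].message.content

async def run_batch():
    prompts = ["What is vLLM?", "Explain tensor parallelism in one sentence."]
    # asyncio.gather fires all requests concurrently; the server schedules
    # them into the same batch, so total latency stays far below the
    # sequential sum.
    answers = await asyncio.gather(*(ask(p) for p in prompts))
    for prompt, answer in zip(prompts, answers):
        print(f"Q: {prompt}\nA: {answer}\n")

asyncio.run(run_batch())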
