Check hidden processes (ring0 PspCidTable)

This article describes a process-enumeration technique implemented as a Windows kernel-mode driver. By walking an internal system data structure, the method can obtain information about every running process, including its process ID and image name. The code below explains in detail how to traverse the handle table to locate process objects and read the required information from them.
// From http://hi.baidu.com/antirootkit
#include <ntddk.h>
#include <windef.h>
#include "ListProc.h"

/* Contents of ListProc.h

#pragma once

// Structure used to record information about one process
typedef struct PROCESS_PROC
{
    ULONG addr;                 // process address (pointer to the object body, i.e. the EPROCESS)
    int   pid;                  // process ID
    UCHAR name[16];             // process image name
    struct PROCESS_PROC *next;  // singly linked list pointer
} ProcessInfo;

#define IOCTL_GETPROC_LIST CTL_CODE(0x8000, 0x800, METHOD_BUFFERED, FILE_ANY_ACCESS)

*/
WCHAR gDeviceName[]    = L"\\Device\\proclistdrv";
WCHAR gDosDeviceName[] = L"\\??\\proclistdrv";     // user mode opens this as \\.\proclistdrv

// The _OBJECT_HEADER structure, plus the macro that converts an object (body)
// pointer into the corresponding object (header) pointer. The object body
// immediately follows the header, so the header address is simply
// body - FIELD_OFFSET(OBJECT_HEADER, Body).
typedef struct _OBJECT_HEADER {
    union {
        struct {
            LONG PointerCount;
            LONG HandleCount;
        };
        LIST_ENTRY Entry;
    };
    POBJECT_TYPE Type;
    UCHAR NameInfoOffset;
    UCHAR HandleInfoOffset;
    UCHAR QuotaInfoOffset;
    UCHAR Flags;

    union {
        //POBJECT_CREATE_INFORMATION ObjectCreateInfo;
        PVOID QuotaBlockCharged;
    };

    PSECURITY_DESCRIPTOR SecurityDescriptor;

    QUAD Body;
} OBJECT_HEADER, *POBJECT_HEADER;

#define OBJECT_TO_OBJECT_HEADER(obj) CONTAINING_RECORD( (obj), OBJECT_HEADER, Body )
// System-dependent offsets (they vary between OS builds; use WinDbg to look them up)
#define TYPE               0x08   // offset of Type within _OBJECT_HEADER
#define NEXTFREETABLEENTRY 0x04   // offset of NextFreeTableEntry within _HANDLE_TABLE_ENTRY
#define OFFSET_EPROCESS_IMAGENAME 0x0
#define OFFSET_EPROCESS_PID       0x1
#define OFFSET_EPROCESS_FLAGS     0x2
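/*
 * The exact EPROCESS offsets differ between OS builds; they can be confirmed
 * on the target system with WinDbg, for example:
 *
 *     dt nt!_EPROCESS UniqueProcessId ImageFileName Flags
 *
 * which is why GetPlantformDependentInfo() below switches on the build number
 * returned by PsGetVersion().
 */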
ProcessInfo *head,*p;

// Function declarations
NTSTATUS DriverEntry(IN PDRIVER_OBJECT DriverObject, IN PUNICODE_STRING RegistryPath);

// Create/close handlers so that user mode can open the device
NTSTATUS
ProcCreateClose (
    IN PDEVICE_OBJECT DeviceObject,
    IN PIRP Irp
    );

// Handles control requests coming from user mode
NTSTATUS
ProcDeviceControl (
    IN PDEVICE_OBJECT DeviceObject,
    IN PIRP Irp
    );

VOID Unload(IN PDRIVER_OBJECT DriverObject);
ULONG GetCidAddr();
ULONG GetProcessType();
VOID IsValidProcess();
VOID RecordInfo(ULONG i);
DWORD GetPlantformDependentInfo(DWORD eprocessflag);
// DriverEntry
NTSTATUS DriverEntry(IN PDRIVER_OBJECT DriverObject, IN PUNICODE_STRING RegistryPath)
{
    UNICODE_STRING DeviceName;
    UNICODE_STRING DosDeviceName;
    NTSTATUS Status;
    PDEVICE_OBJECT pDeviceObject = NULL;

    DriverObject->DriverUnload = Unload;

    DriverObject->MajorFunction[IRP_MJ_CREATE]         = ProcCreateClose;
    DriverObject->MajorFunction[IRP_MJ_CLOSE]          = ProcCreateClose;
    DriverObject->MajorFunction[IRP_MJ_DEVICE_CONTROL] = ProcDeviceControl;

    RtlInitUnicodeString(&DeviceName, gDeviceName);
    RtlInitUnicodeString(&DosDeviceName, gDosDeviceName);
    Status = IoCreateDevice(DriverObject, 0, &DeviceName, FILE_DEVICE_UNKNOWN, 0, FALSE, &pDeviceObject);
    if (!NT_SUCCESS(Status))
        return Status;
    pDeviceObject->Flags |= DO_BUFFERED_IO;
    Status = IoCreateSymbolicLink(&DosDeviceName, &DeviceName);
    if (!NT_SUCCESS(Status))
        DbgPrint("IoCreateSymbolicLink returned %08x\n", Status);

    IsValidProcess();
    return STATUS_SUCCESS;
}

// Unload routine: remove the symbolic link and the device object
VOID Unload(IN PDRIVER_OBJECT DriverObject)
{
    UNICODE_STRING DosDeviceName;
    RtlInitUnicodeString(&DosDeviceName, gDosDeviceName);
    IoDeleteSymbolicLink(&DosDeviceName);
    IoDeleteDevice(DriverObject->DeviceObject);
    DbgPrint("unload!\n");
}

// Obtain the address of PspCidTable by scanning PsLookupProcessByProcessId.
// On XP-era builds that routine contains the sequence
//     FF 35 xx xx xx xx   push dword ptr [PspCidTable]
//     E8 xx xx xx xx      call ...
// so the 32-bit immediate following FF 35 is the address of PspCidTable.
ULONG GetCidAddr()
{
    PUCHAR addr;
    PUCHAR p;
    UNICODE_STRING pslookup;
    ULONG cid;

    RtlInitUnicodeString(&pslookup, L"PsLookupProcessByProcessId");
    // MmGetSystemRoutineAddress resolves an exported kernel routine by name
    addr = (PUCHAR) MmGetSystemRoutineAddress(&pslookup);
    for (p = addr; p < addr + PAGE_SIZE; p++)
    {
        if ((*(PUSHORT)p == 0x35ff) && (*(p + 6) == 0xe8))
        {
            cid = *(PULONG)(p + 2);
            return cid;
        }
    }
    return 0;
}

// Obtain the process object type pointer from the current process
ULONG GetProcessType()
{
    ULONG eproc;
    ULONG type;
    // PsGetCurrentProcess returns the current EPROCESS, i.e. an object (body) pointer
    eproc = (ULONG)PsGetCurrentProcess();
    eproc = (ULONG)OBJECT_TO_OBJECT_HEADER(eproc);
    type  = *(PULONG)(eproc + TYPE);
    return type;
}

// Walk the CID table; for each valid entry, check whether it refers to a
// process object. Matching processes are recorded, everything else is skipped.
VOID IsValidProcess()
{
    ULONG PspCidTable;
    ULONG TableCode;
    ULONG table1 = 0, table2 = 0;
    ULONG object, objectheader;
    ULONG NextFreeTableEntry;
    ULONG processtype, type;
    ULONG flags;
    ULONG i;

    PspCidTable = GetCidAddr();
    processtype = GetProcessType();
    if (PspCidTable == 0)
    {
        return;
    }
    else
    {
        // On XP the low two bits of TableCode encode the number of handle table levels
        TableCode = *(PULONG)(*(PULONG)PspCidTable);
        if ((TableCode & 0x3) == 0x0)      // single-level table
        {
            table1 = TableCode;
            table2 = 0x0;
        }
        if ((TableCode & 0x3) == 0x1)      // two-level table
        {
            TableCode = TableCode & 0xfffffffc;
            table1 = *(PULONG)TableCode;
            table2 = *(PULONG)(TableCode + 0x4);
        }
        // Walk CIDs from 0x0 to 0x4e1c. A CID is a multiple of 4 and each
        // _HANDLE_TABLE_ENTRY is 8 bytes, so entry address = table + cid * 2.
        for (i = 0x0; i < 0x4e1c; i++)
        {
            if (i <= 0x800)
            {
                if (MmIsAddressValid((PULONG)(table1 + i * 2)))
                {
                    object = *(PULONG)(table1 + i * 2);
                    if (MmIsAddressValid((PULONG)(table1 + i * 2 + NEXTFREETABLEENTRY)))
                    {
                        NextFreeTableEntry = *(PULONG)(table1 + i * 2 + NEXTFREETABLEENTRY);
                        // In a _HANDLE_TABLE_ENTRY that is in use, NextFreeTableEntry is 0
                        if (NextFreeTableEntry == 0x0)
                        {
                            object = ((object | 0x80000000) & 0xfffffff8);          // convert to an object (body) pointer
                            objectheader = (ULONG)OBJECT_TO_OBJECT_HEADER(object);  // get the object (header) pointer
                            if (MmIsAddressValid((PULONG)(objectheader + TYPE)))
                            {
                                type = *(PULONG)(objectheader + TYPE);
                                if (type == processtype)
                                {
                                    flags = *(PULONG)((ULONG)object + GetPlantformDependentInfo(OFFSET_EPROCESS_FLAGS));
                                    if ((flags & 0xc) != 0xc)      // the flags show the process has not exited
                                        RecordInfo(object);
                                }
                            }
                        }
                    }
                }
            }
            else
            {
                if (table2 != 0)
                {
                    if (MmIsAddressValid((PULONG)(table2 + (i - 0x800) * 2)))
                    {
                        object = *(PULONG)(table2 + (i - 0x800) * 2);
                        if (MmIsAddressValid((PULONG)((table2 + (i - 0x800) * 2) + NEXTFREETABLEENTRY)))
                        {
                            NextFreeTableEntry = *(PULONG)((table2 + (i - 0x800) * 2) + NEXTFREETABLEENTRY);
                            if (NextFreeTableEntry == 0x0)
                            {
                                object = ((object | 0x80000000) & 0xfffffff8);
                                objectheader = (ULONG)OBJECT_TO_OBJECT_HEADER(object);
                                if (MmIsAddressValid((PULONG)(objectheader + TYPE)))
                                {
                                    type = *(PULONG)(objectheader + TYPE);
                                    if (type == processtype)
                                    {
                                        flags = *(PULONG)((ULONG)object + GetPlantformDependentInfo(OFFSET_EPROCESS_FLAGS));
                                        if ((flags & 0xc) != 0xc)
                                            RecordInfo(object);
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
    }
}

// Append one process to the singly linked list
VOID RecordInfo(ULONG i)
{
    ProcessInfo *r;
    if (head == NULL)
    {
        if ((head = (ProcessInfo *)ExAllocatePool(NonPagedPool, sizeof(ProcessInfo))) == NULL)
        {
            return;
        }
        head->addr = 0x0;
        head->next = NULL;
    }
    if (head->addr == 0x0)
    {
        head->addr = i;
        p = head;
    }
    else
    {
        if ((r = (ProcessInfo *)ExAllocatePool(NonPagedPool, sizeof(ProcessInfo))) == NULL)
        {
            return;
        }
        p->next = r;
        r->addr = i;
        r->next = NULL;
        p = r;
    }
}
///////////////////////////////////////////////////////////////////////////////
// ProcCreateClose
// Handles IRP_MJ_CREATE / IRP_MJ_CLOSE so that user mode can open and close the device
//
NTSTATUS
ProcCreateClose(
    IN PDEVICE_OBJECT DeviceObject,
    IN PIRP Irp
    )
{
    DbgPrint("Create or Close ok...\n");
    Irp->IoStatus.Status = STATUS_SUCCESS;
    Irp->IoStatus.Information = 0;
    IoCompleteRequest(Irp, IO_NO_INCREMENT);
    return STATUS_SUCCESS;
}

///////////////////////////////////////////////////////////////////////////////
// ProcDeviceControl
// Handles device-control requests coming from user mode
//
NTSTATUS
ProcDeviceControl (
    IN PDEVICE_OBJECT DeviceObject,
    IN PIRP Irp
    )
{
    PIO_STACK_LOCATION io_stack;
    ULONG info = 0;

    io_stack = IoGetCurrentIrpStackLocation(Irp);

    if (io_stack->MajorFunction == IRP_MJ_DEVICE_CONTROL)
    {
        switch (io_stack->Parameters.DeviceIoControl.IoControlCode)
        {
        case IOCTL_GETPROC_LIST:
        {
            ProcessInfo* pi = (ProcessInfo*) Irp->AssociatedIrp.SystemBuffer;
            ProcessInfo* q;
            // Read the process ID and image name out of each recorded EPROCESS
            for (p = head; p; p = p->next)
            {
                p->pid = *(int *)(p->addr + GetPlantformDependentInfo(OFFSET_EPROCESS_PID));
                strcpy((char *)p->name, (char *)(p->addr + GetPlantformDependentInfo(OFFSET_EPROCESS_IMAGENAME)));
            }
            // Copy the list into the caller's buffer as a contiguous array
            // (the output buffer is assumed to be large enough). The next
            // pointers refer to the kernel-mode system buffer, so user mode
            // should treat the result as an array rather than follow them.
            for (p = head; p; p = p->next)
            {
                pi->addr = p->addr;
                RtlCopyMemory(pi->name, p->name, 16);
                pi->pid  = p->pid;
                pi->next = p->next ? (pi + 1) : NULL;
                pi++;
                info += sizeof(ProcessInfo);
            }
            // Free the linked list
            p = head;
            while (p != NULL)
            {
                q = p->next;
                ExFreePool(p);
                p = q;
            }
            head = NULL;
            break;
        }
        }
    }
    Irp->IoStatus.Status = STATUS_SUCCESS;
    Irp->IoStatus.Information = info;
    IoCompleteRequest(Irp, IO_NO_INCREMENT);
    return STATUS_SUCCESS;
}

// Sorry, I don't have a Windows 2000 system, so the 2000 offsets are left unfilled
DWORD GetPlantformDependentInfo(DWORD eprocessflag)
{
    DWORD current_build;
    DWORD ans = 0;

    PsGetVersion(NULL, NULL, &current_build, NULL);

    switch (eprocessflag) {
    case OFFSET_EPROCESS_IMAGENAME:
        //if (current_build == 2195)   // 2000: offset unknown, fill in if needed
        //    ans = 0x???;
        if (current_build == 2600)     // XP
        {
            ans = 0x174;
        }
        if (current_build == 3790)     // 2003
        {
            ans = 0x164;
        }
        break;
    case OFFSET_EPROCESS_PID:
        //if (current_build == 2195)   // 2000: offset unknown, fill in if needed
        //    ans = 0x???;
        if (current_build == 2600)     // XP
        {
            ans = 0x084;
        }
        if (current_build == 3790)     // 2003
        {
            ans = 0x94;
        }
        break;
    case OFFSET_EPROCESS_FLAGS:
        //if (current_build == 2195)   // 2000: offset unknown, fill in if needed
        //    ans = 0x???;
        if (current_build == 2600)     // XP
        {
            ans = 0x248;
        }
        if (current_build == 3790)     // 2003
        {
            ans = 0x240;
        }
        break;
    default:
        break;
    }

    return ans;
}
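
To test the driver from user mode, a small console client can open the device through the symbolic link and issue the IOCTL. The following is a minimal sketch rather than part of the original article: it assumes the driver is already loaded, that a fixed-size buffer is large enough to hold every process, and it walks the returned entries as a contiguous array (the next pointers written by the driver are kernel-mode addresses and must not be dereferenced in user mode).

// usermode_client.c -- minimal sketch of a client for the driver above
#include <windows.h>
#include <winioctl.h>
#include <stdio.h>

// Must match the definitions in ListProc.h
typedef struct PROCESS_PROC
{
    ULONG addr;
    int   pid;
    UCHAR name[16];
    struct PROCESS_PROC *next;
} ProcessInfo;

#define IOCTL_GETPROC_LIST CTL_CODE(0x8000, 0x800, METHOD_BUFFERED, FILE_ANY_ACCESS)

int main(void)
{
    ProcessInfo buffer[1024];      // assumed to be large enough for all processes
    DWORD returned = 0;
    DWORD count, n;
    HANDLE device;

    device = CreateFileA("\\\\.\\proclistdrv", GENERIC_READ | GENERIC_WRITE,
                         0, NULL, OPEN_EXISTING, 0, NULL);
    if (device == INVALID_HANDLE_VALUE)
    {
        printf("CreateFile failed: %lu\n", GetLastError());
        return 1;
    }

    if (!DeviceIoControl(device, IOCTL_GETPROC_LIST,
                         NULL, 0, buffer, sizeof(buffer), &returned, NULL))
    {
        printf("DeviceIoControl failed: %lu\n", GetLastError());
        CloseHandle(device);
        return 1;
    }

    // Treat the result as a contiguous array; 'returned' is the byte count
    count = returned / sizeof(ProcessInfo);
    for (n = 0; n < count; n++)
        printf("EPROCESS=0x%08lx  pid=%-6d  name=%.16s\n",
               buffer[n].addr, buffer[n].pid, (const char *)buffer[n].name);

    CloseHandle(device);
    return 0;
}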
 