UVA 216

This post presents a backtracking C solution to UVA 216 (Getting in Line): find the shortest cable route that chains a set of network points together. A recursive function enumerates every permutation of the points; for each complete ordering it sums the segment costs (Euclidean distance plus 16 feet of slack per segment) and keeps the ordering with the smallest total. Since the problem limits each network to at most 8 points, exhaustively checking all orderings is fast enough.
#include <stdio.h>
#include <string.h>
#include <math.h>
#define MAX 200
int n;                                /* number of points in the current network */
int p[MAX], ans[MAX], used[MAX];      /* current permutation, best permutation, visited flags */
float x[MAX], y[MAX], min;            /* point coordinates and best total length so far */

/* Cable needed between two points: Euclidean distance plus 16 feet of slack. */
float cal( float x1, float y1, float x2, float y2 )
{
	return sqrt( pow(x1-x2, 2) + pow(y1-y2, 2) ) + 16;
}
/* Backtracking over permutations: p[0..cur-1] is already fixed and val is the
   point placed most recently; cur is the next position to fill.  When all n
   points are placed, evaluate the full ordering. */
void dfs( int val, int cur )
{
	int i;
	float sum = 0;
	if( cur == n )
	{
		for( i = 0; i < n-1; i++ )
			sum += cal( x[ p[i] ], y[ p[i] ], x[ p[i+1] ], y[ p[i+1] ] );
		if( sum < min )
		{
			min = sum;
			for( i = 0; i < n; i++ )
				ans[i] = p[i];
		}
	}
	else
	{
		used[val] = 1;
		for( i = 0; i < n; i++ )
			if( !used[i] )
			{
				p[cur] = i;
				dfs( i, cur+1 );
			}
		used[val] = 0;   /* undo the mark so sibling branches can reuse val */
	}
}
int main()
{
	int kase = 1, i;
	while( scanf( "%d", &n ) == 1 && n )   /* a network size of 0 ends the input */
	{
		min = 100000;                       /* larger than any possible total */
		printf( "**********************************************************\n" );
		printf( "Network #%d\n", kase++ );
		for( i = 0; i < n; i++ )
			scanf( "%f%f", &x[i], &y[i] );
		/* Try every point as the start of the chain. */
		for( i = 0; i < n; i++ )
		{
			memset( used, 0, sizeof(used) );
			p[0] = i;
			dfs( i, 1 );
		}
		for( i = 0; i < n-1; i++ )
			printf( "Cable requirement to connect (%d,%d) to (%d,%d) is %.2f feet.\n",
			        (int)x[ans[i]], (int)y[ans[i]], (int)x[ans[i+1]], (int)y[ans[i+1]],
			        cal(x[ans[i]], y[ans[i]], x[ans[i+1]], y[ans[i+1]]) );
		printf( "Number of feet of cable required is %.2f.\n", min );

	}
	return 0;
}
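A quick sanity check (the coordinates below are made up for illustration; this is not an official UVA sample): with the three collinear points (0,0), (3,4), (6,8), each adjacent hop costs 5 + 16 = 21 feet while the long hop from (0,0) to (6,8) would cost 10 + 16 = 26, so the straight ordering must win.

Input:
3
0 0
3 4
6 8
0

Expected output:
**********************************************************
Network #1
Cable requirement to connect (0,0) to (3,4) is 21.00 feet.
Cable requirement to connect (3,4) to (6,8) is 21.00 feet.
Number of feet of cable required is 42.00.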

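The backtracking search evaluates O(n · n!) orderings, which is fine for n ≤ 8 but grows out of hand quickly. As a sketch of how the same shortest-Hamiltonian-path computation scales (an addition of mine, not part of the original solution), a Held-Karp bitmask DP finds the optimal chain length in O(2^n · n^2); it reuses the n, x[], y[] and cal() defined above, and recovering the actual ordering would additionally require parent pointers.

/* Sketch only: dp[S][j] is the shortest chain that visits exactly the points
   in bitmask S and ends at point j.  Sized for the problem limit n <= 8. */
float dp[1 << 8][8];

float held_karp( void )
{
	int S, j, k, full = (1 << n) - 1;
	float best = 1e9f;
	for( S = 0; S <= full; S++ )
		for( j = 0; j < n; j++ )
			dp[S][j] = 1e9f;
	for( j = 0; j < n; j++ )
		dp[1 << j][j] = 0;          /* a single point needs no cable */
	for( S = 1; S <= full; S++ )
		for( j = 0; j < n; j++ )
		{
			if( !(S & (1 << j)) || dp[S][j] >= 1e9f )
				continue;
			for( k = 0; k < n; k++ ) /* extend the chain from j to k */
				if( !(S & (1 << k))
				    && dp[S][j] + cal( x[j], y[j], x[k], y[k] ) < dp[S | (1 << k)][k] )
					dp[S | (1 << k)][k] = dp[S][j] + cal( x[j], y[j], x[k], y[k] );
		}
	for( j = 0; j < n; j++ )        /* best chain that covers every point */
		if( dp[full][j] < best )
			best = dp[full][j];
	return best;
}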