# ComfyUI Error Report
## Error Details
- **Node ID:** 5
- **Node Type:** Joy_caption_two
- **Exception Type:** RuntimeError
- **Exception Message:** All input tensors need to be on the same GPU, but found some tensors to not be on a GPU:
[(torch.Size([1, 8388608]), device(type='cpu')), (torch.Size([262144]), device(type='cpu')), (torch.Size([4096, 4096]), device(type='cpu'))]
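This is bitsandbytes' device check failing: its 4-bit kernels only run when the packed weights, their absmax quantization scales, and the output buffer all sit on a single CUDA device, and here all three reported tensors are on the CPU. Below is a minimal sketch of load-time placement that keeps the weights on the GPU, assuming the model is loaded through transformers as the log suggests; the exact kwargs are assumptions, not the node's actual code.
```
# Hedged sketch: pin a bitsandbytes 4-bit model to cuda:0 at load time.
# transformers rejects .to("cuda") on an already-quantized model, so
# placement has to happen through device_map instead.
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

model = AutoModelForCausalLM.from_pretrained(
    "unsloth/Meta-Llama-3.1-8B-Instruct-bnb-4bit",
    quantization_config=BitsAndBytesConfig(load_in_4bit=True),
    device_map={"": 0},  # every module on cuda:0; "auto" may offload to CPU
    torch_dtype=torch.float16,
)
```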
## Stack Trace
```
File "C:\comfyui2\ComfyUI\execution.py", line 349, in execute
output_data, output_ui, has_subgraph = get_output_data(obj, input_data_all, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb)
File "C:\comfyui2\ComfyUI\execution.py", line 224, in get_output_data
return_values = _map_node_over_list(obj, input_data_all, obj.FUNCTION, allow_interrupt=True, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb)
File "C:\comfyui2\ComfyUI\execution.py", line 196, in _map_node_over_list
process_inputs(input_dict, i)
File "C:\comfyui2\ComfyUI\execution.py", line 185, in process_inputs
results.append(getattr(obj, func)(**inputs))
File "C:\comfyui2\ComfyUI\custom_nodes\ComfyUI_SLK_joy_caption_two\joy_caption_two_node.py", line 407, in generate
generate_ids = text_model.generate(input_ids, inputs_embeds=input_embeds, attention_mask=attention_mask,
File "C:\Users\张三\AppData\Local\Programs\Python\Python310\lib\site-packages\torch\utils\_contextlib.py", line 116, in decorate_context
return func(*args, **kwargs)
File "C:\Users\张三\AppData\Local\Programs\Python\Python310\lib\site-packages\transformers\generation\utils.py", line 2215, in generate
result = self._sample(
File "C:\Users\张三\AppData\Local\Programs\Python\Python310\lib\site-packages\transformers\generation\utils.py", line 3206, in _sample
outputs = self(**model_inputs, return_dict=True)
File "C:\Users\张三\AppData\Local\Programs\Python\Python310\lib\site-packages\torch\nn\modules\module.py", line 1751, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\Users\张三\AppData\Local\Programs\Python\Python310\lib\site-packages\torch\nn\modules\module.py", line 1762, in _call_impl
return forward_call(*args, **kwargs)
File "C:\Users\张三\AppData\Local\Programs\Python\Python310\lib\site-packages\transformers\models\llama\modeling_llama.py", line 1190, in forward
outputs = self.model(
File "C:\Users\张三\AppData\Local\Programs\Python\Python310\lib\site-packages\torch\nn\modules\module.py", line 1751, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\Users\张三\AppData\Local\Programs\Python\Python310\lib\site-packages\torch\nn\modules\module.py", line 1762, in _call_impl
return forward_call(*args, **kwargs)
File "C:\Users\张三\AppData\Local\Programs\Python\Python310\lib\site-packages\transformers\models\llama\modeling_llama.py", line 945, in forward
layer_outputs = decoder_layer(
File "C:\Users\张三\AppData\Local\Programs\Python\Python310\lib\site-packages\torch\nn\modules\module.py", line 1751, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\Users\张三\AppData\Local\Programs\Python\Python310\lib\site-packages\torch\nn\modules\module.py", line 1762, in _call_impl
return forward_call(*args, **kwargs)
File "C:\Users\张三\AppData\Local\Programs\Python\Python310\lib\site-packages\transformers\models\llama\modeling_llama.py", line 676, in forward
hidden_states, self_attn_weights, present_key_value = self.self_attn(
File "C:\Users\张三\AppData\Local\Programs\Python\Python310\lib\site-packages\torch\nn\modules\module.py", line 1751, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\Users\张三\AppData\Local\Programs\Python\Python310\lib\site-packages\torch\nn\modules\module.py", line 1762, in _call_impl
return forward_call(*args, **kwargs)
File "C:\Users\张三\AppData\Local\Programs\Python\Python310\lib\site-packages\transformers\models\llama\modeling_llama.py", line 559, in forward
query_states = self.q_proj(hidden_states)
File "C:\Users\张三\AppData\Local\Programs\Python\Python310\lib\site-packages\torch\nn\modules\module.py", line 1751, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\Users\张三\AppData\Local\Programs\Python\Python310\lib\site-packages\torch\nn\modules\module.py", line 1762, in _call_impl
return forward_call(*args, **kwargs)
File "C:\Users\张三\AppData\Local\Programs\Python\Python310\lib\site-packages\peft\tuners\lora\bnb.py", line 467, in forward
result = self.base_layer(x, *args, **kwargs)
File "C:\Users\张三\AppData\Local\Programs\Python\Python310\lib\site-packages\torch\nn\modules\module.py", line 1751, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\Users\张三\AppData\Local\Programs\Python\Python310\lib\site-packages\torch\nn\modules\module.py", line 1762, in _call_impl
return forward_call(*args, **kwargs)
File "C:\Users\张三\AppData\Local\Programs\Python\Python310\lib\site-packages\bitsandbytes\nn\modules.py", line 484, in forward
return bnb.matmul_4bit(x, self.weight.t(), bias=bias, quant_state=self.weight.quant_state).to(inp_dtype)
File "C:\Users\张三\AppData\Local\Programs\Python\Python310\lib\site-packages\bitsandbytes\autograd\_functions.py", line 533, in matmul_4bit
return MatMul4Bit.apply(A, B, out, bias, quant_state)
File "C:\Users\张三\AppData\Local\Programs\Python\Python310\lib\site-packages\torch\autograd\function.py", line 575, in apply
return super().apply(*args, **kwargs) # type: ignore[misc]
File "C:\Users\张三\AppData\Local\Programs\Python\Python310\lib\site-packages\bitsandbytes\autograd\_functions.py", line 462, in forward
output = torch.nn.functional.linear(A, F.dequantize_4bit(B, quant_state).to(A.dtype).t(), bias)
File "C:\Users\张三\AppData\Local\Programs\Python\Python310\lib\site-packages\bitsandbytes\functional.py", line 1363, in dequantize_4bit
is_on_gpu([A, absmax, out])
File "C:\Users\张三\AppData\Local\Programs\Python\Python310\lib\site-packages\bitsandbytes\functional.py", line 464, in is_on_gpu
raise RuntimeError(
```
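The bottom frames show the path to the failure: the node's `text_model.generate()` call descends through the PEFT LoRA wrapper into bitsandbytes' `MatMul4Bit`, and `dequantize_4bit` aborts because `is_on_gpu()` finds CPU tensors. Reduced to a stand-in sketch that mirrors the check's logic (not the library's actual code), using the shapes from the report:
```
import torch

def check_on_gpu(tensors):
    # Mimics bitsandbytes' is_on_gpu(): any tensor off a CUDA device
    # trips the same RuntimeError quoted in this report.
    offenders = [(t.shape, t.device) for t in tensors if t.device.type != "cuda"]
    if offenders:
        raise RuntimeError(
            "All input tensors need to be on the same GPU, but found some "
            f"tensors to not be on a GPU:\n{offenders}"
        )

try:
    # Packed 4-bit weight, absmax scales, and output buffer, all on CPU:
    check_on_gpu([torch.empty(1, 8388608), torch.empty(262144), torch.empty(4096, 4096)])
except RuntimeError as e:
    print(e)
```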
## System Information
- **ComfyUI Version:** 0.3.33
- **Arguments:** main.py
- **OS:** nt
- **Python Version:** 3.10.11 (tags/v3.10.11:7d4cc5a, Apr 5 2023, 00:38:17) [MSC v.1929 64 bit (AMD64)]
- **Embedded Python:** false
- **PyTorch Version:** 2.7.0+cu126
## Devices
- **Name:** cuda:0 NVIDIA GeForce GTX 1660 Ti : cudaMallocAsync
- **Type:** cuda
- **VRAM Total:** 6442123264
- **VRAM Free:** 5379194880
- **Torch VRAM Total:** 0
- **Torch VRAM Free:** 0
## Logs
```
2025-05-12T12:33:00.245025 - Adding extra search path checkpoints C:\baidunetdiskdownload\sd-webui-aki\sd-webui-aki-v4.10\models\Stable-diffusion
2025-05-12T12:33:00.245025 - Adding extra search path configs C:\baidunetdiskdownload\sd-webui-aki\sd-webui-aki-v4.10\models\Stable-diffusion
2025-05-12T12:33:00.246022 - Adding extra search path vae C:\baidunetdiskdownload\sd-webui-aki\sd-webui-aki-v4.10\models\VAE
2025-05-12T12:33:00.246022 - Adding extra search path loras C:\baidunetdiskdownload\sd-webui-aki\sd-webui-aki-v4.10\models\Lora
2025-05-12T12:33:00.246022 - Adding extra search path loras C:\baidunetdiskdownload\sd-webui-aki\sd-webui-aki-v4.10\models\LyCORIS
2025-05-12T12:33:00.247019 - Adding extra search path upscale_models C:\baidunetdiskdownload\sd-webui-aki\sd-webui-aki-v4.10\models\ESRGAN
2025-05-12T12:33:00.247019 - Adding extra search path upscale_models C:\baidunetdiskdownload\sd-webui-aki\sd-webui-aki-v4.10\models\RealESRGAN
2025-05-12T12:33:00.247019 - Adding extra search path upscale_models C:\baidunetdiskdownload\sd-webui-aki\sd-webui-aki-v4.10\models\SwinIR
2025-05-12T12:33:00.247019 - Adding extra search path embeddings C:\baidunetdiskdownload\sd-webui-aki\sd-webui-aki-v4.10\embeddings
2025-05-12T12:33:00.247019 - Adding extra search path hypernetworks C:\baidunetdiskdownload\sd-webui-aki\sd-webui-aki-v4.10\models\hypernetworks
2025-05-12T12:33:00.248017 - Adding extra search path controlnet C:\baidunetdiskdownload\sd-webui-aki\sd-webui-aki-v4.10\extensions\sd-webui-controlnet
2025-05-12T12:33:00.823554 - [START] Security scan
2025-05-12T12:33:02.367066 - [DONE] Security scan
2025-05-12T12:33:02.643316 - ## ComfyUI-Manager: installing dependencies done.
2025-05-12T12:33:02.643316 - ** ComfyUI startup time: 2025-05-12 12:33:02.643
2025-05-12T12:33:02.644221 - ** Platform: Windows
2025-05-12T12:33:02.645181 - ** Python version: 3.10.11 (tags/v3.10.11:7d4cc5a, Apr 5 2023, 00:38:17) [MSC v.1929 64 bit (AMD64)]
2025-05-12T12:33:02.645181 - ** Python executable: C:\Users\张三\AppData\Local\Programs\Python\Python310\python.exe
2025-05-12T12:33:02.646180 - ** ComfyUI Path: C:\comfyui2\ComfyUI
2025-05-12T12:33:02.646180 - ** ComfyUI Base Folder Path: C:\comfyui2\ComfyUI
2025-05-12T12:33:02.647178 - ** User directory: C:\comfyui2\ComfyUI\user
2025-05-12T12:33:02.648176 - ** ComfyUI-Manager config path: C:\comfyui2\ComfyUI\user\default\ComfyUI-Manager\config.ini
2025-05-12T12:33:02.648176 - ** Log path: C:\comfyui2\ComfyUI\user\comfyui.log
2025-05-12T12:33:04.301068 -
Prestartup times for custom nodes:
2025-05-12T12:33:04.302066 - 0.0 seconds: C:\comfyui2\ComfyUI\custom_nodes\comfyui-easy-use
2025-05-12T12:33:04.302066 - 4.1 seconds: C:\comfyui2\ComfyUI\custom_nodes\ComfyUI-Manager
2025-05-12T12:33:04.302066 -
2025-05-12T12:33:06.807275 - Checkpoint files will always be loaded safely.
2025-05-12T12:33:07.017820 - Total VRAM 6144 MB, total RAM 16294 MB
2025-05-12T12:33:07.018365 - pytorch version: 2.7.0+cu126
2025-05-12T12:33:09.352975 - xformers version: 0.0.30
2025-05-12T12:33:09.352975 - Set vram state to: NORMAL_VRAM
2025-05-12T12:33:09.352975 - Device: cuda:0 NVIDIA GeForce GTX 1660 Ti : cudaMallocAsync
2025-05-12T12:33:09.686612 - Using xformers attention
2025-05-12T12:33:11.295329 - Python version: 3.10.11 (tags/v3.10.11:7d4cc5a, Apr 5 2023, 00:38:17) [MSC v.1929 64 bit (AMD64)]
2025-05-12T12:33:11.295743 - ComfyUI version: 0.3.33
2025-05-12T12:33:11.358578 - ComfyUI frontend version: 1.18.10
2025-05-12T12:33:11.361570 - [Prompt Server] web root: C:\Users\张三\AppData\Local\Programs\Python\Python310\lib\site-packages\comfyui_frontend_package\static
2025-05-12T12:33:13.337748 - Module 'diffusers' load failed. If you don't have it installed, do it:
2025-05-12T12:33:13.339179 - pip install diffusers
2025-05-12T12:33:13.688119 - [ComfyUI-Easy-Use] server: v1.3.0 Loaded
2025-05-12T12:33:13.688119 - [ComfyUI-Easy-Use] web root: C:\comfyui2\ComfyUI\custom_nodes\comfyui-easy-use\web_version/v2 Loaded
2025-05-12T12:33:13.717123 - ### Loading: ComfyUI-Manager (V3.31.13)
2025-05-12T12:33:13.717123 - [ComfyUI-Manager] network_mode: public
2025-05-12T12:33:14.072667 - ### ComfyUI Version: v0.3.33-10-g577de83c | Released on '2025-05-11'
2025-05-12T12:33:14.311459 -
Import times for custom nodes:
2025-05-12T12:33:14.311459 - 0.0 seconds: C:\comfyui2\ComfyUI\custom_nodes\websocket_image_save.py
2025-05-12T12:33:14.313453 - 0.0 seconds: C:\comfyui2\ComfyUI\custom_nodes\aigodlike-comfyui-translation
2025-05-12T12:33:14.313453 - 0.0 seconds: C:\comfyui2\ComfyUI\custom_nodes\ComfyUI_SLK_joy_caption_two
2025-05-12T12:33:14.314451 - 0.0 seconds: C:\comfyui2\ComfyUI\custom_nodes\comfyui-custom-scripts
2025-05-12T12:33:14.314451 - 0.6 seconds: C:\comfyui2\ComfyUI\custom_nodes\ComfyUI-Manager
2025-05-12T12:33:14.315449 - 0.7 seconds: C:\comfyui2\ComfyUI\custom_nodes\comfyui-easy-use
2025-05-12T12:33:14.315449 -
2025-05-12T12:33:14.362371 - Starting server
2025-05-12T12:33:14.363371 - To see the GUI go to: http://127.0.0.1:8188
2025-05-12T12:33:19.620936 - [ComfyUI-Manager] default cache updated: https://raw.githubusercontent.com/ltdrdata/ComfyUI-Manager/main/model-list.json
2025-05-12T12:33:19.678088 - [ComfyUI-Manager] default cache updated: https://raw.githubusercontent.com/ltdrdata/ComfyUI-Manager/main/extension-node-map.json
2025-05-12T12:33:19.845706 - [ComfyUI-Manager] default cache updated: https://raw.githubusercontent.com/ltdrdata/ComfyUI-Manager/main/alter-list.json
2025-05-12T12:33:19.955392 - [ComfyUI-Manager] default cache updated: https://raw.githubusercontent.com/ltdrdata/ComfyUI-Manager/main/github-stats.json
2025-05-12T12:33:20.436522 - [ComfyUI-Manager] default cache updated: https://raw.githubusercontent.com/ltdrdata/ComfyUI-Manager/main/custom-node-list.json
2025-05-12T12:34:14.934119 - Cannot connect to comfyregistry.
2025-05-12T12:34:14.940242 - FETCH DATA from: https://raw.githubusercontent.com/ltdrdata/ComfyUI-Manager/main/custom-node-list.json
2025-05-12T12:34:19.785536 - got prompt
2025-05-12T12:34:19.982016 - C:\comfyui2\ComfyUI\models\clip\siglip-so400m-patch14-384
2025-05-12T12:34:20.277243 - [DONE]
2025-05-12T12:34:20.377933 - [ComfyUI-Manager] All startup tasks have been completed.
2025-05-12T12:34:26.095833 - Loading VLM's custom vision model
2025-05-12T12:34:28.260562 - Requested to load SiglipVisionTransformer
2025-05-12T12:34:28.272048 - loaded completely 9.5367431640625e+25 809.1729736328125 True
2025-05-12T12:34:53.793004 - Requested to load ImageAdapter
2025-05-12T12:34:53.793004 - loaded completely 9.5367431640625e+25 41.0390625 True
2025-05-12T12:35:10.488040 - Loading tokenizer
2025-05-12T12:35:11.002348 - Loading LLM: unsloth/Meta-Llama-3.1-8B-Instruct-bnb-4bit
2025-05-12T12:35:11.003346 - C:\comfyui2\ComfyUI\models\LLM\Meta-Llama-3.1-8B-Instruct-bnb-4bit
2025-05-12T12:35:11.005339 - Successfully modified 'base_model_name_or_path' value in 'C:\comfyui2\ComfyUI\models\Joy_caption_two\text_model\adapter_config.json'.
2025-05-12T12:35:24.237554 - Unused kwargs: ['_load_in_4bit', '_load_in_8bit', 'quant_method']. These kwargs are not used in <class 'transformers.utils.quantization_config.BitsAndBytesConfig'>.
2025-05-12T12:35:30.940677 - !!! Exception during processing !!! All input tensors need to be on the same GPU, but found some tensors to not be on a GPU:
[(torch.Size([1, 8388608]), device(type='cpu')), (torch.Size([262144]), device(type='cpu')), (torch.Size([4096, 4096]), device(type='cpu'))]
2025-05-12T12:35:30.962005 - Traceback (most recent call last): [identical to the Stack Trace section above]
2025-05-12T12:35:30.967286 - Prompt executed in 71.18 seconds
2025-05-12T12:51:12.715731 - got prompt
2025-05-12T12:51:12.907212 - loaded completely 9.5367431640625e+25 809.1729736328125 True
2025-05-12T12:51:36.518268 - loaded completely 9.5367431640625e+25 41.0390625 True
2025-05-12T12:51:53.412534 - !!! Exception during processing !!! All input tensors need to be on the same GPU, but found some tensors to not be on a GPU:
[(torch.Size([1, 8388608]), device(type='cpu')), (torch.Size([262144]), device(type='cpu')), (torch.Size([4096, 4096]), device(type='cpu'))]
2025-05-12T12:51:53.419550 - Traceback (most recent call last): [identical to the Stack Trace section above]
2025-05-12T12:51:53.426451 - Prompt executed in 40.69 seconds
2025-05-12T13:27:37.964284 - got prompt
2025-05-12T13:27:38.213613 - loaded completely 9.5367431640625e+25 809.1729736328125 True
2025-05-12T13:28:03.041961 - loaded completely 9.5367431640625e+25 41.0390625 True
2025-05-12T13:28:19.913094 - !!! Exception during processing !!! All input tensors need to be on the same GPU, but found some tensors to not be on a GPU:
[(torch.Size([1, 8388608]), device(type='cpu')), (torch.Size([262144]), device(type='cpu')), (torch.Size([4096, 4096]), device(type='cpu'))]
2025-05-12T13:28:19.919269 - Traceback (most recent call last): [identical to the Stack Trace section above]
2025-05-12T13:28:19.925246 - Prompt executed in 41.93 seconds
2025-05-12T14:01:51.529873 - got prompt
2025-05-12T14:01:51.690142 - loaded completely 9.5367431640625e+25 809.1729736328125 True
2025-05-12T14:01:56.251833 - [ComfyUI-Manager] The ComfyRegistry cache update is still in progress, so an outdated cache is being used.
2025-05-12T14:01:56.355173 - FETCH DATA from: C:\comfyui2\ComfyUI\user\default\ComfyUI-Manager\cache\1514988643_custom-node-list.json
2025-05-12T14:01:56.372727 - [DONE]
2025-05-12T14:01:56.465008 - FETCH DATA from: C:\comfyui2\ComfyUI\user\default\ComfyUI-Manager\cache\746607195_github-stats.json
2025-05-12T14:01:56.472987 - [DONE]
2025-05-12T14:01:56.476976 - FETCH DATA from: C:\comfyui2\ComfyUI\user\default\ComfyUI-Manager\cache\832903789_extras.json
2025-05-12T14:01:56.477973 - [DONE]
2025-05-12T14:01:56.646551 - FETCH DATA from: C:\comfyui2\ComfyUI\user\default\ComfyUI-Manager\cache\1742899825_extension-node-map.json
2025-05-12T14:01:56.656524 - [DONE]
2025-05-12T14:02:15.505597 - loaded completely 9.5367431640625e+25 41.0390625 True
2025-05-12T14:02:31.244068 - !!! Exception during processing !!! All input tensors need to be on the same GPU, but found some tensors to not be on a GPU:
[(torch.Size([1, 8388608]), device(type='cpu')), (torch.Size([262144]), device(type='cpu')), (torch.Size([4096, 4096]), device(type='cpu'))]
2025-05-12T14:02:31.248613 - Traceback (most recent call last): [identical to the Stack Trace section above]
2025-05-12T14:02:31.253592 - Prompt executed in 39.72 seconds
```
## Attached Workflow
Please make sure that the workflow does not contain any sensitive information such as API keys or passwords.
```
{"id":"00000000-0000-0000-0000-000000000000","revision":0,"last_node_id":6,"last_link_id":6,"nodes":[{"id":2,"type":"LoadImage","pos":[-536.7379150390625,-152.25942993164062],"size":[270,314],"flags":{},"order":0,"mode":0,"inputs":[{"label":"image","localized_name":"图像","name":"image","type":"COMBO","widget":{"name":"image"},"link":null},{"label":"upload","localized_name":"选择文件上传","name":"upload","type":"IMAGEUPLOAD","widget":{"name":"upload"},"link":null}],"outputs":[{"label":"IMAGE","localized_name":"图像","name":"IMAGE","type":"IMAGE","links":[5]},{"label":"MASK","localized_name":"遮罩","name":"MASK","type":"MASK","links":null}],"properties":{"cnr_id":"comfy-core","ver":"0.3.33","Node name for S&R":"LoadImage"},"widgets_values":["CN_6.jpg","image"]},{"id":4,"type":"ShowText|pysssss","pos":[247.84080505371094,74.16905975341797],"size":[366.1156921386719,159.55372619628906],"flags":{},"order":3,"mode":0,"inputs":[{"label":"text","localized_name":"text","name":"text","type":"STRING","link":4}],"outputs":[{"label":"STRING","localized_name":"字符串","name":"STRING","shape":6,"type":"STRING","links":null}],"properties":{"cnr_id":"comfyui-custom-scripts","ver":"1.2.5","Node name for S&R":"ShowText|pysssss"},"widgets_values":[]},{"id":6,"type":"Joy_caption_two_load","pos":[-148.00003051757812,-86.00003051757812],"size":[270,58],"flags":{},"order":1,"mode":0,"inputs":[{"label":"model","localized_name":"model","name":"model","type":"COMBO","widget":{"name":"model"},"link":null}],"outputs":[{"label":"JoyTwoPipeline","localized_name":"JoyTwoPipeline","name":"JoyTwoPipeline","type":"JoyTwoPipeline","links":[6]}],"properties":{"cnr_id":"comfyui_slk_joy_caption_two","ver":"667751cab945bd8e9fb0be4d557d47e36821350a","Node name for S&R":"Joy_caption_two_load"},"widgets_values":["unsloth/Meta-Llama-3.1-8B-Instruct-bnb-4bit"]},{"id":5,"type":"Joy_caption_two","pos":[-139.20001220703125,33.199951171875],"size":[270,126],"flags":{},"order":2,"mode":0,"inputs":[{"label":"joy_two_pipeline","localized_name":"joy_two_pipeline","name":"joy_two_pipeline","type":"JoyTwoPipeline","link":6},{"label":"image","localized_name":"image","name":"image","type":"IMAGE","link":5},{"label":"caption_type","localized_name":"caption_type","name":"caption_type","type":"COMBO","widget":{"name":"caption_type"},"link":null},{"label":"caption_length","localized_name":"caption_length","name":"caption_length","type":"COMBO","widget":{"name":"caption_length"},"link":null},{"label":"low_vram","localized_name":"low_vram","name":"low_vram","type":"BOOLEAN","widget":{"name":"low_vram"},"link":null}],"outputs":[{"label":"STRING","localized_name":"字符串","name":"STRING","type":"STRING","links":[4]}],"properties":{"cnr_id":"comfyui_slk_joy_caption_two","ver":"667751cab945bd8e9fb0be4d557d47e36821350a","Node name for S&R":"Joy_caption_two"},"widgets_values":["Descriptive","long",true]}],"links":[[4,5,0,4,0,"STRING"],[5,2,0,5,1,"IMAGE"],[6,6,0,5,0,"JoyTwoPipeline"]],"groups":[],"config":{},"extra":{"frontendVersion":"1.18.10"},"version":0.4}
```
## Additional Context
(Please add any additional context or steps to reproduce the error here)
What does the above code mean?
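In short: the Joy_caption_two node (node 5) calls `text_model.generate()` while the 4-bit Llama weights and their quantization scales are still in system RAM, and bitsandbytes can only run its 4-bit matmul on a CUDA device. With `low_vram` enabled in the attached workflow and only 6 GB of VRAM on the GTX 1660 Ti, the model was most likely offloaded to the CPU. A diagnostic sketch, assuming access to the node's loaded `text_model` (the variable name follows the traceback):
```
# List every parameter that did not make it onto the GPU. With this error,
# the packed 4-bit projection weights (e.g. shape (1, 8388608)) should show
# up here; re-loading with device_map={"": 0} (see the sketch under Error
# Details) or disabling the node's low_vram option keeps them on cuda:0.
for name, p in text_model.named_parameters():
    if p.device.type != "cuda":
        print(name, tuple(p.shape), p.device)
```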