if __name__ == '__main__':
model = YOLO(model="weight_file/yolov8/yolov8.yaml", task="detect").load("weight_file/yolov8/yolov8n.pt")
# 训练模型
model.train(task="detect",
data="datasets/detect/FlickrSportLogos-10/FlickrSportLogos-10.yaml",
# device=0,
device=[0, 1],
batch=64,
epochs=20,
imgsz=640,
workers=2,
lr0=0.001,
lrf=0.001,
optimizer="Adam",
warmup_epochs=5,
weight_decay=0.001,
dropout=0.1,
augment=True,
hsv_h=0.015,
hsv_s=0.7,
hsv_v=0.4,
flipud=0.5,
fliplr=0.1,
mosaic=1.0,
mixup=0.5,
)
以下是控制台输出:
D:\anaconda3\envs\yolov8\python.exe E:\pycharmProject\logo_detect\yolo-detect-train-and-val.py
WARNING ⚠️ no model scale passed. Assuming scale='n'.
Transferred 355/355 items from pretrained weights
---------------------模型加载完毕---------------------
New https://pypi.org/project/ultralytics/8.3.241 available 😃 Update with 'pip install -U ultralytics'
Ultralytics 8.3.43 🚀 Python-3.8.19 torch-1.10.1 CUDA:0 (NVIDIA GeForce RTX 3060, 12287MiB)
CUDA:1 (NVIDIA GeForce RTX 3060, 12288MiB)
WARNING ⚠️ Upgrade to torch>=2.0.0 for deterministic training.
engine\trainer: task=detect, mode=train, model=weight_file/yolov8/yolov8.yaml, data=datasets/detect/FlickrSportLogos-10/FlickrSportLogos-10.yaml, epochs=20, time=None, patience=100, batch=64, imgsz=640, save=True, save_period=-1, cache=False, device=[0, 1], workers=2, project=None, name=train, exist_ok=False, pretrained=weight_file/yolov8/yolov8n.pt, optimizer=Adam, verbose=True, seed=0, deterministic=True, single_cls=False, rect=False, cos_lr=False, close_mosaic=10, resume=False, amp=True, fraction=1.0, profile=False, freeze=None, multi_scale=False, overlap_mask=True, mask_ratio=4, dropout=0.1, val=True, split=val, save_json=False, save_hybrid=False, conf=None, iou=0.7, max_det=300, half=False, dnn=False, plots=True, source=None, vid_stride=1, stream_buffer=False, visualize=False, augment=True, agnostic_nms=False, classes=None, retina_masks=False, embed=None, show=False, save_frames=False, save_txt=False, save_conf=False, save_crop=False, show_labels=True, show_conf=True, show_boxes=True, line_width=None, format=torchscript, keras=False, optimize=False, int8=False, dynamic=False, simplify=True, opset=None, workspace=None, nms=False, lr0=0.001, lrf=0.001, momentum=0.937, weight_decay=0.001, warmup_epochs=5, warmup_momentum=0.8, warmup_bias_lr=0.1, box=7.5, cls=0.5, dfl=1.5, pose=12.0, kobj=1.0, nbs=64, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.5, fliplr=0.1, bgr=0.0, mosaic=1.0, mixup=0.5, copy_paste=0.0, copy_paste_mode=flip, auto_augment=randaugment, erasing=0.4, crop_fraction=1.0, cfg=None, tracker=botsort.yaml, save_dir=runs\detect\train
Overriding model.yaml nc=80 with nc=10
WARNING ⚠️ no model scale passed. Assuming scale='n'.
from n params module arguments
0 -1 1 464 ultralytics.nn.modules.conv.Conv [3, 16, 3, 2]
1 -1 1 4672 ultralytics.nn.modules.conv.Conv [16, 32, 3, 2]
2 -1 1 7360 ultralytics.nn.modules.block.C2f [32, 32, 1, True]
3 -1 1 18560 ultralytics.nn.modules.conv.Conv [32, 64, 3, 2]
4 -1 2 49664 ultralytics.nn.modules.block.C2f [64, 64, 2, True]
5 -1 1 73984 ultralytics.nn.modules.conv.Conv [64, 128, 3, 2]
6 -1 2 197632 ultralytics.nn.modules.block.C2f [128, 128, 2, True]
7 -1 1 295424 ultralytics.nn.modules.conv.Conv [128, 256, 3, 2]
8 -1 1 460288 ultralytics.nn.modules.block.C2f [256, 256, 1, True]
9 -1 1 164608 ultralytics.nn.modules.block.SPPF [256, 256, 5]
10 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest']
11 [-1, 6] 1 0 ultralytics.nn.modules.conv.Concat [1]
12 -1 1 148224 ultralytics.nn.modules.block.C2f [384, 128, 1]
13 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest']
14 [-1, 4] 1 0 ultralytics.nn.modules.conv.Concat [1]
15 -1 1 37248 ultralytics.nn.modules.block.C2f [192, 64, 1]
16 -1 1 36992 ultralytics.nn.modules.conv.Conv [64, 64, 3, 2]
17 [-1, 12] 1 0 ultralytics.nn.modules.conv.Concat [1]
18 -1 1 123648 ultralytics.nn.modules.block.C2f [192, 128, 1]
19 -1 1 147712 ultralytics.nn.modules.conv.Conv [128, 128, 3, 2]
20 [-1, 9] 1 0 ultralytics.nn.modules.conv.Concat [1]
21 -1 1 493056 ultralytics.nn.modules.block.C2f [384, 256, 1]
22 [15, 18, 21] 1 753262 ultralytics.nn.modules.head.Detect [10, [64, 128, 256]]
YOLOv8 summary: 225 layers, 3,012,798 parameters, 3,012,782 gradients
Transferred 319/355 items from pretrained weights
DDP: debug command D:\anaconda3\envs\yolov8\python.exe -m torch.distributed.run --nproc_per_node 2 --master_port 49631 C:\Users\yoimiya\AppData\Roaming\Ultralytics\DDP\_temp_g62aeueg1866284070656.py
NOTE: Redirects are currently not supported in Windows or MacOs.
Ultralytics 8.3.43 🚀 Python-3.8.19 torch-1.10.1 CUDA:0 (NVIDIA GeForce RTX 3060, 12287MiB)
CUDA:1 (NVIDIA GeForce RTX 3060, 12288MiB)
WARNING ⚠️ Upgrade to torch>=2.0.0 for deterministic training.
Overriding model.yaml nc=80 with nc=10
WARNING ⚠️ no model scale passed. Assuming scale='n'.
Transferred 319/355 items from pretrained weights
Freezing layer 'model.22.dfl.conv.weight'
AMP: running Automatic Mixed Precision (AMP) checks...
Traceback (most recent call last):
File "C:\Users\yoimiya\AppData\Roaming\Ultralytics\DDP\_temp_g62aeueg1866284070656.py", line 13, in <module>
results = trainer.train()
File "D:\anaconda3\envs\yolov8\lib\site-packages\ultralytics\engine\trainer.py", line 207, in train
self._do_train(world_size)
File "D:\anaconda3\envs\yolov8\lib\site-packages\ultralytics\engine\trainer.py", line 322, in _do_train
self._setup_train(world_size)
File "D:\anaconda3\envs\yolov8\lib\site-packages\ultralytics\engine\trainer.py", line 267, in _setup_train
dist.broadcast(self.amp, src=0) # broadcast the tensor from rank 0 to all other ranks (returns None)
File "D:\anaconda3\envs\yolov8\lib\site-packages\torch\distributed\distributed_c10d.py", line 1167, in broadcast
work.wait()
RuntimeError: Invalid scalar type
WARNING:torch.distributed.elastic.multiprocessing.api:Sending process 13432 closing signal CTRL_C_EVENT
Traceback (most recent call last):
File "C:\Users\yoimiya\AppData\Roaming\Ultralytics\DDP\_temp_g62aeueg1866284070656.py", line 13, in <module>
results = trainer.train()
File "D:\anaconda3\envs\yolov8\lib\site-packages\ultralytics\engine\trainer.py", line 207, in train
self._do_train(world_size)
File "D:\anaconda3\envs\yolov8\lib\site-packages\ultralytics\engine\trainer.py", line 322, in _do_train
self._setup_train(world_size)
File "D:\anaconda3\envs\yolov8\lib\site-packages\ultralytics\engine\trainer.py", line 264, in _setup_train
self.amp = torch.tensor(check_amp(self.model), device=self.device)
File "D:\anaconda3\envs\yolov8\lib\site-packages\ultralytics\utils\checks.py", line 692, in check_amp
assert amp_allclose(YOLO("yolo11n.pt"), im)
File "D:\anaconda3\envs\yolov8\lib\site-packages\ultralytics\utils\checks.py", line 679, in amp_allclose
a = m(batch, imgsz=imgsz, device=device, verbose=False)[0].boxes.data # FP32 inference
File "D:\anaconda3\envs\yolov8\lib\site-packages\ultralytics\engine\model.py", line 179, in __call__
return self.predict(source, stream, **kwargs)
File "D:\anaconda3\envs\yolov8\lib\site-packages\ultralytics\engine\model.py", line 557, in predict
return self.predictor.predict_cli(source=source) if is_cli else self.predictor(source=source, stream=stream)
File "D:\anaconda3\envs\yolov8\lib\site-packages\ultralytics\engine\predictor.py", line 173, in __call__
return list(self.stream_inference(source, model, *args, **kwargs)) # merge list of Result into one
File "D:\anaconda3\envs\yolov8\lib\site-packages\torch\autograd\grad_mode.py", line 45, in generator_context
response = gen.send(None)
File "D:\anaconda3\envs\yolov8\lib\site-packages\ultralytics\engine\predictor.py", line 248, in stream_inference
self.run_callbacks("on_predict_start")
File "D:\anaconda3\envs\yolov8\lib\site-packages\ultralytics\engine\predictor.py", line 404, in run_callbacks
callback(self)
File "D:\anaconda3\envs\yolov8\lib\site-packages\ultralytics\utils\callbacks\hub.py", line 90, in on_predict_start
events(predictor.args)
File "D:\anaconda3\envs\yolov8\lib\site-packages\ultralytics\hub\utils.py", line 238, in __call__
smart_request("post", self.url, json=data, retry=0, verbose=False)
File "D:\anaconda3\envs\yolov8\lib\site-packages\ultralytics\hub\utils.py", line 165, in smart_request
threading.Thread(target=func, args=args, kwargs=kwargs, daemon=True).start()
File "D:\anaconda3\envs\yolov8\lib\threading.py", line 857, in start
self._started.wait()
File "D:\anaconda3\envs\yolov8\lib\threading.py", line 558, in wait
signaled = self._cond.wait(timeout)
File "D:\anaconda3\envs\yolov8\lib\threading.py", line 302, in wait
waiter.acquire()
KeyboardInterrupt
WARNING:torch.distributed.elastic.agent.server.api:Received 2 death signal, shutting down workers
ERROR:torch.distributed.elastic.multiprocessing.errors.error_handler:{
"message": {
"message": "SignalException: Process 2768 got signal: 2",
"extraInfo": {
"py_callstack": "Traceback (most recent call last):\n File \"D:\\anaconda3\\envs\\yolov8\\lib\\site-packages\\torch\\distributed\\elastic\\multiprocessing\\errors\\__init__.py\", line 345, in wrapper\n return f(*args, **kwargs)\n File \"D:\\anaconda3\\envs\\yolov8\\lib\\site-packages\\torch\\distributed\\run.py\", line 719, in main\n run(args)\n File \"D:\\anaconda3\\envs\\yolov8\\lib\\site-packages\\torch\\distributed\\run.py\", line 710, in run\n elastic_launch(\n File \"D:\\anaconda3\\envs\\yolov8\\lib\\site-packages\\torch\\distributed\\launcher\\api.py\", line 131, in __call__\n return launch_agent(self._config, self._entrypoint, list(args))\n File \"D:\\anaconda3\\envs\\yolov8\\lib\\site-packages\\torch\\distributed\\launcher\\api.py\", line 252, in launch_agent\n result = agent.run()\n File \"D:\\anaconda3\\envs\\yolov8\\lib\\site-packages\\torch\\distributed\\elastic\\metrics\\api.py\", line 125, in wrapper\n result = f(*args, **kwargs)\n File \"D:\\anaconda3\\envs\\yolov8\\lib\\site-packages\\torch\\distributed\\elastic\\agent\\server\\api.py\", line 709, in run\n result = self._invoke_run(role)\n File \"D:\\anaconda3\\envs\\yolov8\\lib\\site-packages\\torch\\distributed\\elastic\\agent\\server\\api.py\", line 844, in _invoke_run\n run_result = self._monitor_workers(self._worker_group)\n File \"D:\\anaconda3\\envs\\yolov8\\lib\\site-packages\\torch\\distributed\\elastic\\metrics\\api.py\", line 125, in wrapper\n result = f(*args, **kwargs)\n File \"D:\\anaconda3\\envs\\yolov8\\lib\\site-packages\\torch\\distributed\\elastic\\agent\\server\\local_elastic_agent.py\", line 207, in _monitor_workers\n result = self._pcontext.wait(0)\n File \"D:\\anaconda3\\envs\\yolov8\\lib\\site-packages\\torch\\distributed\\elastic\\multiprocessing\\api.py\", line 287, in wait\n return self._poll()\n File \"D:\\anaconda3\\envs\\yolov8\\lib\\site-packages\\torch\\distributed\\elastic\\multiprocessing\\api.py\", line 676, in _poll\n self.close() # terminate all running procs\n File \"D:\\anaconda3\\envs\\yolov8\\lib\\site-packages\\torch\\distributed\\elastic\\multiprocessing\\api.py\", line 330, in close\n self._close(death_sig=death_sig, timeout=timeout)\n File \"D:\\anaconda3\\envs\\yolov8\\lib\\site-packages\\torch\\distributed\\elastic\\multiprocessing\\api.py\", line 720, in _close\n handler.proc.wait(time_to_wait)\n File \"D:\\anaconda3\\envs\\yolov8\\lib\\subprocess.py\", line 1083, in wait\n return self._wait(timeout=timeout)\n File \"D:\\anaconda3\\envs\\yolov8\\lib\\subprocess.py\", line 1377, in _wait\n result = _winapi.WaitForSingleObject(self._handle,\n File \"D:\\anaconda3\\envs\\yolov8\\lib\\site-packages\\torch\\distributed\\elastic\\multiprocessing\\api.py\", line 60, in _terminate_process_handler\n raise SignalException(f\"Process {os.getpid()} got signal: {sigval}\", sigval=sigval)\ntorch.distributed.elastic.multiprocessing.api.SignalException: Process 2768 got signal: 2\n",
"timestamp": "1766632988"
}
}
}
Traceback (most recent call last):
File "D:\anaconda3\envs\yolov8\lib\runpy.py", line 194, in _run_module_as_main
return _run_code(code, main_globals, None,
File "D:\anaconda3\envs\yolov8\lib\runpy.py", line 87, in _run_code
exec(code, run_globals)
File "D:\anaconda3\envs\yolov8\lib\site-packages\torch\distributed\run.py", line 723, in <module>
main()
File "D:\anaconda3\envs\yolov8\lib\site-packages\torch\distributed\elastic\multiprocessing\errors\__init__.py", line 345, in wrapper
return f(*args, **kwargs)
File "D:\anaconda3\envs\yolov8\lib\site-packages\torch\distributed\run.py", line 719, in main
run(args)
File "D:\anaconda3\envs\yolov8\lib\site-packages\torch\distributed\run.py", line 710, in run
elastic_launch(
File "D:\anaconda3\envs\yolov8\lib\site-packages\torch\distributed\launcher\api.py", line 131, in __call__
return launch_agent(self._config, self._entrypoint, list(args))
File "D:\anaconda3\envs\yolov8\lib\site-packages\torch\distributed\launcher\api.py", line 252, in launch_agent
result = agent.run()
File "D:\anaconda3\envs\yolov8\lib\site-packages\torch\distributed\elastic\metrics\api.py", line 125, in wrapper
result = f(*args, **kwargs)
File "D:\anaconda3\envs\yolov8\lib\site-packages\torch\distributed\elastic\agent\server\api.py", line 709, in run
result = self._invoke_run(role)
File "D:\anaconda3\envs\yolov8\lib\site-packages\torch\distributed\elastic\agent\server\api.py", line 844, in _invoke_run
run_result = self._monitor_workers(self._worker_group)
File "D:\anaconda3\envs\yolov8\lib\site-packages\torch\distributed\elastic\metrics\api.py", line 125, in wrapper
result = f(*args, **kwargs)
File "D:\anaconda3\envs\yolov8\lib\site-packages\torch\distributed\elastic\agent\server\local_elastic_agent.py", line 207, in _monitor_workers
result = self._pcontext.wait(0)
File "D:\anaconda3\envs\yolov8\lib\site-packages\torch\distributed\elastic\multiprocessing\api.py", line 287, in wait
return self._poll()
File "D:\anaconda3\envs\yolov8\lib\site-packages\torch\distributed\elastic\multiprocessing\api.py", line 676, in _poll
self.close() # terminate all running procs
File "D:\anaconda3\envs\yolov8\lib\site-packages\torch\distributed\elastic\multiprocessing\api.py", line 330, in close
self._close(death_sig=death_sig, timeout=timeout)
File "D:\anaconda3\envs\yolov8\lib\site-packages\torch\distributed\elastic\multiprocessing\api.py", line 720, in _close
handler.proc.wait(time_to_wait)
File "D:\anaconda3\envs\yolov8\lib\subprocess.py", line 1083, in wait
return self._wait(timeout=timeout)
File "D:\anaconda3\envs\yolov8\lib\subprocess.py", line 1377, in _wait
result = _winapi.WaitForSingleObject(self._handle,
File "D:\anaconda3\envs\yolov8\lib\site-packages\torch\distributed\elastic\multiprocessing\api.py", line 60, in _terminate_process_handler
raise SignalException(f"Process {os.getpid()} got signal: {sigval}", sigval=sigval)
torch.distributed.elastic.multiprocessing.api.SignalException: Process 2768 got signal: 2
Traceback (most recent call last):
File "E:\pycharmProject\logo_detect\yolo-detect-train-and-val.py", line 61, in <module>
model.train(task="detect",
File "D:\anaconda3\envs\yolov8\lib\site-packages\ultralytics\engine\model.py", line 805, in train
self.trainer.train()
File "D:\anaconda3\envs\yolov8\lib\site-packages\ultralytics\engine\trainer.py", line 200, in train
subprocess.run(cmd, check=True)
File "D:\anaconda3\envs\yolov8\lib\subprocess.py", line 495, in run
stdout, stderr = process.communicate(input, timeout=timeout)
File "D:\anaconda3\envs\yolov8\lib\subprocess.py", line 1020, in communicate
self.wait()
File "D:\anaconda3\envs\yolov8\lib\subprocess.py", line 1083, in wait
return self._wait(timeout=timeout)
File "D:\anaconda3\envs\yolov8\lib\subprocess.py", line 1377, in _wait
result = _winapi.WaitForSingleObject(self._handle,
KeyboardInterrupt
Process finished with exit code -1073741510 (0xC000013A: interrupted by Ctrl+C)
最新发布