Error: spawn.py, line 134, in _check_not_importing_main

This post covers a RuntimeError raised when Python multiprocessing starts new processes: an attempt is made to start a new process before the current process has finished its bootstrapping phase. The cause is that the child processes are not started with fork (Windows uses the spawn start method, so every child re-imports the main module) and the main module lacks the proper if __name__ == '__main__': idiom, so the process-starting code runs again during that re-import. The fix is to move that code under the main guard.

Error message:

Python\Python38\lib\multiprocessing\spawn.py", line 134, in _check_not_importing_main
    raise RuntimeError('''
RuntimeError:
        An attempt has been made to start a new process before the
        current process has finished its bootstrapping phase.

        This probably means that you are not using fork to start your
        child processes and you have forgotten to use the proper idiom
        in the main module:

            if __name__ == '__main__':
                freeze_support()
                ...

        The "freeze_support()" line can be omitted if the program
        is not going to be frozen to produce an executable.
 

Original code:

# ... some code omitted ...
results = model.train(data='coco128.yaml', epochs=3)
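
Why this fails: the training call above runs at module top level, so every child process that spawn creates re-executes it while importing the script and tries to start processes of its own. A minimal standalone sketch (not from the original post, plain multiprocessing only) that reproduces the same RuntimeError on Windows:

from multiprocessing import Pool

def square(x):
    return x * x

# Creating the Pool at module top level means every spawned child re-executes
# this line while importing the script, so each worker raises the RuntimeError
# shown above instead of doing any work.
pool = Pool(2)
print(pool.map(square, range(4)))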

Solution:

Move the relevant code under the if __name__ == '__main__': guard, for example:

def test():
    # ... some code omitted ...
    results = model.train(data='coco128.yaml', epochs=3)
    # ...

if __name__ == '__main__':
    test()
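
For completeness, a fuller runnable sketch of the fixed script. It assumes the training call comes from the ultralytics YOLO package; the omitted parts of the original code are unknown, so the model file name and the main() helper below are illustrative only:

from multiprocessing import freeze_support

from ultralytics import YOLO  # assumed source of model.train() in this post


def main():
    # Illustrative model construction; the original post omits this part.
    model = YOLO('yolov8n.pt')
    results = model.train(data='coco128.yaml', epochs=3)
    return results


if __name__ == '__main__':
    # freeze_support() is only needed if the script will be frozen into an
    # executable (e.g. with PyInstaller); it is harmless otherwise.
    freeze_support()
    main()

With this layout, the child processes that spawn creates can import the script without re-running the training call, so the bootstrapping check no longer fires.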
