Python: cannot import name 'downsample'

This post details the code updates needed in the Lasagne library, modifying the pooling call in pool.py and the convolution call in conv.py, so that the code stays compatible with recent Theano releases and the "cannot import name downsample" error goes away.

Edit lasagne/layers/pool.py in your Python site-packages directory

# Line 6: the old import no longer exists in recent Theano releases
# from theano.tensor.signal import downsample
from theano.tensor.signal.pool import pool_2d
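If the same Lasagne install has to run against both old and new Theano, a try/except import (a sketch, not part of the minimal fix above) keeps both code paths importable:

# Possible compatibility shim (sketch): prefer the new pool module, fall back to downsample
try:
    from theano.tensor.signal.pool import pool_2d                        # Theano >= 0.9
except ImportError:
    from theano.tensor.signal.downsample import max_pool_2d as pool_2d   # older Theano
# Note: the keyword names also changed (ds/st/padding -> ws/stride/pad),
# so the call site below must match whichever version is actually installed.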

# Line 236: replace the old downsample.max_pool_2d call inside get_output_for
    def get_output_for(self, input, **kwargs):
        """
        pooled = downsample.max_pool_2d(input,
                                        ds=self.pool_size,
                                        st=self.stride,
                                        ignore_border=self.ignore_border,
                                        padding=self.pad,
                                        mode=self.mode,
                                        )
        """

        pooled = pool_2d(input,
                         ws=self.pool_size,
                         stride=self.stride,
                         ignore_border=self.ignore_border,
                         pad=self.pad,
                         mode=self.mode,
                         )
        return pooled
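After saving pool.py, a quick smoke test (a sketch; it assumes Lasagne and a recent Theano are installed and only uses standard Lasagne layers) confirms the import error is gone and pooling runs:

import numpy as np
import theano
import theano.tensor as T
from lasagne.layers import InputLayer, MaxPool2DLayer, get_output

x = T.tensor4('x')
l_in = InputLayer((None, 1, 8, 8), input_var=x)
l_pool = MaxPool2DLayer(l_in, pool_size=(2, 2))   # goes through the patched pool.py
f = theano.function([x], get_output(l_pool))
out = f(np.random.rand(1, 1, 8, 8).astype(theano.config.floatX))
print(out.shape)   # expected: (1, 1, 4, 4)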

Edit lasagne/layers/conv.py in your Python site-packages directory

# Line 489: the old call passed image_shape, which was renamed to input_shape in newer Theano
            """
            conved = self.convolution(input, self.W, subsample=self.stride,
                                      image_shape=input_shape,
                                      filter_shape=self.get_W_shape(),
                                      border_mode=border_mode)

            """
            conved = self.convolution(input, self.W, subsample=self.stride,
                                      input_shape=input_shape,
                                      filter_shape=self.get_W_shape(),
                                      border_mode=border_mode)
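The keyword rename mirrors the underlying Theano change: theano.tensor.nnet.conv2d accepts input_shape where the old conv interface used image_shape. A minimal direct call (a sketch, independent of Lasagne) looks like this:

import numpy as np
import theano
import theano.tensor as T
from theano.tensor.nnet import conv2d

x = T.tensor4('x')
W = theano.shared(np.random.randn(4, 1, 3, 3).astype(theano.config.floatX))
y = conv2d(x, W,
           input_shape=(None, 1, 8, 8),    # was image_shape in the old conv interface
           filter_shape=(4, 1, 3, 3),
           border_mode='valid',
           subsample=(1, 1))
f = theano.function([x], y)
print(f(np.random.rand(1, 1, 8, 8).astype(theano.config.floatX)).shape)   # (1, 4, 6, 6)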