C:\Users\00\.conda\envs\Anaconda\Lib\site-packages\torch\optim\lr_scheduler.py:62: UserWarning: The verbose parameter is deprecated. Please use get_last_lr() to access the learning rate.
warnings.warn(
开始训练...
训练轮次 1/15: 0%| | 0/573 [00:00<?, ?it/s]
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
Cell In[3], line 91
88 optimizer.zero_grad()
90 # 前向传播
---> 91 outputs = model(inputs)
92 loss = criterion(outputs.view(-1, 2), labels.view(-1))
94 # 反向传播
File ~\.conda\envs\Anaconda\Lib\site-packages\torch\nn\modules\module.py:1736, in Module._wrapped_call_impl(self, *args, **kwargs)
1734 return self._compiled_call_impl(*args, **kwargs) # type: ignore[misc]
1735 else:
-> 1736 return self._call_impl(*args, **kwargs)
File ~\.conda\envs\Anaconda\Lib\site-packages\torch\nn\modules\module.py:1747, in Module._call_impl(self, *args, **kwargs)
1742 # If we don't have any hooks, we want to skip the rest of the logic in
1743 # this function, and just call forward.
1744 if not (self._backward_hooks or self._backward_pre_hooks or self._forward_hooks or self._forward_pre_hooks
1745 or _global_backward_pre_hooks or _global_backward_hooks
1746 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1747 return forward_call(*args, **kwargs)
1749 result = None
1750 called_always_called_hooks = set()
Cell In[2], line 31, in EfficientLSTM.forward(self, x)
29 def forward(self, x):
30 # 嵌入
---> 31 x = self.embedding(x)
33 # LSTM
34 lstm_out, _ = self.lstm(x)
File ~\.conda\envs\Anaconda\Lib\site-packages\torch\nn\modules\module.py:1736, in Module._wrapped_call_impl(self, *args, **kwargs)
1734 return self._compiled_call_impl(*args, **kwargs) # type: ignore[misc]
1735 else:
-> 1736 return self._call_impl(*args, **kwargs)
File ~\.conda\envs\Anaconda\Lib\site-packages\torch\nn\modules\module.py:1747, in Module._call_impl(self, *args, **kwargs)
1742 # If we don't have any hooks, we want to skip the rest of the logic in
1743 # this function, and just call forward.
1744 if not (self._backward_hooks or self._backward_pre_hooks or self._forward_hooks or self._forward_pre_hooks
1745 or _global_backward_pre_hooks or _global_backward_hooks
1746 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1747 return forward_call(*args, **kwargs)
1749 result = None
1750 called_always_called_hooks = set()
File ~\.conda\envs\Anaconda\Lib\site-packages\torch\nn\modules\sparse.py:190, in Embedding.forward(self, input)
189 def forward(self, input: Tensor) -> Tensor:
--> 190 return F.embedding(
191 input,
192 self.weight,
193 self.padding_idx,
194 self.max_norm,
195 self.norm_type,
196 self.scale_grad_by_freq,
197 self.sparse,
198 )
File ~\.conda\envs\Anaconda\Lib\site-packages\torch\nn\functional.py:2551, in embedding(input, weight, padding_idx, max_norm, norm_type, scale_grad_by_freq, sparse)
2545 # Note [embedding_renorm set_grad_enabled]
2546 # XXX: equivalent to
2547 # with torch.no_grad():
2548 # torch.embedding_renorm_
2549 # remove once script supports set_grad_enabled
2550 _no_grad_embedding_renorm_(weight, input, max_norm, norm_type)
-> 2551 return torch.embedding(weight, input, padding_idx, scale_grad_by_freq, sparse)
TypeError: embedding(): argument 'indices' (position 2) must be Tensor, not list.

编译与训练阶段