What does super(LinearNet, self).__init__() mean?

This post provides a link to a CSDN blog article as a reference; although its content is not quoted here directly, it can be inferred to be an article covering this programming topic.

https://blog.youkuaiyun.com/zyh19980527/article/details/107206483/
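In short, super(LinearNet, self).__init__() invokes the constructor of the parent class (for example nn.Cell in MindSpore, or nn.Module in PyTorch), so that the base class can set up the internal machinery that registers parameters and sub-layers before the subclass defines its own attributes. In Python 3 the call can be shortened to super().__init__(). A minimal sketch, assuming a MindSpore nn.Cell with illustrative layer sizes:

import mindspore.nn as nn

class LinearNet(nn.Cell):
    def __init__(self):
        # Call the parent class (nn.Cell) constructor first, so the base
        # class can initialize the bookkeeping that tracks parameters and
        # sub-layers; skipping this call breaks the assignments below.
        # Equivalent Python 3 shorthand: super().__init__()
        super(LinearNet, self).__init__()
        self.flatten = nn.Flatten()
        self.dense = nn.Dense(784, 10)  # illustrative sizes

    def construct(self, x):
        x = self.flatten(x)
        return self.dense(x)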

ValueError                                Traceback (most recent call last)
Cell In[35], line 4
      2 accuracy=[0]*20
      3 for epoch in range(epochs):
----> 4     train_loop(lin_model,train_dataset)
      5     loss[epoch],accuracy[epoch]=test_loop(lin_model,test_dataset,loss_fn)
      6     print(f"epoch: {epoch+1}, loss: {loss[epoch]}, accuracy: {accuracy[epoch]}")

Cell In[32], line 6, in train_loop(model, dataset)
      4 # loss_total=0
      5 for batch,(data,label) in enumerate(dataset.create_tuple_iterator()):
----> 6     loss=train_step(data,label)

Cell In[30], line 2, in train_step(data, label)
      1 def train_step(data,label):
----> 2     (loss,_),grads=grad_fn(data,label)
      3     optimizer(grads)
      4     return loss

File D:\10_The_Programs\4_The_Codes\00_virtual_environment\DeepLearning\Lib\site-packages\mindspore\ops\composite\base.py:638, in _Grad.__call__.<locals>.after_grad(*args, **kwargs)
    637 def after_grad(*args, **kwargs):
--> 638     return grad_(fn_, weights)(*args, **kwargs)

File D:\10_The_Programs\4_The_Codes\00_virtual_environment\DeepLearning\Lib\site-packages\mindspore\common\api.py:187, in _wrap_func.<locals>.wrapper(*arg, **kwargs)
    185 @wraps(fn)
    186 def wrapper(*arg, **kwargs):
--> 187     results = fn(*arg, **kwargs)
    188     return _convert_python_data(results)

File D:\10_The_Programs\4_The_Codes\00_virtual_environment\DeepLearning\Lib\site-packages\mindspore\ops\composite\base.py:610, in _Grad.__call__.<locals>.after_grad(*args, **kwargs)
    608 @_wrap_func
    609 def after_grad(*args, **kwargs):
--> 610     run_args, res = self._pynative_forward_run(fn, grad_, weights, *args, **kwargs)
    611     if self.has_aux:
    612         out = _pynative_executor.grad_aux(fn, grad_, weights, grad_position, *run_args)

File D:\10_The_Programs\4_The_Codes\00_virtual_environment\DeepLearning\Lib\site-packages\mindspore\ops\composite\base.py:671, in _Grad._pynative_forward_run(self, fn, grad, weights, *args, **kwargs)
    669 _pynative_executor.set_grad_flag(True)
    670 _pynative_executor.new_graph(fn, *args, **kwargs)
--> 671 outputs = fn(*args, **kwargs)
    672 _pynative_executor.end_graph(fn, outputs, *args, **kwargs)
    673 run_forward = True

File D:\10_The_Programs\4_The_Codes\00_virtual_environment\DeepLearning\Lib\site-packages\mindspore\ops\composite\base.py:578, in _Grad.__call__.<locals>.aux_fn(*args, **kwargs)
    577 def aux_fn(*args, **kwargs):
--> 578     outputs = fn(*args, **kwargs)
    579     if not isinstance(outputs, tuple) or len(outputs) < 2:
    580         raise ValueError("When has_aux is True, origin fn requires more than one outputs.")

Cell In[28], line 2, in forward_fn(data, label)
      1 def forward_fn(data,label):
----> 2     logits=lin_model(data)
      3     loss=loss_fn(logits,label)
      4     return loss,logits

File D:\10_The_Programs\4_The_Codes\00_virtual_environment\DeepLearning\Lib\site-packages\mindspore\nn\cell.py:1355, in Cell.__call__(self, *args, **kwargs)
   1352 if not (self.requires_grad or self._dynamic_shape_inputs or self.mixed_precision_type):
   1353     if not (self._forward_pre_hook or self._forward_hook or self._backward_pre_hook or self._backward_hook or
   1354             self._shard_fn or self._recompute_cell or (self.has_bprop and _pynative_executor.requires_grad())):
-> 1355         return self.construct(*args, **kwargs)
   1357     return self._run_construct(*args, **kwargs)
   1359 return self._complex_call(*args, **kwargs)

Cell In[25], line 10, in linear_net.construct(self, x)
      8 def construct(self,x):
      9     x=self.flatten(x)
---> 10     x=self.dense1(x)
     11     x=self.dense2(x)
     12     return x

File D:\10_The_Programs\4_The_Codes\00_virtual_environment\DeepLearning\Lib\site-packages\mindspore\nn\cell.py:1355, in Cell.__call__(self, *args, **kwargs)
   1352 if not (self.requires_grad or self._dynamic_shape_inputs or self.mixed_precision_type):
   1353     if not (self._forward_pre_hook or self._forward_hook or self._backward_pre_hook or self._backward_hook or
   1354             self._shard_fn or self._recompute_cell or (self.has_bprop and _pynative_executor.requires_grad())):
-> 1355         return self.construct(*args, **kwargs)
   1357     return self._run_construct(*args, **kwargs)
   1359 return self._complex_call(*args, **kwargs)

File D:\10_The_Programs\4_The_Codes\00_virtual_environment\DeepLearning\Lib\site-packages\mindspore\nn\layer\basic.py:744, in Dense.construct(self, x)
    742 x = self.matmul(x, self.weight)
    743 if self.has_bias:
--> 744     x = self.bias_add(x, self.bias)
    745 if self.activation_flag:
    746     x = self.activation(x)

File D:\10_The_Programs\4_The_Codes\00_virtual_environment\DeepLearning\Lib\site-packages\mindspore\ops\auto_generate\gen_ops_prim.py:3859, in BiasAdd.__call__(self, input_x, bias)
   3858 def __call__(self, input_x, bias):
-> 3859     return super().__call__(input_x, bias, self.data_format)

File D:\10_The_Programs\4_The_Codes\00_virtual_environment\DeepLearning\Lib\site-packages\mindspore\ops\primitive.py:413, in Primitive.__call__(self, *args)
    411 if should_elim:
    412     return output
--> 413 return _run_op(self, self.name, args)

File D:\10_The_Programs\4_The_Codes\00_virtual_environment\DeepLearning\Lib\site-packages\mindspore\ops\primitive.py:1022, in _run_op(obj, op_name, args)
   1020 def _run_op(obj, op_name, args):
   1021     """Single op execution function supported by ge in PyNative mode."""
-> 1022     res = _pynative_executor.run_op_async(obj, op_name, args)
   1023     # Add for jit context.
   1024     if jit_context():

File D:\10_The_Programs\4_The_Codes\00_virtual_environment\DeepLearning\Lib\site-packages\mindspore\common\api.py:1638, in _PyNativeExecutor.run_op_async(self, *args)
   1628 def run_op_async(self, *args):
   1629     """
   1630     Run single op async.
   1631     (...)
   1636         StubNode, result of run op.
   1637     """
-> 1638     return self._executor.run_op_async(*args)

ValueError: For 'MatMul' the input dimensions must be equal, but got 'x1_col': 784 and 'x2_row': 100.

----------------------------------------------------
- C++ Call Stack: (For framework developers)
----------------------------------------------------
kins/agent-working-dir/workspace/executor0/mindspore/mindspore/ops/infer/ops_func_impl//matmul.cc:200 mindspore::ops::MatMulFuncImpl::InferShape

What does this error mean when it appears while training my neural network? The library I'm using is MindSpore.
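The final ValueError is a shape mismatch: the MatMul inside your first Dense layer received an input with 784 columns ('x1_col': 784, i.e. a flattened 28x28 image), while the layer's weight matrix expects 100-dimensional inputs ('x2_row': 100). In other words, dense1 was likely constructed with in_channels=100 even though the flattened data has 784 features. (The trace points at bias_add rather than matmul likely because PyNative dispatches ops asynchronously, as run_op_async in the trace suggests, so the MatMul shape error surfaces one op later.) A minimal sketch of the likely fix, assuming a 28x28 input, a hidden width of 100, and 10 output classes, all inferred from the trace rather than from your actual code:

import mindspore.nn as nn

class linear_net(nn.Cell):  # class name taken from the traceback
    def __init__(self):
        super(linear_net, self).__init__()
        self.flatten = nn.Flatten()
        # The flattened input has 784 features, so the first Dense layer
        # must be built with in_channels=784; 'x2_row': 100 in the error
        # indicates it was built for 100-dimensional inputs instead.
        self.dense1 = nn.Dense(784, 100)
        self.dense2 = nn.Dense(100, 10)  # 10 output classes, assumed

    def construct(self, x):
        x = self.flatten(x)
        x = self.dense1(x)
        x = self.dense2(x)
        return x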