paddle复现pytorch踩坑(二):paddleAPI对照表(百度论文复现营活动提供)

本文提供了详细的PaddlePaddle与PyTorch API映射表,帮助用户在两个框架间进行无缝迁移。涵盖了从基本操作到深度学习模块的转换,包括激活函数、池化、卷积、归一化、损失函数等,助力开发者理解和复现论文实验。

百度提供了paddlepaddle对应pytorch的API对照表
环境:paddlepaddle1.8
百度论文复现营活动:论文复现营

Pytorch的API名称 Paddle的API名称
torch.abs fluid.layers.abs(x, name=None)
torch.acos fluid.layers.acos(x, name=None)
torch.add fluid.layers.elementwise_add(x, y, axis=-1, act=None, name=None)
torch.allclose fluid.layers.allclose
torch.arange fluid.layers.range(start, end, step, dtype)
torch.argmax fluid.layers.argmax
torch.argmin fluid.layers.argmin
torch.argsort fluid.layers.argsort(input, axis=-1, descending=False, name=None)
torch.as_strided fluid.layers.strided_slice(input, axes, starts, ends, strides)
torch.asin fluid.layers.asin(x, name=None)
torch.atan fluid.layers.atan(x, name=None)
torch.cat fluid.layers.concat(input, axis=0, name=None)
torch.ceil fluid.layers.ceil
torch.chunk fluid.layers.unstack(x, axis=0, num=None)
torch.cos fluid.layers.cos
torch.cumsum fluid.layers.cumsum(x, axis=None, exclusive=None, reverse=None)
torch.diag fluid.layers.diag(diagonal)
torch.diag_embed fluid.layers.diag_embed
torch.div fluid.layers.elementwise_div(x, y, axis=-1, act=None, name=None)
torch.eq fluid.layers.equal(x, y, cond=None)
torch.equal fluid.layers.elementwise_equal(x, y, name=None)
torch.equal fluid.layers.equal(x, y, cond=None)
torch.erf fluid.layers.erf(x)
torch.exp fluid.layers.exp(x, name=None)
torch.eye fluid.layers.eye(num_rows, num_columns=None, batch_shape=None, dtype='float32')
torch.flatten fluid.layers.flatten
torch.flip fluid.layers.flip
torch.floor fluid.layers.floor(x, name=None)
torch.fmod fluid.layers.elementwise_mod(x, y, axis=-1, act=None, name=None)
torch.from_numpy fluid.dygraph.to_variable(value, block=None, name=None)
torch.full fluid.layers.full(shape, fill_value, out=None, dtype=None, device=None, stop_gradient=True, name=None)
torch.full_like fluid.layers.full_like
torch.gather fluid.layers.gather(input, axis, index, out=None, sparse_grad=False, name=None)
torch.ge fluid.layers.greater_equal
torch.gt fluid.layers.greater_than
torch.le fluid.layers.less_equal
torch.linspace fluid.layers.linspace(start, stop, num, dtype)
torch.log fluid.layers.log(x, name=None)
torch.logical_not fluid.layers.logical_not(x, out=None, name=None)
torch.logical_xor fluid.layers.logical_xor(x, y, out=None, name=None)
torch.lt fluid.layers.less_than
torch.masked_select fluid.layers.masked_select
torch.matmul fluid.layers.matmul(x, y, transpose_x=False, transpose_y=False, alpha=1.0, name=None)
torch.max fluid.layers.max(input, axis=None, keepdim=False, out=None, name=None) -> (Tensor, LongTensor)
torch.mean fluid.layers.reduce_mean(input, dim=None, keep_dim=False, name=None)
torch.min fluid.layers.min
torch.mm fluid.layers.mm
torch.mul fluid.layers.mul
torch.mv fluid.layers.matmul(x, y, transpose_x=False, transpose_y=False, alpha=1.0, name=None)
torch.ne fluid.layers.not_equal
torch.nn.AdaptiveAvgPool2d fluid.layers.adaptive_pool2d
torch.nn.AdaptiveAvgPool3d fluid.layers.adaptive_pool3d
torch.nn.AdaptiveMaxPool2d fluid.layers.adaptive_pool2d
torch.nn.AdaptiveMaxPool3d fluid.layers.adaptive_pool3d
torch.nn.add_module fluid.dygraph.Layer.add_sublayer(name, sublayer)
torch.nn.append fluid.dygraph.LayerList.append(sublayer)
torch.nn.append fluid.dygraph.ParameterList.append(parameter)
torch.nn.AvgPool2d fluid.layers.pool2d(pool_type为'avg')
torch.nn.AvgPool3d fluid.layers.pool3d(pool_type为'avg')
torch.nn.BatchNorm1d fluid.layers.batch_norm(input, act=None, is_test=False, momentum=0.9, epsilon=1e-05, param_attr=None, bias_attr=None, data_layout='NCHW', in_place=False, name=None, moving_mean_name=None, moving_variance_name=None, do_model_average_for_mean_and_var=False, use_global_stats=False)
torch.nn.BatchNorm2d fluid.layers.batch_norm(input, act=None, is_test=False, momentum=0.9, epsilon=1e-05, param_attr=None, bias_attr=None, data_layout='NCHW', in_place=False, name=None, moving_mean_name=None, moving_variance_name=None, do_model_average_for_mean_and_var=False, use_global_stats=False)
torch.nn.BatchNorm3d fluid.layers.batch_norm(input, act=None, is_test=False, momentum=0.9, epsilon=1e-05, param_attr=None, bias_attr=None, data_layout='NCHW', in_place=False, name=None, moving_mean_name=None, moving_variance_name=None, do_model_average_for_mean_and_var=False, use_global_stats=False)
torch.nn.Bilinear fluid.layers.bilinear_tensor_product(x, y, size, act=None, name=None, param_attr=None, bias_attr=None)
torch.nn.ConstantPad1d fluid.layers.pad
torch.nn.ConstantPad2d fluid.layers.pad / fluid.layers.pad2d
torch.nn.ConstantPad3d
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值