paddle作业2-4

题1:

#作业2-4
import paddle
from paddle.nn import Conv2D, Linear, MaxPool2D
import numpy as np
import paddle.nn.functional as F
class Net(paddle.nn.Layer):
    """Small two-stage CNN used to observe intermediate feature-map shapes.

    Pipeline: Conv2D(1->3, k=3) -> MaxPool2D(2,2) -> Conv2D(3->10, k=3)
    -> MaxPool2D(2,2).  After every stage the tensor shape and contents
    are printed, so the effect of an unpadded 3x3 convolution (spatial
    size shrinks by 2) and of 2x2 pooling (spatial size halves) is visible.
    """

    def __init__(self):
        super().__init__()
        # Unpadded 3x3 convolutions: each reduces H and W by 2.
        self.conv1 = Conv2D(in_channels=1, out_channels=3, kernel_size=3)
        self.conv2 = Conv2D(in_channels=3, out_channels=10, kernel_size=3)
        # A pooling layer has no parameters, so one instance is safely
        # reused after both convolutions.
        self.maxpool = MaxPool2D(kernel_size=2, stride=2)

    def forward(self, x):
        """Run the stack, printing shape and content after every stage.

        Args:
            x: input tensor; the demo below feeds shape (1, 1, 64, 64)
               float32 — any (N, 1, H, W) input with sufficiently large
               H, W works.

        Returns:
            The final 10-channel pooled feature map
            (for a 64x64 input: shape [N, 10, 14, 14]).
        """
        x = self.conv1(x)
        print("conv1--shape:{}".format(x.shape))
        print("content:", x)
        x = self.maxpool(x)
        print("conv1-maxpool--shape:{}".format(x.shape))
        print("content:", x)
        x = self.conv2(x)
        print("conv2--shape:{}".format(x.shape))
        print("content:", x)
        x = self.maxpool(x)
        print("conv2-maxpool--shape:{}".format(x.shape))
        print("content:", x)
        return x

# Drive the network once with a random single-channel 64x64 "image" so the
# per-stage shape printouts are produced.
sample = paddle.to_tensor(np.random.rand(1, 1, 64, 64)).astype('float32')
model = Net()
model(sample)

输出:
conv1--shape:[1, 3, 62, 62]
content: Tensor(shape=[1, 3, 62, 62], dtype=float32, place=CUDAPlace(0), stop_gradient=False,
       [[[[-1.03177869, -0.77748996, -0.37205794, ..., -1.16914952, -1.03177857, -0.58381915],
          [-0.67789686, -1.58549762, -0.97597510, ..., -0.56963080, -1.82829785, -1.42108345],
          [ 0.25646657, -0.19316070, -0.06493981, ..., -1.43284070, -1.58845973, -1.06161082],
          ...,
          [-1.06657410, -0.12878723, -0.89841843, ...,  0.47988266, -0.54047906, -1.42637694],
          [-0.84822047,  0.06302693, -0.20786586, ..., -0.75426561, -0.53132033, -0.93996078],
          [-1.38931227,  0.21905223, -0.71452707, ..., -1.00175893, -1.16727090, -1.04180014]],

         [[-0.34005663,  1.01721966,  0.46573788, ...,  0.36177707,  0.49334481,  0.93489450],
          [-0.16840670, -0.19559138, -0.35780585, ..., -0.10786653, -0.10211647,  0.27730283],
          [ 0.47618026, -0.57042152,  0.33241624, ..., -0.68744487, -0.44638035,  0.59348673],
          ...,
          [ 0.43587872,  0.37292024, -0.55591142, ...,  0.35407147, -0.51093435,  0.15134723],
          [ 0.77757734, -0.19600271, -0.73693144, ...,  0.06336728, -0.21787606,  0.04249654],
          [ 0.45646501,  0.97877854, -0.10154613, ..., -0.20721525,  0.36874118, -0.34399495]],

         [[ 0.03909174, -0.24251559,  0.32627419, ..., -0.35300124,  0.05717748, -0.15156426],
          [-0.75969607, -0.72273302, -0.21693535, ...,  0.16040878, -0.60953814, -0.76194024],
          [ 0.24897744,  0.20954224, -0.35446906, ..., -0.53350484, -0.65325189, -0.71377838],
          ...,
          [-0.64520627, -0.13371752, -0.00226474, ...,  0.43722275,  0.04000597, -0.68827093],
          [-0.47013941,  0.06369685,  0.07199863, ...,  0.15542001,  0.02171265, -0.53287303],
          [-0.38482872,  0.12792701,  0.20928183, ..., -0.41419211, -0.34249121, -0.37504444]]]])
conv1-maxpool--shape:[1, 3, 31, 31]
content: Tensor(shape=[1, 3, 31, 31], dtype=float32, place=CUDAPlace(0), stop_gradient=False,
       [[[[-0.67789686, -0.37205794, -0.62262011, ...,  0.18379803,  0.14368679, -0.58381915],
          [ 0.25646657, -0.06493981, -0.66757733, ...,  0.08953179,  0.10545183, -0.91407675],
          [ 0.16089660,  0.06359247, -0.30854785, ...,  0.04017820, -0.31908986, -0.17216061],
          ...,
          [-0.38638410,  0.43577391, -0.52118969, ..., -0.50289881, -0.37256008, -0.08074930],
          [-0.12878723, -0.89841843, -0.11317723, ..., -0.04990890,  0.47988266, -0.40975910],
          [ 0.21905223, -0.11454697, -0.56476015, ..., -0.02640892, -0.06468204, -0.53132033]],

         [[ 1.01721966,  0.50235915,  0.48203111, ...,  1.17043245,  0.36177707,  0.93489450],
          [ 0.47618026,  0.49586532,  0.79181617, ..., -0.04261628,  0.30811083,  0.59348673],
          [ 0.99659878,  0.19138895,  0.81142896, ...,  0.85095757,  0.70458364,  0.81303638],
          ...,
          [ 0.68253225,  0.43649405,  0.30807868, ...,  0.93992198,  0.08084880,  0.42516100],
          [ 0.43587872,  0.00076535,  0.90764409, ...,  0.52016580,  0.75320625,  0.33648261],
          [ 0.97877854,  1.03922081,  0.63778490, ...,  0.31145221,  0.45375410,  0.36874118]],

         [[ 0.03909174,  0.32627419, -0.11361890, ...,  0.28261650,  0.16040878,  0.05717748],
          [ 0.24897744,  0.18517545, -0.32565966, ..., -0.11045824,  0.06884519, -0.30248123],
          [ 0.24063696,  0.01648869, -0.21375470, ...,  0.18693715, -0.00632723, -0.00128613],
          ...,
          [-0.14140783,  0.34060207, -0.04902748, ..., -0.13388914, -0.24003626, -0.10710047],
          [-0.13371752, -0.00226474,  0.00684804, ...,  0.11446057,  0.43722275,  0.39217085],
          [ 0.12792701,  0.20928183,  0.22429550, ...,  0.40111300,  0.15542001,  0.02171265]]]])
conv2--shape:[1, 10, 29, 29]
content: Tensor(shape=[1, 10, 29, 29], dtype=float32, place=CUDAPlace(0), stop_gradient=False,
       [[[[ 0.44623196,  0.62388515,  0.17902687, ...,  0.72025532,  0.53890043,  0.24215084],
          [ 1.18032026,  0.27674440, -0.19034995, ...,  0.23119810, -0.05347760,  1.21775162],
          [ 0.63521343,  0.55254978, -0.01318052, ...,  0.48960438,  0.47024673,  0.33397472],
          ...,
          [ 0.30813912,  0.09670782,  0.40232271, ..., -0.10699759,  0.89743155, -0.19544327],
          [-0.10760272,  1.14544046,  0.04281932, ...,  0.48013151,  0.30368382,  0.67060882],
          [ 0.52567440,  0.11980435,  0.17403756, ...,  0.73117226, -0.06727744, -0.12170704]],

         [[ 1.21443903,  0.51579273,  0.85561645, ...,  0.78079736,  0.41128790,  0.91624510],
          [ 0.65348673,  0.45180458,  0.52336526, ...,  0.11084333,  0.14484397, -0.34574771],
          [ 0.59918010,  0.56237960,  0.84820282, ...,  0.83417332,  0.47399926,  0.71474850],
          ...,
          [ 0.32853597, -0.23827338,  0.12160541, ...,  0.79289854,  0.47846511,  0.79586089],
          [ 1.23849845,  0.18347394,  0.56944412, ...,  0.58740354, -0.08288956,  0.29207882],
          [ 0.07357436,  1.00488615,  0.61807150, ...,  0.32044291,  0.12979138,  0.83888710]],

         [[ 0.61238801, -0.20202814,  0.38081983, ...,  0.34322435,  0.65360999,  0.17780209],
          [ 0.08400173,  0.23523483,  0.57989526, ...,  0.26658696,  0.19919813, -0.53452939],
          [-0.00422187,  0.05944386,  0.65046120, ..., -0.08986238
          ... (output truncated here in the original capture)
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值