PINN Learning (Part 2)

1. The Governing Equation

We use a simple partial differential equation, the one-dimensional viscous Burgers equation, as the example.

The function to be fitted is the solution u = u(t, x), which the network approximates directly from the input coordinates (t, x).
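Reading off the residual function f and the training data defined in the code below, the full problem is

$$u_t + u\,u_x - \frac{0.01}{\pi}\,u_{xx} = 0, \qquad x \in [-1, 1],\ t \in [0, 1],$$

$$u(0, x) = -\sin(\pi x), \qquad u(t, -1) = u(t, 1) = 0.$$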

2. Building the Model

Network definition:

class Net(nn.Module):
    def __init__(self, NL, NN):
        # NL: number of hidden layers
        # NN: number of neurons per hidden layer
        super(Net, self).__init__()
        self.input_layer = nn.Linear(2, NN)
        self.hidden_layer = nn.ModuleList([nn.Linear(NN, NN) for i in range(NL)])
        self.output_layer = nn.Linear(NN, 1)

    def forward(self, x):
        # x has shape (N, 2); column 0 is t, column 1 is x
        o = self.act(self.input_layer(x))
        for i, li in enumerate(self.hidden_layer):
            o = self.act(li(o))
        out = self.output_layer(o)
        return out

    def act(self, x):
        return torch.tanh(x)
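A quick shape check (a minimal sketch, not part of the original post; the sizes match the ones used further below): the network maps a batch of (t, x) pairs of shape (N, 2) to one value of u per row.

import torch

net = Net(NL=4, NN=30)   # 4 hidden layers of 30 neurons
tx = torch.rand(8, 2)    # 8 sample points; column 0 = t, column 1 = x
u = net(tx)
print(u.shape)           # torch.Size([8, 1])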

PDE residual definition:

def f(x):
    # x has shape (N, 2); column 0 is t, column 1 is x
    u = net(x)
    # First derivatives: one autograd call gives both du/dt and du/dx
    u_grad = torch.autograd.grad(u, x, grad_outputs=torch.ones_like(u),
                                 create_graph=True)[0]
    d_t = u_grad[:, 0].unsqueeze(-1)   # u_t
    d_x = u_grad[:, 1].unsqueeze(-1)   # u_x
    # Second derivative u_xx: differentiate u_x with respect to the inputs again
    u_xx = torch.autograd.grad(d_x, x, grad_outputs=torch.ones_like(d_x),
                               create_graph=True)[0][:, 1].unsqueeze(-1)

    w = 0.01 / np.pi   # viscosity nu
    # Burgers residual: u_t + u * u_x - nu * u_xx (zero for the exact solution)
    f = d_t + u * d_x - w * u_xx
    return f
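The slicing by column works because each input row stores a (t, x) pair. A small stand-alone check (an illustrative sketch, independent of the PINN code) confirms that torch.autograd.grad returns exactly these partial derivatives for a function whose derivatives are known in closed form:

import numpy as np
import torch

# u(t, x) = t * sin(pi * x)  =>  u_t = sin(pi * x),  u_x = pi * t * cos(pi * x)
tx = torch.rand(5, 2, requires_grad=True)   # column 0 = t, column 1 = x
t, x = tx[:, 0:1], tx[:, 1:2]
u = t * torch.sin(np.pi * x)
grad = torch.autograd.grad(u, tx, grad_outputs=torch.ones_like(u))[0]
u_t, u_x = grad[:, 0:1], grad[:, 1:2]
print(torch.allclose(u_t, torch.sin(np.pi * x)))               # True
print(torch.allclose(u_x, np.pi * t * torch.cos(np.pi * x)))   # True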

Full code:

import torch
import torch.nn as nn
import numpy as np
from matplotlib import cm

from torch.autograd import Variable
import matplotlib.pyplot as plt

class Net(nn.Module):
    def __init__(self, NL, NN):
        # NL: number of hidden layers
        # NN: number of neurons per hidden layer
        super(Net, self).__init__()
        self.input_layer = nn.Linear(2, NN)
        self.hidden_layer = nn.ModuleList([nn.Linear(NN, NN) for i in range(NL)])
        self.output_layer = nn.Linear(NN, 1)

    def forward(self, x):
        o = self.act(self.input_layer(x))
        for i, li in enumerate(self.hidden_layer):
            o = self.act(li(o))
        out = self.output_layer(o)
        return out

    def act(self, x):
        return torch.tanh(x)

net=Net(4,30)
mse_cost_function = torch.nn.MSELoss(reduction='mean') # Mean squared error
optimizer = torch.optim.Adam(net.parameters(),lr=1e-4)

# Define the PDE residual
def f(x):
    # x has shape (N, 2); column 0 is t, column 1 is x
    u = net(x)
    # First derivatives: one autograd call gives both du/dt and du/dx
    u_grad = torch.autograd.grad(u, x, grad_outputs=torch.ones_like(u),
                                 create_graph=True)[0]
    d_t = u_grad[:, 0].unsqueeze(-1)   # u_t
    d_x = u_grad[:, 1].unsqueeze(-1)   # u_x
    # Second derivative u_xx: differentiate u_x with respect to the inputs again
    u_xx = torch.autograd.grad(d_x, x, grad_outputs=torch.ones_like(d_x),
                               create_graph=True)[0][:, 1].unsqueeze(-1)

    w = 0.01 / np.pi   # viscosity nu
    # Burgers residual: u_t + u * u_x - nu * u_xx (zero for the exact solution)
    f = d_t + u * d_x - w * u_xx
    return f


# Initial condition: u(0, x) = -sin(pi * x)
# (despite the _bc names, these points lie on the t = 0 slice)
t_bc = np.zeros((2000,1))
x_bc = np.random.uniform(low=-1.0, high=1.0, size=(2000,1))
# target values of u at t = 0
u_bc = -np.sin(np.pi*x_bc)

# Boundary conditions: u(t, -1) = u(t, 1) = 0
# (the _in arrays sample the two spatial boundaries x = +1 and x = -1)
x_inr=np.ones((2000,1))
x_inl=-np.ones((2000,1))
t_in=np.random.uniform(low=0, high=1.0, size=(2000,1))
u_in= np.zeros((2000,1))
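
# The total loss below adds four equally weighted terms: the MSE on the initial
# condition, the MSE on each of the two spatial boundaries, and the MSE of the
# PDE residual at collocation points that are re-sampled every iteration.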

iterations = 10000
for epoch in range(iterations):
    optimizer.zero_grad()  # to make the gradients zero

    # Loss based on the initial condition (t = 0)
    pt_x_bc = Variable(torch.from_numpy(x_bc).float(), requires_grad=False)
    pt_t_bc = Variable(torch.from_numpy(t_bc).float(), requires_grad=False)
    pt_u_bc = Variable(torch.from_numpy(u_bc).float(), requires_grad=False)

    net_bc_out = net(torch.cat([ pt_t_bc,pt_x_bc],1))  # output of u(x,t)
    mse_u1 = mse_cost_function(net_bc_out, pt_u_bc)

    # Loss based on the boundary conditions (x = -1 and x = +1)
    pt_x_inr = Variable(torch.from_numpy(x_inr).float(), requires_grad=False)
    pt_x_inl = Variable(torch.from_numpy(x_inl).float(), requires_grad=False)
    pt_t_in = Variable(torch.from_numpy(t_in).float(), requires_grad=False)
    pt_u_in = Variable(torch.from_numpy(u_in).float(), requires_grad=False)

    net_bc_inr = net(torch.cat([ pt_t_in,pt_x_inr],1)) # output of u(x,t)
    net_bc_inl = net(torch.cat([ pt_t_in,pt_x_inl],1))

    mse_u2r = mse_cost_function(net_bc_inr, pt_u_in)
    mse_u2l = mse_cost_function(net_bc_inl, pt_u_in)

    # Loss based on PDE
    x_collocation = np.random.uniform(low=-1.0, high=1.0, size=(2000, 1))
    t_collocation = np.random.uniform(low=0.0, high=1.0, size=(2000, 1))
    all_zeros = np.zeros((2000, 1))

    pt_x_collocation = Variable(torch.from_numpy(x_collocation).float(), requires_grad=True)
    pt_t_collocation = Variable(torch.from_numpy(t_collocation).float(), requires_grad=True)
    pt_all_zeros = Variable(torch.from_numpy(all_zeros).float(), requires_grad=False)

    f_out = f(torch.cat([pt_t_collocation, pt_x_collocation],1))  # output of f(x,t)
    mse_f = mse_cost_function(f_out, pt_all_zeros)

    # Combining the loss functions
    loss = mse_u1+mse_u2r+mse_u2l + mse_f

    loss.backward()  # This is for computing gradients using backward propagation
    optimizer.step()  # This is equivalent to : theta_new = theta_old - alpha * derivative of J w.r.t theta

    with torch.autograd.no_grad():
        if epoch%1000==0:
            print(epoch, "Training Loss:", loss.data)

## Plotting ##
t = np.linspace(0, 1, 100)
x = np.linspace(-1, 1, 256)
ms_t, ms_x = np.meshgrid(t, x)
x = np.ravel(ms_x).reshape(-1, 1)
t = np.ravel(ms_t).reshape(-1, 1)
pt_x = Variable(torch.from_numpy(x).float(), requires_grad=True)
pt_t = Variable(torch.from_numpy(t).float(), requires_grad=True)
pt_u0 = net(torch.cat([pt_t, pt_x], 1))
u = pt_u0.data.cpu().numpy()

pt_u0 = u.reshape(256, 100)

fig = plt.figure()
ax = fig.add_subplot(projection='3d')
ax.set_zlim([-1, 1])
ax.plot_surface(ms_t, ms_x, pt_u0, cmap=cm.RdYlBu_r, edgecolor='blue', linewidth=0.0003, antialiased=True)
ax.set_xlabel('t')
ax.set_ylabel('x')
ax.set_zlabel('u')
plt.savefig('Preddata.png')
plt.close(fig)
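After training, a quick consistency check (a sketch added here, not part of the original post) is to evaluate the network on the slice t = 0 and compare it with the prescribed initial profile -sin(pi*x); the maximum absolute error should be small once the initial-condition loss has converged.

x_test = np.linspace(-1, 1, 256).reshape(-1, 1)
t_test = np.zeros_like(x_test)
with torch.no_grad():
    u_pred = net(torch.from_numpy(np.hstack([t_test, x_test])).float()).numpy()
print("max |u(0,x) + sin(pi*x)| =", np.abs(u_pred + np.sin(np.pi * x_test)).max())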


