Solving the Navier-Stokes Equations with PyTorch: Basic PINN

This post walks through a PyTorch implementation of physics-informed neural networks (PINNs) for the Kovasznay flow problem, a CVD problem, and an obstacle-flow problem. With a suitable network structure and loss function, the PINN effectively approximates solutions under complex boundary conditions. Complete code examples are included to show how PINNs handle PDE-constrained problems.


This post is adapted from a Zhihu article (the link placeholder in the original was never filled in).

Problem description: the Kovasznay problem

Reference: Dockhorn, T. "A Discussion on Solving Partial Differential Equations using Neural Networks." (2019).
$$\left\{\begin{array}{l}
-\nu \Delta u + u u_x + v u_y + p_x = f_1,\\
-\nu \Delta v + u v_x + v v_y + p_y = f_2,\\
u_x + v_y = 0,\\
u|_{\partial \Omega} = u_0,\\
v|_{\partial \Omega} = v_0.
\end{array}\right.$$
$$\begin{aligned}
u_{1}(x_{1}, x_{2}) &= 1-\exp(\lambda x_{1})\cos 2\pi x_{2},\\
u_{2}(x_{1}, x_{2}) &= \frac{\lambda}{2\pi}\exp(\lambda x_{1})\sin 2\pi x_{2},\\
p(x_{1}, x_{2}) &= \frac{1}{2}\left(1-\exp(2\lambda x_{1})\right)+C,\\
\lambda &= \frac{1}{2\nu}-\sqrt{\frac{1}{4\nu^{2}}+4\pi^{2}},\qquad \nu=0.025,\\
f_{1} &= 0,\qquad f_{2}=0.
\end{aligned}$$
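For $\nu = 0.025$ (i.e. $Re = 1/\nu = 40$, as set in the script below) this evaluates to $\lambda = 20 - \sqrt{400 + 4\pi^2} \approx -0.9637$.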
Here $\Omega=[-0.5,1.0]\times[-0.5,1.5]$. The interior set is built on a $[64,64]$ subdivision (the script below actually defaults to Sobol quasi-random sampling, mode = 'qmc'), the boundary set reuses the $[64,64]$ subdivision along the four edges, and a uniform $[10,10]$ grid serves as the test set. Fully connected networks are trained with the strong-form PINN loss; the three networks for $u$, $v$, $p$ all have layer sizes $[2,25,25,1]$. About 2000 BFGS iterations (20 epochs of 100 inner iterations) give the results below.
Note: the results below are only reached with the BFGS optimizer. The files linesearch.py and bfgs.py are given here; place linesearch.py, bfgs.py, and kflow.py in the same folder to run.
bfgs.py

# ---------------------------------------------------------------------------------------------------
# BFGS Optimizer
# The code is modified from torch.optim.LBFGS
# ---------------------------------------------------------------------------------------------------
import torch
from functools import reduce
from torch.optim.optimizer import Optimizer
import linesearch

class BFGS(Optimizer):
    """
    Arguments:
        lr (float): learning rate (default: 1)
        max_iter (int): maximal number of iterations per optimization step
            (default: 20)
        max_eval (int): maximal number of function evaluations per optimization
            step (default: max_iter * 1.25).
        tolerance_grad (float): termination tolerance on first order optimality
            (default: 1e-5).
        tolerance_change (float): termination tolerance on function
            value/parameter changes (default: 1e-9).
        history_size (int): update history size (default: 100).
        line_search_fn (str): either 'strong_wolfe' or None (default: None).
    """
    def __init__(self,
                 params,
                 lr=1,
                 max_iter=20,
                 max_eval=None,
                 tolerance_grad=1e-5,
                 tolerance_change=1e-9,
                 history_size=100,
                 line_search_fn=None):
        if max_eval is None:
            max_eval = max_iter * 5 // 4
        defaults = dict(
            lr=lr,
            max_iter=max_iter,
            max_eval=max_eval,
            tolerance_grad=tolerance_grad,
            tolerance_change=tolerance_change,
            history_size=history_size,
            line_search_fn=line_search_fn)
        super(BFGS, self).__init__(params, defaults)

        if len(self.param_groups) != 1:
            raise ValueError("BFGS doesn't support per-parameter options "
                             "(parameter groups)")

        self._params = self.param_groups[0]['params']
        self._numel_cache = None

    def _numel(self):
        if self._numel_cache is None:
            self._numel_cache = reduce(lambda total, p: total + p.numel(), self._params, 0)
        return self._numel_cache
    
    def _clone_param(self):
        return [p.clone() for p in self._params]

    def _set_param(self, params_data):
        offset = 0
        for p in self._params:
            numel = p.numel()
            p.data.copy_(params_data[offset:offset + numel].view_as(p.data))
            offset += numel
        assert offset == self._numel()

    def _flatten(self, x):
        views = []
        for p in x:
            view = p.view(-1)
            views.append(view)
        return torch.cat(views, 0)

    def _gather_grad_flat(self):
        views = []
        for p in self._params:
            if p.grad is None:
                view = p.new(p.numel()).zero_()
            elif p.grad.is_sparse:
                view = p.grad.to_dense().view(-1)
            else:
                view = p.grad.view(-1)
            views.append(view)
        return torch.cat(views, 0)

    def _add_grad(self, step_size, update):
        offset = 0
        for p in self._params:
            numel = p.numel()
            # view_as avoids deprecated pointwise semantics; alpha= replaces the
            # deprecated add_(scalar, tensor) call order
            p.data.add_(update[offset:offset + numel].view_as(p.data), alpha=step_size)
            offset += numel
        assert offset == self._numel()

    def step(self, closure):
        """Performs a single optimization step.

        Arguments:
            closure (callable): A closure that reevaluates the model
                and returns the loss.
        """
        assert len(self.param_groups) == 1

        group = self.param_groups[0]
        lr = group['lr']
        max_iter = group['max_iter']
        max_eval = group['max_eval']
        tolerance_grad = group['tolerance_grad']
        tolerance_change = group['tolerance_change']
        line_search_fn = group['line_search_fn']
        history_size = group['history_size']

        # NOTE: BFGS has only global state, but we register it as state for
        # the first param, because this helps with casting in load_state_dict
        state = self.state[self._params[0]]
        state.setdefault('func_evals', 0)
        state.setdefault('n_iter', 0)

        # evaluate initial f(x) and df/dx
        loss = float(closure())
        grad_flat = self._gather_grad_flat()
        current_evals = 1
        state['func_evals'] += 1

        # optimal condition
        opt_cond = grad_flat.abs().max() <= tolerance_grad
        if opt_cond:
            return closure()

        # tensors cached in state (for tracing)
        x_prev = self._clone_param()
        x_prev = self._flatten(x_prev)
        
        direction = state.get('direction')
        step_size = state.get('step_size')
        loss_prev = state.get('loss_prev')
        loss_prev_prev = state.get('loss_prev_prev')
        grad_prev_flat = state.get('grad_prev_flat')
        Hess_inv = state.get('Hess_inv')

        if loss_prev is None:
            loss_prev = loss
        if grad_prev_flat is None:
            grad_prev_flat = grad_flat.clone()
        if loss_prev_prev is None:
            loss_prev_prev = loss_prev + ((grad_prev_flat**2).sum())**0.5 / 2
        if Hess_inv is None:
            Hess_inv = torch.eye(self._numel(), dtype=self._params[0].dtype).to(self._params[0].device)

        n_iter = 0
        # optimize for a max of max_iter iterations
        while n_iter < max_iter:
            direction = torch.mm(Hess_inv, grad_flat.neg().view(-1,1)).view(-1)

            # directional derivative
            grad_dot_dir = grad_flat.dot(direction)
            if grad_dot_dir > -tolerance_change:
                break

            # optional line search: user function
            ls_func_evals = 0
            if line_search_fn is not None:
                # perform line search, using user function
                if line_search_fn != "strong_wolfe":
                    raise RuntimeError("only 'strong_wolfe' is supported")
                else:
                    def obj_func_loss(x):
                        self._set_param(x)
                        return float(closure())

                    def obj_func_grad(x):
                        # self._set_param(x)
                        # loss = float(closure())
                        return self._gather_grad_flat()
                    try:
                        step_size, ls_func_evals, _, loss_prev, loss_prev_prev, _ = \
                             linesearch._line_search_wolfe12(obj_func_loss, obj_func_grad,
                                                            x_prev, direction, grad_flat,
                                                            loss_prev, loss_prev_prev,
                                                            amin=1e-100, amax=1e100)
                    except Exception:  # line search failed: restart from a fresh identity Hessian
                        self._set_param(x_prev)
                        loss_prev = loss
                        loss_prev_prev = loss_prev + ((grad_prev_flat**2).sum())**0.5 / 2
                        grad_flat = self._gather_grad_flat()
                        Hess_inv = torch.eye(self._numel(), dtype=self._params[0].dtype).to(self._params[0].device)
                        direction = torch.mm(Hess_inv, grad_flat.neg().view(-1,1)).view(-1)

                        current_evals += 1
                        state['func_evals'] += 1
                        break
            else:
                step_size = lr
                x = x_prev + step_size*direction
                self._set_param(x)  # move the parameters before re-evaluating the closure
                loss_prev_prev = loss_prev
                loss_prev = float(closure())
                ls_func_evals = 1

            x = x_prev + step_size*direction
            self._set_param(x)  # make sure the parameters sit at the accepted point
            s = direction.mul(step_size)

            grad_flat = self._gather_grad_flat()
            opt_cond = grad_flat.abs().max() <= tolerance_grad
            y = grad_flat.sub(grad_prev_flat)
            
            ys = y.dot(s)
            Hess_inv = Hess_inv - torch.mm(s.view(-1,1)/ys, torch.mm(y.view(-1,1).T, Hess_inv))
            Hess_inv = Hess_inv - torch.mm(torch.mm(Hess_inv, y.view(-1,1)), s.view(-1,1).T/ys)
            Hess_inv = Hess_inv + torch.mm(s.view(-1,1), s.view(-1,1).T) / ys

            x_prev = x
            grad_prev_flat = grad_flat
            
            # keep track of nb of iterations
            n_iter += 1
            state['n_iter'] += 1

            # update func eval
            current_evals += ls_func_evals
            state['func_evals'] += ls_func_evals
            
            ############################################################
            # check conditions
            ############################################################
            if n_iter == max_iter:
                break

            if current_evals >= max_eval:
                break

            # optimal condition
            if opt_cond:
                break

            # lack of progress
            if direction.mul(step_size).abs().max() <= tolerance_change:
                break

            if abs(loss_prev - loss_prev_prev) < tolerance_change:
                break

        state['direction'] = direction
        state['step_size'] = step_size
        state['grad_prev_flat'] = grad_prev_flat
        state['loss_prev'] = loss_prev
        state['loss_prev_prev'] = loss_prev_prev
        state['Hess_inv'] = Hess_inv

        return closure()
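
A minimal usage sketch (not from the original post): this BFGS class follows the torch.optim.LBFGS closure interface, so a toy least-squares fit looks like the snippet below. Note that the strong-Wolfe line search relies on linesearch.py and therefore on a SciPy version that still ships scipy.optimize.minpack2 (before 1.8).

import torch
import bfgs

x = torch.linspace(0, 1, 50).view(-1, 1).double()
y = 3.0*x + 1.0                       # target line to recover
model = torch.nn.Linear(1, 1).double()
opt = bfgs.BFGS(model.parameters(), lr=1, max_iter=50,
                line_search_fn='strong_wolfe')

def closure():
    opt.zero_grad()
    loss = ((model(x) - y)**2).mean()
    loss.backward()
    return loss

opt.step(closure)
print(float(closure()))               # should be close to zero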

linesearch.py

# ---------------------------------------------------------------------------------------------------
# Minpack's Wolfe line and scalar searches
# The code is modified from scipy.optimize.linesearch to make it suitable to Pytorch
# ---------------------------------------------------------------------------------------------------
import warnings

import numpy as np
import torch
# minpack2 was removed in SciPy >= 1.8, so this module needs an older SciPy
from scipy.optimize import minpack2
# fallback line search and its warning class from SciPy's (pre-1.8) linesearch module
from scipy.optimize.linesearch import LineSearchWarning, line_search_wolfe2


class _LineSearchError(RuntimeError):
    pass

def _line_search_wolfe12(f, fprime, xk, pk, gfk, old_fval, old_old_fval,
                         **kwargs):
    """
    Same as line_search_wolfe1, but fall back to line_search_wolfe2 if
    suitable step length is not found, and raise an exception if a
    suitable step length is not found.

    Raises
    ------
    _LineSearchError
        If no suitable step size is found

    """

    extra_condition = kwargs.pop('extra_condition', None)

    ret = line_search_wolfe1(f, fprime, xk, pk, gfk,
                             old_fval, old_old_fval,
                             **kwargs)

    if ret[0] is not None and extra_condition is not None:
        xp1 = xk + ret[0] * pk
        if not extra_condition(ret[0], xp1, ret[3], ret[5]):
            # Reject step if extra_condition fails
            ret = (None,)

    if ret[0] is None:
        # line search failed: try different one.
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', LineSearchWarning)
            kwargs2 = {}
            for key in ('c1', 'c2', 'amax'):
                if key in kwargs:
                    kwargs2[key] = kwargs[key]
            ret = line_search_wolfe2(f, fprime, xk, pk, gfk,
                                     old_fval, old_old_fval,
                                     extra_condition=extra_condition,
                                     **kwargs2)

    if ret[0] is None:
        raise _LineSearchError()

    return ret

def line_search_wolfe1(f, fprime, xk, pk, gfk=None,
                       old_fval=None, old_old_fval=None,
                       args=(), c1=1e-4, c2=0.9, amax=50, amin=1e-8,
                       xtol=1e-14):
    """
    As `scalar_search_wolfe1` but do a line search to direction `pk`

    Parameters
    ----------
    f : callable
        Function `f(x)`
    fprime : callable
        Gradient of `f`
    xk : array_like
        Current point
    pk : array_like
        Search direction

    gfk : array_like, optional
        Gradient of `f` at point `xk`
    old_fval : float, optional
        Value of `f` at point `xk`
    old_old_fval : float, optional
        Value of `f` at point preceding `xk`

    The rest of the parameters are the same as for `scalar_search_wolfe1`.

    Returns
    -------
    stp, f_count, g_count, fval, old_fval
        As in `line_search_wolfe1`
    gval : array
        Gradient of `f` at the final point

    """
    if gfk is None:
        gfk = fprime(xk)

    if isinstance(fprime, tuple):
        eps = fprime[1]
        fprime = fprime[0]
        newargs = (f, eps) + args
        gradient = False
    else:
        newargs = args
        gradient = True

    gval = [gfk]
    gc = [0]
    fc = [0]

    def phi(s):
        fc[0] += 1
        return f(xk + s*pk, *args)

    def derphi(s):
        gval[0] = fprime(xk + s*pk, *newargs)
        if gradient:
            gc[0] += 1
        else:
            fc[0] += len(xk) + 1
        return torch.dot(gval[0], pk)  # torch.dot instead of np.dot: inputs are tensors

    derphi0 = torch.dot(gfk, pk)

    stp, fval, old_fval = scalar_search_wolfe1(
            phi, derphi, old_fval, old_old_fval, derphi0,
            c1=c1, c2=c2, amax=amax, amin=amin, xtol=xtol)

    return stp, fc[0], gc[0], fval, old_fval, gval[0]


def scalar_search_wolfe1(phi, derphi, phi0=None, old_phi0=None, derphi0=None,
                         c1=1e-4, c2=0.9,
                         amax=50, amin=1e-8, xtol=1e-14):
    """
    Scalar function search for alpha that satisfies strong Wolfe conditions

    alpha > 0 is assumed to be a descent direction.

    Parameters
    ----------
    phi : callable phi(alpha)
        Function at point `alpha`
    derphi : callable phi'(alpha)
        Objective function derivative. Returns a scalar.
    phi0 : float, optional
        Value of phi at 0
    old_phi0 : float, optional
        Value of phi at previous point
    derphi0 : float, optional
        Value derphi at 0
    c1 : float, optional
        Parameter for Armijo condition rule.
    c2 : float, optional
        Parameter for curvature condition rule.
    amax, amin : float, optional
        Maximum and minimum step size
    xtol : float, optional
        Relative tolerance for an acceptable step.

    Returns
    -------
    alpha : float
        Step size, or None if no suitable step was found
    phi : float
        Value of `phi` at the new point `alpha`
    phi0 : float
        Value of `phi` at `alpha=0`

    Notes
    -----
    Uses routine DCSRCH from MINPACK.

    """

    if phi0 is None:
        phi0 = phi(0.)
    if derphi0 is None:
        derphi0 = derphi(0.)

    if old_phi0 is not None and derphi0 != 0:
        alpha1 = min(1.0, 1.01*2*(phi0 - old_phi0)/derphi0)
        if alpha1 < 0:
            alpha1 = 1.0
    else:
        alpha1 = 1.0

    phi1 = phi0
    derphi1 = derphi0
    isave = np.zeros((2,), np.intc)
    dsave = np.zeros((13,), float)
    task = b'START'

    maxiter = 100
    for i in range(maxiter):
        stp, phi1, derphi1, task = minpack2.dcsrch(alpha1, phi1, derphi1,
                                                   c1, c2, xtol, task,
                                                   amin, amax, isave, dsave)
        if task[:2] == b'FG':
            alpha1 = stp
            phi1 = phi(stp)
            derphi1 = derphi(stp)
        else:
            break
    else:
        # maxiter reached, the line search did not converge
        stp = None

    if task[:5] == b'ERROR' or task[:4] == b'WARN':
        stp = None  # failed

    return stp, phi1, phi0
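
A quick sanity check of scalar_search_wolfe1 (my own toy example, again needing a pre-1.8 SciPy for minpack2): on the quadratic phi(a) = (a - 2)^2 it should return a positive step that decreases phi and satisfies the strong Wolfe conditions.

phi = lambda a: (a - 2.0)**2
derphi = lambda a: 2.0*(a - 2.0)
stp, phi1, phi0 = scalar_search_wolfe1(phi, derphi)
print(stp, phi1, phi0)   # stp > 0 and phi1 < phi0 = 4.0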

(Result figures omitted; the plotting code at the end of kflow.py reproduces them.)
kflow.py (Kovasznay flow reference code)

import torch
import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import torch.nn as nn
import torch.nn.functional as F
import itertools
import bfgs
from scipy.stats import qmc
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
#device = torch.device('cpu')

bound = np.array([-0.5,1,-0.5,1.5]).reshape(2,2)
Re = 40;nu = 1/Re
lam = 1/(2*nu) - np.sqrt(1/(4*nu**2) + 4*np.pi**2)
def UU(X,order,prob):
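    # Exact Kovasznay velocity (u, v) and its derivatives: order = [i, j] selects
    # d^(i+j)(u, v)/dx^i dy^j, returned as an (N, 2) tensor.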
    if prob == 1:
        eta = 2*np.pi
        if order == [0,0]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = 1 - torch.exp(lam*X[:,0])*torch.cos(X[:,1]*eta)
            tmp[:,1] = torch.exp(lam*X[:,0])*torch.sin(X[:,1]*eta)*lam/(eta)
            return tmp
        if order == [1,0]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = - torch.exp(lam*X[:,0])*torch.cos(X[:,1]*eta)*lam
            tmp[:,1] = torch.exp(lam*X[:,0])*torch.sin(X[:,1]*eta)*(lam**2)/(eta)
            return tmp
        if order == [0,1]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = torch.exp(lam*X[:,0])*torch.sin(X[:,1]*eta)*(eta)
            tmp[:,1] = torch.exp(lam*X[:,0])*torch.cos(X[:,1]*eta)*lam
            return tmp
        if order == [2,0]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = - torch.exp(lam*X[:,0])*torch.cos(X[:,1]*eta)*lam*lam
            tmp[:,1] = torch.exp(lam*X[:,0])*torch.sin(X[:,1]*eta)*(lam**3)/(eta)
            return tmp
        if order == [0,2]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = torch.exp(lam*X[:,0])*torch.cos(X[:,1]*eta)*(eta)**2
            tmp[:,1] = -torch.exp(lam*X[:,0])*torch.sin(X[:,1]*eta)*lam*(eta)
            return tmp
    
def Delta(X,prob):
    return UU(X,[2,0],prob) + UU(X,[0,2],prob)
def PP(X,order,prob):
    if prob == 1:
        if order == [0,0]:
            return 0.5*(1 - torch.exp(2*lam*X[:,0]))
        if order == [1,0]:
            return - lam*torch.exp(2*lam*X[:,0])
        if order == [0,1]:
            return 0*X[:,0]
def FF(X,prob):  # forcing term (f1, f2) assembled from the exact solution
    tmp = torch.zeros(X.shape[0],2)
    tmp[:,0] = -nu*Delta(X,prob)[:,0] + (UU(X,[0,0],prob)[:,0])*(UU(X,[1,0],prob)[:,0]) + \
    (UU(X,[0,0],prob)[:,1])*(UU(X,[0,1],prob)[:,0]) + PP(X,[1,0],prob)
    tmp[:,1] = -nu*Delta(X,prob)[:,1] + (UU(X,[0,0],prob)[:,0])*(UU(X,[1,0],prob)[:,1]) + \
    (UU(X,[0,0],prob)[:,1])*(UU(X,[0,1],prob)[:,1]) + PP(X,[0,1],prob)
    return tmp
class INSET():  # interior collocation points
    def __init__(self,bound,nx,prob,mode):
        self.dim = 2
        #self.area = (bound[0,1] - bound[0,0])*(bound[1,1] - bound[1,0])
        self.hx = [(bound[0,1] - bound[0,0])/nx[0],(bound[1,1] - bound[1,0])/nx[1]]
        self.size = nx[0]*nx[1]
        self.X = torch.zeros(self.size,self.dim)  # store the interior points
        if mode == 'uniform':
            for i in range(nx[0]):
                for j in range(nx[1]):
                    self.X[i*nx[1] + j,0] = bound[0,0] + (i + 0.5)*self.hx[0]
                    self.X[i*nx[1] + j,1] = bound[1,0] + (j + 0.5)*self.hx[1]
        elif mode == 'random':
            tmp = torch.rand(self.size,2)
            self.X[:,0] = bound[0,0] + self.hx[0] + (bound[0,1] - bound[0,0] - 2*self.hx[0])*tmp[:,0]
            self.X[:,1] = bound[1,0] + self.hx[1] + (bound[1,1] - bound[1,0] - 2*self.hx[1])*tmp[:,1]
        else:
            tmp = torch.tensor(self.quasi_samples(self.size))
            self.X[:,0] = bound[0,0] + self.hx[0] + (bound[0,1] - bound[0,0] - 2*self.hx[0])*tmp[:,0]
            self.X[:,1] = bound[1,0] + self.hx[1] + (bound[1,1] - bound[1,0] - 2*self.hx[1])*tmp[:,1]
        self.uu = UU(self.X,[0,0],prob)[:,0:1] 
        self.vv = UU(self.X,[0,0],prob)[:,1:2] 
        self.ff = FF(self.X,prob)
    def quasi_samples(self,N):
        sampler = qmc.Sobol(d=self.dim)
        sample = sampler.random(n=N)
        return sample
class BDSET():  # boundary points and boundary values
    def __init__(self,bound,nx,prob):
        self.dim = 2
        self.area = (bound[0,1] - bound[0,0])*(bound[1,1] - bound[1,0])
        self.hx = [(bound[0,1] - bound[0,0])/nx[0],(bound[1,1] - bound[1,0])/nx[1]]
        self.size = 2*(nx[0] + nx[1])
        self.X = torch.zeros(self.size,self.dim)  # store the boundary points
        m = 0
        for i in range(nx[0]):
            self.X[m,0] = bound[0,0] + (i + 0.5)*self.hx[0]
            self.X[m,1] = bound[1,0] 
            m = m + 1
        for j in range(nx[1]):
            self.X[m,0] = bound[0,1]
            self.X[m,1] = bound[1,0] + (j + 0.5)*self.hx[1]
            m = m + 1
        for i in range(nx[0]):
            self.X[m,0] = bound[0,0] + (i + 0.5)*self.hx[0]
            self.X[m,1] = bound[1,1] 
            m = m + 1
        for j in range(nx[1]):
            self.X[m,0] = bound[0,0]
            self.X[m,1] = bound[1,0] + (j + 0.5)*self.hx[1]
            m = m + 1
        #plt.scatter(self.X[:,0],self.X[:,1])
        self.uu = UU(self.X,[0,0],prob)[:,0:1] 
        self.vv = UU(self.X,[0,0],prob)[:,1:2] 
class TESET():
    def __init__(self, bound, nx,prob):
        self.bound = bound
        self.nx = nx
        self.hx = [(self.bound[0,1]-self.bound[0,0])/self.nx[0],
                   (self.bound[1,1]-self.bound[1,0])/self.nx[1]]
        self.prob = prob
        self.size = (self.nx[0] + 1)*(self.nx[1] + 1)
        self.X = torch.zeros(self.size,2)
        m = 0
        for i in range(self.nx[0] + 1):
            for j in range(self.nx[1] + 1):
                self.X[m,0] = self.bound[0,0] + i*self.hx[0]
                self.X[m,1] = self.bound[1,0] + j*self.hx[1]
                m = m + 1
        #plt.scatter(self.X[:,0],self.X[:,1])
        self.uu = UU(self.X,[0,0],prob)[:,0:1] 
        self.vv = UU(self.X,[0,0],prob)[:,1:2] 
        self.pp = PP(self.X,[0,0],prob) 
class LEN():
    def __init__(self,bound,mu):
        self.mu = mu
        self.bound = bound
        self.hx = bound[:,1] - bound[:,0]
    def forward(self,X):
        L = 1.0
        for i in range(2):
            L = L*(1 - (1 - (X[:,i] - self.bound[i,0])/self.hx[i])**self.mu)
            L = L*(1 - (1 - (self.bound[i,1] - X[:,i])/self.hx[i])**self.mu)
        return L.view(-1,1)
        
np.random.seed(1234)
torch.manual_seed(1234)
class Net(torch.nn.Module):
    def __init__(self, layers, dtype):
        super(Net, self).__init__()
        self.layers = layers
        self.layers_hid_num = len(layers)-2
        self.device = device
        self.dtype = dtype
        fc = []
        for i in range(self.layers_hid_num):
            fc.append(torch.nn.Linear(self.layers[i],self.layers[i+1]))
            fc.append(torch.nn.Linear(self.layers[i+1],self.layers[i+1]))
        fc.append(torch.nn.Linear(self.layers[-2],self.layers[-1]))
        self.fc = torch.nn.Sequential(*fc)
        for i in range(self.layers_hid_num):
            self.fc[2*i].weight.data = self.fc[2*i].weight.data.type(dtype)
            self.fc[2*i].bias.data = self.fc[2*i].bias.data.type(dtype)
            self.fc[2*i + 1].weight.data = self.fc[2*i + 1].weight.data.type(dtype)
            self.fc[2*i + 1].bias.data = self.fc[2*i + 1].bias.data.type(dtype)
        self.fc[-1].weight.data = self.fc[-1].weight.data.type(dtype)
        self.fc[-1].bias.data = self.fc[-1].bias.data.type(dtype)
    def forward(self, x):
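        # Each hidden stage applies two sin-activated linear layers and adds a
        # skip connection; x @ eye(in_dim, width) zero-pads the input to the
        # hidden width so that the residual sum is well defined.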
        dev = x.device
        for i in range(self.layers_hid_num):
            h = torch.sin(self.fc[2*i](x))
            h = torch.sin(self.fc[2*i+1](h))
            temp = torch.eye(x.shape[-1],self.layers[i+1],dtype = self.dtype,device = dev)
            x = h + x@temp
        return self.fc[-1](x) 
def pred_u(netu,X):
    return netu.forward(X)
def pred_v(netv,X):
    return netv.forward(X)
def pred_p(netp,X):
    return netp.forward(X)
def loadtype(inset,teset,bdset,dtype):
    inset.X = inset.X.type(dtype)
    inset.uu = inset.uu.type(dtype)
    inset.vv = inset.vv.type(dtype)
    inset.ff = inset.ff.type(dtype)

    bdset.X = bdset.X.type(dtype)
    bdset.uu = bdset.uu.type(dtype)
    bdset.vv = bdset.vv.type(dtype)

    teset.X = teset.X.type(dtype)
    teset.uu = teset.uu.type(dtype)
    teset.vv = teset.vv.type(dtype)
    
def loadcuda(netu,netv,netp,inset,teset,bdset):    
    netu = netu.to(device)
    netv = netv.to(device)
    netp = netp.to(device)
    
    
    if inset.X.requires_grad == False:
        inset.X.requires_grad = True
    inset.X = inset.X.to(device)
    inset.ff = inset.ff.to(device)
    bdset.X = bdset.X.to(device)
    bdset.uu = bdset.uu.to(device)
    bdset.vv = bdset.vv.to(device)
    teset.X = teset.X.to(device)
def loadcpu(netu,netv,netp,inset,teset,bdset):    
    netu = netu.to('cpu')
    netv = netv.to('cpu')
    netp = netp.to('cpu')
    

    inset.X = inset.X.to('cpu')
    inset.ff = inset.ff.to('cpu')
    bdset.X = bdset.X.to('cpu')
    bdset.uu = bdset.uu.to('cpu')
    bdset.vv = bdset.vv.to('cpu')
    teset.X = teset.X.to('cpu')

def error(u_pred,u_acc):
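    # Maximum pointwise (L-infinity) error; the training log labels it "L1err".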
    u_pred = u_pred.to(device)
    u_acc = u_acc.to(device)
    tmp = (u_pred - u_acc)
    return max(abs(tmp))

#----------------------
def Lossyp(netu,netv,netp,inset,bdset,penalty_in):
    inset.u = pred_u(netu,inset.X)
    inset.v = pred_v(netv,inset.X)
    inset.p = pred_p(netp,inset.X)
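    # torch.autograd.grad returns the full spatial gradient in one call:
    # u_x[:,0:1] is du/dx and u_x[:,1:2] is du/dy; differentiating those columns
    # again gives the pure second derivatives used to assemble the Laplacian.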
    u_x, = torch.autograd.grad(inset.u, inset.X, create_graph=True, retain_graph=True,
                               grad_outputs=torch.ones(inset.size,1).to(device))
    u_xx, = torch.autograd.grad(u_x[:,0:1], inset.X, create_graph=True, retain_graph=True,
                               grad_outputs=torch.ones(inset.size,1).to(device))
    u_yy, = torch.autograd.grad(u_x[:,1:2], inset.X, create_graph=True, retain_graph=True,
                               grad_outputs=torch.ones(inset.size,1).to(device))  
    u_lap = u_xx[:,0:1] + u_yy[:,1:2]

    v_x, = torch.autograd.grad(inset.v, inset.X, create_graph=True, retain_graph=True,
                               grad_outputs=torch.ones(inset.size,1).to(device))
    v_xx, = torch.autograd.grad(v_x[:,0:1], inset.X, create_graph=True, retain_graph=True,
                               grad_outputs=torch.ones(inset.size,1).to(device))
    v_yy, = torch.autograd.grad(v_x[:,1:2], inset.X, create_graph=True, retain_graph=True,
                               grad_outputs=torch.ones(inset.size,1).to(device))  
    v_lap = v_xx[:,0:1] + v_yy[:,1:2]

    p_x, = torch.autograd.grad(inset.p, inset.X, create_graph=True, retain_graph=True,
                               grad_outputs=torch.ones(inset.size,1).to(device))
    
    inset.res_u = (-nu*u_lap + inset.u*u_x[:,0:1] + inset.v*u_x[:,1:2] + p_x[:,0:1] - inset.ff[:,0:1])**2
    inset.res_v = (-nu*v_lap + inset.u*v_x[:,0:1] + inset.v*v_x[:,1:2] + p_x[:,1:2] - inset.ff[:,1:2])**2
    inset.res_div = (u_x[:,0:1] + v_x[:,1:2])**2
    inset.loss_u = torch.sqrt(inset.res_u.mean())
    inset.loss_v = torch.sqrt(inset.res_v.mean())
    inset.loss_div = torch.sqrt(inset.res_div.mean())

    inset.loss = penalty_in[0]*inset.loss_u + penalty_in[1]*inset.loss_v + \
        penalty_in[2]*inset.loss_div
    bdset.res_u = (pred_u(netu,bdset.X) - bdset.uu)**2
    bdset.res_v = (pred_v(netv,bdset.X) - bdset.vv)**2
    bdset.loss = torch.sqrt(bdset.res_u).mean() + torch.sqrt(bdset.res_v).mean()
    return inset.loss + bdset.loss
    
def Trainyp(netu,netv,netp, inset,bdset,penalty_in,optim, epoch,error_history,optimtype):
    print('train neural network yp,optim:%s'%(optimtype))
    loss_history,lossin_history,lossbd_history = error_history
    loss = Lossyp(netu,netv,netp,inset,bdset,penalty_in)
    print('epoch_yp: %d, Lu:%.3e,Lv:%.3e,Ldiv:%.3e, bloss:%.3e,time: %.2f\n'
          %(0,inset.loss_u.item(),inset.loss_v.item(),inset.loss_div.item(), bdset.loss.item(),0.00))
    
    for it in range(epoch):
        st = time.time()
        if optimtype == 'Adam':
            for j in range(100):
                optim.zero_grad()
                loss = Lossyp(netu,netv,netp,inset,bdset,penalty_in)
                loss.backward()
                optim.step()
        if optimtype == 'BFGS' or optimtype == 'LBFGS':
            def closure():
                optim.zero_grad()
                loss = Lossyp(netu,netv,netp,inset,bdset,penalty_in)
                loss.backward()
                return loss
            optim.step(closure) 
        loss = Lossyp(netu,netv,netp,inset,bdset,penalty_in)
        ela = time.time() - st
        print('epoch_yp: %d, loss:%.3e,Lu:%.3e,Lv:%.3e,Ldiv:%.3e, bloss:%.3e,time: %.2f\n'
          %(it + 1,loss.item(),inset.loss_u.item(),inset.loss_v.item(),inset.loss_div.item(), bdset.loss.item(),ela))
        if (it + 1)%4 == 0:
            u_L1err = error(pred_u(netu,inset.X),inset.uu)
            v_L1err = error(pred_v(netv,inset.X),inset.vv)
            print('epoch_yp:%d,u_L1err:%.3e,v_L1err:%.3e'%(it + 1,u_L1err,v_L1err))
        loss_u = inset.loss_u.item()
        loss_v = inset.loss_v.item()
        loss_div = inset.loss_div.item()
        loss_history.append(loss.item())
        lossin_history.append([inset.loss.item(),loss_u,loss_v,loss_div])
        
        lossbd_history.append(bdset.loss.item())
        error_history = [loss_history,lossin_history,lossbd_history]
    return error_history



nx = [64,64]
nx_te = [10,10]
prob = 1
mu = 1
lr = 1e0
#mode = 'random'
#mode = 'uniform'
mode = 'qmc'
inset = INSET(bound,nx,prob,mode)
bdset = BDSET(bound,nx,prob)
teset = TESET(bound,nx_te,prob)

lenth = LEN(bound,mu)


dtype = torch.float64
wid = 25
layer_u = [2,wid,wid,1];netu = Net(layer_u,dtype)
layer_v = [2,wid,wid,1];netv = Net(layer_v,dtype)
layer_p = [2,wid,wid,1];netp = Net(layer_p,dtype)


fname1 = "u-CVDlay%dvar.pt"%(wid)
fname2 = "v-CVDlay%dvar.pt"%(wid)
fname3 = "p-CVDlay%dvar.pt"%(wid)

#netu = torch.load(fname1)
#netv = torch.load(fname2)
#netp = torch.load(fname3)

loadtype(inset,teset,bdset,dtype)
loadcuda(netu,netv,netp,inset,teset,bdset)

error_history = [[],[],[]]
epoch = 20
start_time = time.time()
penalty_in = [1e0,1e0,1e0]

lr = 1e0
max_iter = 1
#optimtype = 'Adam'
optimtype = 'BFGS'
#optimtype = 'LBFGS'
for i in range(max_iter):
    if optimtype == 'BFGS':
        optim = bfgs.BFGS(itertools.chain(netu.parameters(),
                                        netv.parameters(),
                                        netp.parameters()),
                        lr=lr, max_iter=100,
                        tolerance_grad=1e-16, tolerance_change=1e-16,
                        line_search_fn='strong_wolfe')
    if optimtype == 'LBFGS':
        optim = torch.optim.LBFGS(itertools.chain(netu.parameters(),
                                        netv.parameters(),
                                        netp.parameters()),
                        lr=lr, max_iter=100,
                        tolerance_grad=1e-16, tolerance_change=1e-16,
                        history_size=2500,
                        line_search_fn='strong_wolfe')
    if optimtype == 'Adam':
        optim = torch.optim.Adam(itertools.chain(netu.parameters(),
                                        netv.parameters(),
                                        netp.parameters()),
                        lr=lr)
    error_history = \
        Trainyp(netu,netv,netp, inset,bdset,penalty_in,optim, epoch,error_history,optimtype)
    if (i + 1)%5 == 0:
        lr *= 0.985
elapsed = time.time() - start_time
print('Finished! train time: %.2f\n' %(elapsed)) 

loadcpu(netu,netv,netp,inset,teset,bdset)
torch.cuda.empty_cache()

torch.save(netu, fname1)
torch.save(netv, fname2)
torch.save(netp, fname3)

loss_history,lossin_history,lossbd_history = error_history
np.save('lay%d-epoch%d-lossin.npy'%(wid,epoch),lossin_history)
np.save('lay%d-epoch%d-lossvd.npy'%(wid,epoch),lossbd_history)
np.save('lay%d-epoch%d-loss.npy'%(wid,epoch),loss_history)
#%%
fig, ax = plt.subplots(1,2,figsize=(12,3.5))

ax[0].semilogy(np.array(lossin_history))
ax[0].legend(['loss', 'loss_u', 'loss_v', 'loss_div'])
ax[0].set_xlabel('iters') 

ax[1].plot(np.array(lossbd_history))
ax[1].legend(['boundary loss'])
ax[1].set_xlabel('iters') 

fig.tight_layout()
plt.show()

plt.scatter(inset.X.detach().numpy()[:,0],inset.X.detach().numpy()[:,1])
plt.show()
#%%
nx_te_in = [64,64]
x_train = np.linspace(bound[0,0],bound[0,1],nx_te_in[0])
y_train = np.linspace(bound[1,0],bound[1,1],nx_te_in[1])

x0, x1= np.meshgrid(x_train,y_train)

xx = np.hstack((x0.reshape(-1,1), x1.reshape(-1,1)))
xx = torch.from_numpy(xx).type(dtype)
u_pred = pred_u(netu,xx).detach().numpy()
v_pred = pred_v(netv,xx).detach().numpy()
u_acc = UU(xx,[0,0],prob).numpy()[:,0:1]

v_acc = UU(xx,[0,0],prob).numpy()[:,1:2]
fig, ax = plt.subplots(2,3,figsize=(12,6))
for i in range(2):  # format both rows of subplots
    for j in range(3):
        ax[i,j].axis('equal')
        ax[i,j].set_xlim([bound[0,0],bound[0,1]])
        ax[i,j].set_ylim([bound[1,0],bound[1,1]])
        ax[i,j].axis('off')
        
num_line = 10
x0 = x0.flatten()
x1 = x1.flatten()
u_pred = u_pred.reshape(-1,1).flatten()
v_pred = v_pred.reshape(-1,1).flatten()
u_acc = u_acc.reshape(-1,1).flatten()
v_acc = v_acc.reshape(-1,1).flatten()

    
ax00 = ax[0,0].tricontourf(x0, x1, u_pred, num_line, cmap='rainbow')
fig.colorbar(ax00,ax=ax[0,0],fraction = 0.05,pad = 0.04)
ax[0,0].set_title('PINN: u')

ax10 = ax[1,0].tricontourf(x0, x1, v_pred, num_line, alpha=1, cmap='rainbow')
fig.colorbar(ax10,ax=ax[1,0],fraction = 0.05,pad = 0.04)
ax[1,0].set_title('PINN: v')

ax01 = ax[0,1].tricontourf(x0, x1, u_acc, num_line, cmap='rainbow')
fig.colorbar(ax01,ax=ax[0,1],fraction = 0.05,pad = 0.04)
ax[0,1].set_title('exact: u')

ax11 = ax[1,1].tricontourf(x0, x1, v_acc, num_line, alpha=1, cmap='rainbow')
fig.colorbar(ax11,ax=ax[1,1],fraction = 0.05,pad = 0.04)
ax[1,1].set_title('exact: v')

ax02 = ax[0,2].tricontourf(x0, x1, u_pred - u_acc, num_line, cmap='rainbow')
fig.colorbar(ax02,ax=ax[0,2],fraction = 0.05,pad = 0.04)
ax[0,2].set_title('difference: u')


ax12 = ax[1,2].tricontourf(x0, x1, v_pred - v_acc, num_line, alpha=1, cmap='rainbow')
fig.colorbar(ax12,ax=ax[1,2],fraction = 0.05,pad = 0.04)
ax[1,2].set_title('difference: v')

plt.suptitle('PINN solve K flow',fontsize=20)

fig.tight_layout()
plt.show()


The CVD problem

Reference: Yang, H., F. N. Hwang, and X.-C. Cai. "Nonlinear Preconditioning Techniques for Full-Space Lagrange-Newton Solution of PDE-Constrained Optimization Problems." SIAM Journal on Scientific Computing (2016).
The parameters are $\gamma=0.01$, $Re=1$, $Pr=0.72$, $Gr=10^{4}$.
$$\left\{\begin{aligned}
&\min J=\frac{1}{2}\int_{\Omega}\omega^2 \, d\Omega+\frac{\gamma}{2}\int_{\Gamma_2\cup\Gamma_4}g^2 \, d\Gamma,\\
&\text{subject to }\left\{\begin{aligned}
&\omega = -\frac{\partial u}{\partial y} + \frac{\partial v}{\partial x},\\
&-\Delta u-\frac{\partial \omega}{\partial y}=0,\\
&-\Delta v+\frac{\partial \omega}{\partial x}=0,\\
&-\frac{1}{Re}\Delta\omega+u\frac{\partial \omega}{\partial x}+v\frac{\partial \omega}{\partial y}-\frac{Gr}{Re^{2}}\frac{\partial T}{\partial x}=0,\\
&-\frac{1}{Re\,Pr}\Delta T+u\frac{\partial T}{\partial x}+v\frac{\partial T}{\partial y}=0,
\end{aligned}\right.\\
&\text{and }\left\{\begin{array}{ll}
\mathbf{v}=(0,0) \text{ and } T=1 & \text{on } \Gamma_{1}\cup C_{1}\cup C_{2},\\
\mathbf{v}=(0,0) \text{ and } \frac{\partial T}{\partial n}=g-T & \text{on } \Gamma_{2}\cup\Gamma_{4},\\
\mathbf{v}=(0,-4(x-1/3)(2/3-x)) \text{ and } T=0 & \text{on } \Gamma_{3,m},\\
\mathbf{v}=(0,2x(1/3-x)) \text{ and } \frac{\partial T}{\partial n}=0 & \text{on } \Gamma_{3,l}\cup C_{4},\\
\mathbf{v}=(0,2(x-2/3)(1-x)) \text{ and } \frac{\partial T}{\partial n}=0 & \text{on } \Gamma_{3,r}\cup C_{3}.
\end{array}\right.
\end{aligned}\right.$$
(Figure omitted: sketch of the computational domain and its boundary segments $\Gamma_i$, $C_i$.)

Taking $g = 0$ and considering only the constraint equations (the forward problem), the paper's results are as follows:
(Result figures from the paper omitted.)

PINN code for the forward CVD problem

For $u, v, w, T$, the FNN layer sizes are all set to $[2,25,25,25,1]$; the interior points are uniformly sampled on a $[64,64]$ subdivision and the boundary on a $[100,100]$ subdivision. The problem above involves 15 boundary conditions; here, however, I define the approximate solutions as follows:
$$\left\{\begin{array}{l}
u = net_u \, x(1-x)y(1-y),\\
v = net_v \, x(1-x)y,\\
w = net_w,\\
T = net_T.
\end{array}\right.$$
With this definition the approximate solution satisfies 7 of the boundary conditions exactly. The loss function is defined as:
$$L = \alpha_1 L_u + \alpha_2 L_v + \alpha_3 L_{w} + \alpha_4 L_T + \alpha_5 L_{var} + \sum_{i=1}^{5}\beta_i L_{bdi}$$
$$\left\{\begin{array}{l}
L_{var}=\frac{1}{n_I}\sum_{i}^{n_I}\left(w + u_y - v_x\right)_i^2,\\
L_{bd1}=\frac{1}{n_B}\sum_{i}^{n_B}(T-1)_i^2,\\
L_{bd2}=\frac{1}{n_B}\sum_{i}^{n_B}\left(\frac{\partial T}{\partial n} + T\right)_i^2,\\
L_{bd3}=\frac{1}{n_B}\sum_{i}^{n_B}\left(v + 4(x-1/3)(2/3-x)\right)_i^2 + T_i^2,\\
L_{bd4}=\frac{1}{n_B}\sum_{i}^{n_B}\left(v - 2x(1/3-x)\right)_i^2 + \left(\frac{\partial T}{\partial n}\right)_i^2,\\
L_{bd5}=\frac{1}{n_B}\sum_{i}^{n_B}\left(v - 2(x-2/3)(1-x)\right)_i^2 + \left(\frac{\partial T}{\partial n}\right)_i^2.
\end{array}\right.$$
All penalty weights are set to 1.
The main difficulty of the forward problem is the complicated boundary conditions; an ordinary PINN with a fully connected network fits them only to limited accuracy. Below are the results of the code after 200 training epochs:
(Result figures after 200 epochs omitted.)

Satisfying boundary condition bd1 automatically:
Redefine $T$ as $T = net_T \, y + 1$, which makes the approximation satisfy bd1 exactly ($T = 1$ on the bottom boundary $y = 0$). Retraining for 200 iterations with this change gives the following results:

(Result figures with the modified $T$ ansatz omitted.)

import torch
import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import torch.nn as nn
import torch.nn.functional as F
import itertools
import bfgs
from scipy.stats import qmc
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
#device = torch.device('cpu')

bounds = np.array([0,1,0,1]).reshape(2,2)
Re = 1
Pr = 0.72
Gr = 1e4

class INSET():  # interior collocation points
    def __init__(self,nx,mode):
        self.dim = 2
        #self.area = (bound[0,1] - bound[0,0])*(bound[1,1] - bound[1,0])
        self.hx = [(bounds[0,1] - bounds[0,0])/nx[0],(bounds[1,1] - bounds[1,0])/nx[1]]
        self.size = nx[0]*nx[1]
        self.X = torch.zeros(self.size,self.dim)  # store the interior points
        if mode == 'uniform':
            for i in range(nx[0]):
                for j in range(nx[1]):
                    self.X[i*nx[1] + j,0] = bounds[0,0] + (i + 0.5)*self.hx[0]
                    self.X[i*nx[1] + j,1] = bounds[1,0] + (j + 0.5)*self.hx[1]
        elif mode == 'random':
            tmp = torch.rand(self.size,2)
            self.X[:,0] = bounds[0,0] + self.hx[0] + (bounds[0,1] - bounds[0,0] - 2*self.hx[0])*tmp[:,0]
            self.X[:,1] = bounds[1,0] + self.hx[1] + (bounds[1,1] - bounds[1,0] - 2*self.hx[1])*tmp[:,1]
        else:
            tmp = torch.tensor(self.quasi_samples(self.size))
            self.X[:,0] = bounds[0,0] + self.hx[0] + (bounds[0,1] - bounds[0,0] - 2*self.hx[0])*tmp[:,0]
            self.X[:,1] = bounds[1,0] + self.hx[1] + (bounds[1,1] - bounds[1,0] - 2*self.hx[1])*tmp[:,1]
        
    def quasi_samples(self,N):
        sampler = qmc.Sobol(d=self.dim)
        sample = sampler.random(n=N)
        return sample
class BDSET():  # boundary points for the five boundary groups
    def __init__(self,nx):
        self.dim = 2
        self.hx = [(bounds[0,1] - bounds[0,0])/nx[0],(bounds[1,1] - bounds[1,0])/nx[1]]
        self.size = 2*(nx[0] + nx[1])
        #----------------------------------
        self.u1 = torch.zeros(nx[0] + 1,1)
        self.v1 = torch.zeros(nx[0] + 1,1)
        self.x1 = torch.zeros(nx[0] + 1,2)
        for i in range(nx[0] + 1):
            self.x1[i,0] = bounds[0,0] + i*self.hx[0]
            self.x1[i,1] = bounds[1,0]
        self.T1 = torch.ones(self.x1.shape[0],1)
        #--------------------------------    
        self.u2 = torch.zeros(2*nx[1],1)
        self.v2 = torch.zeros(2*nx[1],1)
        self.x2 = torch.zeros(2*nx[1],2)
        self.dir2 = torch.zeros(2*nx[1],2)
        for i in range(nx[1]):
            self.x2[i,0] = bounds[0,0]
            self.x2[i,1] = bounds[1,0] + (i + 0.5)*self.hx[1]
            self.dir2[i,0] = -1;self.dir2[i,1] = 0
            self.x2[i + nx[1],0] = bounds[0,1]
            self.x2[i + nx[1],1] = bounds[1,0] + (i + 0.5)*self.hx[1]
            self.dir2[i + nx[1],0] = 1;self.dir2[i + nx[1],1] = 0
        self.g2 = torch.zeros_like(self.u2)
        #--------------------------
        self.u3 = torch.zeros(nx[0],1)
        self.v3 = torch.zeros(nx[0],1)
        self.x3 = torch.zeros(nx[0],2)
        
        for i in range(1,nx[0] - 1):
            self.x3[i,0] = 1/3 + i/(3*nx[0])
            self.x3[i,1] = 1.0
        self.x3[0,0] = 1/3;self.x3[0,1] = 1
        self.x3[-1,0] = 2/3;self.x3[-1,1] = 1
        for i in range(nx[0]):
            self.v3[i,0] = -4*(self.x3[i,0] - 1/3)*(2/3 - self.x3[i,0])
        self.T3 = torch.zeros(self.x3.shape[0],1)
        #------------------------------------
        self.x4 = torch.zeros(nx[0],2)
        self.u4 = torch.zeros(nx[0],1)
        self.v4 = torch.zeros(nx[0],1)
        self.dir4 = torch.zeros(nx[0],2)
        for i in range(1,nx[0]):
            self.x4[i,0] = i/(3*nx[0])
            self.x4[i,1] = 1.0
        self.x4[0,0] = 0;self.x4[0,1] = 1.0
        for i in range(nx[0]):
            self.v4[i,0] = 2*self.x4[i,0]*(1/3 - self.x4[i,0])
        self.dir4[:,0] = torch.zeros(nx[0]);self.dir4[:,1] = torch.ones(nx[0])
        #------------------------------------
        self.x5 = torch.zeros(nx[0],2)
        self.u5 = torch.zeros(nx[0],1)
        self.v5 = torch.zeros(nx[0],1)
        self.dir5 = torch.zeros(nx[0],2)
        for i in range(nx[0] - 1):
            self.x5[i,0] = 2/3 + (i + 0.5)/(3*nx[0])
            self.x5[i,1] = 1.0
        self.x5[-1,0] = 1;self.x5[-1,1] = 1.0
        for i in range(nx[0]):
            self.v5[i,0] = 2*(self.x5[i,0] - 2/3)*(1 - self.x5[i,0])
        self.dir5[:,0] = torch.zeros(nx[0]);self.dir5[:,1] = torch.ones(nx[0])  # outward normal (0,1) on the top wall, as for dir4

class LENU():
    def __init__(self):
        pass
    def forward(self,X):
        x = X[:,0]
        y = X[:,1]
        L = x*(1 -x)*y*(1 - y)
        return L.reshape(-1,1)
class LENV():
    def __init__(self):
        pass
    def forward(self,X):
        x = X[:,0]
        y = X[:,1]
        L = x*(1 -x)*y
        return L.reshape(-1,1)
        
np.random.seed(1234)
torch.manual_seed(1234)
class Net(torch.nn.Module):
    def __init__(self, layers, dtype):
        super(Net, self).__init__()
        self.layers = layers
        self.layers_hid_num = len(layers)-2
        self.device = device
        self.dtype = dtype
        fc = []
        for i in range(self.layers_hid_num):
            fc.append(torch.nn.Linear(self.layers[i],self.layers[i+1]))
            fc.append(torch.nn.Linear(self.layers[i+1],self.layers[i+1]))
        fc.append(torch.nn.Linear(self.layers[-2],self.layers[-1]))
        self.fc = torch.nn.Sequential(*fc)
        for i in range(self.layers_hid_num):
            self.fc[2*i].weight.data = self.fc[2*i].weight.data.type(dtype)
            self.fc[2*i].bias.data = self.fc[2*i].bias.data.type(dtype)
            self.fc[2*i + 1].weight.data = self.fc[2*i + 1].weight.data.type(dtype)
            self.fc[2*i + 1].bias.data = self.fc[2*i + 1].bias.data.type(dtype)
        self.fc[-1].weight.data = self.fc[-1].weight.data.type(dtype)
        self.fc[-1].bias.data = self.fc[-1].bias.data.type(dtype)
    def forward(self, x):
        dev = x.device
        for i in range(self.layers_hid_num):
            h = torch.sin(self.fc[2*i](x))
            h = torch.sin(self.fc[2*i+1](h))
            temp = torch.eye(x.shape[-1],self.layers[i+1],dtype = self.dtype,device = dev)
            x = h + x@temp
        return self.fc[-1](x) 

# Hard-constraint ansatz from the text: the length-factor multipliers enforce the
# homogeneous boundary values exactly, and the lifted T satisfies condition bd1.
def pred_u(netu,lenthu,X):
    return netu.forward(X)*lenthu.forward(X)  # u = 0 on the whole boundary
def pred_v(netv,lenthv,X):
    return netv.forward(X)*lenthv.forward(X)  # v = 0 on the bottom and side walls
def pred_w(netw,X):
    return netw.forward(X)                    # vorticity is left unconstrained
def pred_T(netT,X):
    return netT.forward(X)*X[:,1:2] + 1       # T = 1 at y = 0 automatically
def loadtype(inset,bdset,dtype):
    inset.X = inset.X.type(dtype)
    
    #-----------------------------
    bdset.x1 = bdset.x1.type(dtype)
    bdset.u1 = bdset.u1.type(dtype)
    bdset.v1 = bdset.v1.type(dtype)
    bdset.T1 = bdset.T1.type(dtype)
    #-----------------------------
    bdset.x2 = bdset.x2.type(dtype)
    bdset.u2 = bdset.u2.type(dtype)
    bdset.v2 = bdset.v2.type(dtype)
    bdset.g2 = bdset.g2.type(dtype)
    bdset.dir2 = bdset.dir2.type(dtype)
    #-----------------------------
    bdset.x3 = bdset.x3.type(dtype)
    bdset.u3 = bdset.u3.type(dtype)
    bdset.v3 = bdset.v3.type(dtype)
    bdset.T3 = bdset.T3.type(dtype)
    #-----------------------------
    bdset.x4 = bdset.x4.type(dtype)
    bdset.u4 = bdset.u4.type(dtype)
    bdset.v4 = bdset.v4.type(dtype)
    bdset.dir4 = bdset.dir4.type(dtype)
    #-----------------------------
    bdset.x5 = bdset.x5.type(dtype)
    bdset.u5 = bdset.u5.type(dtype)
    bdset.v5 = bdset.v5.type(dtype)
    bdset.dir5 = bdset.dir5.type(dtype)
    
def loadcuda(netu,netv,netw,netT,inset,bdset):    
    netu = netu.to(device)
    netv = netv.to(device)
    netw = netw.to(device)
    netT = netT.to(device)
    
    if inset.X.requires_grad == False:
        inset.X.requires_grad = True
    inset.X = inset.X.to(device)
    
    #-----------------------------
    bdset.x1 = bdset.x1.to(device)
    bdset.u1 = bdset.u1.to(device)
    bdset.v1 = bdset.v1.to(device)
    bdset.T1 = bdset.T1.to(device)
    #-----------------------------
    if bdset.x2.requires_grad == False:
        bdset.x2.requires_grad = True
    bdset.x2 = bdset.x2.to(device)
    bdset.u2 = bdset.u2.to(device)
    bdset.v2 = bdset.v2.to(device)
    bdset.g2 = bdset.g2.to(device)
    bdset.dir2 = bdset.dir2.to(device)
    #-----------------------------
    bdset.x3 = bdset.x3.to(device)
    bdset.u3 = bdset.u3.to(device)
    bdset.v3 = bdset.v3.to(device)
    bdset.T3 = bdset.T3.to(device)
    #-----------------------------
    if bdset.x4.requires_grad == False:
        bdset.x4.requires_grad = True
    bdset.x4 = bdset.x4.to(device)
    bdset.u4 = bdset.u4.to(device)
    bdset.v4 = bdset.v4.to(device)
    bdset.dir4 = bdset.dir4.to(device)
    #-----------------------------
    if bdset.x5.requires_grad == False:
        bdset.x5.requires_grad = True
    bdset.x5 = bdset.x5.to(device)
    bdset.u5 = bdset.u5.to(device)
    bdset.v5 = bdset.v5.to(device)
    bdset.dir5 = bdset.dir5.to(device)

def loadcpu(netu,netv,netw,netT,inset,bdset):    
    netu = netu.to('cpu')
    netv = netv.to('cpu')
    netw = netw.to('cpu')
    netT = netT.to('cpu')
    
    if inset.X.requires_grad == False:
        inset.X.requires_grad = True
    inset.X = inset.X.to('cpu')
    
    #-----------------------------
    bdset.x1 = bdset.x1.to('cpu')
    bdset.u1 = bdset.u1.to('cpu')
    bdset.v1 = bdset.v1.to('cpu')
    bdset.T1 = bdset.T1.to('cpu')
    #-----------------------------
    if bdset.x2.requires_grad == False:
        bdset.x2.requires_grad = True
    bdset.x2 = bdset.x2.to('cpu')
    bdset.u2 = bdset.u2.to('cpu')
    bdset.v2 = bdset.v2.to('cpu')
    bdset.g2 = bdset.g2.to('cpu')
    bdset.dir2 = bdset.dir2.to('cpu')
    #-----------------------------
    bdset.x3 = bdset.x3.to('cpu')
    bdset.u3 = bdset.u3.to('cpu')
    bdset.v3 = bdset.v3.to('cpu')
    bdset.T3 = bdset.T3.to('cpu')
    #-----------------------------
    if bdset.x4.requires_grad == False:
        bdset.x4.requires_grad = True
    bdset.x4 = bdset.x4.to('cpu')
    bdset.u4 = bdset.u4.to('cpu')
    bdset.v4 = bdset.v4.to('cpu')
    bdset.dir4 = bdset.dir4.to('cpu')
    #-----------------------------
    if bdset.x5.requires_grad == False:
        bdset.x5.requires_grad = True
    bdset.x5 = bdset.x5.to('cpu')
    bdset.u5 = bdset.u5.to('cpu')
    bdset.v5 = bdset.v5.to('cpu')
    bdset.dir5 = bdset.dir5.to('cpu')
def Loss_yp(netu,netv,netw,netT,lenthu,lenthv,inset,bdset,penalty_in,penalty_bd):

    inset.u = pred_u(netu,lenthu,inset.X)
    inset.v = pred_v(netv,lenthv,inset.X)
    inset.w = pred_w(netw,inset.X)
    inset.T = pred_T(netT,inset.X)
    u_x, = torch.autograd.grad(inset.u, inset.X, create_graph=True, retain_graph=True,
                               grad_outputs=torch.ones(inset.size,1).to(device))
    u_xx, = torch.autograd.grad(u_x[:,0:1], inset.X, create_graph=True, retain_graph=True,
                               grad_outputs=torch.ones(inset.size,1).to(device))
    u_yy, = torch.autograd.grad(u_x[:,1:2], inset.X, create_graph=True, retain_graph=True,
                               grad_outputs=torch.ones(inset.size,1).to(device))  
    u_lap = u_xx[:,0:1] + u_yy[:,1:2]

    v_x, = torch.autograd.grad(inset.v, inset.X, create_graph=True, retain_graph=True,
                               grad_outputs=torch.ones(inset.size,1).to(device))
    v_xx, = torch.autograd.grad(v_x[:,0:1], inset.X, create_graph=True, retain_graph=True,
                               grad_outputs=torch.ones(inset.size,1).to(device))
    v_yy, = torch.autograd.grad(v_x[:,1:2], inset.X, create_graph=True, retain_graph=True,
                               grad_outputs=torch.ones(inset.size,1).to(device))  
    v_lap = v_xx[:,0:1] + v_yy[:,1:2]

    w_x, = torch.autograd.grad(inset.w, inset.X, create_graph=True, retain_graph=True,
                               grad_outputs=torch.ones(inset.size,1).to(device))
    w_xx, = torch.autograd.grad(w_x[:,0:1], inset.X, create_graph=True, retain_graph=True,
                               grad_outputs=torch.ones(inset.size,1).to(device))
    w_yy, = torch.autograd.grad(w_x[:,1:2], inset.X, create_graph=True, retain_graph=True,
                               grad_outputs=torch.ones(inset.size,1).to(device))  
    w_lap = w_xx[:,0:1] + w_yy[:,1:2]

    T_x, = torch.autograd.grad(inset.T, inset.X, create_graph=True, retain_graph=True,
                               grad_outputs=torch.ones(inset.size,1).to(device))
    T_xx, = torch.autograd.grad(T_x[:,0:1], inset.X, create_graph=True, retain_graph=True,
                               grad_outputs=torch.ones(inset.size,1).to(device))
    T_yy, = torch.autograd.grad(T_x[:,1:2], inset.X, create_graph=True, retain_graph=True,
                               grad_outputs=torch.ones(inset.size,1).to(device))  
    T_lap = T_xx[:,0:1] + T_yy[:,1:2]

    inset.res_u = (u_lap + w_x[:,1:2])**2
    inset.res_v = (-v_lap + w_x[:,0:1])**2
    inset.res_w = (-w_lap/Re + inset.u*w_x[:,0:1] + inset.v*w_x[:,1:2] - T_x[:,0:1]*Gr/(Re**2))**2
    inset.res_T = (-T_lap/(Re*Pr) + inset.u*T_x[:,0:1] + inset.v*T_x[:,1:2])**2
    inset.var_w = (inset.w + u_x[:,1:2] - v_x[:,0:1])**2
    inset.loss_u = torch.sqrt(inset.res_u.mean())
    inset.loss_v = torch.sqrt(inset.res_v.mean())
    inset.loss_w = torch.sqrt(inset.res_w.mean())
    inset.loss_T = torch.sqrt(inset.res_T.mean())
    inset.loss_var = torch.sqrt(inset.var_w.mean())
    inset.loss_in = penalty_in[0]*inset.loss_u + penalty_in[1]*inset.loss_v + \
        penalty_in[2]*inset.loss_w + penalty_in[3]*inset.loss_T + penalty_in[4]*inset.loss_var
    #--------------------------------------
    
    #2
    bdset.T2 = pred_T(netT,bdset.x2)
    Tbd_x2, = torch.autograd.grad(bdset.T2, bdset.x2, create_graph=True, retain_graph=True,
                               grad_outputs=torch.ones(bdset.x2.shape[0],1).to(device))
    Tbd_n2 = ((Tbd_x2*bdset.dir2).sum(1)).reshape(-1,1)
    
    bdset.loss2 = torch.sqrt((Tbd_n2 + bdset.T2)**2).mean()
    #3
    tmpbd3 = (pred_v(netv,lenthv,bdset.x3) - bdset.v3)**2 + (pred_T(netT,bdset.x3))**2
    bdset.loss3 = torch.sqrt(tmpbd3.mean())
    #4
    Tbd_x4, = torch.autograd.grad(pred_T(netT,bdset.x4), bdset.x4, create_graph=True, retain_graph=True,
                               grad_outputs=torch.ones(bdset.x4.shape[0],1).to(device))
    Tbd_n4 = ((Tbd_x4*bdset.dir4).sum(1)).reshape(-1,1)
    tmpbd4 = (pred_v(netv,lenthv,bdset.x4) - bdset.v4)**2 + (Tbd_n4)**2
    bdset.loss4 = torch.sqrt(tmpbd4.mean())
    #5
    Tbd_x5, = torch.autograd.grad(pred_T(netT,bdset.x5), bdset.x5, create_graph=True, retain_graph=True,
                               grad_outputs=torch.ones(bdset.x5.shape[0],1).to(device))
    Tbd_n5 = ((Tbd_x5*bdset.dir5).sum(1)).reshape(-1,1)
    tmpbd5 = (pred_v(netv,lenthv,bdset.x5) - bdset.v5)**2 + (Tbd_n5)**2
    bdset.loss5 = torch.sqrt(tmpbd5.mean())
    inset.loss_bd = penalty_bd[0]*bdset.loss2 + penalty_bd[1]*bdset.loss3 +\
        penalty_bd[2]*bdset.loss4 + penalty_bd[3]*bdset.loss5
    return inset.loss_in + inset.loss_bd
    
def train_yp(netu,netv,netw,netT,lenthu,lenthv,inset,bdset,penalty_in,penalty_bd,optim,epoch,error_history,optimtype):
    print('Train y&p Neural Network')
    lossin_history,var_history,lossbd_history = error_history
    loss = Loss_yp(netu,netv,netw,netT,lenthu,lenthv,inset,bdset,penalty_in,penalty_bd)
    print('epoch_yp: %d, Lu:%.3e,Lv:%.3e,Lw:%.3e,LT:%.3e,Lvar:%.3e, time: %.2f\n'
          %(0,inset.loss_u.item(),inset.loss_v.item(),inset.loss_w.item(),inset.loss_T.item(),inset.loss_var.item(), 0.00))
    print('bd2:%.3e,bd3:%.3e,bd4:%.4e,bd5:%.3e\n'%
    (bdset.loss2.item(),bdset.loss3.item(),bdset.loss4.item(),bdset.loss5.item()))
    for it in range(epoch):
        st = time.time()
        if optimtype == 'Adam':
            for j in range(100):
                optim.zero_grad()
                loss = Loss_yp(netu,netv,netw,netT,lenthu,lenthv,inset,bdset,penalty_in,penalty_bd)
                loss.backward()
                optim.step()
        if optimtype == 'BFGS' or optimtype == 'LBFGS':
            def closure():
                optim.zero_grad()
                loss = Loss_yp(netu,netv,netw,netT,lenthu,lenthv,inset,bdset,penalty_in,penalty_bd)
                loss.backward()
                return loss
            optim.step(closure) 
        loss = Loss_yp(netu,netv,netw,netT,lenthu,lenthv,inset,bdset,penalty_in,penalty_bd)
        ela = time.time() - st
        print('------------------------------')
        print('loss:%.3e,bd2:%.3e,bd3:%.3e,bd4:%.4e,bd5:%.3e\n'%
        (loss.item(),bdset.loss2.item(),bdset.loss3.item(),bdset.loss4.item(),bdset.loss5.item()))
        print('epoch_yp: %d, Lu:%.3e,Lv:%.3e,Lw:%.3e,LT:%.3e,Lvar:%.3e, time: %.2f\n'
          %(it + 1,inset.loss_u.item(),inset.loss_v.item(),inset.loss_w.item(),inset.loss_T.item(),inset.loss_var.item(), ela))
        
        loss_u = inset.loss_u.item()
        loss_v = inset.loss_v.item()
        loss_w = inset.loss_w.item()
        loss_T = inset.loss_T.item()
        loss_var = inset.loss_var.item()
        
        bd2 = bdset.loss2.item()
        bd3 = bdset.loss3.item()
        bd4 = bdset.loss4.item()
        bd5 = bdset.loss5.item()
        lossin_history.append([loss.item(),loss_u,loss_v,loss_w,loss_T,loss_var])
        var_history.append(loss_var)
        lossbd_history.append([bd2,bd3,bd4,bd5])
        error_history = [lossin_history,var_history,lossbd_history]
    return error_history

nx = [64,64]
nx_bd = [100,100]

mu = 1
lr = 1e0
#mode = 'random'
mode = 'uniform'
#mode = 'qmc'
inset = INSET(nx,mode)
bdset = BDSET(nx_bd)
lenthu = LENU()
lenthv = LENV()

dtype = torch.float64
wid = 25
layer_u = [2,wid,wid,wid,1];netu = Net(layer_u,dtype)
layer_v = [2,wid,wid,wid,1];netv = Net(layer_v,dtype)
layer_w = [2,wid,wid,wid,1];netw = Net(layer_w,dtype)
layer_T = [2,wid,wid,wid,1];netT = Net(layer_T,dtype)

fname1 = "unobasic-CVDlay%dvar.pt"%(wid)
fname2 = "vnobasic-CVDlay%dvar.pt"%(wid)
fname3 = "wnobasic-CVDlay%dvar.pt"%(wid)
fname4 = "Tnobasic-CVDlay%dvar.pt"%(wid)

#netu = torch.load(fname1)
#netv = torch.load(fname2)
#netw = torch.load(fname3)
#netT = torch.load(fname4)

loadtype(inset,bdset,dtype)
loadcuda(netu,netv,netw,netT,inset,bdset)

loss_history = [[],[],[]]
epoch = 10
start_time = time.time()
penalty_in = [1e0,1e0,1e0,1e0,1e0]
penalty_bd = [1e0,1e0,1e0,1e0,1e0]
lr = 1e0
max_iter = 1
#optimtype = 'Adam'
optimtype = 'BFGS'
#optimtype = 'LBFGS'
for i in range(max_iter):
    if optimtype == 'BFGS':
        optim = bfgs.BFGS(itertools.chain(netu.parameters(),
                                        netv.parameters(),
                                        netw.parameters(),
                                        netT.parameters()),
                        lr=lr, max_iter=100,
                        tolerance_grad=1e-16, tolerance_change=1e-16,
                        line_search_fn='strong_wolfe')
    if optimtype == 'LBFGS':
        optim = torch.optim.LBFGS(itertools.chain(netu.parameters(),
                                        netv.parameters(),
                                        netw.parameters(),
                                        netT.parameters()),
                        lr=lr, max_iter=100,
                        history_size=100,
                        line_search_fn='strong_wolfe')
    if optimtype == 'Adam':
        optim = torch.optim.Adam(itertools.chain(netu.parameters(),
                                        netv.parameters(),
                                        netw.parameters(),
                                        netT.parameters()),
                        lr=lr)
    loss_history = \
        train_yp(netu,netv,netw,netT,lenthu,lenthv,inset,bdset,penalty_in,penalty_bd,optim,epoch,loss_history,optimtype)
    if (i + 1)%2 == 0:
        lr *= 0.985
elapsed = time.time() - start_time
print('Finished! train time: %.2f\n' %(elapsed))

loadcpu(netu,netv,netw,netT,inset,bdset)
torch.cuda.empty_cache()

torch.save(netu, fname1)
torch.save(netv, fname2)
torch.save(netw, fname3)
torch.save(netT, fname4)

lossin_history,var_history,lossbd_history = loss_history
np.save('lay%d-epoch%d-lossin.npy'%(wid,epoch),lossin_history)
np.save('lay%d-epoch%d-lossbd.npy'%(wid,epoch),lossbd_history)
np.save('lay%d-epoch%d-lossvar.npy'%(wid,epoch),var_history)
fig, ax = plt.subplots(1,2,figsize=(12,3.5))

ax[0].semilogy(np.array(lossin_history))
ax[0].legend(['loss','loss_u', 'loss_v', 'loss_w', 'loss_T','loss_var'])
ax[0].set_xlabel('iters') 

ax[1].plot(np.array(lossbd_history))
ax[1].legend(['bd2','bd3','bd4','bd5'])
ax[1].set_xlabel('iters') 
ax[1].set_yscale('log')
fig.tight_layout()
plt.show()


plt.scatter(inset.X.detach().numpy()[:,0],inset.X.detach().numpy()[:,1])
plt.show()
#%%

fig, ax = plt.subplots(2,2,figsize=(8,8))
for i in range(2):
    for j in range(2):
        ax[i,j].axis('equal')
        ax[i,j].set_xlim([0,1])
        ax[i,j].set_ylim([0,1])
        ax[i,j].axis('off')
num_line = 100
nx = [21,11]
x = np.linspace(bounds[0,0],bounds[0,1],nx[0])
y = np.linspace(bounds[1,0],bounds[1,1],nx[1])
hx = [(bounds[0,1] - bounds[0,0])/(nx[0] - 1),(bounds[1,1] - bounds[1,0])/(nx[1] - 1)]
inp = torch.zeros(nx[0]*nx[1],2).type(dtype)
for i in range(nx[0]):
    for j in range(nx[1]):
        inp[i*nx[1] + j,0] = bounds[0,0] + i*hx[0]
        inp[i*nx[1] + j,1] = bounds[1,0] + j*hx[1]
u = pred_u(netu,lenthu,inp).detach().numpy().reshape(nx[0],nx[1]).T
v = pred_u(netv,lenthv,inp).detach().numpy().reshape(nx[0],nx[1]).T
w = pred_w(netw,inp).detach().numpy().reshape(nx[0],nx[1]).T
T = pred_T(netT,inp).detach().numpy().reshape(nx[0],nx[1]).T
X,Y = np.meshgrid(x,y)

s = ax[0,0].contourf(X,Y, u, num_line, cmap='rainbow')
ax[0,0].contour(s, linewidths=0.6, colors='black')
ax[0,0].set_title('NN:u',fontsize=15)    
fig.colorbar(s,ax=ax[0,0],fraction = 0.045)

s = ax[0,1].contourf(X,Y, v, num_line, cmap='rainbow')
ax[0,1].contour(s, linewidths=0.6, colors='black')
ax[0,1].set_title('NN:v',fontsize=15)    
fig.colorbar(s,ax=ax[0,1],fraction = 0.045)

s = ax[1,0].contourf(X,Y, w, num_line, cmap='rainbow')
ax[1,0].contour(s, linewidths=0.6, colors='black')
ax[1,0].set_title('NN:w',fontsize=15)    
fig.colorbar(s,ax=ax[1,0],fraction = 0.045)

s = ax[1,1].contourf(X,Y, T, num_line, cmap='rainbow')
ax[1,1].contour(s, linewidths=0.6, colors='black')
ax[1,1].set_title('NN:T',fontsize=15)    
fig.colorbar(s,ax=ax[1,1],fraction = 0.045)

fig.tight_layout()
plt.show()

#%%
plt.contourf(x,y,T,40,cmap = 'rainbow')
plt.colorbar()
plt.xlabel('x')
plt.ylabel('y')
plt.title('the temperature T')
plt.show()

Obstacle flow problem

Here d = 0.1 and ν = 0.01:
\left\{\begin{array}{l} -\nu \Delta u + uu_x + v u_y + p_x = 0,\\ -\nu \Delta v + uv_x + vv_y + p_y=0,\\ u_x + v_y=0,\\ u|_{x_{in}} = 4y(1-y),\quad v|_{x_{in}} = 0,\\ \frac{\partial u}{\partial n}\big|_{x_{out}} = 0,\quad \frac{\partial v}{\partial n}\big|_{x_{out}} = 0,\\ u|_{x_{w}} = 0,\quad v|_{x_{w}} = 0,\\ p|_{x_{out}} = 0,\\ \frac{\partial p}{\partial n}\big|_{x_{oc}} = 0. \end{array}\right.
\left\{\begin{array}{l} x_{in}=\{(x,y)\mid x = 0,\,0\leq y \leq 1\},\\ x_{out}=\{(x,y)\mid x = 2,\,0\leq y \leq 1\},\\ x_{w} = \{(x,y)\mid y = 0,\,0\leq x \leq 2\}\cup \{(x,y)\mid y = 1,\,0\leq x \leq 2\} \cup \partial\Omega_d,\\ \Omega_d = \{(x,y)\mid 1 - d \leq x \leq 1 + d,\,0.5 -d \leq y \leq 0.5 + d\},\\ x_{oc} = x_w \cup x_{in}. \end{array}\right.
The reference results are as follows (figure omitted).

PINN code for the obstacle flow problem

For u, v, p, the layer sizes of the fully connected networks are all set to [2,25,25,25,1]; interior points are sampled uniformly on a [64,64] grid, and boundary points on a [100,100] partition. Eight boundary conditions were listed above, but here I define the approximate solution as follows:
\left\{\begin{array}{l} u = net_u\,x + 4y(1-y),\\ v = net_v\,x\,y(1 - y),\\ p = net_p\,(x-2). \end{array}\right.
With this definition the approximate solution satisfies three of the boundary conditions exactly (u and v at the inlet and p at the outlet), and v additionally vanishes on the top and bottom walls; the remaining conditions are enforced through the boundary loss. The result after 100 iterations is as follows:
(figure omitted: training loss curves)
In the figure above, bd1 fits \frac{\partial u}{\partial n}\big|_{x_{out}} = 0, bd2 fits \frac{\partial v}{\partial n}\big|_{x_{out}} = 0, bd3 fits u|_{x_w} = 0, bd4 fits v|_{x_w} = 0, and bd5 fits \frac{\partial p}{\partial n}\big|_{x_{oc}} = 0.
(result figures omitted)

import torch
import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import torch.nn as nn
import torch.nn.functional as F
import itertools
import bfgs
from scipy.stats import qmc
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
#device = torch.device('cpu')

nu = 1e-2
d = 0.1
def region(x):
    x1 = x[:,0:1]
    x2 = x[:,1:2]
    box = (x1>1-d)&(x1<1+d)&(x2>0.5-d)&(x2<0.5+d)
    s = (x1>0)&(x1<2)&(x2>0)&(x2<1)&(np.invert(box))
    return s.flatten()
hr = 1e-3
bounds = np.array([0 - hr,2 + hr,0 - hr,1 + hr]).reshape(2,2)

Lx = 2.0
Ly = 1.0
def y_in(x):
    x2 = x[:,1:2]
    s = 0*x
    s[:,0:1] = 4*x2*(1-x2)/(Ly**2)
    return s

def y_d(x):
    return y_in(x)

class Geo():
    def __init__(self,region,bounds):
        self.region = region
        self.bounds = bounds
        self.dim = self.bounds.shape[0]
        
    def samples(self,N):
        x = np.zeros((N,self.dim))
        m=0
        while (m<N):
            pt = np.random.uniform(0,1,self.dim).reshape(1,-1)
            pt = pt*(self.bounds[:,1]-self.bounds[:,0])+self.bounds[:,0]
            if self.region(pt).all():
                x[m,:] = pt.ravel()
                m += 1
        return x          

    def quasi_samples(self,N):
        sampler = qmc.Sobol(d=self.dim)
        sample = sampler.random(n=2*N)
        sample = sample*(self.bounds[:,1]-self.bounds[:,0])+self.bounds[:,0]
        sample = sample[self.region(sample),:][:N,:]
        return sample   

Omega = Geo(region, bounds)
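# Illustrative check (not required for training): Geo.samples fills the target
# count by rejection sampling, drawing uniform points in the bounding box and
# keeping only those that region() marks as fluid; quasi_samples applies the
# same filter to a Sobol low-discrepancy sequence.
_chk = Omega.samples(8)
assert region(_chk).all()   # no accepted sample falls inside the obstacle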
class INSET():
    def __init__(self,n_tr,mode):
        if mode == 'uniform':
            x = np.linspace(bounds[0,0],bounds[0,1],2*n_tr)
            y = np.linspace(bounds[1,0],bounds[1,1],n_tr)
            xx0, xx1 = np.meshgrid(x,y)

            inp_s = (np.hstack([xx0.reshape(-1,1),xx1.reshape(-1,1)]))
            x = []
            for i in range(inp_s.shape[0]):
                ind = (inp_s[i,0] > 1 - d)&(inp_s[i,0] < 1 + d)&(inp_s[i,1] > 0.5 - d)&(inp_s[i,1] < 0.5 + d)
                if not ind:   # keep only points outside the obstacle
                    x.append(inp_s[i,:])
            x = np.array(x)
            self.X = torch.tensor(x)
        else:
            self.X = torch.from_numpy(Omega.quasi_samples(n_tr*n_tr))
        self.size = self.X.shape[0]
        self.area = 2 - (2*d)**2   # channel area minus the 2d-by-2d obstacle
               
        self.dim = 2
        self.yd = y_d(self.X)
class BDSET():# boundary collocation points and outward normals
    def __init__(self,nx):
        self.dim = 2
        self.hx = [(bounds[0,1] - bounds[0,0])/nx[0],(bounds[1,1] - bounds[1,0])/nx[1]]
        self.x_in = torch.zeros(nx[1],self.dim)
        self.dir_in = torch.zeros(nx[1],self.dim)
        for i in range(nx[1]):
            self.x_in[i,0] = bounds[0,0]
            self.x_in[i,1] = bounds[1,0] + (i + 0.5)*self.hx[1]
            self.dir_in[i,0] = -1
            self.dir_in[i,1] = 0
        self.rig_in = torch.zeros_like(self.x_in)
        self.rig_in = y_in(self.x_in)

        self.x_out = torch.zeros(nx[1],self.dim)
        self.dir_out = torch.zeros(nx[1],self.dim)
        for i in range(nx[1]):
            self.x_out[i,0] = bounds[0,1]
            self.x_out[i,1] = bounds[1,0] + (i + 0.5)*self.hx[1]
            self.dir_out[i,0] = 1
            self.dir_out[i,1] = 0

        self.x_w = torch.zeros(4*nx[0] + 2*nx[1],self.dim)
        self.dir_w = torch.zeros(4*nx[0] + 2*nx[1],self.dim)
        m = 0
        for i in range(nx[0]):
            self.x_w[m,0] = bounds[0,0] + (i + 0.5)*self.hx[0]
            self.x_w[m,1] = bounds[1,0]
            self.dir_w[m,0] = 0
            self.dir_w[m,1] = -1
            m = m + 1
        for i in range(nx[0]):
            self.x_w[m,0] = bounds[0,0] + (i + 0.5)*self.hx[0]
            self.x_w[m,1] = bounds[1,1]
            self.dir_w[m,0] = 0
            self.dir_w[m,1] = 1
            m = m + 1
        for i in range(nx[0]):
            self.x_w[m,0] = 1 - d + (i + 0.5)*(2*d/nx[0])
            self.x_w[m,1] = 0.5 - d
            self.dir_w[m,0] = 0
            self.dir_w[m,1] = -1
            m = m + 1
        for i in range(nx[0]):
            self.x_w[m,0] = 1 - d + (i + 0.5)*(2*d/nx[0])
            self.x_w[m,1] = 0.5 + d
            self.dir_w[m,0] = 0
            self.dir_w[m,1] = 1
            m = m + 1
        for i in range(nx[1]):
            self.x_w[m,0] = 1 - d
            self.x_w[m,1] = 0.5 - d + (i + 0.5)*(2*d/nx[1])
            self.dir_w[m,0] = -1
            self.dir_w[m,1] = 0
            m = m + 1
        for i in range(nx[1]):
            self.x_w[m,0] = 1 + d
            self.x_w[m,1] = 0.5 - d + (i + 0.5)*(2*d/nx[1])
            self.dir_w[m,0] = 1
            self.dir_w[m,1] = 0
            m = m + 1
        self.x_oc = torch.cat([self.x_in,self.x_w],0)
        self.dir_oc = torch.cat([self.dir_in,self.dir_w],0)
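# Illustrative check (not required for training): BDSET stores midpoint
# collocation points on every boundary piece together with the outward unit
# normal dir_*, which Loss_yp contracts with gradients to form normal
# derivatives such as dp/dn. Using a coarse [4,4] partition purely as an
# example:
_bd_chk = BDSET([4,4])
assert _bd_chk.x_w.shape[0] == 4*4 + 2*4   # four nx[0]-pieces plus two nx[1]-pieces
assert torch.allclose((_bd_chk.dir_w**2).sum(1), torch.ones(4*4 + 2*4))   # unit normals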
np.random.seed(1234)
torch.manual_seed(1234)

class Net(torch.nn.Module):
    def __init__(self, layers, dtype):
        super(Net, self).__init__()
        self.layers = layers
        self.layers_hid_num = len(layers)-2
        self.device = device
        self.dtype = dtype
        fc = []
        for i in range(self.layers_hid_num):
            fc.append(torch.nn.Linear(self.layers[i],self.layers[i+1]))
            fc.append(torch.nn.Linear(self.layers[i+1],self.layers[i+1]))
        fc.append(torch.nn.Linear(self.layers[-2],self.layers[-1]))
        self.fc = torch.nn.Sequential(*fc)
        for i in range(self.layers_hid_num):
            self.fc[2*i].weight.data = self.fc[2*i].weight.data.type(dtype)
            self.fc[2*i].bias.data = self.fc[2*i].bias.data.type(dtype)
            self.fc[2*i + 1].weight.data = self.fc[2*i + 1].weight.data.type(dtype)
            self.fc[2*i + 1].bias.data = self.fc[2*i + 1].bias.data.type(dtype)
        self.fc[-1].weight.data = self.fc[-1].weight.data.type(dtype)
        self.fc[-1].bias.data = self.fc[-1].bias.data.type(dtype)
    def forward(self, x):
        dev = x.device
        for i in range(self.layers_hid_num):
            # one residual block: two sine-activated linear layers
            h = torch.sin(self.fc[2*i](x))
            h = torch.sin(self.fc[2*i+1](h))
            # rectangular identity matrix pads/projects x to the hidden width
            # so that the skip connection x + h is well defined
            temp = torch.eye(x.shape[-1],self.layers[i+1],dtype = self.dtype,device = dev)
            x = h + x@temp
        return self.fc[-1](x)
    def total_para(self):# total number of trainable parameters
        return sum([x.numel() for x in self.parameters()])      
def length(X):
    # vanishes on the bottom/top walls y = bounds[1,0] and y = bounds[1,1]
    return (X[:,1:2] - bounds[1,0])*(bounds[1,1] - X[:,1:2])
def pred_u(netu,X):
    # u = net_u*(x - x_in) + inlet profile, so u equals 4y(1-y) on the inlet
    return netu.forward(X)*(X[:,0:1] - bounds[0,0]) + 4*X[:,1:2]*(1 - X[:,1:2])
def pred_v(netv,X):
    # v vanishes on the inlet and on the top/bottom walls by construction
    return netv.forward(X)*(X[:,0:1] - bounds[0,0])*length(X)
def pred_p(netp,X):
    # p vanishes on the outlet x = bounds[0,1] by construction
    return netp.forward(X)*(X[:,0:1] - bounds[0,1])
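# Illustrative check (not required for training): with these hard-constraint
# predictors, u equals the inlet profile at x = bounds[0,0], v vanishes on the
# inlet and on the top/bottom walls, and p vanishes at the outlet
# x = bounds[0,1], whatever the network weights are.
with torch.random.fork_rng(devices=[]):   # keep the global RNG state untouched
    _net_chk = Net([2,5,1], torch.float64)
_xin = torch.tensor([[bounds[0,0], 0.3]], dtype=torch.float64)
_xout = torch.tensor([[bounds[0,1], 0.3]], dtype=torch.float64)
assert torch.allclose(pred_u(_net_chk,_xin), 4*_xin[:,1:2]*(1 - _xin[:,1:2]))
assert pred_v(_net_chk,_xin).abs().max() == 0
assert pred_p(_net_chk,_xout).abs().max() == 0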
def loadtype(inset,bdset,dtype):
    inset.X = inset.X.type(dtype)
    inset.yd = inset.yd.type(dtype)
    
    #-----------------------------
    bdset.x_in = bdset.x_in.type(dtype)
    bdset.rig_in = bdset.rig_in.type(dtype)
    bdset.x_out = bdset.x_out.type(dtype)
    bdset.x_w = bdset.x_w.type(dtype)
    bdset.x_oc = bdset.x_oc.type(dtype)
    bdset.dir_in = bdset.dir_in.type(dtype)
    bdset.dir_out = bdset.dir_out.type(dtype)
    bdset.dir_w = bdset.dir_w.type(dtype)
    bdset.dir_oc = bdset.dir_oc.type(dtype)
def loadcuda(netu,netv,netp,inset,bdset):    
    netu = netu.to(device)
    netv = netv.to(device)
    netp = netp.to(device)
    
    if inset.X.requires_grad == False:
        inset.X.requires_grad = True
    inset.X = inset.X.to(device)
    inset.yd = inset.yd.to(device)
    bdset.rig_in = bdset.rig_in.to(device)
    bdset.x_in = bdset.x_in.to(device)
    if bdset.x_out.requires_grad == False:
        bdset.x_out.requires_grad = True
    bdset.x_out = bdset.x_out.to(device)
    bdset.x_w = bdset.x_w.to(device)
    if bdset.x_oc.requires_grad == False:
        bdset.x_oc.requires_grad = True
    bdset.x_oc = bdset.x_oc.to(device)
    bdset.dir_in = bdset.dir_in.to(device)
    bdset.dir_out = bdset.dir_out.to(device)
    bdset.dir_w = bdset.dir_w.to(device)
    bdset.dir_oc = bdset.dir_oc.to(device)
def loadcpu(netu,netv,netp,inset,bdset):    
    netu = netu.to('cpu')
    netv = netv.to('cpu')
    netp = netp.to('cpu')
    
    if inset.X.requires_grad == False:
        inset.X.requires_grad = True
    inset.X = inset.X.to('cpu')
    inset.yd = inset.yd.to('cpu')
    bdset.rig_in = bdset.rig_in.to('cpu')
    bdset.x_in = bdset.x_in.to('cpu')
    bdset.x_out = bdset.x_out.to('cpu')
    bdset.x_w = bdset.x_w.to('cpu')
    bdset.x_oc = bdset.x_oc.to('cpu')
    bdset.dir_in = bdset.dir_in.to('cpu')
    bdset.dir_out = bdset.dir_out.to('cpu')
    bdset.dir_w = bdset.dir_w.to('cpu')
    bdset.dir_oc = bdset.dir_oc.to('cpu')
def Loss_yp(netu,netv,netp,inset,bdset,penalty_in,penalty_bd):
    inset.u = pred_u(netu,inset.X)
    inset.v = pred_v(netv,inset.X)
    inset.p = pred_p(netp,inset.X)
    # first derivatives: u_x holds (du/dx, du/dy)
    u_x, = torch.autograd.grad(inset.u, inset.X, create_graph=True, retain_graph=True,
                               grad_outputs=torch.ones(inset.size,1).to(device))
    # differentiate du/dx again: u_xx holds (d2u/dx2, d2u/dxdy)
    u_xx, = torch.autograd.grad(u_x[:,0:1], inset.X, create_graph=True, retain_graph=True,
                               grad_outputs=torch.ones(inset.size,1).to(device))
    # differentiate du/dy again: u_yy holds (d2u/dydx, d2u/dy2)
    u_yy, = torch.autograd.grad(u_x[:,1:2], inset.X, create_graph=True, retain_graph=True,
                               grad_outputs=torch.ones(inset.size,1).to(device))
    u_lap = u_xx[:,0:1] + u_yy[:,1:2]   # Laplacian of u

    v_x, = torch.autograd.grad(inset.v, inset.X, create_graph=True, retain_graph=True,
                               grad_outputs=torch.ones(inset.size,1).to(device))
    v_xx, = torch.autograd.grad(v_x[:,0:1], inset.X, create_graph=True, retain_graph=True,
                               grad_outputs=torch.ones(inset.size,1).to(device))
    v_yy, = torch.autograd.grad(v_x[:,1:2], inset.X, create_graph=True, retain_graph=True,
                               grad_outputs=torch.ones(inset.size,1).to(device))  
    v_lap = v_xx[:,0:1] + v_yy[:,1:2]
    p_x, = torch.autograd.grad(inset.p, inset.X, create_graph=True, retain_graph=True,
                               grad_outputs=torch.ones(inset.size,1).to(device))
    inset.res_u = (-nu*u_lap + inset.u*u_x[:,0:1] + inset.v*u_x[:,1:2] + p_x[:,0:1])**2
    inset.res_v = (-nu*v_lap + inset.u*v_x[:,0:1] + inset.v*v_x[:,1:2] + p_x[:,1:2])**2
    inset.res_div = (u_x[:,0:1] + v_x[:,1:2])**2

    inset.loss_u = torch.sqrt(inset.res_u.mean())
    inset.loss_v = torch.sqrt(inset.res_v.mean())
    inset.loss_div = torch.sqrt(inset.res_div.mean())
    inset.loss_in = penalty_in[0]*inset.loss_u + penalty_in[1]*inset.loss_v + \
        penalty_in[2]*inset.loss_div


    u_out = pred_u(netu,bdset.x_out)
    u_out_x, = torch.autograd.grad(u_out, bdset.x_out, create_graph=True, retain_graph=True,
                                    grad_outputs=torch.ones(bdset.x_out.shape[0],1).to(device))
    u_out_n = ((u_out_x*bdset.dir_out).sum(1)).reshape(-1,1)
    bdset.loss1 = torch.sqrt(u_out_n**2).mean()

    v_out = pred_v(netv,bdset.x_out)
    v_out_x, = torch.autograd.grad(v_out, bdset.x_out, create_graph=True, retain_graph=True,
                                    grad_outputs=torch.ones(bdset.x_out.shape[0],1).to(device))
    v_out_n = ((v_out_x*bdset.dir_out).sum(1)).reshape(-1,1)
    bdset.loss2 = torch.sqrt(v_out_n**2).mean()

    u_w = pred_u(netu,bdset.x_w)
    bdset.loss3 = torch.sqrt(u_w**2).mean()
    v_w = pred_v(netv,bdset.x_w)
    bdset.loss4 = torch.sqrt(v_w**2).mean()

    p_oc = pred_p(netp,bdset.x_oc)
    p_oc_x, = torch.autograd.grad(p_oc, bdset.x_oc, create_graph=True, retain_graph=True,
                                    grad_outputs=torch.ones(bdset.x_oc.shape[0],1).to(device))
    p_oc_n = ((p_oc_x*bdset.dir_oc).sum(1)).reshape(-1,1)
    bdset.loss5 = torch.sqrt(p_oc_n**2).mean()

    inset.loss_bd = penalty_bd[0]*bdset.loss1 + penalty_bd[1]*bdset.loss2 + penalty_bd[2]*bdset.loss3 +\
        penalty_bd[3]*bdset.loss4 + penalty_bd[4]*bdset.loss5
    return inset.loss_in + inset.loss_bd
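# Illustrative sketch (not part of the loss): the second derivatives above come
# from calling torch.autograd.grad twice. The first call returns the full
# gradient (u_x, u_y); differentiating its x-component again yields
# (u_xx, u_xy), of which only u_xx is kept, and u_yy is obtained the same way.
# On f(x,y) = x**2 + y**3 the Laplacian is 2 + 6y, i.e. 14 at (1,2):
_pt = torch.tensor([[1.0, 2.0]], dtype=torch.float64, requires_grad=True)
_f = _pt[:,0:1]**2 + _pt[:,1:2]**3
_g, = torch.autograd.grad(_f, _pt, create_graph=True,
                          grad_outputs=torch.ones(1,1,dtype=torch.float64))
_gx2, = torch.autograd.grad(_g[:,0:1], _pt, create_graph=True,
                            grad_outputs=torch.ones(1,1,dtype=torch.float64))
_gy2, = torch.autograd.grad(_g[:,1:2], _pt, create_graph=True,
                            grad_outputs=torch.ones(1,1,dtype=torch.float64))
assert torch.allclose(_gx2[:,0:1] + _gy2[:,1:2],
                      torch.full((1,1), 14.0, dtype=torch.float64))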
def train_yp(netu,netv,netp,inset,bdset,penalty_in,penalty_bd,optim,epoch,error_history,optimtype):
    print('Train y&p Neural Network')
    lossin_history,div_history,lossbd_history,lo_history = error_history
    loss = Loss_yp(netu,netv,netp,inset,bdset,penalty_in,penalty_bd)
    print('epoch_yp: %d, Lu:%.3e,Lv:%.3e,div:%.3e, time: %.2f\n'
          %(0,inset.loss_u.item(),inset.loss_v.item(),inset.loss_div.item(), 0.00))
    print('bd1:%.3e,bd2:%.3e,bd3:%.3e,bd4:%.4e,bd5:%.3e\n'%
    (bdset.loss1.item(),bdset.loss2.item(),bdset.loss3.item(),bdset.loss4.item(),bdset.loss5.item()))
    for it in range(epoch):
        st = time.time()
        if optimtype == 'Adam':
            for j in range(100):
                optim.zero_grad()
                loss = Loss_yp(netu,netv,netp,inset,bdset,penalty_in,penalty_bd)
                loss.backward()
                optim.step()
        if optimtype == 'BFGS' or optimtype == 'LBFGS':
            def closure():
                loss = Loss_yp(netu,netv,netp,inset,bdset,penalty_in,penalty_bd)
                optim.zero_grad()
                loss.backward()
                return loss
            optim.step(closure) 
        loss = Loss_yp(netu,netv,netp,inset,bdset,penalty_in,penalty_bd)
        ela = time.time() - st
        print('------------------------------')
        print('epoch_yp: %d, Loss:%.3e,Lu:%.3e,Lv:%.3e,div:%.3e, time: %.2f\n'
          %(it + 1,loss.item(),inset.loss_u.item(),inset.loss_v.item(),inset.loss_div.item(), ela))
        print('bd1:%.3e,bd2:%.3e,bd3:%.3e,bd4:%.4e,bd5:%.3e\n'%
        (bdset.loss1.item(),bdset.loss2.item(),bdset.loss3.item(),bdset.loss4.item(),bdset.loss5.item()))
        
        loss_u = inset.loss_u.item()
        loss_v = inset.loss_v.item()
        loss_div = inset.loss_div.item()
        bd1 = bdset.loss1.item()
        bd2 = bdset.loss2.item()
        bd3 = bdset.loss3.item()
        bd4 = bdset.loss4.item()
        bd5 = bdset.loss5.item()
        
        lossin_history.append([loss.item(),loss_u,loss_v,loss_div])
        div_history.append(loss_div)
        lossbd_history.append([bd1,bd2,bd3,bd4,bd5])
        lo_history.append(loss.item())
        error_history = [lossin_history,div_history,lossbd_history,lo_history]
    return error_history

n_tr = 64
nx_bd = [100,100]
mode = 'uniform'
#mode = 'qmc'
inset = INSET(n_tr,mode)

bdset = BDSET(nx_bd)


dtype = torch.float64
wid = 25
layer_u = [2,wid,wid,wid,1];netu = Net(layer_u,dtype)
layer_v = [2,wid,wid,wid,1];netv = Net(layer_v,dtype)
layer_p = [2,wid,wid,wid,1];netp = Net(layer_p,dtype)


fname1 = "ubasic-NSlay%dvar.pt"%(wid)
fname2 = "vbasic-NSlay%dvar.pt"%(wid)
fname3 = "pbasic-NSlay%dvar.pt"%(wid)


# optionally warm-start from networks saved by a previous run
#netu = torch.load(fname1)
#netv = torch.load(fname2)
#netp = torch.load(fname3)


loadtype(inset,bdset,dtype)
loadcuda(netu,netv,netp,inset,bdset)

loss_history = [[],[],[],[]]
epoch = 50
start_time = time.time()
penalty_in = [1e0,1e0,1e0]
penalty_bd = [1e0,1e0,1e0,1e0,1e0]
lr = 1e0
max_iter = 1
#optimtype = 'Adam'
optimtype = 'BFGS'
#optimtype = 'LBFGS'
number = 0
for i in range(max_iter):
    if optimtype == 'BFGS':
        optim = bfgs.BFGS(itertools.chain(netu.parameters(),
                                        netv.parameters(),
                                        netp.parameters()),
                        lr=lr, max_iter=100,
                        tolerance_grad=1e-16, tolerance_change=1e-16,
                        line_search_fn='strong_wolfe')
    if optimtype == 'LBFGS':
        optim = torch.optim.LBFGS(itertools.chain(netu.parameters(),
                                        netv.parameters(),
                                        netp.parameters()),
                        lr=lr, max_iter=100,
                        history_size=100,
                        line_search_fn='strong_wolfe')
    if optimtype == 'Adam':
        optim = torch.optim.Adam(itertools.chain(netu.parameters(),
                                        netv.parameters(),
                                        netp.parameters()),
                        lr=lr)
    loss_history = \
        train_yp(netu,netv,netp,inset,bdset,penalty_in,penalty_bd,optim,epoch,loss_history,optimtype)
    if (i + 1)%5 == 0:
        number += 1
        
        lr *= 0.985
elapsed = time.time() - start_time
print('Finished! train time: %.2f\n' %(elapsed))
loadcpu(netu,netv,netp,inset,bdset)
torch.cuda.empty_cache()

torch.save(netu, fname1)
torch.save(netv, fname2)
torch.save(netp, fname3)


lossin_history,div_history,lossbd_history,lo_history = loss_history
np.save('lay%d-epoch%d-lossin.npy'%(wid,epoch),lossin_history)
np.save('lay%d-epoch%d-lossbd.npy'%(wid,epoch),lossbd_history)
np.save('lay%d-epoch%d-lossdiv.npy'%(wid,epoch),div_history)
np.save('lay%d-epoch%d-losslo.npy'%(wid,epoch),lo_history)
#%%
fig, ax = plt.subplots(1,2,figsize=(12,3.5))

ax[0].semilogy(np.array(lossin_history))
ax[0].legend(['loss','loss_u', 'loss_v', 'loss_div'])
ax[0].set_xlabel('iters') 

ax[1].plot(np.array(lossbd_history))
ax[1].legend(['bd1','bd2','bd3','bd4','bd5'])
ax[1].set_xlabel('iters') 
ax[1].set_yscale('log')
fig.tight_layout()
plt.show()


plt.scatter(inset.X.detach().numpy()[:,0],inset.X.detach().numpy()[:,1])
plt.show()
#%%
nx_te = [40,40]
te_bd_set = BDSET(nx_te)
hx = [(bounds[0,1] - bounds[0,0])/nx_te[0],(bounds[1,1] - bounds[1,0])/nx_te[1]]
fig, ax = plt.subplots(1,3, figsize=(13,4))
x1s = np.linspace(bounds[1,0] + 0.5*hx[1],bounds[1,1] - 0.5*hx[1],nx_te[1])   # cell midpoints, matching BDSET
te_bd_set.x_in = te_bd_set.x_in.type(dtype)
te_bd_set.x_out = te_bd_set.x_out.type(dtype)
u_in = pred_u(netu,te_bd_set.x_in).detach().numpy()
v_in = pred_v(netv,te_bd_set.x_in).detach().numpy()

u_out = pred_u(netu,te_bd_set.x_out).detach().numpy()
v_out = pred_v(netv,te_bd_set.x_out).detach().numpy()

u_o_p = x1s*(1-x1s)*4/(Ly**2)   # exact inlet profile, kept for reference

ax[1].plot(x1s,u_out); ax[1].plot(x1s,u_in)
ax[1].legend(['outlet','inlet'])
ax[1].set_xlabel('y'); ax[1].set_ylabel('u')
ax[1].set_title('PINN: u')

ax[2].plot(x1s,v_out); ax[2].plot(x1s,v_in)
ax[2].legend(['outlet','inlet'])
ax[2].set_xlabel('y'); ax[2].set_ylabel('v')
ax[2].set_title('PINN: v')


ax[0].semilogy(np.array(lo_history))
ax[0].legend(['pde_loss'])
ax[0].set_xlabel('iters') 
plt.show()
#%%
n=n_tr
xx0 = np.linspace(bounds[0,0],bounds[0,1],2*n)
xx1 = np.linspace(bounds[1,0],bounds[1,1],n)
xx0, xx1 = np.meshgrid(xx0,xx1)
ind = (xx0>1-d)&(xx0<1+d)&(xx1>0.5-d)&(xx1<0.5+d)

inp_s = torch.from_numpy(np.hstack([xx0.reshape(-1,1),xx1.reshape(-1,1)]))
u_pred = torch.zeros(inp_s.shape[0],1)
v_pred = torch.zeros(inp_s.shape[0],1)
p_pred = torch.zeros(inp_s.shape[0],1)

u_pred = pred_u(netu,inp_s)
v_pred = pred_v(netv,inp_s)
p_pred = pred_p(netp,inp_s)

u = u_pred.detach().numpy()
v = v_pred.detach().numpy()
p = p_pred.detach().numpy()


y1_s = u.reshape(n,2*n); y1 = u.reshape(n,2*n);
y2_s = v.reshape(n,2*n); y2 = v.reshape(n,2*n);
p = p.reshape(n,2*n)

y1_s[ind] = np.nan  # for streamplot mask!
y2_s[ind] = np.nan
n_ind = np.invert(ind)
y1 = y1[n_ind].flatten()
y2 = y2[n_ind].flatten()
p = p[n_ind].flatten()

speed_s = np.sqrt(y1_s**2+y2_s**2)
x0 = xx0[n_ind].flatten()
x1 = xx1[n_ind].flatten()

num_line = 100

fig, ax = plt.subplots(2,2, figsize=(12,6))

for i in range(2): 
    for j in range(2):
            ax[i,j].axis('equal')
            ax[i,j].set_xlim([0.,2.0])
            ax[i,j].set_ylim([0.,1.])
            ax[i,j].axis('off')
            ax[i,j].add_artist(plt.Rectangle((1-d,0.5-d),2*d,2*d, fill=True, color='white'))


ax00 = ax[0,0].tricontourf(x0, x1, y1, num_line, cmap='rainbow')
fig.colorbar(ax00,ax=ax[0,0],fraction = 0.024,pad = 0.04)
#ax[0,0].contour(ax00, linewidths=0.6, colors='black')
                   #, cmap='jet', color=speed_s, linewidth=0.5)
ax01 = ax[0,1].tricontourf(x0, x1, y2, num_line, cmap='rainbow')
fig.colorbar(ax01,ax=ax[0,1],fraction = 0.024,pad = 0.04)
ax[0,0].set_title('PINN: u')
ax[0,1].set_title('PINN: v')


ax10 = ax[1,0].tricontourf(x0, x1, (y1**2+y2**2)**0.5, num_line, cmap='rainbow')
ax[1,0].streamplot(xx0, xx1, y1_s, y2_s, density = 1.5)
fig.colorbar(ax10,ax=ax[1,0],fraction = 0.024,pad = 0.04)
ax[1,0].set_title('PINN: stream')


ax11 = ax[1,1].tricontourf(x0, x1, p, num_line, cmap='rainbow')
#ax[1,0].contour(ax10, linewidths=0.6, colors='black')
#ax11 = ax[1,1].tricontourf(x0, x1, p, num_line, cmap='rainbow')
ax[1,1].set_title('PINN: p')
#ax[1,1].set_title('PINN: p')
fig.colorbar(ax11,ax=ax[1,1],fraction = 0.024,pad = 0.04)
#fig.colorbar(ax11,ax=ax[1,1])
plt.show()

Plotting the obstacle flow results

import torch
import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import torch.nn as nn
import torch.nn.functional as F
import itertools
import bfgs
from scipy.stats import qmc
#device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
device = torch.device('cpu')

nu = 1e-2
d = 0.1
def region(x):
    x1 = x[:,0:1]
    x2 = x[:,1:2]
    box = (x1>1-d)&(x1<1+d)&(x2>0.5-d)&(x2<0.5+d)
    s = (x1>0)&(x1<2)&(x2>0)&(x2<1)&(np.invert(box))
    return s.flatten()
hr = 1e-3
bounds = np.array([0 - hr,2 + hr,0 - hr,1 + hr]).reshape(2,2)

Lx = 2.0
Ly = 1.0
def y_in(x):
    x2 = x[:,1:2]
    s = 0*x
    s[:,0:1] = 4*x2*(1-x2)/(Ly**2)
    return s

def y_d(x):
    return y_in(x)
class BDSET():# boundary collocation points and outward normals
    def __init__(self,nx):
        self.dim = 2
        self.hx = [(bounds[0,1] - bounds[0,0])/nx[0],(bounds[1,1] - bounds[1,0])/nx[1]]
        self.x_in = torch.zeros(nx[1],self.dim)
        self.dir_in = torch.zeros(nx[1],self.dim)
        for i in range(nx[1]):
            self.x_in[i,0] = bounds[0,0]
            self.x_in[i,1] = bounds[1,0] + (i + 0.5)*self.hx[1]
            self.dir_in[i,0] = -1
            self.dir_in[i,1] = 0
        self.rig_in = torch.zeros_like(self.x_in)
        self.rig_in = y_in(self.x_in)

        self.x_out = torch.zeros(nx[1],self.dim)
        self.dir_out = torch.zeros(nx[1],self.dim)
        for i in range(nx[1]):
            self.x_out[i,0] = bounds[0,1]
            self.x_out[i,1] = bounds[1,0] + (i + 0.5)*self.hx[1]
            self.dir_out[i,0] = 1
            self.dir_out[i,1] = 0

        self.x_w = torch.zeros(4*nx[0] + 2*nx[1],self.dim)
        self.dir_w = torch.zeros(4*nx[0] + 2*nx[1],self.dim)
        m = 0
        for i in range(nx[0]):
            self.x_w[m,0] = bounds[0,0] + (i + 0.5)*self.hx[0]
            self.x_w[m,1] = bounds[1,0]
            self.dir_w[m,0] = 0
            self.dir_w[m,1] = -1
            m = m + 1
        for i in range(nx[0]):
            self.x_w[m,0] = bounds[0,0] + (i + 0.5)*self.hx[0]
            self.x_w[m,1] = bounds[1,1]
            self.dir_w[m,0] = 0
            self.dir_w[m,1] = 1
            m = m + 1
        for i in range(nx[0]):
            self.x_w[m,0] = 1 - d + (i + 0.5)*(2*d/nx[0])
            self.x_w[m,1] = 0.5 - d
            self.dir_w[m,0] = 0
            self.dir_w[m,1] = -1
            m = m + 1
        for i in range(nx[0]):
            self.x_w[m,0] = 1 - d + (i + 0.5)*(2*d/nx[0])
            self.x_w[m,1] = 0.5 + d
            self.dir_w[m,0] = 0
            self.dir_w[m,1] = 1
            m = m + 1
        for i in range(nx[1]):
            self.x_w[m,0] = 1 - d
            self.x_w[m,1] = 0.5 - d + (i + 0.5)*(2*d/nx[1])
            self.dir_w[m,0] = -1
            self.dir_w[m,1] = 0
            m = m + 1
        for i in range(nx[1]):
            self.x_w[m,0] = 1 + d
            self.x_w[m,1] = 0.5 - d + (i + 0.5)*(2*d/nx[1])
            self.dir_w[m,0] = 1
            self.dir_w[m,1] = 0
            m = m + 1
        self.x_oc = torch.cat([self.x_in,self.x_w],0)
        self.dir_oc = torch.cat([self.dir_in,self.dir_w],0)

def loadtype(bdset,dtype):
    
    
    #-----------------------------
    bdset.x_in = bdset.x_in.type(dtype)
    bdset.rig_in = bdset.rig_in.type(dtype)
    bdset.x_out = bdset.x_out.type(dtype)
    bdset.x_w = bdset.x_w.type(dtype)
    bdset.x_oc = bdset.x_oc.type(dtype)
    bdset.dir_in = bdset.dir_in.type(dtype)
    bdset.dir_out = bdset.dir_out.type(dtype)
    bdset.dir_w = bdset.dir_w.type(dtype)
    bdset.dir_oc = bdset.dir_oc.type(dtype)
np.random.seed(1234)
torch.manual_seed(1234)

class Net(torch.nn.Module):
    def __init__(self, layers, dtype):
        super(Net, self).__init__()
        self.layers = layers
        self.layers_hid_num = len(layers)-2
        self.device = device
        self.dtype = dtype
        fc = []
        for i in range(self.layers_hid_num):
            fc.append(torch.nn.Linear(self.layers[i],self.layers[i+1]))
            fc.append(torch.nn.Linear(self.layers[i+1],self.layers[i+1]))
        fc.append(torch.nn.Linear(self.layers[-2],self.layers[-1]))
        self.fc = torch.nn.Sequential(*fc)
        for i in range(self.layers_hid_num):
            self.fc[2*i].weight.data = self.fc[2*i].weight.data.type(dtype)
            self.fc[2*i].bias.data = self.fc[2*i].bias.data.type(dtype)
            self.fc[2*i + 1].weight.data = self.fc[2*i + 1].weight.data.type(dtype)
            self.fc[2*i + 1].bias.data = self.fc[2*i + 1].bias.data.type(dtype)
        self.fc[-1].weight.data = self.fc[-1].weight.data.type(dtype)
        self.fc[-1].bias.data = self.fc[-1].bias.data.type(dtype)
    def forward(self, x):
        dev = x.device
        for i in range(self.layers_hid_num):
            h = torch.sin(self.fc[2*i](x))
            h = torch.sin(self.fc[2*i+1](h))
            temp = torch.eye(x.shape[-1],self.layers[i+1],dtype = self.dtype,device = dev)
            x = h + x@temp
        return self.fc[-1](x) 
    def total_para(self):# total number of trainable parameters
        return sum([x.numel() for x in self.parameters()])      
def length(X):
    return (X[:,1:2] - bounds[1,0])*(bounds[1,1] - X[:,1:2])
def pred_u(netu,X):
    return netu.forward(X)*(X[:,0:1] - bounds[0,0]) + 4*X[:,1:2]*(1 - X[:,1:2])
def pred_v(netv,X):
    return netv.forward(X)*(X[:,0:1] - bounds[0,0])*length(X)
def pred_p(netp,X):
    return netp.forward(X)*(X[:,0:1] - bounds[0,1])

dtype = torch.float64
wid = 25
epoch = 100
layer_u = [2,wid,wid,wid,1];netu = Net(layer_u,dtype)
layer_v = [2,wid,wid,wid,1];netv = Net(layer_v,dtype)
layer_p = [2,wid,wid,wid,1];netp = Net(layer_p,dtype)

'''
fname1 = "C:\\Users\\2001213226\\Desktop\\mucode\\NSpde\\pengfei\\unsf-NSlay%dvar.pt"%(wid)
fname2 = "C:\\Users\\2001213226\\Desktop\\mucode\\NSpde\\pengfei\\vnsf-NSlay%dvar.pt"%(wid)
fname3 = "C:\\Users\\2001213226\\Desktop\\mucode\\NSpde\\pengfei\\pnsf-NSlay%dvar.pt"%(wid)
'''
fname1 = "C:\\Users\\2001213226\\Desktop\\mucode\\NSpde\\pengfei\\umeannsf-NSlay%dvar.pt"%(wid)
fname2 = "C:\\Users\\2001213226\\Desktop\\mucode\\NSpde\\pengfei\\vmeannsf-NSlay%dvar.pt"%(wid)
fname3 = "C:\\Users\\2001213226\\Desktop\\mucode\\NSpde\\pengfei\\pmeannsf-NSlay%dvar.pt"%(wid)

netu = torch.load(fname1).to('cpu')
netv = torch.load(fname2).to('cpu')
netp = torch.load(fname3).to('cpu')   # pressure net; pred_p below relies on it

lo_history = np.load('C:\\Users\\2001213226\\Desktop\\mucode\\NSpde\\pengfei\\lay%d-epoch%d-losslo.npy'%(wid,epoch))
lossin_history = np.load('C:\\Users\\2001213226\\Desktop\\mucode\\NSpde\\pengfei\\lay%d-epoch%d-lossin.npy'%(wid,epoch))
lossbd_history = np.load('C:\\Users\\2001213226\\Desktop\\mucode\\NSpde\\pengfei\\lay%d-epoch%d-lossbd.npy'%(wid,epoch))
lossdiv_history = np.load('C:\\Users\\2001213226\\Desktop\\mucode\\NSpde\\pengfei\\lay%d-epoch%d-lossdiv.npy'%(wid,epoch))
#%%
fig, ax = plt.subplots(1,2,figsize=(12,3.5))

ax[0].semilogy(np.array(lossin_history))
ax[0].legend(['loss','loss_u', 'loss_v', 'loss_div'])
ax[0].set_xlabel('iters') 

ax[1].plot(np.array(lossbd_history))
ax[1].legend(['bd1','bd2','bd3','bd4','bd5'])
ax[1].set_xlabel('iters') 
ax[1].set_yscale('log')
fig.tight_layout()
plt.show()

#%%
nx_te = [40,40]
te_bd_set = BDSET(nx_te)
hx = [(bounds[0,1] - bounds[0,0])/nx_te[0],(bounds[1,1] - bounds[1,0])/nx_te[1]]
fig, ax = plt.subplots(1,3, figsize=(13,4))
x1s = np.linspace(bounds[1,0] + 0.5*hx[1],bounds[1,1] - 0.5*hx[1],nx_te[1])   # cell midpoints, matching BDSET
te_bd_set.x_in = te_bd_set.x_in.type(dtype)
te_bd_set.x_out = te_bd_set.x_out.type(dtype)
u_in = pred_u(netu,te_bd_set.x_in).detach().numpy()
v_in = pred_v(netv,te_bd_set.x_in).detach().numpy()

u_out = pred_u(netu,te_bd_set.x_out).detach().numpy()
v_out = pred_v(netv,te_bd_set.x_out).detach().numpy()

u_o_p = x1s*(1-x1s)*4/(Ly**2)   # exact inlet profile, kept for reference

ax[1].plot(x1s,u_out); ax[1].plot(x1s,u_in)
ax[1].legend(['outlet','inlet'])
ax[1].set_xlabel('y'); ax[1].set_ylabel('u')
ax[1].set_title('NSFnet: u')

ax[2].plot(x1s,v_out); ax[2].plot(x1s,v_in)
ax[2].legend(['outlet','inlet'])
ax[2].set_xlabel('y'); ax[2].set_ylabel('v')
ax[2].set_title('NSFnet: v')


ax[0].semilogy(np.array(lo_history))
ax[0].legend(['pde_loss'])
ax[0].set_xlabel('iters') 
plt.show()
#%%
n=64
xx0 = np.linspace(bounds[0,0],bounds[0,1],2*n)
xx1 = np.linspace(bounds[1,0],bounds[1,1],n)
xx0, xx1 = np.meshgrid(xx0,xx1)
ind = (xx0>1-d)&(xx0<1+d)&(xx1>0.5-d)&(xx1<0.5+d)

inp_s = torch.from_numpy(np.hstack([xx0.reshape(-1,1),xx1.reshape(-1,1)]))
u_pred = torch.zeros(inp_s.shape[0],1)
v_pred = torch.zeros(inp_s.shape[0],1)
p_pred = torch.zeros(inp_s.shape[0],1)

u_pred = pred_u(netu,inp_s)
v_pred = pred_v(netv,inp_s)
p_pred = pred_p(netp,inp_s)

u = u_pred.detach().numpy()
v = v_pred.detach().numpy()
p = p_pred.detach().numpy()


y1_s = u.reshape(n,2*n); y1 = u.reshape(n,2*n);
y2_s = v.reshape(n,2*n); y2 = v.reshape(n,2*n);
p = p.reshape(n,2*n)

y1_s[ind] = np.nan  # for streamplot mask!
y2_s[ind] = np.nan
n_ind = np.invert(ind)
y1 = y1[n_ind].flatten()
y2 = y2[n_ind].flatten()
p = p[n_ind].flatten()

speed_s = np.sqrt(y1_s**2+y2_s**2)
x0 = xx0[n_ind].flatten()
x1 = xx1[n_ind].flatten()

num_line = 100

fig, ax = plt.subplots(2,2, figsize=(12,6))

for i in range(2): 
    for j in range(2):
            ax[i,j].axis('equal')
            ax[i,j].set_xlim([0.,2.0])
            ax[i,j].set_ylim([0.,1.])
            ax[i,j].axis('off')
            ax[i,j].add_artist(plt.Rectangle((1-d,0.5-d),2*d,2*d, fill=True, color='white'))
            

ax00 = ax[0,0].tricontourf(x0, x1, y1, num_line, cmap='rainbow')
fig.colorbar(ax00,ax=ax[0,0],fraction = 0.024,pad = 0.04)
#ax[0,0].contour(ax00, linewidths=0.6, colors='black')
                   #, cmap='jet', color=speed_s, linewidth=0.5)
ax01 = ax[0,1].tricontourf(x0, x1, y2, num_line, cmap='rainbow')
fig.colorbar(ax01,ax=ax[0,1],fraction = 0.024,pad = 0.04)
ax[0,0].set_title('NSFnet: u')
ax[0,1].set_title('NSFnet: v')


ax10 = ax[1,0].tricontourf(x0, x1, (y1**2+y2**2)**0.5, num_line, cmap='rainbow')
ax[1,0].streamplot(xx0, xx1, y1_s, y2_s, density = 1.5)
fig.colorbar(ax10,ax=ax[1,0],fraction = 0.024,pad = 0.04)
ax[1,0].set_title('NSFnet: stream')


ax11 = ax[1,1].tricontourf(x0, x1, p, num_line, cmap='rainbow')
#ax[1,0].contour(ax10, linewidths=0.6, colors='black')
#ax11 = ax[1,1].tricontourf(x0, x1, p, num_line, cmap='rainbow')
ax[1,1].set_title('NSFnet: p')
#ax[1,1].set_title('PINN: p')
fig.colorbar(ax11,ax=ax[1,1],fraction = 0.024,pad = 0.04)
#fig.colorbar(ax11,ax=ax[1,1])
plt.show()
