MPI: An Example of Using MPI_Probe

Description

The MPI_Probe() and MPI_Iprobe() functions probe an incoming message without actually receiving it. Based on what the probe reports, we can decide how to receive the message, for example allocating a receive buffer sized to the message. Note that the first function is blocking: it returns only once a matching message has been probed. The second is non-blocking: it returns immediately whether or not a matching message was found.

Function prototypes:

int MPI_Probe (int source /* in */,
            int tag /* in */,
            MPI_Comm comm /* in */,
            MPI_Status* status /*out*/)

The above is the blocking probe: it does not return until a matching message has arrived. The source and tag arguments may be the wildcards MPI_ANY_SOURCE and MPI_ANY_TAG.

int MPI_Iprobe (int source /* in */,
            int tag /* in */,
            MPI_Comm comm /* in */,
            int * flag /*out*/,
            MPI_Status* status /*out*/)

The above is the non-blocking probe: it returns immediately whether or not a matching message has arrived. If one has, flag is set to true; otherwise flag is false.
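
As a minimal sketch of a typical MPI_Iprobe pattern (the polling loop, the do_other_work() helper, and the use of MPI_COMM_WORLD below are illustrative assumptions, not part of the original text), a process can keep computing until the probe reports that a matching message is pending:

int flag = 0;
MPI_Status status;
while (!flag) {
    /* Returns immediately; flag becomes nonzero once a matching message is pending. */
    MPI_Iprobe(MPI_ANY_SOURCE, 99, MPI_COMM_WORLD, &flag, &status);
    if (!flag)
        do_other_work();   /* hypothetical helper: overlap computation with the wait */
}
/* A matching message is pending; status.MPI_SOURCE and status.MPI_TAG identify it. */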

Test example:

int x;  float y;
int send_x = 100;
float send_y = 3.14f;
int rank;
MPI_Status status;
MPI_Comm comm = MPI_COMM_WORLD;

MPI_Comm_rank(comm, &rank);
if (rank == 0)            /* rank 0 -> rank 2: send one int */
    MPI_Send(&send_x, 1, MPI_INT, 2, 99, comm);
else if (rank == 1)       /* rank 1 -> rank 2: send one float */
    MPI_Send(&send_y, 1, MPI_FLOAT, 2, 99, comm);
else if (rank == 2)       /* rank 2 receives both messages */
    for (int i = 0; i < 2; i++) {
        MPI_Probe(MPI_ANY_SOURCE, 99, comm, &status);   /* blocking probe */
        if (status.MPI_SOURCE == 0)
            MPI_Recv(&x, 1, MPI_INT, 0, 99, comm, &status);
        else if (status.MPI_SOURCE == 1)
            MPI_Recv(&y, 1, MPI_FLOAT, 1, 99, comm, &status);
    }
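
The fragment above must be run with at least three processes (for example, mpiexec -n 3 ./a.out). As a further minimal sketch of the buffer-sizing use case mentioned in the description (the tag value 99, the char payload, and the use of MPI_COMM_WORLD are assumptions for illustration), MPI_Probe can be combined with MPI_Get_count to allocate a receive buffer that exactly fits the incoming message:

MPI_Status status;
int count;
/* Block until a message with tag 99 is pending, then ask how many MPI_CHAR elements it holds. */
MPI_Probe(MPI_ANY_SOURCE, 99, MPI_COMM_WORLD, &status);
MPI_Get_count(&status, MPI_CHAR, &count);
/* Allocate exactly enough space and receive from the probed source (needs <stdlib.h> for malloc/free). */
char *buf = (char *)malloc(count);
MPI_Recv(buf, count, MPI_CHAR, status.MPI_SOURCE, status.MPI_TAG,
         MPI_COMM_WORLD, MPI_STATUS_IGNORE);
/* ... use buf ... */
free(buf);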