initmpi Subroutine

public subroutine initmpi()

Arguments

None

Calls

mpi_init, mpi_comm_rank, mpi_comm_size, mpi_dims_create, mpi_cart_create, mpi_cart_shift, mpi_wtime

Called by

program DALESURBAN

Source Code

  subroutine initmpi
    implicit none
    integer dims(1)     ! number of processes per decomposed direction
    logical periods(1)  ! periodicity flag per direction
    integer periods2(1) ! integer variant of periods (see Soares workaround below)

    call MPI_INIT(mpierr)
    MY_REAL = MPI_DOUBLE_PRECISION  ! MPI_REAL8 should be equivalent
    call MPI_COMM_RANK( MPI_COMM_WORLD, myid, mpierr )
    call MPI_COMM_SIZE( MPI_COMM_WORLD, nprocs, mpierr )
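! myid is the rank of this task (0 .. nprocs-1) and nprocs the total
! number of tasks in MPI_COMM_WORLD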
! Specify the number of processes in each direction.
! Specifying 0 means that MPI will try to find a useful number of
! processes in the corresponding direction.

! Specifying 1 means only one process in that direction, so that we
! are in fact left with a grid of (at most) 2 dimensions. This is used
! when we want the array index range in one particular direction to be
! present on all processes in the grid.

    dims(1) = 0
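! (illustration: in a 3-D decomposition one could pass e.g.
! dims = (/0, 0, 1/) to decompose the first two directions only;
! here there is just the single dimension)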


! the decomposed direction is chosen periodic

    periods(1) = .true.
! Soares 20080115: integer periodicity flag for the alternative
! MPI_CART_CREATE call commented out below
    periods2(1) = 1

! let MPI find a suitable number of processes in each direction

    call MPI_DIMS_CREATE( nprocs, 1, dims, mpierr )
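! (with ndims = 1 and dims(1) = 0 on input, MPI_DIMS_CREATE simply
! returns dims(1) = nprocs)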

! create the Cartesian communicator, denoted by the integer comm3d

    ! BUG - Thijs, Harm: this earlier call used reorder = .false.
    ! (and ierr instead of mpierr):
    !call MPI_CART_CREATE(MPI_COMM_WORLD, 1, dims, periods,.false., &
    !                    comm3d, ierr )

    call MPI_CART_CREATE(MPI_COMM_WORLD, 1, dims, periods,.true., &
                        comm3d, mpierr )
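! (reorder = .true. allows MPI to renumber the ranks within comm3d for
! better process placement; hence the rank is queried again on comm3d
! below)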

! Soares 20080115
!     call MPI_CART_CREATE(MPI_COMM_WORLD, 1, dims, periods2,1, &
!                         comm3d, mpierr )

! Get my processor number in this communicator

    call MPI_COMM_RANK( comm3d, myid, mpierr )


! when applying boundary conditions, we need to know which processes
! are neighbours in the decomposed direction

! these are determined with the aid of the MPI routine MPI_CART_SHIFT

    call MPI_CART_SHIFT( comm3d, 0,  1, nbrbottom, nbrtop,   mpierr )
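! (direction 0, displacement 1: nbrbottom is the neighbour with the
! lower coordinate, nbrtop the one with the higher coordinate; because
! the direction is periodic the shift wraps around, so both are always
! valid ranks)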

! store this rank as a three-digit, zero-padded string in cmyid

    write(cmyid,'(i3.3)') myid

! record the wall-clock start time of the program on the root process

    if(myid==0)then
      CPU_program0 = MPI_Wtime()
    end if

    write(*,*)'nprocs = ', nprocs
  end subroutine initmpi
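
The pattern above can be exercised in isolation. The following is a minimal, self-contained sketch of the same calls: a 1-D periodic Cartesian decomposition with reordering, followed by a neighbour lookup. The program name cart_demo and the variables ierr, comm1d, nbrleft and nbrright are illustrative, not part of modmpi; treat this as a sketch of the MPI usage, not as DALESURBAN code.

  program cart_demo
    use mpi
    implicit none
    integer :: ierr, nprocs, myid, comm1d, nbrleft, nbrright
    integer :: dims(1)
    logical :: periods(1)

    call MPI_INIT(ierr)
    call MPI_COMM_SIZE(MPI_COMM_WORLD, nprocs, ierr)

    ! let MPI choose the decomposition (with one dimension this
    ! trivially gives dims(1) = nprocs)
    dims(1) = 0
    call MPI_DIMS_CREATE(nprocs, 1, dims, ierr)

    ! periodic 1-D Cartesian topology; reorder = .true. lets MPI
    ! renumber the ranks
    periods(1) = .true.
    call MPI_CART_CREATE(MPI_COMM_WORLD, 1, dims, periods, .true., &
                         comm1d, ierr)

    ! the rank must be queried again on the (possibly reordered)
    ! Cartesian communicator
    call MPI_COMM_RANK(comm1d, myid, ierr)

    ! neighbours in direction 0; periodicity makes the shift wrap
    call MPI_CART_SHIFT(comm1d, 0, 1, nbrleft, nbrright, ierr)

    write(*,*) 'rank', myid, ': nbrleft =', nbrleft, &
               ', nbrright =', nbrright

    call MPI_FINALIZE(ierr)
  end program cart_demo

Run with e.g. mpirun -np 4; rank 0 then reports nbrleft = 3 and nbrright = 1, showing the periodic wrap-around.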