DAMASK_EICMD/src/parallelization.f90

!--------------------------------------------------------------------------------------------------
!> @author Martin Diehl, Max-Planck-Institut für Eisenforschung GmbH
!> @brief Provides variables and initialization related to parallelization (OpenMP, MPI)
!--------------------------------------------------------------------------------------------------
module parallelization
use, intrinsic :: ISO_fortran_env, only: &
OUTPUT_UNIT
#ifdef PETSc
#include <petsc/finclude/petscsys.h>
use petscsys
#endif
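! OMP_LIB provides thread control (e.g. omp_set_num_threads); the !$ sentinel keeps the line inert unless compiled with OpenMP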
!$ use OMP_LIB
use prec
implicit none
private
integer, protected, public :: &
worldrank = 0, & !< MPI worldrank (/=0 for MPI simulations only)
worldsize = 1 !< MPI worldsize (/=1 for MPI simulations only)
public :: &
parallelization_init
contains
!--------------------------------------------------------------------------------------------------
!> @brief Initialize the parallelization: MPI (when compiled with PETSc) and the OpenMP thread count
!--------------------------------------------------------------------------------------------------
subroutine parallelization_init
integer :: err, typeSize
!$ integer :: got_env, DAMASK_NUM_THREADS, threadLevel
!$ character(len=6) NumThreadsString
#ifdef PETSc
PetscErrorCode :: petsc_err
#else
print'(/,a)', ' <<<+- parallelization init -+>>>'; flush(OUTPUT_UNIT)
#endif
#ifdef PETSc
#ifdef _OPENMP
! If OpenMP is enabled, check whether the MPI library supports it and initialize accordingly.
! Otherwise, the first call to PETSc will do the initialization.
call MPI_Init_Thread(MPI_THREAD_FUNNELED,threadLevel,err)
if (err /= 0) error stop 'MPI init failed'
if (threadLevel<MPI_THREAD_FUNNELED) error stop 'MPI library does not support OpenMP'
#endif
call PETScInitializeNoArguments(petsc_err) ! first line in the code according to PETSc manual
CHKERRQ(petsc_err)
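! determine the rank of this process within the PETSc world communicator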
call MPI_Comm_rank(PETSC_COMM_WORLD,worldrank,err)
if (err /= 0) error stop 'Could not determine worldrank'
if (worldrank == 0) print'(/,a)', ' <<<+- parallelization init -+>>>'
call MPI_Comm_size(PETSC_COMM_WORLD,worldsize,err)
if (err /= 0) error stop 'Could not determine worldsize'
if (worldrank == 0) print'(a,i3)', ' MPI processes: ',worldsize
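! sanity checks: the MPI datatypes must match the storage sizes of DAMASK's default integer and real kinds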
call MPI_Type_size(MPI_INTEGER,typeSize,err)
if (err /= 0) error stop 'Could not determine MPI integer size'
if (typeSize*8 /= bit_size(0)) error stop 'Mismatch between MPI and DAMASK integer'
call MPI_Type_size(MPI_DOUBLE,typeSize,err)
if (err /= 0) error stop 'Could not determine MPI real size'
if (typeSize*8 /= storage_size(0.0_pReal)) error stop 'Mismatch between MPI and DAMASK real'
#endif
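! suppress output of all processes except the master (worldrank 0)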
if (worldrank /= 0) then
close(OUTPUT_UNIT) ! disable output
open(OUTPUT_UNIT,file='/dev/null',status='replace') ! close() alone will leave some temp files in cwd
endif
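! determine the number of OpenMP threads from the DAMASK_NUM_THREADS environment variable (fall back to 1 if unset or invalid)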
!$ call get_environment_variable(name='DAMASK_NUM_THREADS',value=NumThreadsString,STATUS=got_env)
!$ if(got_env /= 0) then
!$ print*, 'Could not determine value of $DAMASK_NUM_THREADS'
!$ DAMASK_NUM_THREADS = 1_pI32
!$ else
!$ read(NumThreadsString,'(i6)') DAMASK_NUM_THREADS
!$ if (DAMASK_NUM_THREADS < 1_pI32) then
!$ print*, 'Invalid DAMASK_NUM_THREADS: '//trim(NumThreadsString)
!$ DAMASK_NUM_THREADS = 1_pI32
!$ endif
!$ endif
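! report and apply the resolved thread count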
!$ print'(a,i2)', ' DAMASK_NUM_THREADS: ',DAMASK_NUM_THREADS
!$ call omp_set_num_threads(DAMASK_NUM_THREADS)
end subroutine parallelization_init
end module parallelization
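! A minimal usage sketch (the driver program below is illustrative only, not part of DAMASK):
!
!   program driver
!     use parallelization, only: parallelization_init, worldrank, worldsize
!     implicit none
!     call parallelization_init
!     if (worldrank == 0) print '(a,i0,a)', 'running on ', worldsize, ' MPI process(es)'
!   end program driver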