decouple DAMASK default integer from MPI default integer
commit a3a3388855
parent 1c46e7ea1a
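Summary of the change: MPI error arguments and the world rank/size switch from DAMASK's default integer (ierr) to integer(MPI_INTEGER_KIND) (err_MPI), PETSc error arguments keep their own PetscErrorCode kind (err_PETSc), and the literals in the error checks carry the matching kind suffix (0_MPI_INTEGER_KIND). A minimal, self-contained sketch of the idea, with hypothetical kind names (lib_int, dam_int) standing in for MPI_INTEGER_KIND and the DAMASK default integer:

    ! Hedged sketch, not DAMASK code: why the error variable gets an explicit kind.
    ! If DAMASK were built with 64-bit default integers, a library expecting 32-bit
    ! integers for its error/status arguments would receive the wrong storage size.
    ! Carrying the library's kind in a named constant keeps the two choices independent.
    program kind_decoupling_demo
      implicit none
      integer, parameter :: lib_int = selected_int_kind(9)   ! stand-in for MPI_INTEGER_KIND
      integer, parameter :: dam_int = selected_int_kind(18)  ! stand-in for a 64-bit DAMASK default integer

      integer(lib_int) :: err_lib
      integer(dam_int) :: i

      err_lib = 0_lib_int                                    ! literals carry the matching kind suffix
      i = 1_dam_int
      if (err_lib /= 0_lib_int) error stop 'library error'
      print '(a,i0,a,i0)', 'library integer bytes: ', storage_size(err_lib)/8, &
                           ', DAMASK integer bytes: ', storage_size(i)/8
    end program kind_decoupling_demo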
@@ -144,7 +144,7 @@ contains
 !--------------------------------------------------------------------------------------------------
 subroutine spectral_utilities_init
 
-  PetscErrorCode :: ierr
+  PetscErrorCode :: err_PETSc
   integer :: i, j, k, &
     FFTW_planner_flag
   integer, dimension(3) :: k_s
@@ -193,13 +193,13 @@ subroutine spectral_utilities_init
         'add more using the "PETSc_options" keyword in numerics.yaml'
   flush(IO_STDOUT)
 
-  call PetscOptionsClear(PETSC_NULL_OPTIONS,ierr)
-  CHKERRQ(ierr)
-  if (debugPETSc) call PetscOptionsInsertString(PETSC_NULL_OPTIONS,trim(PETSCDEBUG),ierr)
-  CHKERRQ(ierr)
+  call PetscOptionsClear(PETSC_NULL_OPTIONS,err_PETSc)
+  CHKERRQ(err_PETSc)
+  if (debugPETSc) call PetscOptionsInsertString(PETSC_NULL_OPTIONS,trim(PETSCDEBUG),err_PETSc)
+  CHKERRQ(err_PETSc)
   call PetscOptionsInsertString(PETSC_NULL_OPTIONS,&
-                                num_grid%get_asString('PETSc_options',defaultVal=''),ierr)
-  CHKERRQ(ierr)
+                                num_grid%get_asString('PETSc_options',defaultVal=''),err_PETSc)
+  CHKERRQ(err_PETSc)
 
   grid1Red = grid(1)/2 + 1
   wgt = 1.0/real(product(grid),pReal)
@@ -559,7 +559,8 @@ end subroutine utilities_fourierGreenConvolution
 !--------------------------------------------------------------------------------------------------
 real(pReal) function utilities_divergenceRMS()
 
-  integer :: i, j, k, ierr
+  integer :: i, j, k
+  integer(MPI_INTEGER_KIND) :: err_MPI
   complex(pReal), dimension(3) :: rescaledGeom
 
   print'(/,1x,a)', '... calculating divergence ................................................'
@@ -589,8 +590,8 @@ real(pReal) function utilities_divergenceRMS()
                                             conjg(-xi1st(1:3,grid1Red,j,k))*rescaledGeom))**2)
   enddo; enddo
   if (grid(1) == 1) utilities_divergenceRMS = utilities_divergenceRMS * 0.5_pReal   ! counted twice in case of grid(1) == 1
-  call MPI_Allreduce(MPI_IN_PLACE,utilities_divergenceRMS,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr)
-  if (ierr /=0) error stop 'MPI error'
+  call MPI_Allreduce(MPI_IN_PLACE,utilities_divergenceRMS,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI)
+  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
   utilities_divergenceRMS = sqrt(utilities_divergenceRMS) * wgt   ! RMS in real space calculated with Parsevals theorem from Fourier space
 
 end function utilities_divergenceRMS
@@ -601,7 +602,8 @@ end function utilities_divergenceRMS
 !--------------------------------------------------------------------------------------------------
 real(pReal) function utilities_curlRMS()
 
-  integer :: i, j, k, l, ierr
+  integer :: i, j, k, l
+  integer(MPI_INTEGER_KIND) :: err_MPI
   complex(pReal), dimension(3,3) :: curl_fourier
   complex(pReal), dimension(3) :: rescaledGeom
 
@@ -649,8 +651,8 @@ real(pReal) function utilities_curlRMS()
                       + sum(curl_fourier%re**2 + curl_fourier%im**2)   ! this layer (Nyquist) does not have a conjugate complex counterpart (if grid(1) /= 1)
   enddo; enddo
 
-  call MPI_Allreduce(MPI_IN_PLACE,utilities_curlRMS,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr)
-  if (ierr /=0) error stop 'MPI error'
+  call MPI_Allreduce(MPI_IN_PLACE,utilities_curlRMS,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI)
+  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
   utilities_curlRMS = sqrt(utilities_curlRMS) * wgt
   if (grid(1) == 1) utilities_curlRMS = utilities_curlRMS * 0.5_pReal   ! counted twice in case of grid(1) == 1
 
@@ -799,8 +801,8 @@ subroutine utilities_constitutiveResponse(P,P_av,C_volAvg,C_minmaxAvg,&
   type(rotation), intent(in), optional :: rotation_BC   !< rotation of load frame
 
 
-  integer :: &
-    i,ierr
+  integer :: i
+  integer(MPI_INTEGER_KIND) :: err_MPI
   real(pReal), dimension(3,3,3,3) :: dPdF_max, dPdF_min
   real(pReal) :: dPdF_norm_max, dPdF_norm_min
   real(pReal), dimension(2) :: valueAndRank   !< pair of min/max norm of dPdF to synchronize min/max of dPdF
@@ -818,7 +820,8 @@ subroutine utilities_constitutiveResponse(P,P_av,C_volAvg,C_minmaxAvg,&
 
   P = reshape(homogenization_P, [3,3,grid(1),grid(2),grid3])
   P_av = sum(sum(sum(P,dim=5),dim=4),dim=3) * wgt
-  call MPI_Allreduce(MPI_IN_PLACE,P_av,9,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr)
+  call MPI_Allreduce(MPI_IN_PLACE,P_av,9,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI)
+  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
   if (debugRotation) print'(/,1x,a,/,2(3(2x,f12.4,1x)/),3(2x,f12.4,1x))', &
     'Piola--Kirchhoff stress (lab) / MPa =', transpose(P_av)*1.e-6_pReal
   if (present(rotation_BC)) P_av = rotation_BC%rotate(P_av)
@@ -842,22 +845,22 @@ subroutine utilities_constitutiveResponse(P,P_av,C_volAvg,C_minmaxAvg,&
   enddo
 
   valueAndRank = [dPdF_norm_max,real(worldrank,pReal)]
-  call MPI_Allreduce(MPI_IN_PLACE,valueAndRank,1, MPI_2DOUBLE_PRECISION, MPI_MAXLOC, MPI_COMM_WORLD, ierr)
-  if (ierr /= 0) error stop 'MPI error'
-  call MPI_Bcast(dPdF_max,81,MPI_DOUBLE,int(valueAndRank(2)),MPI_COMM_WORLD, ierr)
-  if (ierr /= 0) error stop 'MPI error'
+  call MPI_Allreduce(MPI_IN_PLACE,valueAndRank,1, MPI_2DOUBLE_PRECISION, MPI_MAXLOC, MPI_COMM_WORLD, err_MPI)
+  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+  call MPI_Bcast(dPdF_max,81,MPI_DOUBLE,int(valueAndRank(2)),MPI_COMM_WORLD, err_MPI)
+  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
 
   valueAndRank = [dPdF_norm_min,real(worldrank,pReal)]
-  call MPI_Allreduce(MPI_IN_PLACE,valueAndRank,1, MPI_2DOUBLE_PRECISION, MPI_MINLOC, MPI_COMM_WORLD, ierr)
-  if (ierr /= 0) error stop 'MPI error'
-  call MPI_Bcast(dPdF_min,81,MPI_DOUBLE,int(valueAndRank(2)),MPI_COMM_WORLD, ierr)
-  if (ierr /= 0) error stop 'MPI error'
+  call MPI_Allreduce(MPI_IN_PLACE,valueAndRank,1, MPI_2DOUBLE_PRECISION, MPI_MINLOC, MPI_COMM_WORLD, err_MPI)
+  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+  call MPI_Bcast(dPdF_min,81,MPI_DOUBLE,int(valueAndRank(2)),MPI_COMM_WORLD, err_MPI)
+  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
 
   C_minmaxAvg = 0.5_pReal*(dPdF_max + dPdF_min)
 
   C_volAvg = sum(homogenization_dPdF,dim=5)
-  call MPI_Allreduce(MPI_IN_PLACE,C_volAvg,81,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr)
-  if (ierr /= 0) error stop 'MPI error'
+  call MPI_Allreduce(MPI_IN_PLACE,C_volAvg,81,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI)
+  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
   C_volAvg = C_volAvg * wgt
 
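A minimal sketch, assuming an MPI library with the mpi_f08 module, of the MAXLOC idiom used above for the stiffness bounds: reduce a (value, rank) pair with MPI_MAXLOC to learn which rank owns the global maximum, then broadcast that rank's payload to everyone. Names and the rank-local quantity are hypothetical.

    ! Hedged sketch of the MAXLOC-then-broadcast pattern (not DAMASK code).
    program maxloc_bcast_demo
      use mpi_f08
      use, intrinsic :: iso_fortran_env, only: real64
      implicit none
      real(real64) :: valueAndRank(2), payload
      integer :: rank

      call MPI_Init()
      call MPI_Comm_rank(MPI_COMM_WORLD, rank)
      payload = real(rank, real64)**2                              ! some rank-local quantity
      valueAndRank = [payload, real(rank, real64)]                 ! pack (value, owning rank)
      call MPI_Allreduce(MPI_IN_PLACE, valueAndRank, 1, MPI_2DOUBLE_PRECISION, MPI_MAXLOC, MPI_COMM_WORLD)
      call MPI_Bcast(payload, 1, MPI_DOUBLE_PRECISION, int(valueAndRank(2)), MPI_COMM_WORLD)
      if (rank == 0) print '(a,f0.1,a,i0)', 'max value ', valueAndRank(1), ' owned by rank ', int(valueAndRank(2))
      call MPI_Finalize()
    end program maxloc_bcast_demo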
@@ -906,12 +909,13 @@ function utilities_forwardField(Delta_t,field_lastInc,rate,aim)
   real(pReal), dimension(3,3,grid(1),grid(2),grid3) :: &
     utilities_forwardField
   real(pReal), dimension(3,3) :: fieldDiff   !< <a + adot*t> - aim
-  PetscErrorCode :: ierr
+  integer(MPI_INTEGER_KIND) :: err_MPI
 
   utilities_forwardField = field_lastInc + rate*Delta_t
   if (present(aim)) then   !< correct to match average
     fieldDiff = sum(sum(sum(utilities_forwardField,dim=5),dim=4),dim=3)*wgt
-    call MPI_Allreduce(MPI_IN_PLACE,fieldDiff,9,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr)
+    call MPI_Allreduce(MPI_IN_PLACE,fieldDiff,9,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI)
+    if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
     fieldDiff = fieldDiff - aim
     utilities_forwardField = utilities_forwardField - &
                              spread(spread(spread(fieldDiff,3,grid(1)),4,grid(2)),5,grid3)
@@ -982,8 +986,8 @@ subroutine utilities_updateCoords(F)
   integer :: &
     i,j,k,n, &
     rank_t, rank_b, &
-    c, &
-    ierr
+    c
+  integer(MPI_INTEGER_KIND) :: err_MPI
 #if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY)
   type(MPI_Request), dimension(4) :: request
   type(MPI_Status),  dimension(4) :: status
@@ -1025,8 +1029,8 @@ subroutine utilities_updateCoords(F)
 !--------------------------------------------------------------------------------------------------
 ! average F
   if (grid3Offset == 0) Favg = real(tensorField_fourier(1:3,1:3,1,1,1),pReal)*wgt
-  call MPI_Bcast(Favg,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
-  if (ierr /=0) error stop 'MPI error'
+  call MPI_Bcast(Favg,9,MPI_DOUBLE,0,MPI_COMM_WORLD,err_MPI)
+  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
 
 !--------------------------------------------------------------------------------------------------
 ! pad cell center fluctuations along z-direction (needed when running MPI simulation)
@@ -1036,19 +1040,19 @@ subroutine utilities_updateCoords(F)
   rank_b = modulo(worldrank-1,worldsize)
 
   ! send bottom layer to process below
-  call MPI_Isend(IPfluct_padded(:,:,:,2),      c,MPI_DOUBLE,rank_b,0,MPI_COMM_WORLD,request(1),ierr)
-  if (ierr /=0) error stop 'MPI error'
-  call MPI_Irecv(IPfluct_padded(:,:,:,grid3+2),c,MPI_DOUBLE,rank_t,0,MPI_COMM_WORLD,request(2),ierr)
-  if (ierr /=0) error stop 'MPI error'
+  call MPI_Isend(IPfluct_padded(:,:,:,2),      c,MPI_DOUBLE,rank_b,0,MPI_COMM_WORLD,request(1),err_MPI)
+  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+  call MPI_Irecv(IPfluct_padded(:,:,:,grid3+2),c,MPI_DOUBLE,rank_t,0,MPI_COMM_WORLD,request(2),err_MPI)
+  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
 
   ! send top layer to process above
-  call MPI_Isend(IPfluct_padded(:,:,:,grid3+1),c,MPI_DOUBLE,rank_t,1,MPI_COMM_WORLD,request(3),ierr)
-  if (ierr /=0) error stop 'MPI error'
-  call MPI_Irecv(IPfluct_padded(:,:,:,1),      c,MPI_DOUBLE,rank_b,1,MPI_COMM_WORLD,request(4),ierr)
-  if (ierr /=0) error stop 'MPI error'
+  call MPI_Isend(IPfluct_padded(:,:,:,grid3+1),c,MPI_DOUBLE,rank_t,1,MPI_COMM_WORLD,request(3),err_MPI)
+  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+  call MPI_Irecv(IPfluct_padded(:,:,:,1),      c,MPI_DOUBLE,rank_b,1,MPI_COMM_WORLD,request(4),err_MPI)
+  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
 
-  call MPI_Waitall(4,request,status,ierr)
-  if (ierr /=0) error stop 'MPI error'
+  call MPI_Waitall(4,request,status,err_MPI)
+  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
 #if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY)
   ! ToDo
 #else
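A minimal sketch, assuming mpi_f08 and a hypothetical field(n,0:nz+1) layout with one ghost layer on each side, of the periodic halo exchange above: post all four nonblocking transfers, then complete them with MPI_Waitall.

    ! Hedged sketch of a periodic z-halo exchange (not DAMASK code).
    subroutine exchange_halo(field, comm)
      use mpi_f08
      use, intrinsic :: iso_fortran_env, only: real64
      implicit none
      real(real64), intent(inout) :: field(:,0:)           ! field(n, 0:nz+1), ghost layers at 0 and nz+1
      type(MPI_Comm), intent(in) :: comm

      type(MPI_Request) :: request(4)
      integer :: rank, nprocs, rank_t, rank_b, nz, c

      call MPI_Comm_rank(comm, rank)
      call MPI_Comm_size(comm, nprocs)
      nz = size(field,2) - 2
      c  = size(field,1)
      rank_t = modulo(rank+1, nprocs)                       ! neighbour above (periodic)
      rank_b = modulo(rank-1, nprocs)                       ! neighbour below (periodic)

      call MPI_Isend(field(:,1),    c, MPI_DOUBLE_PRECISION, rank_b, 0, comm, request(1))   ! bottom layer down
      call MPI_Irecv(field(:,nz+1), c, MPI_DOUBLE_PRECISION, rank_t, 0, comm, request(2))   ! top ghost from above
      call MPI_Isend(field(:,nz),   c, MPI_DOUBLE_PRECISION, rank_t, 1, comm, request(3))   ! top layer up
      call MPI_Irecv(field(:,0),    c, MPI_DOUBLE_PRECISION, rank_b, 1, comm, request(4))   ! bottom ghost from below
      call MPI_Waitall(4, request, MPI_STATUSES_IGNORE)
    end subroutine exchange_halo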
@@ -20,12 +20,19 @@ module parallelization
   implicit none
   private
 
-  integer, protected, public :: &
-    worldrank = 0, &                      !< MPI worldrank (/=0 for MPI simulations only)
-    worldsize = 1                         !< MPI worldsize (/=1 for MPI simulations only)
+#ifndef PETSC
+  integer, parameter, public :: &
+    MPI_INTEGER_KIND = pI64
+  integer(MPI_INTEGER_KIND), parameter, public :: &
+    worldrank = 0_MPI_INTEGER_KIND, &     !< MPI dummy worldrank
+    worldsize = 1_MPI_INTEGER_KIND        !< MPI dummy worldsize
+#else
+  integer(MPI_INTEGER_KIND), protected, public :: &
+    worldrank = 0_MPI_INTEGER_KIND, &     !< MPI worldrank (/=0 for MPI simulations only)
+    worldsize = 1_MPI_INTEGER_KIND        !< MPI worldsize (/=1 for MPI simulations only)
+#endif
 
 #ifndef PETSC
 public :: parallelization_bcast_str
 
 contains
 subroutine parallelization_bcast_str(string)
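A minimal sketch, with hypothetical names, of the fallback idea in the #ifndef PETSC branch above: when no MPI library is present, the module exports a dummy kind constant and constant rank/size, so client code can keep writing integer(MPI_INTEGER_KIND) :: err_MPI unchanged. With the real library (the #else branch), the kind would instead come from the MPI/PETSc modules.

    ! Hedged sketch of a serial fallback module (not DAMASK code).
    module no_mpi_fallback
      use, intrinsic :: iso_fortran_env, only: int64
      implicit none
      private

      integer, parameter, public :: MPI_INTEGER_KIND = int64   ! dummy kind, stands in for pI64
      integer(MPI_INTEGER_KIND), parameter, public :: &
        worldrank = 0_MPI_INTEGER_KIND, &                      ! dummy rank: always process 0
        worldsize = 1_MPI_INTEGER_KIND                         ! dummy size: a single process
    end module no_mpi_fallback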
@@ -44,7 +51,7 @@ contains
 !--------------------------------------------------------------------------------------------------
 subroutine parallelization_init
 
-  integer :: err, typeSize
+  integer(MPI_INTEGER_KIND) :: err_MPI, typeSize
 !$ integer :: got_env, threadLevel
 !$ integer(pI32) :: OMP_NUM_THREADS
 !$ character(len=6) NumThreadsString
@@ -54,8 +61,8 @@ subroutine parallelization_init
 #ifdef _OPENMP
   ! If openMP is enabled, check if the MPI libary supports it and initialize accordingly.
   ! Otherwise, the first call to PETSc will do the initialization.
-  call MPI_Init_Thread(MPI_THREAD_FUNNELED,threadLevel,err)
-  if (err /= 0) error stop 'MPI init failed'
+  call MPI_Init_Thread(MPI_THREAD_FUNNELED,threadLevel,err_MPI)
+  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI init failed'
   if (threadLevel<MPI_THREAD_FUNNELED) error stop 'MPI library does not support OpenMP'
 #endif
 
@@ -73,27 +80,34 @@ subroutine parallelization_init
 #endif
   CHKERRQ(err_PETSc)
 
-  call MPI_Comm_rank(MPI_COMM_WORLD,worldrank,err)
-  if (err /= 0) error stop 'Could not determine worldrank'
+  call MPI_Comm_rank(MPI_COMM_WORLD,worldrank,err_MPI)
+  if (err_MPI /= 0_MPI_INTEGER_KIND) &
+    error stop 'Could not determine worldrank'
 
   if (worldrank == 0) print'(/,1x,a)', '<<<+- parallelization init -+>>>'
 
-  call MPI_Comm_size(MPI_COMM_WORLD,worldsize,err)
-  if (err /= 0) error stop 'Could not determine worldsize'
+  call MPI_Comm_size(MPI_COMM_WORLD,worldsize,err_MPI)
+  if (err_MPI /= 0_MPI_INTEGER_KIND) &
+    error stop 'Could not determine worldsize'
   if (worldrank == 0) print'(/,1x,a,i3)', 'MPI processes: ',worldsize
 
-  call MPI_Type_size(MPI_INTEGER,typeSize,err)
-  if (err /= 0) error stop 'Could not determine MPI integer size'
-  if (typeSize*8 /= bit_size(0)) error stop 'Mismatch between MPI and DAMASK integer'
+  call MPI_Type_size(MPI_INTEGER,typeSize,err_MPI)
+  if (err_MPI /= 0_MPI_INTEGER_KIND) &
+    error stop 'Could not determine size of MPI_INTEGER'
+  if (typeSize*8_MPI_INTEGER_KIND /= int(bit_size(0),MPI_INTEGER_KIND)) &
+    error stop 'Mismatch between MPI_INTEGER and DAMASK default integer'
 
-  call MPI_Type_size(MPI_INTEGER8,typeSize,err)
-  if (err /= 0) error stop 'Could not determine MPI integer size'
-  if (int(typeSize,pI64)*8_pI64 /= bit_size(0_pI64)) &
-    error stop 'Mismatch between MPI and DAMASK integer (long long)'
+  call MPI_Type_size(MPI_INTEGER8,typeSize,err_MPI)
+  if (err_MPI /= 0) &
+    error stop 'Could not determine size of MPI_INTEGER8'
+  if (typeSize*8_MPI_INTEGER_KIND /= int(bit_size(0_pI64),MPI_INTEGER_KIND)) &
+    error stop 'Mismatch between MPI_INTEGER8 and DAMASK pI64'
 
-  call MPI_Type_size(MPI_DOUBLE,typeSize,err)
-  if (err /= 0) error stop 'Could not determine MPI real size'
-  if (typeSize*8 /= storage_size(0.0_pReal)) error stop 'Mismatch between MPI and DAMASK real'
+  call MPI_Type_size(MPI_DOUBLE,typeSize,err_MPI)
+  if (err_MPI /= 0_MPI_INTEGER_KIND) &
+    error stop 'Could not determine size of MPI_DOUBLE'
+  if (typeSize*8_MPI_INTEGER_KIND /= int(storage_size(0.0_pReal),MPI_INTEGER_KIND)) &
+    error stop 'Mismatch between MPI_DOUBLE and DAMASK pReal'
 
   if (worldrank /= 0) then
     close(OUTPUT_UNIT)   ! disable output
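A minimal, standalone sketch of the start-up size checks above, assuming an MPI library providing the mpi_f08 module: compare what an MPI datatype occupies with what the corresponding Fortran kind occupies, and stop before any communication if they disagree. MPI_DOUBLE_PRECISION/MPI_INTEGER are the Fortran-side datatype handles, not necessarily the exact handles used in the diff.

    ! Hedged sketch of a datatype/kind consistency check (not DAMASK code).
    program check_mpi_sizes
      use mpi_f08
      use, intrinsic :: iso_fortran_env, only: real64
      implicit none
      integer :: typeSize

      call MPI_Init()
      call MPI_Type_size(MPI_DOUBLE_PRECISION, typeSize)
      if (typeSize*8 /= storage_size(0.0_real64)) &
        error stop 'MPI_DOUBLE_PRECISION does not match real64'
      call MPI_Type_size(MPI_INTEGER, typeSize)
      if (typeSize*8 /= bit_size(0)) &
        error stop 'MPI_INTEGER does not match the default integer'
      call MPI_Finalize()
    end program check_mpi_sizes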
@@ -124,14 +138,14 @@ subroutine parallelization_bcast_str(string)
 
   character(len=:), allocatable, intent(inout) :: string
 
-  integer :: strlen, ierr   ! pI64 for strlen not supported by MPI
+  integer(MPI_INTEGER_KIND) :: strlen, err_MPI
 
 
-  if (worldrank == 0) strlen = len(string)
-  call MPI_Bcast(strlen,1,MPI_INTEGER,0,MPI_COMM_WORLD, ierr)
+  if (worldrank == 0) strlen = len(string,MPI_INTEGER_KIND)
+  call MPI_Bcast(strlen,1_MPI_INTEGER_KIND,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD, err_MPI)
   if (worldrank /= 0) allocate(character(len=strlen)::string)
 
-  call MPI_Bcast(string,strlen,MPI_CHARACTER,0,MPI_COMM_WORLD, ierr)
+  call MPI_Bcast(string,strlen,MPI_CHARACTER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD, err_MPI)
 
 
 end subroutine parallelization_bcast_str
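A minimal sketch, assuming mpi_f08, of the two-step broadcast implemented above: share the length first so the other ranks can allocate the deferred-length string, then share the characters themselves. Names are hypothetical.

    ! Hedged sketch of broadcasting a deferred-length string (not DAMASK code).
    subroutine bcast_str(string, comm)
      use mpi_f08
      implicit none
      character(len=:), allocatable, intent(inout) :: string
      type(MPI_Comm), intent(in) :: comm

      integer :: rank, strlen

      call MPI_Comm_rank(comm, rank)
      if (rank == 0) strlen = len(string)
      call MPI_Bcast(strlen, 1, MPI_INTEGER, 0, comm)                  ! step 1: share the length
      if (rank /= 0) allocate(character(len=strlen) :: string)
      call MPI_Bcast(string, strlen, MPI_CHARACTER, 0, comm)           ! step 2: share the payload
    end subroutine bcast_str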
@@ -519,7 +519,8 @@ subroutine results_mapping_phase(ID,entry,label)
     dt_id
 
   integer(SIZE_T) :: type_size_string, type_size_int
-  integer :: hdferr, ierr, ce, co
+  integer :: hdferr, ce, co
+  integer(MPI_INTEGER_KIND) :: err_MPI
 
 
   writeSize = 0
@@ -536,8 +537,8 @@ subroutine results_mapping_phase(ID,entry,label)
   call h5pset_dxpl_mpio_f(plist_id, H5FD_MPIO_COLLECTIVE_F, hdferr)
   if(hdferr < 0) error stop 'HDF5 error'
 
-  call MPI_Allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,ierr)      ! get output at each process
-  if(ierr /= 0) error stop 'MPI error'
+  call MPI_Allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,err_MPI)   ! get output at each process
+  if(err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
 
   entryOffset = 0_pI64
   do co = 1, size(ID,1)
@@ -545,8 +546,8 @@ subroutine results_mapping_phase(ID,entry,label)
       entryOffset(ID(co,ce),worldrank) = entryOffset(ID(co,ce),worldrank) +1_pI64
     end do
   end do
-  call MPI_Allreduce(MPI_IN_PLACE,entryOffset,size(entryOffset),MPI_INTEGER8,MPI_SUM,MPI_COMM_WORLD,ierr)! get offset at each process
-  if(ierr /= 0) error stop 'MPI error'
+  call MPI_Allreduce(MPI_IN_PLACE,entryOffset,size(entryOffset),MPI_INTEGER8,MPI_SUM,MPI_COMM_WORLD,err_MPI)! get offset at each process
+  if(err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
   entryOffset(:,worldrank) = sum(entryOffset(:,0:worldrank-1),2)
   do co = 1, size(ID,1)
     do ce = 1, size(ID,2)
@@ -674,7 +675,8 @@ subroutine results_mapping_homogenization(ID,entry,label)
     dt_id
 
   integer(SIZE_T) :: type_size_string, type_size_int
-  integer :: hdferr, ierr, ce
+  integer :: hdferr, ce
+  integer(MPI_INTEGER_KIND) :: err_MPI
 
 
   writeSize = 0
@@ -691,15 +693,15 @@ subroutine results_mapping_homogenization(ID,entry,label)
   call h5pset_dxpl_mpio_f(plist_id, H5FD_MPIO_COLLECTIVE_F, hdferr)
   if(hdferr < 0) error stop 'HDF5 error'
 
-  call MPI_Allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,ierr)      ! get output at each process
-  if(ierr /= 0) error stop 'MPI error'
+  call MPI_Allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,err_MPI)   ! get output at each process
+  if(err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
 
   entryOffset = 0_pI64
   do ce = 1, size(ID,1)
     entryOffset(ID(ce),worldrank) = entryOffset(ID(ce),worldrank) +1_pI64
   end do
-  call MPI_Allreduce(MPI_IN_PLACE,entryOffset,size(entryOffset),MPI_INTEGER8,MPI_SUM,MPI_COMM_WORLD,ierr)! get offset at each process
-  if(ierr /= 0) error stop 'MPI error'
+  call MPI_Allreduce(MPI_IN_PLACE,entryOffset,size(entryOffset),MPI_INTEGER8,MPI_SUM,MPI_COMM_WORLD,err_MPI)! get offset at each process
+  if(err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
   entryOffset(:,worldrank) = sum(entryOffset(:,0:worldrank-1),2)
   do ce = 1, size(ID,1)
     entryGlobal(ce) = int(entry(ce),pI64) -1_pI64 + entryOffset(ID(ce),worldrank)