From fd3c18ea4db4e252031f9422f2c888d9b1754ed7 Mon Sep 17 00:00:00 2001
From: Martin Diehl
Date: Wed, 12 Jan 2022 17:58:44 +0100
Subject: [PATCH] calculate global entry in 64 bit

---
 src/HDF5_utilities.f90  |  4 ++--
 src/parallelization.f90 |  3 ++-
 src/results.f90         | 28 ++++++++++++++--------------
 3 files changed, 18 insertions(+), 17 deletions(-)

diff --git a/src/HDF5_utilities.f90 b/src/HDF5_utilities.f90
index e064997e5..7645189c6 100644
--- a/src/HDF5_utilities.f90
+++ b/src/HDF5_utilities.f90
@@ -1877,7 +1877,7 @@ subroutine initialize_read(dset_id, filespace_id, memspace_id, plist_id, aplist_
   if (parallel) then
     call H5Pset_dxpl_mpio_f(plist_id, H5FD_MPIO_COLLECTIVE_F, hdferr)
     if(hdferr < 0) error stop 'HDF5 error'
-    call MPI_allreduce(MPI_IN_PLACE,readSize,worldsize,MPI_INT,MPI_SUM,PETSC_COMM_WORLD,ierr) ! get total output size over each process
+    call MPI_allreduce(MPI_IN_PLACE,readSize,worldsize,MPI_INTEGER,MPI_SUM,PETSC_COMM_WORLD,ierr) ! get total output size over each process
     if (ierr /= 0) error stop 'MPI error'
   end if
 #endif
@@ -1977,7 +1977,7 @@ subroutine initialize_write(dset_id, filespace_id, memspace_id, plist_id, &
   writeSize(worldrank+1) = int(myShape(ubound(myShape,1)))
 #ifdef PETSC
   if (parallel) then
-    call MPI_allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INT,MPI_SUM,PETSC_COMM_WORLD,ierr) ! get total output size over each process
+    call MPI_allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INTEGER,MPI_SUM,PETSC_COMM_WORLD,ierr) ! get total output size over each process
     if (ierr /= 0) error stop 'MPI error'
   end if
 #endif
diff --git a/src/parallelization.f90 b/src/parallelization.f90
index 17f9698a5..a4f7e5ae1 100644
--- a/src/parallelization.f90
+++ b/src/parallelization.f90
@@ -88,7 +88,8 @@ subroutine parallelization_init
 
   call MPI_Type_size(MPI_INTEGER8,typeSize,err)
   if (err /= 0) error stop 'Could not determine MPI integer size'
-  if (typeSize*8 /= bit_size(0_pI64)) error stop 'Mismatch between MPI and DAMASK integer (long long)'
+  if (int(typeSize,pI64)*8_pI64 /= bit_size(0_pI64)) &
+    error stop 'Mismatch between MPI and DAMASK integer (long long)'
 
   call MPI_Type_size(MPI_DOUBLE,typeSize,err)
   if (err /= 0) error stop 'Could not determine MPI real size'
diff --git a/src/results.f90 b/src/results.f90
index 31f3bb173..48cce82f9 100644
--- a/src/results.f90
+++ b/src/results.f90
@@ -499,8 +499,8 @@ subroutine results_mapping_phase(ID,entry,label)
   integer(pI64), dimension(size(entry,1),size(entry,2)) :: &
     entryGlobal
-  integer, dimension(size(label),0:worldsize-1) :: entryOffset !< offset in entry counting per process
-  integer, dimension(0:worldsize-1) :: writeSize !< amount of data written per process
+  integer(pI64), dimension(size(label),0:worldsize-1) :: entryOffset !< offset in entry counting per process
+  integer, dimension(0:worldsize-1) :: writeSize !< amount of data written per process
   integer(HSIZE_T), dimension(2) :: &
     myShape, & !< shape of the dataset (this process)
     myOffset, &
@@ -536,21 +536,21 @@ subroutine results_mapping_phase(ID,entry,label)
   call h5pset_dxpl_mpio_f(plist_id, H5FD_MPIO_COLLECTIVE_F, hdferr)
   if(hdferr < 0) error stop 'HDF5 error'
 
-  call MPI_Allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INT,MPI_SUM,MPI_COMM_WORLD,ierr) ! get output at each process
+  call MPI_Allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,ierr) ! get output at each process
   if(ierr /= 0) error stop 'MPI error'
 
-  entryOffset = 0
+  entryOffset = 0_pI64
   do co = 1, size(ID,1)
     do ce = 1, size(ID,2)
-      entryOffset(ID(co,ce),worldrank) = entryOffset(ID(co,ce),worldrank) +1
+      entryOffset(ID(co,ce),worldrank) = entryOffset(ID(co,ce),worldrank) +1_pI64
     end do
   end do
-  call MPI_Allreduce(MPI_IN_PLACE,entryOffset,size(entryOffset),MPI_INT,MPI_SUM,MPI_COMM_WORLD,ierr)! get offset at each process
+  call MPI_Allreduce(MPI_IN_PLACE,entryOffset,size(entryOffset),MPI_INTEGER8,MPI_SUM,MPI_COMM_WORLD,ierr)! get offset at each process
   if(ierr /= 0) error stop 'MPI error'
   entryOffset(:,worldrank) = sum(entryOffset(:,0:worldrank-1),2)
   do co = 1, size(ID,1)
     do ce = 1, size(ID,2)
-      entryGlobal(co,ce) = int(entry(co,ce) -1 + entryOffset(ID(co,ce),worldrank),pI64)
+      entryGlobal(co,ce) = int(entry(co,ce),pI64) -1_pI64 + entryOffset(ID(co,ce),worldrank)
     end do
   end do
 #endif
@@ -654,8 +654,8 @@ subroutine results_mapping_homogenization(ID,entry,label)
   integer(pI64), dimension(size(entry,1)) :: &
     entryGlobal
-  integer, dimension(size(label),0:worldsize-1) :: entryOffset !< offset in entry counting per process
-  integer, dimension(0:worldsize-1) :: writeSize !< amount of data written per process
+  integer(pI64), dimension(size(label),0:worldsize-1) :: entryOffset !< offset in entry counting per process
+  integer, dimension(0:worldsize-1) :: writeSize !< amount of data written per process
   integer(HSIZE_T), dimension(1) :: &
     myShape, & !< shape of the dataset (this process)
     myOffset, &
@@ -691,18 +691,18 @@ subroutine results_mapping_homogenization(ID,entry,label)
   call h5pset_dxpl_mpio_f(plist_id, H5FD_MPIO_COLLECTIVE_F, hdferr)
   if(hdferr < 0) error stop 'HDF5 error'
 
-  call MPI_Allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INT,MPI_SUM,MPI_COMM_WORLD,ierr) ! get output at each process
+  call MPI_Allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,ierr) ! get output at each process
   if(ierr /= 0) error stop 'MPI error'
 
-  entryOffset = 0
+  entryOffset = 0_pI64
   do ce = 1, size(ID,1)
-    entryOffset(ID(ce),worldrank) = entryOffset(ID(ce),worldrank) +1
+    entryOffset(ID(ce),worldrank) = entryOffset(ID(ce),worldrank) +1_pI64
   end do
-  call MPI_Allreduce(MPI_IN_PLACE,entryOffset,size(entryOffset),MPI_INT,MPI_SUM,MPI_COMM_WORLD,ierr)! get offset at each process
+  call MPI_Allreduce(MPI_IN_PLACE,entryOffset,size(entryOffset),MPI_INTEGER8,MPI_SUM,MPI_COMM_WORLD,ierr)! get offset at each process
   if(ierr /= 0) error stop 'MPI error'
   entryOffset(:,worldrank) = sum(entryOffset(:,0:worldrank-1),2)
   do ce = 1, size(ID,1)
-    entryGlobal(ce) = int(entry(ce) -1 + entryOffset(ID(ce),worldrank),pI64)
+    entryGlobal(ce) = int(entry(ce),pI64) -1_pI64 + entryOffset(ID(ce),worldrank)
   end do
 #endif