From 12e7922fafa6a2b818d437456e14fbf591d6d112 Mon Sep 17 00:00:00 2001
From: Martin Diehl
Date: Sat, 5 Feb 2022 18:32:17 +0100
Subject: [PATCH] use modern Fortran interface

not possible for HDF5...
---
 src/HDF5_utilities.f90                        | 33 ++++++++++---------
 ...hase_mechanical_eigen_thermalexpansion.f90 |  2 +-
 2 files changed, 18 insertions(+), 17 deletions(-)

diff --git a/src/HDF5_utilities.f90 b/src/HDF5_utilities.f90
index d2076c2cc..9dc53ee17 100644
--- a/src/HDF5_utilities.f90
+++ b/src/HDF5_utilities.f90
@@ -10,8 +10,9 @@ module HDF5_utilities
 #include <petsc/finclude/petscsys.h>
   use PETScSys
 #if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY)
-  use MPI
+  use MPI_f08
 #endif
+  use MPI, only: MPI_INFO_NULL_F90 => MPI_INFO_NULL
 #endif
 
   use prec
@@ -162,9 +163,9 @@ integer(HID_T) function HDF5_openFile(fileName,mode,parallel)
   character,    intent(in), optional :: mode
   logical,      intent(in), optional :: parallel
 
-  character :: m
-  integer(HID_T) :: plist_id
-  integer :: hdferr
+  character      :: m
+  integer(HID_T) :: plist_id
+  integer        :: hdferr
 
 
   if (present(mode)) then
@@ -178,9 +179,9 @@ integer(HID_T) function HDF5_openFile(fileName,mode,parallel)
 
 #ifdef PETSC
   if (present(parallel)) then
-    if (parallel) call H5Pset_fapl_mpio_f(plist_id, PETSC_COMM_WORLD, MPI_INFO_NULL, hdferr)
+    if (parallel) call H5Pset_fapl_mpio_f(plist_id, PETSC_COMM_WORLD, MPI_INFO_NULL_F90, hdferr)
   else
-    call H5Pset_fapl_mpio_f(plist_id, PETSC_COMM_WORLD, MPI_INFO_NULL, hdferr)
+    call H5Pset_fapl_mpio_f(plist_id, PETSC_COMM_WORLD, MPI_INFO_NULL_F90, hdferr)
   end if
   if(hdferr < 0) error stop 'HDF5 error'
 #endif
@@ -1860,7 +1861,7 @@ subroutine initialize_read(dset_id, filespace_id, memspace_id, plist_id, aplist_
     globalShape                                   !< shape of the dataset (all processes)
   integer(HID_T),   intent(out) :: dset_id, filespace_id, memspace_id, plist_id, aplist_id
 
-  integer, dimension(worldsize) :: &
+  integer(MPI_INTEGER_KIND), dimension(worldsize) :: &
     readSize                                      !< contribution of all processes
   integer :: hdferr
   integer(MPI_INTEGER_KIND) :: err_MPI
@@ -1871,13 +1872,13 @@ subroutine initialize_read(dset_id, filespace_id, memspace_id, plist_id, aplist_
   if(hdferr < 0) error stop 'HDF5 error'
 
 !--------------------------------------------------------------------------------------------------
-  readSize = 0
-  readSize(worldrank+1) = int(localShape(ubound(localShape,1)))
+  readSize = 0_MPI_INTEGER_KIND
+  readSize(worldrank+1) = int(localShape(ubound(localShape,1)),MPI_INTEGER_KIND)
 #ifdef PETSC
   if (parallel) then
     call H5Pset_dxpl_mpio_f(plist_id, H5FD_MPIO_COLLECTIVE_F, hdferr)
     if(hdferr < 0) error stop 'HDF5 error'
-    call MPI_allreduce(MPI_IN_PLACE,readSize,worldsize,MPI_INTEGER,MPI_SUM,PETSC_COMM_WORLD,err_MPI) ! get total output size over each process
+    call MPI_Allreduce(MPI_IN_PLACE,readSize,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,err_MPI) ! get total output size over each process
     if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
   end if
 #endif
@@ -1954,8 +1955,8 @@ subroutine initialize_write(dset_id, filespace_id, memspace_id, plist_id, &
     totalShape                                    !< shape of the dataset (all processes)
   integer(HID_T),   intent(out) :: dset_id, filespace_id, memspace_id, plist_id
 
-  integer, dimension(worldsize) :: writeSize      !< contribution of all processes
-  integer(HID_T) :: dcpl
+  integer(MPI_INTEGER_KIND), dimension(worldsize) :: writeSize !< contribution of all processes
+  integer(HID_T) :: dcpl
   integer :: hdferr
   integer(MPI_INTEGER_KIND) :: err_MPI
   integer(HSIZE_T), parameter :: chunkSize = 1024_HSIZE_T**2/8_HSIZE_T
@@ -1974,11 +1975,11 @@ subroutine initialize_write(dset_id, filespace_id, memspace_id, plist_id, &
 
 !--------------------------------------------------------------------------------------------------
 ! determine the global data layout among all processes
-  writeSize              = 0
-  writeSize(worldrank+1) = int(myShape(ubound(myShape,1)))
+  writeSize              = 0_MPI_INTEGER_KIND
+  writeSize(worldrank+1) = int(myShape(ubound(myShape,1)),MPI_INTEGER_KIND)
 #ifdef PETSC
   if (parallel) then
-    call MPI_allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INTEGER,MPI_SUM,PETSC_COMM_WORLD,err_MPI) ! get total output size over each process
+    call MPI_Allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,err_MPI) ! get total output size over each process
     if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
   end if
 #endif
@@ -2009,7 +2010,7 @@ subroutine initialize_write(dset_id, filespace_id, memspace_id, plist_id, &
       if (hdferr < 0) error stop 'HDF5 error'
     end if
   end if
-
+
 !--------------------------------------------------------------------------------------------------
 ! create dataspace in memory (local shape) and in file (global shape)
   call H5Screate_simple_f(size(myShape), myShape, memspace_id, hdferr, myShape)
diff --git a/src/phase_mechanical_eigen_thermalexpansion.f90 b/src/phase_mechanical_eigen_thermalexpansion.f90
index a5d9868a8..3c422616b 100644
--- a/src/phase_mechanical_eigen_thermalexpansion.f90
+++ b/src/phase_mechanical_eigen_thermalexpansion.f90
@@ -27,7 +27,7 @@ module function thermalexpansion_init(kinematics_length) result(myKinematics)
   integer, intent(in) :: kinematics_length
   logical, dimension(:,:), allocatable :: myKinematics
 
-  integer :: Ninstances,p,i,k
+  integer :: Ninstances, p, k
   class(tNode), pointer :: &
     phases, &
    phase, &
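
Note (not part of the patch): a minimal, self-contained sketch of the import pattern the change introduces. The modern mpi_f08 module serves the MPI calls, while the old-style INTEGER constant MPI_INFO_NULL is kept under the alias MPI_INFO_NULL_F90, since the commit message states the modern interface is "not possible for HDF5" and the patch passes this legacy handle to H5Pset_fapl_mpio_f. The program name below is hypothetical; it only demonstrates that both MPI modules can coexist via the rename and does not call HDF5.

program mpi_info_compat
  use MPI_f08                                        ! modern interface: communicator/info handles are derived types
  use MPI, only: MPI_INFO_NULL_F90 => MPI_INFO_NULL  ! legacy INTEGER constant, renamed to avoid a name clash
  implicit none

  call MPI_Init()                                    ! mpi_f08: the ierror argument is optional
  print *, 'legacy (INTEGER) MPI_INFO_NULL handle:', MPI_INFO_NULL_F90
  call MPI_Finalize()
end program mpi_info_compat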