diff --git a/CMakeLists.txt b/CMakeLists.txt
index 1612f22fb..ceddd8c53 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -29,7 +29,7 @@ file (STRINGS ${PROJECT_SOURCE_DIR}/VERSION DAMASK_VERSION)
 message ("\nBuilding ${CMAKE_PROJECT_NAME} ${DAMASK_VERSION}\n")

-add_definitions (-DPETSc)
+add_definitions (-DPETSC)
 add_definitions (-DDAMASKVERSION="${DAMASK_VERSION}")

 if (CMAKE_BUILD_TYPE STREQUAL "")
diff --git a/src/CPFEM2.f90 b/src/CPFEM2.f90
index 07f64b72a..ea5820852 100644
--- a/src/CPFEM2.f90
+++ b/src/CPFEM2.f90
@@ -5,6 +5,7 @@
 !--------------------------------------------------------------------------------------------------
 module CPFEM2
   use prec
+  use parallelization
   use config
   use math
   use rotations
@@ -15,9 +16,10 @@ module CPFEM2
   use IO
   use base64
   use DAMASK_interface
-  use results
   use discretization
+  use HDF5
   use HDF5_utilities
+  use results
   use homogenization
   use phase
 #if defined(MESH)
diff --git a/src/DAMASK_interface.f90 b/src/DAMASK_interface.f90
index 0c3bdca80..52f58d363 100644
--- a/src/DAMASK_interface.f90
+++ b/src/DAMASK_interface.f90
@@ -88,7 +88,7 @@ subroutine DAMASK_interface_init
   print*, ' _/_/_/ _/ _/ _/ _/ _/ _/ _/_/_/ _/ _/ _/_/_/'
 #if defined(GRID)
   print*, ' Grid solver'
-#elif defined(Mesh)
+#elif defined(MESH)
   print*, ' Mesh solver'
 #endif
 #ifdef DEBUG
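Note: the hunks above are all one fix. The C preprocessor is case-sensitive, so the old combination of -DPETSc on the compile line with #ifdef PETSC guards in the sources (and Mesh vs. MESH) can silently compile out entire code paths. Standardizing on upper-case PETSC and MESH makes define and guard agree. A minimal sketch of the failure mode (not DAMASK code), assuming compilation with -cpp -DPETSC:

! build: gfortran -cpp -DPETSC macro_case.f90
program macro_case
  implicit none
#ifdef PETSc
  print *, 'never printed: PETSc and PETSC are different macros'
#endif
#ifdef PETSC
  print *, 'PETSC is defined'
#endif
end program macro_case
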
diff --git a/src/HDF5_utilities.f90 b/src/HDF5_utilities.f90
index 8c72fbbfb..35d499e17 100644
--- a/src/HDF5_utilities.f90
+++ b/src/HDF5_utilities.f90
@@ -6,15 +6,18 @@
 !--------------------------------------------------------------------------------------------------
 module HDF5_utilities
   use HDF5
-#ifdef PETSc
-  use PETSC
+#ifdef PETSC
+  use PETSc
+#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
+  use MPI
+#endif
 #endif
   use prec
   use parallelization

   implicit none
-  public
+  private

 !--------------------------------------------------------------------------------------------------
 !> @brief reads integer or float data of defined shape from file
@@ -71,11 +74,23 @@ module HDF5_utilities
     module procedure HDF5_addAttribute_real_array
   end interface HDF5_addAttribute

-#ifdef PETSc
+#ifdef PETSC
   logical, parameter, private :: parallel_default = .true.
 #else
   logical, parameter, private :: parallel_default = .false.
 #endif
+  public :: &
+    HDF5_utilities_init, &
+    HDF5_read, &
+    HDF5_write, &
+    HDF5_addAttribute, &
+    HDF5_addGroup, &
+    HDF5_openGroup, &
+    HDF5_closeGroup, &
+    HDF5_openFile, &
+    HDF5_closeFile, &
+    HDF5_objectExists, &
+    HDF5_setLink

 contains
@@ -130,7 +145,7 @@ integer(HID_T) function HDF5_openFile(fileName,mode)
   call h5pcreate_f(H5P_FILE_ACCESS_F, plist_id, hdferr)
   if(hdferr < 0) error stop 'HDF5 error'
-#ifdef PETSc
+#ifdef PETSC
   call h5pset_fapl_mpio_f(plist_id, PETSC_COMM_WORLD, MPI_INFO_NULL, hdferr)
   if(hdferr < 0) error stop 'HDF5 error'
 #endif
@@ -187,7 +202,7 @@ integer(HID_T) function HDF5_addGroup(fileHandle,groupName)
 !-------------------------------------------------------------------------------------------------
 ! setting I/O mode to collective
-#ifdef PETSc
+#ifdef PETSC
   call h5pset_all_coll_metadata_ops_f(aplist_id, .true., hdferr)
   if(hdferr < 0) error stop 'HDF5 error'
 #endif
@@ -223,7 +238,7 @@ integer(HID_T) function HDF5_openGroup(fileHandle,groupName)
 !-------------------------------------------------------------------------------------------------
 ! setting I/O mode to collective
-#ifdef PETSc
+#ifdef PETSC
   call h5pget_all_coll_metadata_ops_f(aplist_id, is_collective, hdferr)
   if(hdferr < 0) error stop 'HDF5 error'
 #endif
@@ -1692,7 +1707,7 @@ subroutine initialize_read(dset_id, filespace_id, memspace_id, plist_id, aplist_
 !--------------------------------------------------------------------------------------------------
   readSize = 0
   readSize(worldrank+1) = int(localShape(ubound(localShape,1)))
-#ifdef PETSc
+#ifdef PETSC
   if (parallel) then
     call h5pset_dxpl_mpio_f(plist_id, H5FD_MPIO_COLLECTIVE_F, hdferr)
     if(hdferr < 0) error stop 'HDF5 error'
@@ -1713,7 +1728,7 @@ subroutine initialize_read(dset_id, filespace_id, memspace_id, plist_id, aplist_
 ! creating a property list for IO and set it to collective
   call h5pcreate_f(H5P_DATASET_ACCESS_F, aplist_id, hdferr)
   if(hdferr < 0) error stop 'HDF5 error'
-#ifdef PETSc
+#ifdef PETSC
   call h5pset_all_coll_metadata_ops_f(aplist_id, .true., hdferr)
   if(hdferr < 0) error stop 'HDF5 error'
 #endif
@@ -1782,7 +1797,7 @@ subroutine initialize_write(dset_id, filespace_id, memspace_id, plist_id, &
 ! creating a property list for transfer properties (is collective when reading in parallel)
   call h5pcreate_f(H5P_DATASET_XFER_F, plist_id, hdferr)
   if(hdferr < 0) error stop 'HDF5 error'
-#ifdef PETSc
+#ifdef PETSC
   if (parallel) then
     call h5pset_dxpl_mpio_f(plist_id, H5FD_MPIO_COLLECTIVE_F, hdferr)
     if(hdferr < 0) error stop 'HDF5 error'
@@ -1793,7 +1808,7 @@ subroutine initialize_write(dset_id, filespace_id, memspace_id, plist_id, &
 ! determine the global data layout among all processes
   writeSize = 0
   writeSize(worldrank+1) = int(myShape(ubound(myShape,1)))
-#ifdef PETSc
+#ifdef PETSC
   if (parallel) then
     call MPI_allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INT,MPI_SUM,PETSC_COMM_WORLD,ierr)     ! get total output size over each process
     if (ierr /= 0) error stop 'MPI error'
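Note: PETSC_HAVE_MPI_F90MODULE_VISIBILITY is a PETSc configure-time macro; when it is absent, PETSc's Fortran modules do not re-export the MPI module's symbols, so every compilation unit that calls MPI directly now pulls in an MPI module itself. HDF5_utilities uses the old-style MPI module (its handles, such as MPI_INFO_NULL above, are plain integers, matching the HDF5 Fortran API), while the solver modules further down use MPI_f08. The recurring pattern, condensed into a sketch:

module needs_mpi
#ifdef PETSC
  use PETSc
#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
  use MPI                          ! otherwise e.g. MPI_INFO_NULL is not in scope
#endif
#endif
  implicit none
end module needs_mpi
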
diff --git a/src/grid/DAMASK_grid.f90 b/src/grid/DAMASK_grid.f90
index d1b90c42b..437fcf07e 100644
--- a/src/grid/DAMASK_grid.f90
+++ b/src/grid/DAMASK_grid.f90
@@ -8,7 +8,11 @@
 program DAMASK_grid
 #include
-  use PETScsys
+  use PETScSys
+#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
+  use MPI_f08
+#endif
+
   use prec
   use parallelization
   use DAMASK_interface
@@ -432,7 +436,7 @@ program DAMASK_grid
             print'(/,a,i0,a)', ' increment ', totalIncsCounter, ' NOT converged'
           endif; flush(IO_STDOUT)

-          call MPI_Allreduce(interface_SIGUSR1,signal,1,MPI_LOGICAL,MPI_LOR,PETSC_COMM_WORLD,ierr)
+          call MPI_Allreduce(interface_SIGUSR1,signal,1,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,ierr)
           if (ierr /= 0) error stop 'MPI error'
           if (mod(inc,loadCases(l)%f_out) == 0 .or. signal) then
             print'(1/,a)', ' ... writing results to file ......................................'
@@ -440,14 +444,14 @@ program DAMASK_grid
             call CPFEM_results(totalIncsCounter,time)
           endif
           if(signal) call interface_setSIGUSR1(.false.)
-          call MPI_Allreduce(interface_SIGUSR2,signal,1,MPI_LOGICAL,MPI_LOR,PETSC_COMM_WORLD,ierr)
+          call MPI_Allreduce(interface_SIGUSR2,signal,1,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,ierr)
           if (ierr /= 0) error stop 'MPI error'
           if (mod(inc,loadCases(l)%f_restart) == 0 .or. signal) then
             call mechanical_restartWrite
             call CPFEM_restartWrite
           endif
           if(signal) call interface_setSIGUSR2(.false.)
-          call MPI_Allreduce(interface_SIGTERM,signal,1,MPI_LOGICAL,MPI_LOR,PETSC_COMM_WORLD,ierr)
+          call MPI_Allreduce(interface_SIGTERM,signal,1,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,ierr)
           if (ierr /= 0) error stop 'MPI error'
           if (signal) exit loadCaseLooping
         endif skipping
diff --git a/src/grid/discretization_grid.f90 b/src/grid/discretization_grid.f90
index 312c79aa7..d53014bfb 100644
--- a/src/grid/discretization_grid.f90
+++ b/src/grid/discretization_grid.f90
@@ -6,7 +6,10 @@
 module discretization_grid
 #include
-  use PETScsys
+  use PETScSys
+#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
+  use MPI_f08
+#endif

   use prec
   use parallelization
@@ -75,12 +78,12 @@ subroutine discretization_grid_init(restart)
   endif

-  call MPI_Bcast(grid,3,MPI_INTEGER,0,PETSC_COMM_WORLD, ierr)
+  call MPI_Bcast(grid,3,MPI_INTEGER,0,MPI_COMM_WORLD, ierr)
   if (ierr /= 0) error stop 'MPI error'
   if (grid(1) < 2) call IO_error(844, ext_msg='cells(1) must be larger than 1')
-  call MPI_Bcast(geomSize,3,MPI_DOUBLE,0,PETSC_COMM_WORLD, ierr)
+  call MPI_Bcast(geomSize,3,MPI_DOUBLE,0,MPI_COMM_WORLD, ierr)
   if (ierr /= 0) error stop 'MPI error'
-  call MPI_Bcast(origin,3,MPI_DOUBLE,0,PETSC_COMM_WORLD, ierr)
+  call MPI_Bcast(origin,3,MPI_DOUBLE,0,MPI_COMM_WORLD, ierr)
   if (ierr /= 0) error stop 'MPI error'

   print'(/,a,3(i12 ))', ' cells    a b c: ', grid
@@ -105,13 +108,13 @@ subroutine discretization_grid_init(restart)
   myGrid = [grid(1:2),grid3]
   mySize = [geomSize(1:2),size3]

-  call MPI_Gather(product(grid(1:2))*grid3Offset,1,MPI_INTEGER,displs,    1,MPI_INTEGER,0,PETSC_COMM_WORLD,ierr)
+  call MPI_Gather(product(grid(1:2))*grid3Offset,1,MPI_INTEGER,displs,    1,MPI_INTEGER,0,MPI_COMM_WORLD,ierr)
   if (ierr /= 0) error stop 'MPI error'
-  call MPI_Gather(product(myGrid),               1,MPI_INTEGER,sendcounts,1,MPI_INTEGER,0,PETSC_COMM_WORLD,ierr)
+  call MPI_Gather(product(myGrid),               1,MPI_INTEGER,sendcounts,1,MPI_INTEGER,0,MPI_COMM_WORLD,ierr)
   if (ierr /= 0) error stop 'MPI error'

   allocate(materialAt(product(myGrid)))
-  call MPI_scatterv(materialAt_global,sendcounts,displs,MPI_INTEGER,materialAt,size(materialAt),MPI_INTEGER,0,PETSC_COMM_WORLD,ierr)
+  call MPI_Scatterv(materialAt_global,sendcounts,displs,MPI_INTEGER,materialAt,size(materialAt),MPI_INTEGER,0,MPI_COMM_WORLD,ierr)
   if (ierr /= 0) error stop 'MPI error'

   call discretization_init(materialAt, &
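Note: the wholesale PETSC_COMM_WORLD to MPI_COMM_WORLD swap in the MPI calls keeps the communicator argument type-consistent with the MPI_f08 bindings, where communicators are the derived type MPI_Comm and MPI_COMM_WORLD carries exactly that type. The two refer to the same process group as long as the application never resets PETSC_COMM_WORLD before PetscInitialize (PETSc defaults it to MPI_COMM_WORLD); PETSc API calls such as DMDACreate3d below correctly keep receiving PETSC_COMM_WORLD. A sketch of the broadcast pattern under MPI_f08, with hypothetical names:

subroutine bcast_cells(cells)
  use MPI_f08
  integer, dimension(3), intent(inout) :: cells
  integer :: err

  call MPI_Bcast(cells,3,MPI_INTEGER,0,MPI_COMM_WORLD,err)   ! MPI_COMM_WORLD is type(MPI_Comm)
  if (err /= 0) error stop 'MPI error'
end subroutine bcast_cells
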
diff --git a/src/grid/grid_damage_spectral.f90 b/src/grid/grid_damage_spectral.f90
index 7851f4f50..36bbde1d8 100644
--- a/src/grid/grid_damage_spectral.f90
+++ b/src/grid/grid_damage_spectral.f90
@@ -7,8 +7,11 @@
 module grid_damage_spectral
 #include
 #include
-  use PETScdmda
-  use PETScsnes
+  use PETScDMDA
+  use PETScSNES
+#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
+  use MPI_f08
+#endif

   use prec
   use parallelization
@@ -107,7 +110,7 @@ subroutine grid_damage_spectral_init()
   call SNESSetOptionsPrefix(damage_snes,'damage_',ierr);CHKERRQ(ierr)
   localK = 0
   localK(worldrank) = grid3
-  call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,PETSC_COMM_WORLD,ierr)
+  call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,ierr)
   call DMDACreate3D(PETSC_COMM_WORLD, &
          DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, &                                   ! cut off stencil at boundary
          DMDA_STENCIL_BOX, &                                                                       ! Moore (26) neighborhood around central point
@@ -187,9 +190,9 @@ function grid_damage_spectral_solution(timeinc) result(solution)
   endif
   stagNorm = maxval(abs(phi_current - phi_stagInc))
   solnNorm = maxval(abs(phi_current))
-  call MPI_Allreduce(MPI_IN_PLACE,stagNorm,1,MPI_DOUBLE,MPI_MAX,PETSC_COMM_WORLD,ierr)
+  call MPI_Allreduce(MPI_IN_PLACE,stagNorm,1,MPI_DOUBLE,MPI_MAX,MPI_COMM_WORLD,ierr)
   solution%stagConverged = stagNorm < max(num%eps_damage_atol, num%eps_damage_rtol*solnNorm)
-  call MPI_Allreduce(MPI_IN_PLACE,solution%stagConverged,1,MPI_LOGICAL,MPI_LAND,PETSC_COMM_WORLD,ierr)
+  call MPI_Allreduce(MPI_IN_PLACE,solution%stagConverged,1,MPI_LOGICAL,MPI_LAND,MPI_COMM_WORLD,ierr)
   phi_stagInc = phi_current

 !--------------------------------------------------------------------------------------------------
@@ -320,9 +323,9 @@ subroutine updateReference()
   enddo

   K_ref = K_ref*wgt
-  call MPI_Allreduce(MPI_IN_PLACE,K_ref,9,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
+  call MPI_Allreduce(MPI_IN_PLACE,K_ref,9,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr)
   mu_ref = mu_ref*wgt
-  call MPI_Allreduce(MPI_IN_PLACE,mu_ref,1,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
+  call MPI_Allreduce(MPI_IN_PLACE,mu_ref,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr)

 end subroutine updateReference
diff --git a/src/grid/grid_mech_FEM.f90 b/src/grid/grid_mech_FEM.f90
index f603407ce..00e03c457 100644
--- a/src/grid/grid_mech_FEM.f90
+++ b/src/grid/grid_mech_FEM.f90
@@ -7,13 +7,17 @@
 module grid_mechanical_FEM
 #include
 #include
-  use PETScdmda
-  use PETScsnes
+  use PETScDMDA
+  use PETScSNES
+#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
+  use MPI_f08
+#endif

   use prec
   use parallelization
   use DAMASK_interface
   use IO
+  use HDF5
   use HDF5_utilities
   use math
   use rotations
@@ -162,7 +166,7 @@ subroutine grid_mechanical_FEM_init
   CHKERRQ(ierr)
   localK = 0
   localK(worldrank) = grid3
-  call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,PETSC_COMM_WORLD,ierr)
+  call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,ierr)
   call DMDACreate3d(PETSC_COMM_WORLD, &
          DM_BOUNDARY_PERIODIC, DM_BOUNDARY_PERIODIC, DM_BOUNDARY_PERIODIC, &
          DMDA_STENCIL_BOX, &
@@ -236,16 +240,16 @@ subroutine grid_mechanical_FEM_init
     groupHandle = HDF5_openGroup(fileHandle,'solver')

     call HDF5_read(P_aim,groupHandle,'P_aim',.false.)
-    call MPI_Bcast(P_aim,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+    call MPI_Bcast(P_aim,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
     if(ierr /=0) error stop 'MPI error'
     call HDF5_read(F_aim,groupHandle,'F_aim',.false.)
-    call MPI_Bcast(F_aim,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+    call MPI_Bcast(F_aim,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
     if(ierr /=0) error stop 'MPI error'
     call HDF5_read(F_aim_lastInc,groupHandle,'F_aim_lastInc',.false.)
-    call MPI_Bcast(F_aim_lastInc,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+    call MPI_Bcast(F_aim_lastInc,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
     if(ierr /=0) error stop 'MPI error'
     call HDF5_read(F_aimDot,groupHandle,'F_aimDot',.false.)
-    call MPI_Bcast(F_aimDot,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+    call MPI_Bcast(F_aimDot,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
     if(ierr /=0) error stop 'MPI error'
     call HDF5_read(F,groupHandle,'F')
     call HDF5_read(F_lastInc,groupHandle,'F_lastInc')
@@ -270,10 +274,10 @@ subroutine grid_mechanical_FEM_init
   restartRead2: if (interface_restartInc > 0) then
     print'(a,i0,a)', ' reading more restart data of increment ', interface_restartInc, ' from file'
     call HDF5_read(C_volAvg,groupHandle,'C_volAvg',.false.)
-    call MPI_Bcast(C_volAvg,81,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+    call MPI_Bcast(C_volAvg,81,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
     if(ierr /=0) error stop 'MPI error'
     call HDF5_read(C_volAvgLastInc,groupHandle,'C_volAvgLastInc',.false.)
-    call MPI_Bcast(C_volAvgLastInc,81,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+    call MPI_Bcast(C_volAvgLastInc,81,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
     if(ierr /=0) error stop 'MPI error'

     call HDF5_closeGroup(groupHandle)
@@ -567,7 +571,7 @@ subroutine formResidual(da_local,x_local, &
   call utilities_constitutiveResponse(P_current,&
                                       P_av,C_volAvg,devNull, &
                                       F,params%timeinc,params%rotation_BC)
-  call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,PETSC_COMM_WORLD,ierr)
+  call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,ierr)

 !--------------------------------------------------------------------------------------------------
 ! stress BC handling
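Note: the two spectral mechanics modules below also touch MPI-IO. Under MPI_f08 a file handle is the opaque derived type MPI_File rather than a default integer, hence the guarded declaration of fileUnit in the hunks that follow. The read path for the reference stiffness then looks like this sketch (trimmed from the hunks below, error handling omitted):

#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
  type(MPI_File) :: fileUnit                    ! MPI_f08: opaque handle type
#else
  integer :: fileUnit                           ! legacy module: integer handle
#endif

  call MPI_File_open(MPI_COMM_WORLD,trim(getSolverJobName())//'.C_ref', &
                     MPI_MODE_RDONLY,MPI_INFO_NULL,fileUnit,ierr)
  call MPI_File_read(fileUnit,C_minMaxAvg,81,MPI_DOUBLE,MPI_STATUS_IGNORE,ierr)    ! 81 = 3**4 components
  call MPI_File_close(fileUnit,ierr)
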
diff --git a/src/grid/grid_mech_spectral_basic.f90 b/src/grid/grid_mech_spectral_basic.f90
index aee4b4427..7f48768c4 100644
--- a/src/grid/grid_mech_spectral_basic.f90
+++ b/src/grid/grid_mech_spectral_basic.f90
@@ -7,13 +7,17 @@
 module grid_mechanical_spectral_basic
 #include
 #include
-  use PETScdmda
-  use PETScsnes
+  use PETScDMDA
+  use PETScSNES
+#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
+  use MPI_f08
+#endif

   use prec
   use parallelization
   use DAMASK_interface
   use IO
+  use HDF5
   use HDF5_utilities
   use math
   use rotations
@@ -98,7 +102,11 @@ subroutine grid_mechanical_spectral_basic_init
     F                                                                                              ! pointer to solution data
   PetscInt, dimension(0:worldsize-1) :: localK
   integer(HID_T) :: fileHandle, groupHandle
-  integer :: fileUnit
+#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
+  type(MPI_File) :: fileUnit
+#else
+  integer :: fileUnit
+#endif
   class (tNode), pointer :: &
     num_grid, &
     debug_grid
@@ -153,7 +161,7 @@ subroutine grid_mechanical_spectral_basic_init
   call SNESSetOptionsPrefix(snes,'mechanical_',ierr);CHKERRQ(ierr)
   localK = 0
   localK(worldrank) = grid3
-  call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,PETSC_COMM_WORLD,ierr)
+  call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,ierr)
   call DMDACreate3d(PETSC_COMM_WORLD, &
          DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, &                                   ! cut off stencil at boundary
          DMDA_STENCIL_BOX, &                                                                       ! Moore (26) neighborhood around central point
@@ -184,16 +192,16 @@ subroutine grid_mechanical_spectral_basic_init
     groupHandle = HDF5_openGroup(fileHandle,'solver')

     call HDF5_read(P_aim,groupHandle,'P_aim',.false.)
-    call MPI_Bcast(P_aim,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+    call MPI_Bcast(P_aim,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
     if(ierr /=0) error stop 'MPI error'
     call HDF5_read(F_aim,groupHandle,'F_aim',.false.)
-    call MPI_Bcast(F_aim,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+    call MPI_Bcast(F_aim,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
     if(ierr /=0) error stop 'MPI error'
     call HDF5_read(F_aim_lastInc,groupHandle,'F_aim_lastInc',.false.)
-    call MPI_Bcast(F_aim_lastInc,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+    call MPI_Bcast(F_aim_lastInc,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
     if(ierr /=0) error stop 'MPI error'
     call HDF5_read(F_aimDot,groupHandle,'F_aimDot',.false.)
-    call MPI_Bcast(F_aimDot,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+    call MPI_Bcast(F_aimDot,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
     if(ierr /=0) error stop 'MPI error'
     call HDF5_read(F,groupHandle,'F')
     call HDF5_read(F_lastInc,groupHandle,'F_lastInc')
@@ -213,16 +221,16 @@ subroutine grid_mechanical_spectral_basic_init
   restartRead2: if (interface_restartInc > 0) then
     print'(a,i0,a)', ' reading more restart data of increment ', interface_restartInc, ' from file'
     call HDF5_read(C_volAvg,groupHandle,'C_volAvg',.false.)
-    call MPI_Bcast(C_volAvg,81,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+    call MPI_Bcast(C_volAvg,81,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
     if(ierr /=0) error stop 'MPI error'
     call HDF5_read(C_volAvgLastInc,groupHandle,'C_volAvgLastInc',.false.)
-    call MPI_Bcast(C_volAvgLastInc,81,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+    call MPI_Bcast(C_volAvgLastInc,81,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
     if(ierr /=0) error stop 'MPI error'

     call HDF5_closeGroup(groupHandle)
     call HDF5_closeFile(fileHandle)

-    call MPI_File_open(PETSC_COMM_WORLD, trim(getSolverJobName())//'.C_ref', &
+    call MPI_File_open(MPI_COMM_WORLD, trim(getSolverJobName())//'.C_ref', &
                        MPI_MODE_RDONLY,MPI_INFO_NULL,fileUnit,ierr)
     call MPI_File_read(fileUnit,C_minMaxAvg,81,MPI_DOUBLE,MPI_STATUS_IGNORE,ierr)
     call MPI_File_close(fileUnit,ierr)
@@ -487,7 +495,7 @@ subroutine formResidual(in, F, &
   call utilities_constitutiveResponse(residuum, &                                                  ! "residuum" gets field of first PK stress (to save memory)
                                       P_av,C_volAvg,C_minMaxAvg, &
                                       F,params%timeinc,params%rotation_BC)
-  call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,PETSC_COMM_WORLD,ierr)
+  call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,ierr)

 !--------------------------------------------------------------------------------------------------
 ! stress BC handling
diff --git a/src/grid/grid_mech_spectral_polarisation.f90 b/src/grid/grid_mech_spectral_polarisation.f90
index 218e42678..3648a4454 100644
--- a/src/grid/grid_mech_spectral_polarisation.f90
+++ b/src/grid/grid_mech_spectral_polarisation.f90
@@ -7,13 +7,17 @@
 module grid_mechanical_spectral_polarisation
 #include
 #include
-  use PETScdmda
-  use PETScsnes
+  use PETScDMDA
+  use PETScSNES
+#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
+  use MPI_f08
+#endif

   use prec
   use parallelization
   use DAMASK_interface
   use IO
+  use HDF5
   use HDF5_utilities
   use math
   use rotations
@@ -111,7 +115,11 @@ subroutine grid_mechanical_spectral_polarisation_init
     F_tau                                                                                          ! specific (sub)pointer
   PetscInt, dimension(0:worldsize-1) :: localK
   integer(HID_T) :: fileHandle, groupHandle
-  integer :: fileUnit
+#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
+  type(MPI_File) :: fileUnit
+#else
+  integer :: fileUnit
+#endif
   class (tNode), pointer :: &
     num_grid, &
     debug_grid
@@ -173,7 +181,7 @@ subroutine grid_mechanical_spectral_polarisation_init
   call SNESSetOptionsPrefix(snes,'mechanical_',ierr);CHKERRQ(ierr)
   localK = 0
   localK(worldrank) = grid3
-  call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,PETSC_COMM_WORLD,ierr)
+  call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,ierr)
   call DMDACreate3d(PETSC_COMM_WORLD, &
          DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, &                                   ! cut off stencil at boundary
          DMDA_STENCIL_BOX, &                                                                       ! Moore (26) neighborhood around central point
@@ -206,16 +214,16 @@ subroutine grid_mechanical_spectral_polarisation_init
     groupHandle = HDF5_openGroup(fileHandle,'solver')

     call HDF5_read(P_aim,groupHandle,'P_aim',.false.)
-    call MPI_Bcast(P_aim,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+    call MPI_Bcast(P_aim,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
     if(ierr /=0) error stop 'MPI error'
     call HDF5_read(F_aim,groupHandle,'F_aim',.false.)
-    call MPI_Bcast(F_aim,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+    call MPI_Bcast(F_aim,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
     if(ierr /=0) error stop 'MPI error'
     call HDF5_read(F_aim_lastInc,groupHandle,'F_aim_lastInc',.false.)
-    call MPI_Bcast(F_aim_lastInc,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+    call MPI_Bcast(F_aim_lastInc,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
     if(ierr /=0) error stop 'MPI error'
     call HDF5_read(F_aimDot,groupHandle,'F_aimDot',.false.)
-    call MPI_Bcast(F_aimDot,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+    call MPI_Bcast(F_aimDot,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
     if(ierr /=0) error stop 'MPI error'
     call HDF5_read(F,groupHandle,'F')
     call HDF5_read(F_lastInc,groupHandle,'F_lastInc')
@@ -239,16 +247,16 @@ subroutine grid_mechanical_spectral_polarisation_init
   restartRead2: if (interface_restartInc > 0) then
     print'(a,i0,a)', ' reading more restart data of increment ', interface_restartInc, ' from file'
     call HDF5_read(C_volAvg,groupHandle,'C_volAvg',.false.)
-    call MPI_Bcast(C_volAvg,81,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+    call MPI_Bcast(C_volAvg,81,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
     if(ierr /=0) error stop 'MPI error'
     call HDF5_read(C_volAvgLastInc,groupHandle,'C_volAvgLastInc',.false.)
-    call MPI_Bcast(C_volAvgLastInc,81,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+    call MPI_Bcast(C_volAvgLastInc,81,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
     if(ierr /=0) error stop 'MPI error'

     call HDF5_closeGroup(groupHandle)
     call HDF5_closeFile(fileHandle)

-    call MPI_File_open(PETSC_COMM_WORLD, trim(getSolverJobName())//'.C_ref', &
+    call MPI_File_open(MPI_COMM_WORLD, trim(getSolverJobName())//'.C_ref', &
                        MPI_MODE_RDONLY,MPI_INFO_NULL,fileUnit,ierr)
     call MPI_File_read(fileUnit,C_minMaxAvg,81,MPI_DOUBLE,MPI_STATUS_IGNORE,ierr)
     call MPI_File_close(fileUnit,ierr)
@@ -543,7 +551,7 @@ subroutine formResidual(in, FandF_tau, &
                  X_RANGE, Y_RANGE, Z_RANGE)
   F_av = sum(sum(sum(F,dim=5),dim=4),dim=3) * wgt
-  call MPI_Allreduce(MPI_IN_PLACE,F_av,9,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
+  call MPI_Allreduce(MPI_IN_PLACE,F_av,9,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr)

   call SNESGetNumberFunctionEvals(snes,nfuncs,ierr); CHKERRQ(ierr)
   call SNESGetIterationNumber(snes,PETScIter,ierr); CHKERRQ(ierr)
@@ -587,7 +595,7 @@ subroutine formResidual(in, FandF_tau, &
   call utilities_constitutiveResponse(residual_F, &                                                ! "residuum" gets field of first PK stress (to save memory)
                                       P_av,C_volAvg,C_minMaxAvg, &
                                       F - residual_F_tau/num%beta,params%timeinc,params%rotation_BC)
-  call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,PETSC_COMM_WORLD,ierr)
+  call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,ierr)

 !--------------------------------------------------------------------------------------------------
 ! stress BC handling
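Note: the damage solver above and the thermal solver below share the staggered-loop bookkeeping, and the reductions there repay a close look: only stagNorm is maximized across ranks, while solnNorm stays rank-local, so the convergence verdict can differ from rank to rank and must be combined with a logical AND before any rank leaves the staggered loop. Condensed sketch (generic field name T, tolerances atol/rtol):

  stagNorm = maxval(abs(T_current - T_stagInc))
  solnNorm = maxval(abs(T_current))                                                ! stays rank-local
  call MPI_Allreduce(MPI_IN_PLACE,stagNorm,1,MPI_DOUBLE,MPI_MAX,MPI_COMM_WORLD,ierr)
  converged = stagNorm < max(atol, rtol*solnNorm)                                  ! may differ per rank
  call MPI_Allreduce(MPI_IN_PLACE,converged,1,MPI_LOGICAL,MPI_LAND,MPI_COMM_WORLD,ierr)
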
diff --git a/src/grid/grid_thermal_spectral.f90 b/src/grid/grid_thermal_spectral.f90
index 6af80825b..4b31d10d0 100644
--- a/src/grid/grid_thermal_spectral.f90
+++ b/src/grid/grid_thermal_spectral.f90
@@ -7,8 +7,11 @@
 module grid_thermal_spectral
 #include
 #include
-  use PETScdmda
-  use PETScsnes
+  use PETScDMDA
+  use PETScSNES
+#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
+  use MPI_f08
+#endif

   use prec
   use parallelization
@@ -102,7 +105,7 @@ subroutine grid_thermal_spectral_init(T_0)
   call SNESSetOptionsPrefix(thermal_snes,'thermal_',ierr);CHKERRQ(ierr)
   localK = 0
   localK(worldrank) = grid3
-  call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,PETSC_COMM_WORLD,ierr)
+  call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,ierr)
   call DMDACreate3D(PETSC_COMM_WORLD, &
          DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, &                                   ! cut off stencil at boundary
          DMDA_STENCIL_BOX, &                                                                       ! Moore (26) neighborhood around central point
@@ -182,9 +185,9 @@ function grid_thermal_spectral_solution(timeinc) result(solution)
   endif
   stagNorm = maxval(abs(T_current - T_stagInc))
   solnNorm = maxval(abs(T_current))
-  call MPI_Allreduce(MPI_IN_PLACE,stagNorm,1,MPI_DOUBLE,MPI_MAX,PETSC_COMM_WORLD,ierr)
+  call MPI_Allreduce(MPI_IN_PLACE,stagNorm,1,MPI_DOUBLE,MPI_MAX,MPI_COMM_WORLD,ierr)
   solution%stagConverged = stagNorm < max(num%eps_thermal_atol, num%eps_thermal_rtol*solnNorm)
-  call MPI_Allreduce(MPI_IN_PLACE,solution%stagConverged,1,MPI_LOGICAL,MPI_LAND,PETSC_COMM_WORLD,ierr)
+  call MPI_Allreduce(MPI_IN_PLACE,solution%stagConverged,1,MPI_LOGICAL,MPI_LAND,MPI_COMM_WORLD,ierr)
   T_stagInc = T_current

 !--------------------------------------------------------------------------------------------------
@@ -310,9 +313,9 @@ subroutine updateReference()
   enddo

   K_ref = K_ref*wgt
-  call MPI_Allreduce(MPI_IN_PLACE,K_ref,9,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
+  call MPI_Allreduce(MPI_IN_PLACE,K_ref,9,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr)
   mu_ref = mu_ref*wgt
-  call MPI_Allreduce(MPI_IN_PLACE,mu_ref,1,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
+  call MPI_Allreduce(MPI_IN_PLACE,mu_ref,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr)

 end subroutine updateReference
diff --git a/src/grid/spectral_utilities.f90 b/src/grid/spectral_utilities.f90
index f512d9b6c..128c1a916 100644
--- a/src/grid/spectral_utilities.f90
+++ b/src/grid/spectral_utilities.f90
@@ -8,6 +8,9 @@
 module spectral_utilities
 #include
   use PETScSys
+#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
+  use MPI_f08
+#endif

   use prec
   use DAMASK_interface
@@ -591,7 +594,7 @@ real(pReal) function utilities_divergenceRMS()
                               conjg(-xi1st(1:3,grid1Red,j,k))*rescaledGeom))**2.0_pReal)
   enddo; enddo
   if(grid(1) == 1) utilities_divergenceRMS = utilities_divergenceRMS * 0.5_pReal                   ! counted twice in case of grid(1) == 1
-  call MPI_Allreduce(MPI_IN_PLACE,utilities_divergenceRMS,1,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
+  call MPI_Allreduce(MPI_IN_PLACE,utilities_divergenceRMS,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr)
   if(ierr /=0) error stop 'MPI error'
   utilities_divergenceRMS = sqrt(utilities_divergenceRMS) * wgt                                    ! RMS in real space calculated with Parsevals theorem from Fourier space
@@ -651,7 +654,7 @@ real(pReal) function utilities_curlRMS()
                    + sum(real(curl_fourier)**2.0_pReal + aimag(curl_fourier)**2.0_pReal)           ! this layer (Nyquist) does not have a conjugate complex counterpart (if grid(1) /= 1)
   enddo; enddo
-  call MPI_Allreduce(MPI_IN_PLACE,utilities_curlRMS,1,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
+  call MPI_Allreduce(MPI_IN_PLACE,utilities_curlRMS,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr)
   if(ierr /=0) error stop 'MPI error'
   utilities_curlRMS = sqrt(utilities_curlRMS) * wgt
   if(grid(1) == 1) utilities_curlRMS = utilities_curlRMS * 0.5_pReal                               ! counted twice in case of grid(1) == 1
@@ -816,7 +819,7 @@ subroutine utilities_constitutiveResponse(P,P_av,C_volAvg,C_minmaxAvg,&
   P = reshape(homogenization_P, [3,3,grid(1),grid(2),grid3])
   P_av = sum(sum(sum(P,dim=5),dim=4),dim=3) * wgt                                                  ! average of P
-  call MPI_Allreduce(MPI_IN_PLACE,P_av,9,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
+  call MPI_Allreduce(MPI_IN_PLACE,P_av,9,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr)
   if (debugRotation) print'(/,a,/,2(3(2x,f12.4,1x)/),3(2x,f12.4,1x))', &
     ' Piola--Kirchhoff stress (lab) / MPa =', transpose(P_av)*1.e-6_pReal
   if(present(rotation_BC)) P_av = rotation_BC%rotate(P_av)
@@ -840,21 +843,21 @@ subroutine utilities_constitutiveResponse(P,P_av,C_volAvg,C_minmaxAvg,&
   end do

   valueAndRank = [dPdF_norm_max,real(worldrank,pReal)]
-  call MPI_Allreduce(MPI_IN_PLACE,valueAndRank,1, MPI_2DOUBLE_PRECISION, MPI_MAXLOC, PETSC_COMM_WORLD, ierr)
+  call MPI_Allreduce(MPI_IN_PLACE,valueAndRank,1, MPI_2DOUBLE_PRECISION, MPI_MAXLOC, MPI_COMM_WORLD, ierr)
   if (ierr /= 0) error stop 'MPI error'
-  call MPI_Bcast(dPdF_max,81,MPI_DOUBLE,int(valueAndRank(2)),PETSC_COMM_WORLD, ierr)
+  call MPI_Bcast(dPdF_max,81,MPI_DOUBLE,int(valueAndRank(2)),MPI_COMM_WORLD, ierr)
   if (ierr /= 0) error stop 'MPI error'

   valueAndRank = [dPdF_norm_min,real(worldrank,pReal)]
-  call MPI_Allreduce(MPI_IN_PLACE,valueAndRank,1, MPI_2DOUBLE_PRECISION, MPI_MINLOC, PETSC_COMM_WORLD, ierr)
+  call MPI_Allreduce(MPI_IN_PLACE,valueAndRank,1, MPI_2DOUBLE_PRECISION, MPI_MINLOC, MPI_COMM_WORLD, ierr)
   if (ierr /= 0) error stop 'MPI error'
-  call MPI_Bcast(dPdF_min,81,MPI_DOUBLE,int(valueAndRank(2)),PETSC_COMM_WORLD, ierr)
+  call MPI_Bcast(dPdF_min,81,MPI_DOUBLE,int(valueAndRank(2)),MPI_COMM_WORLD, ierr)
   if (ierr /= 0) error stop 'MPI error'

   C_minmaxAvg = 0.5_pReal*(dPdF_max + dPdF_min)

   C_volAvg = sum(homogenization_dPdF,dim=5)
-  call MPI_Allreduce(MPI_IN_PLACE,C_volAvg,81,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
+  call MPI_Allreduce(MPI_IN_PLACE,C_volAvg,81,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr)
   if (ierr /= 0) error stop 'MPI error'
   C_volAvg = C_volAvg * wgt
@@ -909,7 +912,7 @@ function utilities_forwardField(timeinc,field_lastInc,rate,aim)
   utilities_forwardField = field_lastInc + rate*timeinc
   if (present(aim)) then                                                                           !< correct to match average
     fieldDiff = sum(sum(sum(utilities_forwardField,dim=5),dim=4),dim=3)*wgt
-    call MPI_Allreduce(MPI_IN_PLACE,fieldDiff,9,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
+    call MPI_Allreduce(MPI_IN_PLACE,fieldDiff,9,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr)
     fieldDiff = fieldDiff - aim
     utilities_forwardField = utilities_forwardField - &
                              spread(spread(spread(fieldDiff,3,grid(1)),4,grid(2)),5,grid3)
@@ -982,8 +985,13 @@ subroutine utilities_updateCoords(F)
     rank_t, rank_b, &
     c, &
     ierr
+#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
+  type(MPI_Request), dimension(4) :: request
+  type(MPI_Status),  dimension(4) :: status
+#else
   integer, dimension(4) :: request
   integer, dimension(MPI_STATUS_SIZE,4) :: status
+#endif
   real(pReal), dimension(3)   :: step
   real(pReal), dimension(3,3) :: Favg
   integer,     dimension(3)   :: me
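Note: the valueAndRank pairs in utilities_constitutiveResponse above are the classic rank-of-extremum idiom: an MPI_MAXLOC/MPI_MINLOC reduction on an MPI_2DOUBLE_PRECISION pair reduces the norm and carries along the rank that owns it, and that rank then serves as broadcast root for the full 81-component tangent. Stripped to its skeleton:

  valueAndRank = [dPdF_norm_max, real(worldrank,pReal)]
  call MPI_Allreduce(MPI_IN_PLACE,valueAndRank,1,MPI_2DOUBLE_PRECISION,MPI_MAXLOC,MPI_COMM_WORLD,ierr)
  call MPI_Bcast(dPdF_max,81,MPI_DOUBLE,int(valueAndRank(2)),MPI_COMM_WORLD,ierr)   ! winning rank is root
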
@@ -1018,7 +1026,7 @@ subroutine utilities_updateCoords(F)
 !--------------------------------------------------------------------------------------------------
 ! average F
   if (grid3Offset == 0) Favg = real(tensorField_fourier(1:3,1:3,1,1,1),pReal)*wgt
-  call MPI_Bcast(Favg,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+  call MPI_Bcast(Favg,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
   if(ierr /=0) error stop 'MPI error'

 !--------------------------------------------------------------------------------------------------
@@ -1029,20 +1037,24 @@ subroutine utilities_updateCoords(F)
   rank_b = modulo(worldrank-1,worldsize)

   ! send bottom layer to process below
-  call MPI_Isend(IPfluct_padded(:,:,:,2),      c,MPI_DOUBLE,rank_b,0,PETSC_COMM_WORLD,request(1),ierr)
+  call MPI_Isend(IPfluct_padded(:,:,:,2),      c,MPI_DOUBLE,rank_b,0,MPI_COMM_WORLD,request(1),ierr)
   if(ierr /=0) error stop 'MPI error'
-  call MPI_Irecv(IPfluct_padded(:,:,:,grid3+2),c,MPI_DOUBLE,rank_t,0,PETSC_COMM_WORLD,request(2),ierr)
+  call MPI_Irecv(IPfluct_padded(:,:,:,grid3+2),c,MPI_DOUBLE,rank_t,0,MPI_COMM_WORLD,request(2),ierr)
   if(ierr /=0) error stop 'MPI error'

   ! send top layer to process above
-  call MPI_Isend(IPfluct_padded(:,:,:,grid3+1),c,MPI_DOUBLE,rank_t,1,PETSC_COMM_WORLD,request(3),ierr)
+  call MPI_Isend(IPfluct_padded(:,:,:,grid3+1),c,MPI_DOUBLE,rank_t,1,MPI_COMM_WORLD,request(3),ierr)
   if(ierr /=0) error stop 'MPI error'
-  call MPI_Irecv(IPfluct_padded(:,:,:,1),      c,MPI_DOUBLE,rank_b,1,PETSC_COMM_WORLD,request(4),ierr)
+  call MPI_Irecv(IPfluct_padded(:,:,:,1),      c,MPI_DOUBLE,rank_b,1,MPI_COMM_WORLD,request(4),ierr)
   if(ierr /=0) error stop 'MPI error'

   call MPI_Waitall(4,request,status,ierr)
   if(ierr /=0) error stop 'MPI error'
+#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
+  ! ToDo
+#else
   if(any(status(MPI_ERROR,:) /= 0)) error stop 'MPI error'
+#endif

 !--------------------------------------------------------------------------------------------------
 ! calculate nodal displacements
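Note: the "! ToDo" above is the one knowingly open end of this port: with MPI_f08, status is an array of type(MPI_Status), so the two-dimensional indexing status(MPI_ERROR,:) of the integer representation no longer compiles. One possible completion, an assumption on my part rather than part of the patch, reads the type's MPI_ERROR component instead:

#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
  if (any(status(:)%MPI_ERROR /= 0)) error stop 'MPI error'   ! component of type(MPI_Status)
#else
  if (any(status(MPI_ERROR,:) /= 0)) error stop 'MPI error'
#endif
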
diff --git a/src/homogenization.f90 b/src/homogenization.f90
index 8c9713eef..f34834272 100644
--- a/src/homogenization.f90
+++ b/src/homogenization.f90
@@ -12,6 +12,7 @@ module homogenization
   use material
   use phase
   use discretization
+  use HDF5
   use HDF5_utilities
   use results
   use lattice
diff --git a/src/mesh/FEM_utilities.f90 b/src/mesh/FEM_utilities.f90
index a84e3559f..35336847b 100644
--- a/src/mesh/FEM_utilities.f90
+++ b/src/mesh/FEM_utilities.f90
@@ -7,10 +7,13 @@
 module FEM_utilities
 #include
 #include
-  use PETScdmplex
-  use PETScdmda
-  use PETScis
-
+  use PETScDMplex
+  use PETScDMDA
+  use PETScIS
+#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
+  use MPI_f08
+#endif
+
   use prec
   use config
   use math
@@ -165,7 +168,7 @@ subroutine utilities_constitutiveResponse(timeinc,P_av,forwardData)
   cutBack = .false.                                                                                ! reset cutBack status

   P_av = sum(homogenization_P,dim=3) * wgt
-  call MPI_Allreduce(MPI_IN_PLACE,P_av,9,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
+  call MPI_Allreduce(MPI_IN_PLACE,P_av,9,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr)

 end subroutine utilities_constitutiveResponse
diff --git a/src/mesh/discretization_mesh.f90 b/src/mesh/discretization_mesh.f90
index d4206fcf5..aa0fb6d74 100644
--- a/src/mesh/discretization_mesh.f90
+++ b/src/mesh/discretization_mesh.f90
@@ -8,9 +8,12 @@
 module discretization_mesh
 #include
 #include
 #include
-  use PETScdmplex
-  use PETScdmda
-  use PETScis
+  use PETScDMplex
+  use PETScDMDA
+  use PETScIS
+#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
+  use MPI_f08
+#endif

   use DAMASK_interface
   use parallelization
@@ -111,9 +114,9 @@ subroutine discretization_mesh_init(restart)
 ! get number of IDs in face sets (for boundary conditions?)
   call DMGetLabelSize(globalMesh,'Face Sets',mesh_Nboundaries,ierr)
   CHKERRQ(ierr)
-  call MPI_Bcast(mesh_Nboundaries,1,MPI_INTEGER,0,PETSC_COMM_WORLD,ierr)
-  call MPI_Bcast(mesh_NcpElemsGlobal,1,MPI_INTEGER,0,PETSC_COMM_WORLD,ierr)
-  call MPI_Bcast(dimPlex,1,MPI_INTEGER,0,PETSC_COMM_WORLD,ierr)
+  call MPI_Bcast(mesh_Nboundaries,1,MPI_INTEGER,0,MPI_COMM_WORLD,ierr)
+  call MPI_Bcast(mesh_NcpElemsGlobal,1,MPI_INTEGER,0,MPI_COMM_WORLD,ierr)
+  call MPI_Bcast(dimPlex,1,MPI_INTEGER,0,MPI_COMM_WORLD,ierr)

   if (worldrank == 0) then
     call DMClone(globalMesh,geomMesh,ierr)
@@ -134,7 +137,7 @@ subroutine discretization_mesh_init(restart)
     CHKERRQ(ierr)
     call ISRestoreIndicesF90(faceSetIS,pFaceSets,ierr)
   endif
-  call MPI_Bcast(mesh_boundaries,mesh_Nboundaries,MPI_INTEGER,0,PETSC_COMM_WORLD,ierr)
+  call MPI_Bcast(mesh_boundaries,mesh_Nboundaries,MPI_INTEGER,0,MPI_COMM_WORLD,ierr)

   call DMDestroy(globalMesh,ierr); CHKERRQ(ierr)
diff --git a/src/mesh/mesh_mech_FEM.f90 b/src/mesh/mesh_mech_FEM.f90
index 954070e81..fc17f9085 100644
--- a/src/mesh/mesh_mech_FEM.f90
+++ b/src/mesh/mesh_mech_FEM.f90
@@ -9,10 +9,13 @@
 module mesh_mechanical_FEM
 #include
 #include
-  use PETScsnes
+  use PETScSNES
   use PETScDM
   use PETScDMplex
   use PETScDT
+#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
+  use MPI_f08
+#endif

   use prec
   use FEM_utilities
@@ -396,7 +399,7 @@ subroutine FEM_mechanical_formResidual(dm_local,xx_local,f_local,dummy,ierr)
 !--------------------------------------------------------------------------------------------------
 ! evaluate constitutive response
   call Utilities_constitutiveResponse(params%timeinc,P_av,ForwardData)
-  call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,PETSC_COMM_WORLD,ierr)
+  call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,ierr)
   ForwardData = .false.

 !--------------------------------------------------------------------------------------------------
diff --git a/src/parallelization.f90 b/src/parallelization.f90
index ee8e71006..78a02563f 100644
--- a/src/parallelization.f90
+++ b/src/parallelization.f90
@@ -6,9 +6,12 @@ module parallelization
   use, intrinsic :: ISO_fortran_env, only: &
     OUTPUT_UNIT

-#ifdef PETSc
+#ifdef PETSC
 #include
-  use petscsys
+  use PETScSys
+#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
+  use MPI_f08
+#endif
 !$ use OMP_LIB
 #endif
   use prec
@@ -20,7 +23,7 @@ module parallelization
     worldrank = 0, &                                                                               !< MPI worldrank (/=0 for MPI simulations only)
     worldsize = 1                                                                                  !< MPI worldsize (/=1 for MPI simulations only)

-#ifdef PETSc
+#ifdef PETSC
   public :: &
     parallelization_init

@@ -60,12 +63,12 @@ subroutine parallelization_init
 #endif
   CHKERRQ(petsc_err)

-  call MPI_Comm_rank(PETSC_COMM_WORLD,worldrank,err)
+  call MPI_Comm_rank(MPI_COMM_WORLD,worldrank,err)
   if (err /= 0) error stop 'Could not determine worldrank'

   if (worldrank == 0) print'(/,a)', ' <<<+-  parallelization init  -+>>>'

-  call MPI_Comm_size(PETSC_COMM_WORLD,worldsize,err)
+  call MPI_Comm_size(MPI_COMM_WORLD,worldsize,err)
   if (err /= 0) error stop 'Could not determine worldsize'
   if (worldrank == 0) print'(a,i3)', ' MPI processes: ',worldsize
diff --git a/src/phase.f90 b/src/phase.f90
index da1ff9d14..e396e5661 100644
--- a/src/phase.f90
+++ b/src/phase.f90
@@ -14,6 +14,7 @@ module phase
   use lattice
   use discretization
   use parallelization
+  use HDF5
   use HDF5_utilities

   implicit none
diff --git a/src/quit.f90 b/src/quit.f90
index 26dc23bac..786241d85 100644
--- a/src/quit.f90
+++ b/src/quit.f90
@@ -7,9 +7,6 @@ subroutine quit(stop_id)
 #include
   use PetscSys
-#ifdef _OPENMP
-  use MPI
-#endif
   use HDF5

   implicit none
diff --git a/src/results.f90 b/src/results.f90
index 174023f1a..e74aced05 100644
--- a/src/results.f90
+++ b/src/results.f90
@@ -5,12 +5,17 @@
 !> @author Martin Diehl, Max-Planck-Institut für Eisenforschung GmbH
 !--------------------------------------------------------------------------------------------------
 module results
+  use prec
   use DAMASK_interface
   use parallelization
   use IO
   use HDF5_utilities
-#ifdef PETSc
-  use PETSC
+  use HDF5
+#ifdef PETSC
+  use PETSc
+#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
+  use MPI_f08
+#endif
 #endif

   implicit none
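Note: in the two mapping writers below, each rank initially knows only its own column of the writeSize/entryOffset tables; the MPI_SUM allreduce assembles the full table on every rank, and the partial sum over ranks 0..worldrank-1 then gives each rank its exclusive offset into the global HDF5 dataset. For a single count per rank the same offset could come from an exclusive scan; a sketch of that alternative (an assumption, not what the patch does):

  myOffset = 0                                                 ! MPI_Exscan leaves rank 0 untouched
  call MPI_Exscan(myCount,myOffset,1,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,ierr)
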
@@ -459,7 +464,7 @@ subroutine results_mapping_phase(ID,entry,label)
   call h5pset_dxpl_mpio_f(plist_id, H5FD_MPIO_COLLECTIVE_F, hdferr)
   if(hdferr < 0) error stop 'HDF5 error'

-  call MPI_allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INT,MPI_SUM,PETSC_COMM_WORLD,ierr)       ! get output at each process
+  call MPI_Allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INT,MPI_SUM,MPI_COMM_WORLD,ierr)         ! get output at each process
   if(ierr /= 0) error stop 'MPI error'

   entryOffset = 0
@@ -468,7 +473,7 @@ subroutine results_mapping_phase(ID,entry,label)
       entryOffset(ID(co,ce),worldrank) = entryOffset(ID(co,ce),worldrank) +1
     enddo
   enddo
-  call MPI_allreduce(MPI_IN_PLACE,entryOffset,size(entryOffset),MPI_INT,MPI_SUM,PETSC_COMM_WORLD,ierr) ! get offset at each process
+  call MPI_Allreduce(MPI_IN_PLACE,entryOffset,size(entryOffset),MPI_INT,MPI_SUM,MPI_COMM_WORLD,ierr)   ! get offset at each process
   if(ierr /= 0) error stop 'MPI error'
   entryOffset(:,worldrank) = sum(entryOffset(:,0:worldrank-1),2)
   do co = 1, size(ID,1)
@@ -612,14 +617,14 @@ subroutine results_mapping_homogenization(ID,entry,label)
   call h5pset_dxpl_mpio_f(plist_id, H5FD_MPIO_COLLECTIVE_F, hdferr)
   if(hdferr < 0) error stop 'HDF5 error'

-  call MPI_allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INT,MPI_SUM,PETSC_COMM_WORLD,ierr)       ! get output at each process
+  call MPI_Allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INT,MPI_SUM,MPI_COMM_WORLD,ierr)         ! get output at each process
   if(ierr /= 0) error stop 'MPI error'

   entryOffset = 0
   do ce = 1, size(ID,1)
     entryOffset(ID(ce),worldrank) = entryOffset(ID(ce),worldrank) +1
   enddo
-  call MPI_allreduce(MPI_IN_PLACE,entryOffset,size(entryOffset),MPI_INT,MPI_SUM,PETSC_COMM_WORLD,ierr) ! get offset at each process
+  call MPI_Allreduce(MPI_IN_PLACE,entryOffset,size(entryOffset),MPI_INT,MPI_SUM,MPI_COMM_WORLD,ierr)   ! get offset at each process
   if(ierr /= 0) error stop 'MPI error'
   entryOffset(:,worldrank) = sum(entryOffset(:,0:worldrank-1),2)
   do ce = 1, size(ID,1)