diff --git a/src/HDF5_utilities.f90 b/src/HDF5_utilities.f90
index afcdd0a64..35d499e17 100644
--- a/src/HDF5_utilities.f90
+++ b/src/HDF5_utilities.f90
@@ -8,6 +8,9 @@ module HDF5_utilities
 use HDF5
 #ifdef PETSC
 use PETSc
+#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
+ use MPI
+#endif
 #endif
 use prec
diff --git a/src/grid/DAMASK_grid.f90 b/src/grid/DAMASK_grid.f90
index d1b90c42b..437fcf07e 100644
--- a/src/grid/DAMASK_grid.f90
+++ b/src/grid/DAMASK_grid.f90
@@ -8,7 +8,11 @@
 !--------------------------------------------------------------------------------------------------
 program DAMASK_grid
 #include
- use PETScsys
+ use PETScSys
+#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
+ use MPI_f08
+#endif
+
 use prec
 use parallelization
 use DAMASK_interface
@@ -432,7 +436,7 @@ program DAMASK_grid
 print'(/,a,i0,a)', ' increment ', totalIncsCounter, ' NOT converged'
 endif; flush(IO_STDOUT)
- call MPI_Allreduce(interface_SIGUSR1,signal,1,MPI_LOGICAL,MPI_LOR,PETSC_COMM_WORLD,ierr)
+ call MPI_Allreduce(interface_SIGUSR1,signal,1,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,ierr)
 if (ierr /= 0) error stop 'MPI error'
 if (mod(inc,loadCases(l)%f_out) == 0 .or. signal) then
 print'(1/,a)', ' ... writing results to file ......................................'
@@ -440,14 +444,14 @@ program DAMASK_grid
 call CPFEM_results(totalIncsCounter,time)
 endif
 if(signal) call interface_setSIGUSR1(.false.)
- call MPI_Allreduce(interface_SIGUSR2,signal,1,MPI_LOGICAL,MPI_LOR,PETSC_COMM_WORLD,ierr)
+ call MPI_Allreduce(interface_SIGUSR2,signal,1,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,ierr)
 if (ierr /= 0) error stop 'MPI error'
 if (mod(inc,loadCases(l)%f_restart) == 0 .or. signal) then
 call mechanical_restartWrite
 call CPFEM_restartWrite
 endif
 if(signal) call interface_setSIGUSR2(.false.)
- call MPI_Allreduce(interface_SIGTERM,signal,1,MPI_LOGICAL,MPI_LOR,PETSC_COMM_WORLD,ierr)
+ call MPI_Allreduce(interface_SIGTERM,signal,1,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,ierr)
 if (ierr /= 0) error stop 'MPI error'
 if (signal) exit loadCaseLooping
 endif skipping
diff --git a/src/grid/discretization_grid.f90 b/src/grid/discretization_grid.f90
index 312c79aa7..d53014bfb 100644
--- a/src/grid/discretization_grid.f90
+++ b/src/grid/discretization_grid.f90
@@ -6,7 +6,10 @@
 !--------------------------------------------------------------------------------------------------
 module discretization_grid
 #include
- use PETScsys
+ use PETScSys
+#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
+ use MPI_f08
+#endif
 use prec
 use parallelization
@@ -75,12 +78,12 @@ subroutine discretization_grid_init(restart)
 endif
- call MPI_Bcast(grid,3,MPI_INTEGER,0,PETSC_COMM_WORLD, ierr)
+ call MPI_Bcast(grid,3,MPI_INTEGER,0,MPI_COMM_WORLD, ierr)
 if (ierr /= 0) error stop 'MPI error'
 if (grid(1) < 2) call IO_error(844, ext_msg='cells(1) must be larger than 1')
- call MPI_Bcast(geomSize,3,MPI_DOUBLE,0,PETSC_COMM_WORLD, ierr)
+ call MPI_Bcast(geomSize,3,MPI_DOUBLE,0,MPI_COMM_WORLD, ierr)
 if (ierr /= 0) error stop 'MPI error'
- call MPI_Bcast(origin,3,MPI_DOUBLE,0,PETSC_COMM_WORLD, ierr)
+ call MPI_Bcast(origin,3,MPI_DOUBLE,0,MPI_COMM_WORLD, ierr)
 if (ierr /= 0) error stop 'MPI error'
 print'(/,a,3(i12 ))', ' cells a b c: ', grid
@@ -105,13 +108,13 @@ subroutine discretization_grid_init(restart)
 myGrid = [grid(1:2),grid3]
 mySize = [geomSize(1:2),size3]
- call MPI_Gather(product(grid(1:2))*grid3Offset,1,MPI_INTEGER,displs, 1,MPI_INTEGER,0,PETSC_COMM_WORLD,ierr)
+ call MPI_Gather(product(grid(1:2))*grid3Offset,1,MPI_INTEGER,displs, 1,MPI_INTEGER,0,MPI_COMM_WORLD,ierr)
 if (ierr /= 0) error stop 'MPI error'
- call MPI_Gather(product(myGrid), 1,MPI_INTEGER,sendcounts,1,MPI_INTEGER,0,PETSC_COMM_WORLD,ierr)
+ call MPI_Gather(product(myGrid), 1,MPI_INTEGER,sendcounts,1,MPI_INTEGER,0,MPI_COMM_WORLD,ierr)
 if (ierr /= 0) error stop 'MPI error'
 allocate(materialAt(product(myGrid)))
- call MPI_scatterv(materialAt_global,sendcounts,displs,MPI_INTEGER,materialAt,size(materialAt),MPI_INTEGER,0,PETSC_COMM_WORLD,ierr)
+ call MPI_Scatterv(materialAt_global,sendcounts,displs,MPI_INTEGER,materialAt,size(materialAt),MPI_INTEGER,0,MPI_COMM_WORLD,ierr)
 if (ierr /= 0) error stop 'MPI error'
 call discretization_init(materialAt, &
diff --git a/src/grid/grid_damage_spectral.f90 b/src/grid/grid_damage_spectral.f90
index 26e0a909e..79bb077ed 100644
--- a/src/grid/grid_damage_spectral.f90
+++ b/src/grid/grid_damage_spectral.f90
@@ -7,8 +7,11 @@ module grid_damage_spectral
 #include
 #include
- use PETScdmda
- use PETScsnes
+ use PETScDMDA
+ use PETScSNES
+#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
+ use MPI_f08
+#endif
 use prec
 use parallelization
@@ -107,7 +110,7 @@ subroutine grid_damage_spectral_init()
 call SNESSetOptionsPrefix(damage_snes,'damage_',ierr);CHKERRQ(ierr)
 localK = 0
 localK(worldrank) = grid3
- call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,PETSC_COMM_WORLD,ierr)
+ call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,ierr)
 call DMDACreate3D(PETSC_COMM_WORLD, &
 DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, & ! cut off stencil at boundary
 DMDA_STENCIL_BOX, & ! Moore (26) neighborhood around central point
@@ -187,8 +190,8 @@ function grid_damage_spectral_solution(timeinc) result(solution)
 endif
 stagNorm = maxval(abs(phi_current - phi_stagInc))
 solnNorm = maxval(abs(phi_current))
- call MPI_Allreduce(MPI_IN_PLACE,stagNorm,1,MPI_DOUBLE,MPI_MAX,PETSC_COMM_WORLD,ierr)
- call MPI_Allreduce(MPI_IN_PLACE,solnNorm,1,MPI_DOUBLE,MPI_MAX,PETSC_COMM_WORLD,ierr)
+ call MPI_Allreduce(MPI_IN_PLACE,stagNorm,1,MPI_DOUBLE,MPI_MAX,MPI_COMM_WORLD,ierr)
+ call MPI_Allreduce(MPI_IN_PLACE,solnNorm,1,MPI_DOUBLE,MPI_MAX,MPI_COMM_WORLD,ierr)
 phi_stagInc = phi_current
 solution%stagConverged = stagNorm < max(num%eps_damage_atol, num%eps_damage_rtol*solnNorm)
@@ -320,9 +323,9 @@ subroutine updateReference()
 enddo
 K_ref = K_ref*wgt
- call MPI_Allreduce(MPI_IN_PLACE,K_ref,9,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
+ call MPI_Allreduce(MPI_IN_PLACE,K_ref,9,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr)
 mu_ref = mu_ref*wgt
- call MPI_Allreduce(MPI_IN_PLACE,mu_ref,1,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
+ call MPI_Allreduce(MPI_IN_PLACE,mu_ref,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr)
 end subroutine updateReference
diff --git a/src/grid/grid_mech_FEM.f90 b/src/grid/grid_mech_FEM.f90
index 1f36336c0..00e03c457 100644
--- a/src/grid/grid_mech_FEM.f90
+++ b/src/grid/grid_mech_FEM.f90
@@ -7,8 +7,11 @@ module grid_mechanical_FEM
 #include
 #include
- use PETScdmda
- use PETScsnes
+ use PETScDMDA
+ use PETScSNES
+#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
+ use MPI_f08
+#endif
 use prec
 use parallelization
@@ -163,7 +166,7 @@ subroutine grid_mechanical_FEM_init
 CHKERRQ(ierr)
 localK = 0
 localK(worldrank) = grid3
- call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,PETSC_COMM_WORLD,ierr)
+ call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,ierr)
 call DMDACreate3d(PETSC_COMM_WORLD, &
 DM_BOUNDARY_PERIODIC, DM_BOUNDARY_PERIODIC, DM_BOUNDARY_PERIODIC, &
 DMDA_STENCIL_BOX, &
@@ -237,16 +240,16 @@ subroutine grid_mechanical_FEM_init
 groupHandle = HDF5_openGroup(fileHandle,'solver')
 call HDF5_read(P_aim,groupHandle,'P_aim',.false.)
- call MPI_Bcast(P_aim,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+ call MPI_Bcast(P_aim,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
 if(ierr /=0) error stop 'MPI error'
 call HDF5_read(F_aim,groupHandle,'F_aim',.false.)
- call MPI_Bcast(F_aim,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+ call MPI_Bcast(F_aim,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
 if(ierr /=0) error stop 'MPI error'
 call HDF5_read(F_aim_lastInc,groupHandle,'F_aim_lastInc',.false.)
- call MPI_Bcast(F_aim_lastInc,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+ call MPI_Bcast(F_aim_lastInc,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
 if(ierr /=0) error stop 'MPI error'
 call HDF5_read(F_aimDot,groupHandle,'F_aimDot',.false.)
- call MPI_Bcast(F_aimDot,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+ call MPI_Bcast(F_aimDot,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
 if(ierr /=0) error stop 'MPI error'
 call HDF5_read(F,groupHandle,'F')
 call HDF5_read(F_lastInc,groupHandle,'F_lastInc')
@@ -271,10 +274,10 @@ subroutine grid_mechanical_FEM_init
 restartRead2: if (interface_restartInc > 0) then
 print'(a,i0,a)', ' reading more restart data of increment ', interface_restartInc, ' from file'
 call HDF5_read(C_volAvg,groupHandle,'C_volAvg',.false.)
- call MPI_Bcast(C_volAvg,81,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+ call MPI_Bcast(C_volAvg,81,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
 if(ierr /=0) error stop 'MPI error'
 call HDF5_read(C_volAvgLastInc,groupHandle,'C_volAvgLastInc',.false.)
- call MPI_Bcast(C_volAvgLastInc,81,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+ call MPI_Bcast(C_volAvgLastInc,81,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
 if(ierr /=0) error stop 'MPI error'
 call HDF5_closeGroup(groupHandle)
@@ -568,7 +571,7 @@ subroutine formResidual(da_local,x_local, &
 call utilities_constitutiveResponse(P_current,&
 P_av,C_volAvg,devNull, &
 F,params%timeinc,params%rotation_BC)
- call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,PETSC_COMM_WORLD,ierr)
+ call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,ierr)
 !--------------------------------------------------------------------------------------------------
 ! stress BC handling
diff --git a/src/grid/grid_mech_spectral_basic.f90 b/src/grid/grid_mech_spectral_basic.f90
index d4be18ed7..7f48768c4 100644
--- a/src/grid/grid_mech_spectral_basic.f90
+++ b/src/grid/grid_mech_spectral_basic.f90
@@ -7,8 +7,11 @@ module grid_mechanical_spectral_basic
 #include
 #include
- use PETScdmda
- use PETScsnes
+ use PETScDMDA
+ use PETScSNES
+#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
+ use MPI_f08
+#endif
 use prec
 use parallelization
@@ -99,7 +102,11 @@ subroutine grid_mechanical_spectral_basic_init
 F ! pointer to solution data
 PetscInt, dimension(0:worldsize-1) :: localK
 integer(HID_T) :: fileHandle, groupHandle
- integer :: fileUnit
+#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
+ type(MPI_File) :: fileUnit
+#else
+ integer :: fileUnit
+#endif
 class (tNode), pointer :: &
 num_grid, &
 debug_grid
@@ -154,7 +161,7 @@ subroutine grid_mechanical_spectral_basic_init
 call SNESSetOptionsPrefix(snes,'mechanical_',ierr);CHKERRQ(ierr)
 localK = 0
 localK(worldrank) = grid3
- call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,PETSC_COMM_WORLD,ierr)
+ call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,ierr)
 call DMDACreate3d(PETSC_COMM_WORLD, &
 DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, & ! cut off stencil at boundary
 DMDA_STENCIL_BOX, & ! Moore (26) neighborhood around central point
@@ -185,16 +192,16 @@ subroutine grid_mechanical_spectral_basic_init
 groupHandle = HDF5_openGroup(fileHandle,'solver')
 call HDF5_read(P_aim,groupHandle,'P_aim',.false.)
- call MPI_Bcast(P_aim,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+ call MPI_Bcast(P_aim,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
 if(ierr /=0) error stop 'MPI error'
 call HDF5_read(F_aim,groupHandle,'F_aim',.false.)
- call MPI_Bcast(F_aim,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+ call MPI_Bcast(F_aim,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
 if(ierr /=0) error stop 'MPI error'
 call HDF5_read(F_aim_lastInc,groupHandle,'F_aim_lastInc',.false.)
- call MPI_Bcast(F_aim_lastInc,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+ call MPI_Bcast(F_aim_lastInc,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
 if(ierr /=0) error stop 'MPI error'
 call HDF5_read(F_aimDot,groupHandle,'F_aimDot',.false.)
- call MPI_Bcast(F_aimDot,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+ call MPI_Bcast(F_aimDot,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
 if(ierr /=0) error stop 'MPI error'
 call HDF5_read(F,groupHandle,'F')
 call HDF5_read(F_lastInc,groupHandle,'F_lastInc')
@@ -214,16 +221,16 @@ subroutine grid_mechanical_spectral_basic_init
 restartRead2: if (interface_restartInc > 0) then
 print'(a,i0,a)', ' reading more restart data of increment ', interface_restartInc, ' from file'
 call HDF5_read(C_volAvg,groupHandle,'C_volAvg',.false.)
- call MPI_Bcast(C_volAvg,81,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+ call MPI_Bcast(C_volAvg,81,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
 if(ierr /=0) error stop 'MPI error'
 call HDF5_read(C_volAvgLastInc,groupHandle,'C_volAvgLastInc',.false.)
- call MPI_Bcast(C_volAvgLastInc,81,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+ call MPI_Bcast(C_volAvgLastInc,81,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
 if(ierr /=0) error stop 'MPI error'
 call HDF5_closeGroup(groupHandle)
 call HDF5_closeFile(fileHandle)
- call MPI_File_open(PETSC_COMM_WORLD, trim(getSolverJobName())//'.C_ref', &
+ call MPI_File_open(MPI_COMM_WORLD, trim(getSolverJobName())//'.C_ref', &
 MPI_MODE_RDONLY,MPI_INFO_NULL,fileUnit,ierr)
 call MPI_File_read(fileUnit,C_minMaxAvg,81,MPI_DOUBLE,MPI_STATUS_IGNORE,ierr)
 call MPI_File_close(fileUnit,ierr)
@@ -488,7 +495,7 @@ subroutine formResidual(in, F, &
 call utilities_constitutiveResponse(residuum, & ! "residuum" gets field of first PK stress (to save memory)
 P_av,C_volAvg,C_minMaxAvg, &
 F,params%timeinc,params%rotation_BC)
- call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,PETSC_COMM_WORLD,ierr)
+ call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,ierr)
 !--------------------------------------------------------------------------------------------------
 ! stress BC handling
diff --git a/src/grid/grid_mech_spectral_polarisation.f90 b/src/grid/grid_mech_spectral_polarisation.f90
index 7c71ec698..3648a4454 100644
--- a/src/grid/grid_mech_spectral_polarisation.f90
+++ b/src/grid/grid_mech_spectral_polarisation.f90
@@ -7,8 +7,11 @@ module grid_mechanical_spectral_polarisation
 #include
 #include
- use PETScdmda
- use PETScsnes
+ use PETScDMDA
+ use PETScSNES
+#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
+ use MPI_f08
+#endif
 use prec
 use parallelization
@@ -112,7 +115,11 @@ subroutine grid_mechanical_spectral_polarisation_init
 F_tau ! specific (sub)pointer
 PetscInt, dimension(0:worldsize-1) :: localK
 integer(HID_T) :: fileHandle, groupHandle
- integer :: fileUnit
+#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
+ type(MPI_File) :: fileUnit
+#else
+ integer :: fileUnit
+#endif
 class (tNode), pointer :: &
 num_grid, &
 debug_grid
@@ -174,7 +181,7 @@ subroutine grid_mechanical_spectral_polarisation_init
 call SNESSetOptionsPrefix(snes,'mechanical_',ierr);CHKERRQ(ierr)
 localK = 0
 localK(worldrank) = grid3
- call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,PETSC_COMM_WORLD,ierr)
+ call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,ierr)
 call DMDACreate3d(PETSC_COMM_WORLD, &
 DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, & ! cut off stencil at boundary
 DMDA_STENCIL_BOX, & ! Moore (26) neighborhood around central point
@@ -207,16 +214,16 @@ subroutine grid_mechanical_spectral_polarisation_init
 groupHandle = HDF5_openGroup(fileHandle,'solver')
 call HDF5_read(P_aim,groupHandle,'P_aim',.false.)
- call MPI_Bcast(P_aim,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+ call MPI_Bcast(P_aim,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
 if(ierr /=0) error stop 'MPI error'
 call HDF5_read(F_aim,groupHandle,'F_aim',.false.)
- call MPI_Bcast(F_aim,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+ call MPI_Bcast(F_aim,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
 if(ierr /=0) error stop 'MPI error'
 call HDF5_read(F_aim_lastInc,groupHandle,'F_aim_lastInc',.false.)
- call MPI_Bcast(F_aim_lastInc,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+ call MPI_Bcast(F_aim_lastInc,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
 if(ierr /=0) error stop 'MPI error'
 call HDF5_read(F_aimDot,groupHandle,'F_aimDot',.false.)
- call MPI_Bcast(F_aimDot,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+ call MPI_Bcast(F_aimDot,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
 if(ierr /=0) error stop 'MPI error'
 call HDF5_read(F,groupHandle,'F')
 call HDF5_read(F_lastInc,groupHandle,'F_lastInc')
@@ -240,16 +247,16 @@ subroutine grid_mechanical_spectral_polarisation_init
 restartRead2: if (interface_restartInc > 0) then
 print'(a,i0,a)', ' reading more restart data of increment ', interface_restartInc, ' from file'
 call HDF5_read(C_volAvg,groupHandle,'C_volAvg',.false.)
- call MPI_Bcast(C_volAvg,81,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+ call MPI_Bcast(C_volAvg,81,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
 if(ierr /=0) error stop 'MPI error'
 call HDF5_read(C_volAvgLastInc,groupHandle,'C_volAvgLastInc',.false.)
- call MPI_Bcast(C_volAvgLastInc,81,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+ call MPI_Bcast(C_volAvgLastInc,81,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
 if(ierr /=0) error stop 'MPI error'
 call HDF5_closeGroup(groupHandle)
 call HDF5_closeFile(fileHandle)
- call MPI_File_open(PETSC_COMM_WORLD, trim(getSolverJobName())//'.C_ref', &
+ call MPI_File_open(MPI_COMM_WORLD, trim(getSolverJobName())//'.C_ref', &
 MPI_MODE_RDONLY,MPI_INFO_NULL,fileUnit,ierr)
 call MPI_File_read(fileUnit,C_minMaxAvg,81,MPI_DOUBLE,MPI_STATUS_IGNORE,ierr)
 call MPI_File_close(fileUnit,ierr)
@@ -544,7 +551,7 @@ subroutine formResidual(in, FandF_tau, &
 X_RANGE, Y_RANGE, Z_RANGE)
 F_av = sum(sum(sum(F,dim=5),dim=4),dim=3) * wgt
- call MPI_Allreduce(MPI_IN_PLACE,F_av,9,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
+ call MPI_Allreduce(MPI_IN_PLACE,F_av,9,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr)
 call SNESGetNumberFunctionEvals(snes,nfuncs,ierr); CHKERRQ(ierr)
 call SNESGetIterationNumber(snes,PETScIter,ierr); CHKERRQ(ierr)
@@ -588,7 +595,7 @@ subroutine formResidual(in, FandF_tau, &
 call utilities_constitutiveResponse(residual_F, & ! "residuum" gets field of first PK stress (to save memory)
 P_av,C_volAvg,C_minMaxAvg, &
 F - residual_F_tau/num%beta,params%timeinc,params%rotation_BC)
- call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,PETSC_COMM_WORLD,ierr)
+ call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,ierr)
 !--------------------------------------------------------------------------------------------------
 ! stress BC handling
diff --git a/src/grid/grid_thermal_spectral.f90 b/src/grid/grid_thermal_spectral.f90
index 5a1745668..b455cb56f 100644
--- a/src/grid/grid_thermal_spectral.f90
+++ b/src/grid/grid_thermal_spectral.f90
@@ -7,8 +7,11 @@ module grid_thermal_spectral
 #include
 #include
- use PETScdmda
- use PETScsnes
+ use PETScDMDA
+ use PETScSNES
+#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
+ use MPI_f08
+#endif
 use prec
 use parallelization
@@ -102,7 +105,7 @@ subroutine grid_thermal_spectral_init(T_0)
 call SNESSetOptionsPrefix(thermal_snes,'thermal_',ierr);CHKERRQ(ierr)
 localK = 0
 localK(worldrank) = grid3
- call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,PETSC_COMM_WORLD,ierr)
+ call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,ierr)
 call DMDACreate3D(PETSC_COMM_WORLD, &
 DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, & ! cut off stencil at boundary
 DMDA_STENCIL_BOX, & ! Moore (26) neighborhood around central point
@@ -182,8 +185,8 @@ function grid_thermal_spectral_solution(timeinc) result(solution)
 endif
 stagNorm = maxval(abs(T_current - T_stagInc))
 solnNorm = maxval(abs(T_current))
- call MPI_Allreduce(MPI_IN_PLACE,stagNorm,1,MPI_DOUBLE,MPI_MAX,PETSC_COMM_WORLD,ierr)
- call MPI_Allreduce(MPI_IN_PLACE,solnNorm,1,MPI_DOUBLE,MPI_MAX,PETSC_COMM_WORLD,ierr)
+ call MPI_Allreduce(MPI_IN_PLACE,stagNorm,1,MPI_DOUBLE,MPI_MAX,MPI_COMM_WORLD,ierr)
+ call MPI_Allreduce(MPI_IN_PLACE,solnNorm,1,MPI_DOUBLE,MPI_MAX,MPI_COMM_WORLD,ierr)
 T_stagInc = T_current
 solution%stagConverged = stagNorm < max(num%eps_thermal_atol, num%eps_thermal_rtol*solnNorm)
@@ -310,9 +313,9 @@ subroutine updateReference()
 enddo
 K_ref = K_ref*wgt
- call MPI_Allreduce(MPI_IN_PLACE,K_ref,9,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
+ call MPI_Allreduce(MPI_IN_PLACE,K_ref,9,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr)
 mu_ref = mu_ref*wgt
- call MPI_Allreduce(MPI_IN_PLACE,mu_ref,1,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
+ call MPI_Allreduce(MPI_IN_PLACE,mu_ref,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr)
 end subroutine updateReference
diff --git a/src/grid/spectral_utilities.f90 b/src/grid/spectral_utilities.f90
index f512d9b6c..128c1a916 100644
--- a/src/grid/spectral_utilities.f90
+++ b/src/grid/spectral_utilities.f90
@@ -8,6 +8,9 @@ module spectral_utilities
 #include
 use PETScSys
+#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
+ use MPI_f08
+#endif
 use prec
 use DAMASK_interface
@@ -591,7 +594,7 @@ real(pReal) function utilities_divergenceRMS()
 conjg(-xi1st(1:3,grid1Red,j,k))*rescaledGeom))**2.0_pReal)
 enddo; enddo
 if(grid(1) == 1) utilities_divergenceRMS = utilities_divergenceRMS * 0.5_pReal ! counted twice in case of grid(1) == 1
- call MPI_Allreduce(MPI_IN_PLACE,utilities_divergenceRMS,1,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
+ call MPI_Allreduce(MPI_IN_PLACE,utilities_divergenceRMS,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr)
 if(ierr /=0) error stop 'MPI error'
 utilities_divergenceRMS = sqrt(utilities_divergenceRMS) * wgt ! RMS in real space calculated with Parsevals theorem from Fourier space
@@ -651,7 +654,7 @@ real(pReal) function utilities_curlRMS()
 + sum(real(curl_fourier)**2.0_pReal + aimag(curl_fourier)**2.0_pReal) ! this layer (Nyquist) does not have a conjugate complex counterpart (if grid(1) /= 1)
 enddo; enddo
- call MPI_Allreduce(MPI_IN_PLACE,utilities_curlRMS,1,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
+ call MPI_Allreduce(MPI_IN_PLACE,utilities_curlRMS,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr)
 if(ierr /=0) error stop 'MPI error'
 utilities_curlRMS = sqrt(utilities_curlRMS) * wgt
 if(grid(1) == 1) utilities_curlRMS = utilities_curlRMS * 0.5_pReal ! counted twice in case of grid(1) == 1
@@ -816,7 +819,7 @@ subroutine utilities_constitutiveResponse(P,P_av,C_volAvg,C_minmaxAvg,&
 P = reshape(homogenization_P, [3,3,grid(1),grid(2),grid3])
 P_av = sum(sum(sum(P,dim=5),dim=4),dim=3) * wgt ! average of P
- call MPI_Allreduce(MPI_IN_PLACE,P_av,9,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
+ call MPI_Allreduce(MPI_IN_PLACE,P_av,9,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr)
 if (debugRotation) print'(/,a,/,2(3(2x,f12.4,1x)/),3(2x,f12.4,1x))', &
 ' Piola--Kirchhoff stress (lab) / MPa =', transpose(P_av)*1.e-6_pReal
 if(present(rotation_BC)) P_av = rotation_BC%rotate(P_av)
@@ -840,21 +843,21 @@ subroutine utilities_constitutiveResponse(P,P_av,C_volAvg,C_minmaxAvg,&
 end do
 valueAndRank = [dPdF_norm_max,real(worldrank,pReal)]
- call MPI_Allreduce(MPI_IN_PLACE,valueAndRank,1, MPI_2DOUBLE_PRECISION, MPI_MAXLOC, PETSC_COMM_WORLD, ierr)
+ call MPI_Allreduce(MPI_IN_PLACE,valueAndRank,1, MPI_2DOUBLE_PRECISION, MPI_MAXLOC, MPI_COMM_WORLD, ierr)
 if (ierr /= 0) error stop 'MPI error'
- call MPI_Bcast(dPdF_max,81,MPI_DOUBLE,int(valueAndRank(2)),PETSC_COMM_WORLD, ierr)
+ call MPI_Bcast(dPdF_max,81,MPI_DOUBLE,int(valueAndRank(2)),MPI_COMM_WORLD, ierr)
 if (ierr /= 0) error stop 'MPI error'
 valueAndRank = [dPdF_norm_min,real(worldrank,pReal)]
- call MPI_Allreduce(MPI_IN_PLACE,valueAndRank,1, MPI_2DOUBLE_PRECISION, MPI_MINLOC, PETSC_COMM_WORLD, ierr)
+ call MPI_Allreduce(MPI_IN_PLACE,valueAndRank,1, MPI_2DOUBLE_PRECISION, MPI_MINLOC, MPI_COMM_WORLD, ierr)
 if (ierr /= 0) error stop 'MPI error'
- call MPI_Bcast(dPdF_min,81,MPI_DOUBLE,int(valueAndRank(2)),PETSC_COMM_WORLD, ierr)
+ call MPI_Bcast(dPdF_min,81,MPI_DOUBLE,int(valueAndRank(2)),MPI_COMM_WORLD, ierr)
 if (ierr /= 0) error stop 'MPI error'
 C_minmaxAvg = 0.5_pReal*(dPdF_max + dPdF_min)
 C_volAvg = sum(homogenization_dPdF,dim=5)
- call MPI_Allreduce(MPI_IN_PLACE,C_volAvg,81,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
+ call MPI_Allreduce(MPI_IN_PLACE,C_volAvg,81,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr)
 if (ierr /= 0) error stop 'MPI error'
 C_volAvg = C_volAvg * wgt
@@ -909,7 +912,7 @@ function utilities_forwardField(timeinc,field_lastInc,rate,aim)
 utilities_forwardField = field_lastInc + rate*timeinc
 if (present(aim)) then !< correct to match average
 fieldDiff = sum(sum(sum(utilities_forwardField,dim=5),dim=4),dim=3)*wgt
- call MPI_Allreduce(MPI_IN_PLACE,fieldDiff,9,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
+ call MPI_Allreduce(MPI_IN_PLACE,fieldDiff,9,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr)
 fieldDiff = fieldDiff - aim
 utilities_forwardField = utilities_forwardField - &
 spread(spread(spread(fieldDiff,3,grid(1)),4,grid(2)),5,grid3)
@@ -982,8 +985,13 @@ subroutine utilities_updateCoords(F)
 rank_t, rank_b, &
 c, &
 ierr
+#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
+ type(MPI_Request), dimension(4) :: request
+ type(MPI_Status), dimension(4) :: status
+#else
 integer, dimension(4) :: request
 integer, dimension(MPI_STATUS_SIZE,4) :: status
+#endif
 real(pReal), dimension(3) :: step
 real(pReal), dimension(3,3) :: Favg
 integer, dimension(3) :: me
@@ -1018,7 +1026,7 @@ subroutine utilities_updateCoords(F)
 !--------------------------------------------------------------------------------------------------
 ! average F
 if (grid3Offset == 0) Favg = real(tensorField_fourier(1:3,1:3,1,1,1),pReal)*wgt
- call MPI_Bcast(Favg,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+ call MPI_Bcast(Favg,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
 if(ierr /=0) error stop 'MPI error'
 !--------------------------------------------------------------------------------------------------
@@ -1029,20 +1037,24 @@ subroutine utilities_updateCoords(F)
 rank_b = modulo(worldrank-1,worldsize)
 ! send bottom layer to process below
- call MPI_Isend(IPfluct_padded(:,:,:,2), c,MPI_DOUBLE,rank_b,0,PETSC_COMM_WORLD,request(1),ierr)
+ call MPI_Isend(IPfluct_padded(:,:,:,2), c,MPI_DOUBLE,rank_b,0,MPI_COMM_WORLD,request(1),ierr)
 if(ierr /=0) error stop 'MPI error'
- call MPI_Irecv(IPfluct_padded(:,:,:,grid3+2),c,MPI_DOUBLE,rank_t,0,PETSC_COMM_WORLD,request(2),ierr)
+ call MPI_Irecv(IPfluct_padded(:,:,:,grid3+2),c,MPI_DOUBLE,rank_t,0,MPI_COMM_WORLD,request(2),ierr)
 if(ierr /=0) error stop 'MPI error'
 ! send top layer to process above
- call MPI_Isend(IPfluct_padded(:,:,:,grid3+1),c,MPI_DOUBLE,rank_t,1,PETSC_COMM_WORLD,request(3),ierr)
+ call MPI_Isend(IPfluct_padded(:,:,:,grid3+1),c,MPI_DOUBLE,rank_t,1,MPI_COMM_WORLD,request(3),ierr)
 if(ierr /=0) error stop 'MPI error'
- call MPI_Irecv(IPfluct_padded(:,:,:,1), c,MPI_DOUBLE,rank_b,1,PETSC_COMM_WORLD,request(4),ierr)
+ call MPI_Irecv(IPfluct_padded(:,:,:,1), c,MPI_DOUBLE,rank_b,1,MPI_COMM_WORLD,request(4),ierr)
 if(ierr /=0) error stop 'MPI error'
 call MPI_Waitall(4,request,status,ierr)
 if(ierr /=0) error stop 'MPI error'
+#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
+ ! ToDo
+#else
 if(any(status(MPI_ERROR,:) /= 0)) error stop 'MPI error'
+#endif
 !--------------------------------------------------------------------------------------------------
 ! calculate nodal displacements
diff --git a/src/mesh/FEM_utilities.f90 b/src/mesh/FEM_utilities.f90
index a84e3559f..35336847b 100644
--- a/src/mesh/FEM_utilities.f90
+++ b/src/mesh/FEM_utilities.f90
@@ -7,10 +7,13 @@ module FEM_utilities
 #include
 #include
- use PETScdmplex
- use PETScdmda
- use PETScis
-
+ use PETScDMplex
+ use PETScDMDA
+ use PETScIS
+#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
+ use MPI_f08
+#endif
+
 use prec
 use config
 use math
@@ -165,7 +168,7 @@ subroutine utilities_constitutiveResponse(timeinc,P_av,forwardData)
 cutBack = .false. ! reset cutBack status
 P_av = sum(homogenization_P,dim=3) * wgt
- call MPI_Allreduce(MPI_IN_PLACE,P_av,9,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
+ call MPI_Allreduce(MPI_IN_PLACE,P_av,9,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr)
 end subroutine utilities_constitutiveResponse
diff --git a/src/mesh/discretization_mesh.f90 b/src/mesh/discretization_mesh.f90
index d4206fcf5..aa0fb6d74 100644
--- a/src/mesh/discretization_mesh.f90
+++ b/src/mesh/discretization_mesh.f90
@@ -8,9 +8,12 @@ module discretization_mesh
 #include
 #include
 #include
- use PETScdmplex
- use PETScdmda
- use PETScis
+ use PETScDMplex
+ use PETScDMDA
+ use PETScIS
+#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
+ use MPI_f08
+#endif
 use DAMASK_interface
 use parallelization
@@ -111,9 +114,9 @@ subroutine discretization_mesh_init(restart)
 ! get number of IDs in face sets (for boundary conditions?)
 call DMGetLabelSize(globalMesh,'Face Sets',mesh_Nboundaries,ierr)
 CHKERRQ(ierr)
- call MPI_Bcast(mesh_Nboundaries,1,MPI_INTEGER,0,PETSC_COMM_WORLD,ierr)
- call MPI_Bcast(mesh_NcpElemsGlobal,1,MPI_INTEGER,0,PETSC_COMM_WORLD,ierr)
- call MPI_Bcast(dimPlex,1,MPI_INTEGER,0,PETSC_COMM_WORLD,ierr)
+ call MPI_Bcast(mesh_Nboundaries,1,MPI_INTEGER,0,MPI_COMM_WORLD,ierr)
+ call MPI_Bcast(mesh_NcpElemsGlobal,1,MPI_INTEGER,0,MPI_COMM_WORLD,ierr)
+ call MPI_Bcast(dimPlex,1,MPI_INTEGER,0,MPI_COMM_WORLD,ierr)
 if (worldrank == 0) then
 call DMClone(globalMesh,geomMesh,ierr)
@@ -134,7 +137,7 @@ subroutine discretization_mesh_init(restart)
 CHKERRQ(ierr)
 call ISRestoreIndicesF90(faceSetIS,pFaceSets,ierr)
 endif
- call MPI_Bcast(mesh_boundaries,mesh_Nboundaries,MPI_INTEGER,0,PETSC_COMM_WORLD,ierr)
+ call MPI_Bcast(mesh_boundaries,mesh_Nboundaries,MPI_INTEGER,0,MPI_COMM_WORLD,ierr)
 call DMDestroy(globalMesh,ierr); CHKERRQ(ierr)
diff --git a/src/mesh/mesh_mech_FEM.f90 b/src/mesh/mesh_mech_FEM.f90
index 954070e81..fc17f9085 100644
--- a/src/mesh/mesh_mech_FEM.f90
+++ b/src/mesh/mesh_mech_FEM.f90
@@ -9,10 +9,13 @@ module mesh_mechanical_FEM
 #include
 #include
- use PETScsnes
+ use PETScSNES
 use PETScDM
 use PETScDMplex
 use PETScDT
+#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
+ use MPI_f08
+#endif
 use prec
 use FEM_utilities
@@ -396,7 +399,7 @@ subroutine FEM_mechanical_formResidual(dm_local,xx_local,f_local,dummy,ierr)
 !--------------------------------------------------------------------------------------------------
 ! evaluate constitutive response
 call Utilities_constitutiveResponse(params%timeinc,P_av,ForwardData)
- call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,PETSC_COMM_WORLD,ierr)
+ call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,ierr)
 ForwardData = .false.
 !--------------------------------------------------------------------------------------------------
diff --git a/src/parallelization.f90 b/src/parallelization.f90
index 32ea16edc..78a02563f 100644
--- a/src/parallelization.f90
+++ b/src/parallelization.f90
@@ -8,7 +8,10 @@ module parallelization
 #ifdef PETSC
 #include
- use petscsys
+ use PETScSys
+#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
+ use MPI_f08
+#endif
 !$ use OMP_LIB
 #endif
 use prec
@@ -60,12 +63,12 @@ subroutine parallelization_init
 #endif
 CHKERRQ(petsc_err)
- call MPI_Comm_rank(PETSC_COMM_WORLD,worldrank,err)
+ call MPI_Comm_rank(MPI_COMM_WORLD,worldrank,err)
 if (err /= 0) error stop 'Could not determine worldrank'
 if (worldrank == 0) print'(/,a)', ' <<<+- parallelization init -+>>>'
- call MPI_Comm_size(PETSC_COMM_WORLD,worldsize,err)
+ call MPI_Comm_size(MPI_COMM_WORLD,worldsize,err)
 if (err /= 0) error stop 'Could not determine worldsize'
 if (worldrank == 0) print'(a,i3)', ' MPI processes: ',worldsize
diff --git a/src/results.f90 b/src/results.f90
index 5cf93661b..e74aced05 100644
--- a/src/results.f90
+++ b/src/results.f90
@@ -13,6 +13,9 @@ module results
 use HDF5
 #ifdef PETSC
 use PETSc
+#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
+ use MPI_f08
+#endif
 #endif
 implicit none
@@ -461,7 +464,7 @@ subroutine results_mapping_phase(ID,entry,label)
 call h5pset_dxpl_mpio_f(plist_id, H5FD_MPIO_COLLECTIVE_F, hdferr)
 if(hdferr < 0) error stop 'HDF5 error'
- call MPI_allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INT,MPI_SUM,PETSC_COMM_WORLD,ierr) ! get output at each process
+ call MPI_Allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INT,MPI_SUM,MPI_COMM_WORLD,ierr) ! get output at each process
 if(ierr /= 0) error stop 'MPI error'
 entryOffset = 0
@@ -470,7 +473,7 @@ subroutine results_mapping_phase(ID,entry,label)
 entryOffset(ID(co,ce),worldrank) = entryOffset(ID(co,ce),worldrank) +1
 enddo
 enddo
- call MPI_allreduce(MPI_IN_PLACE,entryOffset,size(entryOffset),MPI_INT,MPI_SUM,PETSC_COMM_WORLD,ierr)! get offset at each process
+ call MPI_Allreduce(MPI_IN_PLACE,entryOffset,size(entryOffset),MPI_INT,MPI_SUM,MPI_COMM_WORLD,ierr)! get offset at each process
 if(ierr /= 0) error stop 'MPI error'
 entryOffset(:,worldrank) = sum(entryOffset(:,0:worldrank-1),2)
 do co = 1, size(ID,1)
@@ -614,14 +617,14 @@ subroutine results_mapping_homogenization(ID,entry,label)
 call h5pset_dxpl_mpio_f(plist_id, H5FD_MPIO_COLLECTIVE_F, hdferr)
 if(hdferr < 0) error stop 'HDF5 error'
- call MPI_allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INT,MPI_SUM,PETSC_COMM_WORLD,ierr) ! get output at each process
+ call MPI_Allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INT,MPI_SUM,MPI_COMM_WORLD,ierr) ! get output at each process
 if(ierr /= 0) error stop 'MPI error'
 entryOffset = 0
 do ce = 1, size(ID,1)
 entryOffset(ID(ce),worldrank) = entryOffset(ID(ce),worldrank) +1
 enddo
- call MPI_allreduce(MPI_IN_PLACE,entryOffset,size(entryOffset),MPI_INT,MPI_SUM,PETSC_COMM_WORLD,ierr) ! get offset at each process
+ call MPI_Allreduce(MPI_IN_PLACE,entryOffset,size(entryOffset),MPI_INT,MPI_SUM,MPI_COMM_WORLD,ierr)! get offset at each process
 if(ierr /= 0) error stop 'MPI error'
 entryOffset(:,worldrank) = sum(entryOffset(:,0:worldrank-1),2)
 do ce = 1, size(ID,1)
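Note (not part of the patch): the hunks above apply one pattern throughout: pull in the MPI Fortran 2008 bindings only when PETSc does not already re-export them (PETSC_HAVE_MPI_F90MODULE_VISIBILITY undefined), address MPI_COMM_WORLD directly instead of PETSC_COMM_WORLD, and declare MPI-IO, request, and status variables with the mpi_f08 derived types. The following standalone Fortran sketch illustrates that pattern under stated assumptions: module and routine names are made up, and the PETSc configuration header that defines the macro (pulled in by the #include lines shown truncated above) is assumed to be in effect.

! Sketch only: mirrors the conditional-compilation pattern of the patch, not DAMASK code.
module mpi_usage_sketch
  use PETScSys
#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
  use MPI_f08                                ! provides type(MPI_File), type(MPI_Status), ...
#endif
  implicit none

contains

  subroutine read_reference_stiffness(C_ref,fname)
    real(8), dimension(81), intent(out) :: C_ref
    character(len=*),       intent(in)  :: fname
#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
    type(MPI_File) :: fileUnit               ! mpi_f08 derived-type file handle
#else
    integer :: fileUnit                      ! legacy integer handle via PETSc's MPI re-export
#endif
    integer :: ierr

    ! MPI-IO and collectives run on MPI_COMM_WORLD rather than PETSC_COMM_WORLD.
    call MPI_File_open(MPI_COMM_WORLD,fname,MPI_MODE_RDONLY,MPI_INFO_NULL,fileUnit,ierr)
    if (ierr /= 0) error stop 'MPI error'
    call MPI_File_read(fileUnit,C_ref,81,MPI_DOUBLE,MPI_STATUS_IGNORE,ierr)
    if (ierr /= 0) error stop 'MPI error'
    call MPI_File_close(fileUnit,ierr)
    if (ierr /= 0) error stop 'MPI error'

    call MPI_Bcast(C_ref,81,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
    if (ierr /= 0) error stop 'MPI error'
  end subroutine read_reference_stiffness

end module mpi_usage_sketch

With the legacy bindings the handles are plain integers, so the calls themselves compile unchanged in the #else branch; only the declarations differ, which is why the patch touches declarations and communicators but not the call sites' argument lists.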