diff --git a/src/HDF5_utilities.f90 b/src/HDF5_utilities.f90
index 65ee66af9..413446399 100644
--- a/src/HDF5_utilities.f90
+++ b/src/HDF5_utilities.f90
@@ -1560,7 +1560,7 @@ subroutine initialize_read(dset_id, filespace_id, memspace_id, plist_id, aplist_
     call HDF5_chkerr(hdferr)
     call MPI_Allgather(int(localShape(ubound(localShape,1)),MPI_INTEGER_KIND),1_MPI_INTEGER_KIND,MPI_INTEGER,&
                        readSize,1_MPI_INTEGER_KIND,MPI_INTEGER,MPI_COMM_WORLD,err_MPI)
-    if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+    call parallelization_chkerr(err_MPI)
   end if
 #endif
   myStart = int(0,HSIZE_T)
@@ -1667,7 +1667,7 @@ subroutine initialize_write(dset_id, filespace_id, memspace_id, plist_id, &
   if (parallel) then
     call MPI_Allgather(int(localShape(ubound(localShape,1)),MPI_INTEGER_KIND),1_MPI_INTEGER_KIND,MPI_INTEGER,&
                        writeSize,1_MPI_INTEGER_KIND,MPI_INTEGER,MPI_COMM_WORLD,err_MPI)
-    if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+    call parallelization_chkerr(err_MPI)
   end if
 #endif
   myStart = int(0,HSIZE_T)
diff --git a/src/grid/DAMASK_grid.f90 b/src/grid/DAMASK_grid.f90
index 3fd220ce5..68ed40cf3 100644
--- a/src/grid/DAMASK_grid.f90
+++ b/src/grid/DAMASK_grid.f90
@@ -363,7 +363,7 @@ program DAMASK_grid
           end if; flush(IO_STDOUT)

           call MPI_Allreduce(signal_SIGUSR1,sig,1_MPI_INTEGER_KIND,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,err_MPI)
-          if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+          call parallelization_chkerr(err_MPI)
           if (mod(inc,loadCases(l)%f_out) == 0 .or. sig) then
             print'(/,1x,a)', '... saving results ........................................................'
             flush(IO_STDOUT)
@@ -371,7 +371,7 @@
           end if
           if (sig) call signal_setSIGUSR1(.false.)
           call MPI_Allreduce(signal_SIGUSR2,sig,1_MPI_INTEGER_KIND,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,err_MPI)
-          if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+          call parallelization_chkerr(err_MPI)
           if (mod(inc,loadCases(l)%f_restart) == 0 .or. sig) then
             do field = 1, nActiveFields
               select case (ID(field))
@@ -387,7 +387,7 @@
           end if
           if (sig) call signal_setSIGUSR2(.false.)
          call MPI_Allreduce(signal_SIGINT,sig,1_MPI_INTEGER_KIND,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,err_MPI)
-          if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+          call parallelization_chkerr(err_MPI)
           if (sig) exit loadCaseLooping

         end if skipping
diff --git a/src/grid/discretization_grid.f90 b/src/grid/discretization_grid.f90
index 2cb5dbf9f..0c18b3317 100644
--- a/src/grid/discretization_grid.f90
+++ b/src/grid/discretization_grid.f90
@@ -97,12 +97,12 @@ subroutine discretization_grid_init(restart)

   call MPI_Bcast(cells,3_MPI_INTEGER_KIND,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD, err_MPI)
-  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+  call parallelization_chkerr(err_MPI)
   if (cells(1) < 2) call IO_error(844, ext_msg='cells(1) must be larger than 1')

   call MPI_Bcast(geomSize,3_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD, err_MPI)
-  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+  call parallelization_chkerr(err_MPI)
   call MPI_Bcast(origin,3_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD, err_MPI)
-  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+  call parallelization_chkerr(err_MPI)

   print'(/,1x,a,i0,a,i0,a,i0)', 'cells: ', cells(1), ' × ', cells(2), ' × ', cells(3)
   print '(1x,a,es8.2,a,es8.2,a,es8.2,a)', 'size: ', geomSize(1), ' × ', geomSize(2), ' × ', geomSize(3), ' m³'
@@ -126,15 +126,15 @@ subroutine discretization_grid_init(restart)

   call MPI_Gather(product(cells(1:2))*cells3Offset,1_MPI_INTEGER_KIND,MPI_INTEGER,displs,&
                   1_MPI_INTEGER_KIND,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+  call parallelization_chkerr(err_MPI)
   call MPI_Gather(product(myGrid), 1_MPI_INTEGER_KIND,MPI_INTEGER,sendcounts,&
                   1_MPI_INTEGER_KIND,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+  call parallelization_chkerr(err_MPI)

   allocate(materialAt(product(myGrid)))
   call MPI_Scatterv(materialAt_global,sendcounts,displs,MPI_INTEGER,materialAt,size(materialAt),&
                     MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+  call parallelization_chkerr(err_MPI)

   call discretization_init(materialAt, &
                            IPcoordinates0(myGrid,mySize,cells3Offset), &
@@ -318,10 +318,10 @@ function discretization_grid_getInitialCondition(label) result(ic)

   real(pREAL), dimension(:), allocatable :: ic_global, ic_local
   integer(MPI_INTEGER_KIND) :: err_MPI
-
   integer, dimension(worldsize) :: &
     displs, sendcounts
+

   if (worldrank == 0) then
     ic_global = VTI_readDataset_real(IO_read(CLI_geomFile),label)
   else
@@ -330,15 +330,15 @@

   call MPI_Gather(product(cells(1:2))*cells3Offset, 1_MPI_INTEGER_KIND,MPI_INTEGER,displs,&
                   1_MPI_INTEGER_KIND,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+  call parallelization_chkerr(err_MPI)
   call MPI_Gather(product(cells(1:2))*cells3, 1_MPI_INTEGER_KIND,MPI_INTEGER,sendcounts,&
                   1_MPI_INTEGER_KIND,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+  call parallelization_chkerr(err_MPI)

   allocate(ic_local(product(cells(1:2))*cells3))
   call MPI_Scatterv(ic_global,sendcounts,displs,MPI_DOUBLE,ic_local,size(ic_local),&
                     MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+  call parallelization_chkerr(err_MPI)

   ic = reshape(ic_local,[cells(1),cells(2),cells3])
diff --git a/src/grid/grid_damage_spectral.f90 b/src/grid/grid_damage_spectral.f90
index cce869653..6b53c0a75 100644
--- a/src/grid/grid_damage_spectral.f90
+++ b/src/grid/grid_damage_spectral.f90
@@ -129,7 +129,7 @@ subroutine grid_damage_spectral_init(num_grid)
   CHKERRQ(err_PETSc)
   call MPI_Allgather(int(cells3,MPI_INTEGER_KIND),1_MPI_INTEGER_KIND,MPI_INTEGER,&
                      cells3_global,1_MPI_INTEGER_KIND,MPI_INTEGER,MPI_COMM_WORLD,err_MPI)
-  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+  call parallelization_chkerr(err_MPI)
   call DMDACreate3D(PETSC_COMM_WORLD, &
          DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, &                                   ! cut off stencil at boundary
          DMDA_STENCIL_BOX, &                                                                       ! Moore (26) neighborhood around central point
@@ -239,10 +239,10 @@ function grid_damage_spectral_solution(Delta_t) result(solution)
   phi_max = maxval(phi)
   stagNorm = maxval(abs(phi - phi_stagInc))
   call MPI_Allreduce(MPI_IN_PLACE,stagNorm,1_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_MAX,MPI_COMM_WORLD,err_MPI)
-  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+  call parallelization_chkerr(err_MPI)
   solution%stagConverged = stagNorm < max(num%eps_damage_atol, num%eps_damage_rtol*phi_max)
   call MPI_Allreduce(MPI_IN_PLACE,solution%stagConverged,1_MPI_INTEGER_KIND,MPI_LOGICAL,MPI_LAND,MPI_COMM_WORLD,err_MPI)
-  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+  call parallelization_chkerr(err_MPI)
   phi_stagInc = phi

   call homogenization_set_phi(reshape(phi,[product(cells(1:2))*cells3]))
@@ -379,10 +379,10 @@ subroutine updateReference()

   K_ref = K_ref*wgt
   call MPI_Allreduce(MPI_IN_PLACE,K_ref,9_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI)
-  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+  call parallelization_chkerr(err_MPI)
   mu_ref = mu_ref*wgt
   call MPI_Allreduce(MPI_IN_PLACE,mu_ref,1_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI)
-  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+  call parallelization_chkerr(err_MPI)

 end subroutine updateReference
diff --git a/src/grid/grid_mech_FEM.f90 b/src/grid/grid_mech_FEM.f90
index 59835f250..2b856ff1c 100644
--- a/src/grid/grid_mech_FEM.f90
+++ b/src/grid/grid_mech_FEM.f90
@@ -173,7 +173,7 @@ subroutine grid_mechanical_FEM_init(num_grid)
   CHKERRQ(err_PETSc)
   call MPI_Allgather(int(cells3,MPI_INTEGER_KIND),1_MPI_INTEGER_KIND,MPI_INTEGER,&
                      cells3_global,1_MPI_INTEGER_KIND,MPI_INTEGER,MPI_COMM_WORLD,err_MPI)
-  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+  call parallelization_chkerr(err_MPI)
   call DMDACreate3d(PETSC_COMM_WORLD, &
          DM_BOUNDARY_PERIODIC, DM_BOUNDARY_PERIODIC, DM_BOUNDARY_PERIODIC, &
          DMDA_STENCIL_BOX, &
@@ -246,16 +246,16 @@ subroutine grid_mechanical_FEM_init(num_grid)

     call HDF5_read(P_aim,groupHandle,'P_aim',.false.)
     call MPI_Bcast(P_aim,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-    if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+    call parallelization_chkerr(err_MPI)
     call HDF5_read(F_aim,groupHandle,'F_aim',.false.)
     call MPI_Bcast(F_aim,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-    if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+    call parallelization_chkerr(err_MPI)
     call HDF5_read(F_aim_lastInc,groupHandle,'F_aim_lastInc',.false.)
     call MPI_Bcast(F_aim_lastInc,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-    if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+    call parallelization_chkerr(err_MPI)
     call HDF5_read(F_aimDot,groupHandle,'F_aimDot',.false.)
    call MPI_Bcast(F_aimDot,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-    if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+    call parallelization_chkerr(err_MPI)
     call HDF5_read(temp33n,groupHandle,'F')
     F = reshape(temp33n,[3,3,cells(1),cells(2),cells3])
     call HDF5_read(temp33n,groupHandle,'F_lastInc')
@@ -283,10 +283,10 @@ subroutine grid_mechanical_FEM_init(num_grid)
     print'(1x,a,1x,i0)', 'loading additional restart data of increment', CLI_restartInc
     call HDF5_read(C_volAvg,groupHandle,'C_volAvg',.false.)
     call MPI_Bcast(C_volAvg,81_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-    if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+    call parallelization_chkerr(err_MPI)
     call HDF5_read(C_volAvgLastInc,groupHandle,'C_volAvgLastInc',.false.)
     call MPI_Bcast(C_volAvgLastInc,81_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-    if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+    call parallelization_chkerr(err_MPI)

     call HDF5_closeGroup(groupHandle)
     call HDF5_closeFile(fileHandle)
@@ -575,7 +575,7 @@ subroutine formResidual(da_local,x_local, &
                                          P_av,C_volAvg,devNull, &
                                          F,params%Delta_t,params%rotation_BC)
   call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1_MPI_INTEGER_KIND,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,err_MPI)
-  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+  call parallelization_chkerr(err_MPI)

 !--------------------------------------------------------------------------------------------------
 ! stress BC handling
diff --git a/src/grid/grid_mech_spectral_basic.f90 b/src/grid/grid_mech_spectral_basic.f90
index 0e8ba4841..22113a6fd 100644
--- a/src/grid/grid_mech_spectral_basic.f90
+++ b/src/grid/grid_mech_spectral_basic.f90
@@ -169,7 +169,7 @@ subroutine grid_mechanical_spectral_basic_init(num_grid)
   CHKERRQ(err_PETSc)
   call MPI_Allgather(int(cells3,MPI_INTEGER_KIND),1_MPI_INTEGER_KIND,MPI_INTEGER,&
                      cells3_global,1_MPI_INTEGER_KIND,MPI_INTEGER,MPI_COMM_WORLD,err_MPI)
-  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+  call parallelization_chkerr(err_MPI)
   call DMDACreate3d(PETSC_COMM_WORLD, &
          DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, &                                   ! cut off stencil at boundary
          DMDA_STENCIL_BOX, &                                                                       ! Moore (26) neighborhood around central point
@@ -207,16 +207,16 @@ subroutine grid_mechanical_spectral_basic_init(num_grid)

     call HDF5_read(P_aim,groupHandle,'P_aim',.false.)
     call MPI_Bcast(P_aim,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-    if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+    call parallelization_chkerr(err_MPI)
     call HDF5_read(F_aim,groupHandle,'F_aim',.false.)
     call MPI_Bcast(F_aim,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-    if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+    call parallelization_chkerr(err_MPI)
     call HDF5_read(F_aim_lastInc,groupHandle,'F_aim_lastInc',.false.)
     call MPI_Bcast(F_aim_lastInc,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-    if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+    call parallelization_chkerr(err_MPI)
     call HDF5_read(F_aimDot,groupHandle,'F_aimDot',.false.)
    call MPI_Bcast(F_aimDot,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-    if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+    call parallelization_chkerr(err_MPI)
     call HDF5_read(temp33n,groupHandle,'F')
     F = reshape(temp33n,[9,cells(1),cells(2),cells3])
     call HDF5_read(temp33n,groupHandle,'F_lastInc')
@@ -238,13 +238,13 @@ subroutine grid_mechanical_spectral_basic_init(num_grid)
     print'(1x,a,1x,i0)', 'loading additional restart data of increment', CLI_restartInc
     call HDF5_read(C_volAvg,groupHandle,'C_volAvg',.false.)
     call MPI_Bcast(C_volAvg,81_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-    if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+    call parallelization_chkerr(err_MPI)
     call HDF5_read(C_volAvgLastInc,groupHandle,'C_volAvgLastInc',.false.)
     call MPI_Bcast(C_volAvgLastInc,81_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-    if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+    call parallelization_chkerr(err_MPI)
     call HDF5_read(C_minMaxAvg,groupHandle,'C_minMaxAvg',.false.)
     call MPI_Bcast(C_minMaxAvg,81_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-    if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+    call parallelization_chkerr(err_MPI)

     call HDF5_closeGroup(groupHandle)
     call HDF5_closeFile(fileHandle)
@@ -519,7 +519,7 @@ subroutine formResidual(residual_subdomain, F, &
                                          P_av,C_volAvg,C_minMaxAvg, &
                                          F,params%Delta_t,params%rotation_BC)
   call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1_MPI_INTEGER_KIND,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,err_MPI)
-  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+  call parallelization_chkerr(err_MPI)
   err_div = utilities_divergenceRMS(P)

   end associate
diff --git a/src/grid/grid_mech_spectral_polarization.f90 b/src/grid/grid_mech_spectral_polarization.f90
index b5cc0b967..a4da7452d 100644
--- a/src/grid/grid_mech_spectral_polarization.f90
+++ b/src/grid/grid_mech_spectral_polarization.f90
@@ -190,7 +190,7 @@ subroutine grid_mechanical_spectral_polarization_init(num_grid)
   CHKERRQ(err_PETSc)
   call MPI_Allgather(int(cells3,pPetscInt),1_MPI_INTEGER_KIND,MPI_INTEGER,&
                      cells3_global,1_MPI_INTEGER_KIND,MPI_INTEGER,MPI_COMM_WORLD,err_MPI)
-  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+  call parallelization_chkerr(err_MPI)
   call DMDACreate3d(PETSC_COMM_WORLD, &
          DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, &                                   ! cut off stencil at boundary
          DMDA_STENCIL_BOX, &                                                                       ! Moore (26) neighborhood around central point
@@ -230,16 +230,16 @@ subroutine grid_mechanical_spectral_polarization_init(num_grid)

     call HDF5_read(P_aim,groupHandle,'P_aim',.false.)
     call MPI_Bcast(P_aim,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-    if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+    call parallelization_chkerr(err_MPI)
     call HDF5_read(F_aim,groupHandle,'F_aim',.false.)
     call MPI_Bcast(F_aim,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-    if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+    call parallelization_chkerr(err_MPI)
     call HDF5_read(F_aim_lastInc,groupHandle,'F_aim_lastInc',.false.)
     call MPI_Bcast(F_aim_lastInc,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-    if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+    call parallelization_chkerr(err_MPI)
     call HDF5_read(F_aimDot,groupHandle,'F_aimDot',.false.)
    call MPI_Bcast(F_aimDot,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-    if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+    call parallelization_chkerr(err_MPI)
     call HDF5_read(temp33n,groupHandle,'F')
     F = reshape(temp33n,[9,cells(1),cells(2),cells3])
     call HDF5_read(temp33n,groupHandle,'F_lastInc')
@@ -267,13 +267,13 @@ subroutine grid_mechanical_spectral_polarization_init(num_grid)
     print '(1x,a,1x,i0)', 'loading additional restart data of increment', CLI_restartInc
     call HDF5_read(C_volAvg,groupHandle,'C_volAvg',.false.)
     call MPI_Bcast(C_volAvg,81_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-    if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+    call parallelization_chkerr(err_MPI)
     call HDF5_read(C_volAvgLastInc,groupHandle,'C_volAvgLastInc',.false.)
     call MPI_Bcast(C_volAvgLastInc,81_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-    if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+    call parallelization_chkerr(err_MPI)
     call HDF5_read(C_minMaxAvg,groupHandle,'C_minMaxAvg',.false.)
     call MPI_Bcast(C_minMaxAvg,81_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-    if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+    call parallelization_chkerr(err_MPI)

     call HDF5_closeGroup(groupHandle)
     call HDF5_closeFile(fileHandle)
@@ -573,7 +573,7 @@ subroutine formResidual(residual_subdomain, FandF_tau, &

   F_av = sum(sum(sum(F,dim=5),dim=4),dim=3) * wgt
   call MPI_Allreduce(MPI_IN_PLACE,F_av,9_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI)
-  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+  call parallelization_chkerr(err_MPI)

   call SNESGetNumberFunctionEvals(SNES_mech,nfuncs,err_PETSc)
   CHKERRQ(err_PETSc)
diff --git a/src/grid/grid_mech_utilities.f90 b/src/grid/grid_mech_utilities.f90
index 600c89309..bae5c309b 100644
--- a/src/grid/grid_mech_utilities.f90
+++ b/src/grid/grid_mech_utilities.f90
@@ -140,7 +140,7 @@ subroutine utilities_constitutiveResponse(P,P_av,C_volAvg,C_minmaxAvg,&
   P = reshape(homogenization_P, [3,3,cells(1),cells(2),cells3])
   P_av = sum(sum(sum(P,dim=5),dim=4),dim=3) * wgt
   call MPI_Allreduce(MPI_IN_PLACE,P_av,9_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI)
-  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+  call parallelization_chkerr(err_MPI)
   if (present(rotation_BC)) then
     if (any(dNeq(rotation_BC%asQuaternion(), real([1.0, 0.0, 0.0, 0.0],pREAL)))) &
       print'(/,1x,a,/,2(3(2x,f12.4,1x)/),3(2x,f12.4,1x))', &
@@ -168,21 +168,21 @@ subroutine utilities_constitutiveResponse(P,P_av,C_volAvg,C_minmaxAvg,&

     valueAndRank = [dPdF_norm_max,real(worldrank,pREAL)]
     call MPI_Allreduce(MPI_IN_PLACE,valueAndRank,1_MPI_INTEGER_KIND,MPI_2DOUBLE_PRECISION,MPI_MAXLOC,MPI_COMM_WORLD,err_MPI)
-    if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+    call parallelization_chkerr(err_MPI)
     call MPI_Bcast(dPdF_max,81_MPI_INTEGER_KIND,MPI_DOUBLE,int(valueAndRank(2),MPI_INTEGER_KIND),MPI_COMM_WORLD,err_MPI)
-    if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+    call parallelization_chkerr(err_MPI)

     valueAndRank = [dPdF_norm_min,real(worldrank,pREAL)]
     call MPI_Allreduce(MPI_IN_PLACE,valueAndRank,1_MPI_INTEGER_KIND,MPI_2DOUBLE_PRECISION,MPI_MINLOC,MPI_COMM_WORLD,err_MPI)
-    if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+    call parallelization_chkerr(err_MPI)
     call MPI_Bcast(dPdF_min,81_MPI_INTEGER_KIND,MPI_DOUBLE,int(valueAndRank(2),MPI_INTEGER_KIND),MPI_COMM_WORLD,err_MPI)
-    if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+    call parallelization_chkerr(err_MPI)

     C_minmaxAvg = 0.5_pREAL*(dPdF_max + dPdF_min)

   C_volAvg = sum(homogenization_dPdF,dim=5)
   call MPI_Allreduce(MPI_IN_PLACE,C_volAvg,81_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI)
-  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+  call parallelization_chkerr(err_MPI)
   C_volAvg = C_volAvg * wgt

@@ -238,7 +238,7 @@ function utilities_forwardTensorField(Delta_t,field_lastInc,rate,aim)
   if (present(aim)) then                                                                            !< correct to match average
     fieldDiff = sum(sum(sum(utilities_forwardTensorField,dim=5),dim=4),dim=3)*wgt
     call MPI_Allreduce(MPI_IN_PLACE,fieldDiff,9_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI)
-    if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+    call parallelization_chkerr(err_MPI)
     fieldDiff = fieldDiff - aim
     utilities_forwardTensorField = utilities_forwardTensorField &
                                  - spread(spread(spread(fieldDiff,3,cells(1)),4,cells(2)),5,cells3)
diff --git a/src/grid/grid_thermal_spectral.f90 b/src/grid/grid_thermal_spectral.f90
index ae672d002..1c46050ff 100644
--- a/src/grid/grid_thermal_spectral.f90
+++ b/src/grid/grid_thermal_spectral.f90
@@ -123,7 +123,7 @@ subroutine grid_thermal_spectral_init(num_grid)
   CHKERRQ(err_PETSc)
   call MPI_Allgather(int(cells3,pPETSCINT),1_MPI_INTEGER_KIND,MPI_INTEGER,&
                      cells3_global,1_MPI_INTEGER_KIND,MPI_INTEGER,MPI_COMM_WORLD,err_MPI)
-  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+  call parallelization_chkerr(err_MPI)
   call DMDACreate3D(PETSC_COMM_WORLD, &
          DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, &                                   ! cut off stencil at boundary
          DMDA_STENCIL_BOX, &                                                                       ! Moore (26) neighborhood around central point
@@ -218,10 +218,10 @@ function grid_thermal_spectral_solution(Delta_t) result(solution)
   T_max = maxval(T)
   stagNorm = maxval(abs(T - T_stagInc))
   call MPI_Allreduce(MPI_IN_PLACE,stagNorm,1_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_MAX,MPI_COMM_WORLD,err_MPI)
-  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+  call parallelization_chkerr(err_MPI)
   solution%stagConverged = stagNorm < max(num%eps_thermal_atol, num%eps_thermal_rtol*T_max)
   call MPI_Allreduce(MPI_IN_PLACE,solution%stagConverged,1_MPI_INTEGER_KIND,MPI_LOGICAL,MPI_LAND,MPI_COMM_WORLD,err_MPI)
-  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+  call parallelization_chkerr(err_MPI)
   T_stagInc = T

   call homogenization_thermal_setField(reshape(T,[product(cells(1:2))*cells3]), &
@@ -367,10 +367,10 @@ subroutine updateReference()

   K_ref = K_ref*wgt
   call MPI_Allreduce(MPI_IN_PLACE,K_ref,9_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI)
-  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+  call parallelization_chkerr(err_MPI)
   mu_ref = mu_ref*wgt
   call MPI_Allreduce(MPI_IN_PLACE,mu_ref,1_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI)
-  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+  call parallelization_chkerr(err_MPI)

 end subroutine updateReference
diff --git a/src/grid/spectral_utilities.f90 b/src/grid/spectral_utilities.f90
index 4ea53d038..90544b2f8 100644
--- a/src/grid/spectral_utilities.f90
+++ b/src/grid/spectral_utilities.f90
@@ -77,7 +77,7 @@ module spectral_utilities

   type :: tNumerics
     integer :: &
-      divergence_correction                                                                         !< scale divergence/curl calculation: [0: no correction, 1: size scaled to 1, 2: size scaled to Npoints]
+      divergence_correction                                                                         !< scale divergence/curl calculation
     logical :: &
       memory_efficient                                                                              !< calculate gamma operator on the fly
   end type tNumerics
@@ -563,7 +563,7 @@ real(pREAL) function utilities_divergenceRMS(tensorField)
                                         conjg(-xi1st(1:3,cells1Red,k,j))*rescaledGeom))**2)
   end do; end do
   call MPI_Allreduce(MPI_IN_PLACE,utilities_divergenceRMS,1_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI)
-  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+  call parallelization_chkerr(err_MPI)
   utilities_divergenceRMS = sqrt(utilities_divergenceRMS) * wgt                                     ! RMS in real space calculated with Parsevals theorem from Fourier space
   if (cells(1) == 1) utilities_divergenceRMS = utilities_divergenceRMS * 0.5_pREAL                  ! counted twice in case of cells(1) == 1
@@ -629,7 +629,7 @@ real(pREAL) function utilities_curlRMS(tensorField)
   end do; end do

   call MPI_Allreduce(MPI_IN_PLACE,utilities_curlRMS,1_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI)
-  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+  call parallelization_chkerr(err_MPI)
   utilities_curlRMS = sqrt(utilities_curlRMS) * wgt                                                 ! RMS in real space calculated with Parsevals theorem from Fourier space
   if (cells(1) == 1) utilities_curlRMS = utilities_curlRMS * 0.5_pREAL                              ! counted twice in case of cells(1) == 1
@@ -778,7 +778,7 @@ subroutine utilities_updateCoords(F)
 ! average F
   if (cells3Offset == 0) Favg = tensorField_fourier(1:3,1:3,1,1,1)%re*wgt
   call MPI_Bcast(Favg,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+  call parallelization_chkerr(err_MPI)

 !--------------------------------------------------------------------------------------------------
 ! integration in Fourier space to get fluctuations of cell center displacements
@@ -798,24 +798,24 @@ subroutine utilities_updateCoords(F)

 !--------------------------------------------------------------------------------------------------
 ! pad cell center fluctuations along z-direction (needed when running MPI simulation)
-  c = product(shape(u_tilde_p_padded(:,:,:,1)))                                                    !< amount of data to transfer
+  c = product(shape(u_tilde_p_padded(:,:,:,1)))                                                     !< amount of data to transfer
   rank_t = modulo(worldrank+1_MPI_INTEGER_KIND,worldsize)
   rank_b = modulo(worldrank-1_MPI_INTEGER_KIND,worldsize)

 ! send bottom layer to process below
   call MPI_Isend(u_tilde_p_padded(:,:,:,1),       c,MPI_DOUBLE,rank_b,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,request(1),err_MPI)
-  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+  call parallelization_chkerr(err_MPI)
   call MPI_Irecv(u_tilde_p_padded(:,:,:,cells3+1),c,MPI_DOUBLE,rank_t,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,request(2),err_MPI)
-  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+  call parallelization_chkerr(err_MPI)

 ! send top layer to process above
   call MPI_Isend(u_tilde_p_padded(:,:,:,cells3)  ,c,MPI_DOUBLE,rank_t,1_MPI_INTEGER_KIND,MPI_COMM_WORLD,request(3),err_MPI)
-  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+  call parallelization_chkerr(err_MPI)
   call MPI_Irecv(u_tilde_p_padded(:,:,:,0),       c,MPI_DOUBLE,rank_b,1_MPI_INTEGER_KIND,MPI_COMM_WORLD,request(4),err_MPI)
-  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+  call parallelization_chkerr(err_MPI)

   call MPI_Waitall(4,request,status,err_MPI)
-  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+  call parallelization_chkerr(err_MPI)

 #if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY)
 ! ToDo
 #else
@@ -868,7 +868,7 @@ subroutine selfTest()
     call fftw_mpi_execute_dft_r2c(planTensorForth,tensorField_real,tensorField_fourier)
     call MPI_Allreduce(sum(sum(sum(tensorField_real_,dim=5),dim=4),dim=3),tensorSum,9_MPI_INTEGER_KIND, &
                        MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI)
-    if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+    call parallelization_chkerr(err_MPI)
     if (worldrank==0) then
       if (any(dNeq(tensorSum/tensorField_fourier(:,:,1,1,1)%re,1.0_pREAL,1.0e-12_pREAL))) &
         error stop 'mismatch avg tensorField FFT <-> real'
@@ -884,7 +884,7 @@ subroutine selfTest()
     call fftw_mpi_execute_dft_r2c(planVectorForth,vectorField_real,vectorField_fourier)
     call MPI_Allreduce(sum(sum(sum(vectorField_real_,dim=4),dim=3),dim=2),vectorSum,3_MPI_INTEGER_KIND, &
                        MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI)
-    if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+    call parallelization_chkerr(err_MPI)
     if (worldrank==0) then
       if (any(dNeq(vectorSum/vectorField_fourier(:,1,1,1)%re,1.0_pREAL,1.0e-12_pREAL))) &
         error stop 'mismatch avg vectorField FFT <-> real'
@@ -900,7 +900,7 @@ subroutine selfTest()
     call fftw_mpi_execute_dft_r2c(planScalarForth,scalarField_real,scalarField_fourier)
     call MPI_Allreduce(sum(sum(sum(scalarField_real_,dim=3),dim=2),dim=1),scalarSum,1_MPI_INTEGER_KIND, &
                        MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI)
-    if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+    call parallelization_chkerr(err_MPI)
     if (worldrank==0) then
       if (dNeq(scalarSum/scalarField_fourier(1,1,1)%re,1.0_pREAL,1.0e-12_pREAL)) &
         error stop 'mismatch avg scalarField FFT <-> real'
@@ -912,7 +912,7 @@ subroutine selfTest()

   call random_number(r)
   call MPI_Bcast(r,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+  call parallelization_chkerr(err_MPI)

   scalarField_real_ = r(1,1)
   if (maxval(abs(utilities_scalarGradient(scalarField_real_)))>5.0e-9_pREAL) error stop 'non-zero grad(const)'
diff --git a/src/mesh/discretization_mesh.f90 b/src/mesh/discretization_mesh.f90
index be9be3b19..22eb6147f 100644
--- a/src/mesh/discretization_mesh.f90
+++ b/src/mesh/discretization_mesh.f90
@@ -121,13 +121,13 @@ subroutine discretization_mesh_init(restart)
   CHKERRQ(err_PETSc)
   mesh_Nboundaries = int(Nboundaries)
   call MPI_Bcast(mesh_Nboundaries,1_MPI_INTEGER_KIND,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+  call parallelization_chkerr(err_MPI)
   call MPI_Bcast(mesh_NcpElemsGlobal,1_MPI_INTEGER_KIND,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+  call parallelization_chkerr(err_MPI)
   dim = int(dimPlex)
   call MPI_Bcast(dim,1_MPI_INTEGER_KIND,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
   dimPlex = int(dim,pPETSCINT)
-  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+  call parallelization_chkerr(err_MPI)

   if (worldsize == 1) then
     call DMClone(globalMesh,geomMesh,err_PETSc)
@@ -149,7 +149,7 @@ subroutine discretization_mesh_init(restart)
     call ISRestoreIndicesF90(faceSetIS,pFaceSets,err_PETSc)
   end if
   call MPI_Bcast(mesh_boundaries,mesh_Nboundaries,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+  call parallelization_chkerr(err_MPI)

   call DMDestroy(globalMesh,err_PETSc)
   CHKERRQ(err_PETSc)
diff --git a/src/mesh/mesh_mech_FEM.f90 b/src/mesh/mesh_mech_FEM.f90
index f3c08c16c..5540c6ea2 100644
--- a/src/mesh/mesh_mech_FEM.f90
+++ b/src/mesh/mesh_mech_FEM.f90
@@ -461,7 +461,7 @@ subroutine FEM_mechanical_formResidual(dm_local,xx_local,f_local,dummy,err_PETSc
 ! evaluate constitutive response
   call utilities_constitutiveResponse(params%Delta_t,P_av,ForwardData)
   call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1_MPI_INTEGER_KIND,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,err_MPI)
-  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+  call parallelization_chkerr(err_MPI)
   ForwardData = .false.

 !--------------------------------------------------------------------------------------------------
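
Note on the helper: every hunk above swaps the inlined check
"if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'" for a call to
parallelization_chkerr, but the helper itself is not part of this patch. The
following is a minimal sketch of what it presumably does; the module location
(src/parallelization.f90) and the argument name are assumptions, only the
name and the replaced behavior come from the diff:

  ! hypothetical sketch of the helper assumed by the call sites above
  subroutine parallelization_chkerr(e)

    integer(MPI_INTEGER_KIND), intent(in) :: e                                 ! MPI return code

    if (e /= 0_MPI_INTEGER_KIND) error stop 'MPI error'                        ! same behavior as the removed inline checks

  end subroutine parallelization_chkerr

Centralizing the check shrinks every call site to one line and leaves a single
place to extend the diagnostics later (e.g. reporting the failing rank or the
MPI error string) without touching the call sites again.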