diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index c27c90bce..6da114938 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -93,6 +93,27 @@ test_mesh_GNU:
     - cd PRIVATE/testing/pytest
     - pytest -k 'compile and mesh' --basetemp ${TESTROOT}/compile_mesh_GNU
 
+test_grid_GNU-64bit:
+  stage: compile
+  script:
+    - module load Compiler/GNU/10 Libraries/PETSc/3.16.2/64bit
+    - cd PRIVATE/testing/pytest
+    - pytest -k 'compile and grid' --basetemp ${TESTROOT}/compile_grid_GNU-64bit
+
+test_mesh_GNU-64bit:
+  stage: compile
+  script:
+    - module load Compiler/GNU/10 Libraries/PETSc/3.16.2/64bit
+    - cd PRIVATE/testing/pytest
+    - pytest -k 'compile and mesh' --basetemp ${TESTROOT}/compile_mesh_GNU-64bit
+
+test_grid_IntelLLVM:
+  stage: compile
+  script:
+    - module load ${COMPILER_INTELLLVM} ${MPI_INTELLLVM} ${PETSC_INTELLLVM}
+    - cd PRIVATE/testing/pytest
+    - pytest -k 'compile and grid' --basetemp ${TESTROOT}/compile_grid_IntelLLVM
+
 test_mesh_IntelLLVM:
   stage: compile
   script:
@@ -121,7 +142,6 @@ test_Marc:
     - cd PRIVATE/testing/pytest
     - pytest -k 'compile and Marc' --basetemp ${TESTROOT}/compile_Marc
 
-
 setup_grid:
   stage: compile
   script:
diff --git a/install/MarcMentat/2020/Marc_tools/include_linux64.patch b/install/MarcMentat/2020/Marc_tools/include_linux64.patch
index c28b41b9e..9849584fa 100644
--- a/install/MarcMentat/2020/Marc_tools/include_linux64.patch
+++ b/install/MarcMentat/2020/Marc_tools/include_linux64.patch
@@ -12,17 +12,15 @@
  # AEM
  if test "$MARCDLLOUTDIR" = ""; then
-@@ -390,8 +395,8 @@
+@@ -390,7 +395,7 @@
   I8DEFINES=
   I8CDEFINES=
  else
 -I8FFLAGS="-i8"
--I8DEFINES="-DI64"
 +I8FFLAGS="-i8 -integer-size 64"
-+I8DEFINES="-DI64 -DINT=8"
+ I8DEFINES="-DI64"
   I8CDEFINES="-U_DOUBLE -D_SINGLE"
  fi
 
- 
 @@ -498,7 +503,7 @@
  PROFILE=" $PROFILE -pg"
  fi
diff --git a/install/MarcMentat/2021.2/Marc_tools/include_linux64.patch b/install/MarcMentat/2021.2/Marc_tools/include_linux64.patch
index 430d5bc61..5ff02dae6 100644
--- a/install/MarcMentat/2021.2/Marc_tools/include_linux64.patch
+++ b/install/MarcMentat/2021.2/Marc_tools/include_linux64.patch
@@ -12,17 +12,15 @@
  # AEM
  if test "$MARCDLLOUTDIR" = ""; then
-@@ -439,8 +444,8 @@
+@@ -439,7 +444,7 @@
   I8DEFINES=
   I8CDEFINES=
  else
 -I8FFLAGS="-i8"
--I8DEFINES="-DI64"
 +I8FFLAGS="-i8 -integer-size 64"
-+I8DEFINES="-DI64 -DINT=8"
+ I8DEFINES="-DI64"
   I8CDEFINES="-U_DOUBLE -D_SINGLE"
  fi
 
- 
 @@ -556,7 +561,7 @@
  PROFILE=" $PROFILE -pg"
  fi
diff --git a/install/MarcMentat/2021.3.1/Marc_tools/include_linux64.patch b/install/MarcMentat/2021.3.1/Marc_tools/include_linux64.patch
index 91e2b1b59..e1cc543c6 100644
--- a/install/MarcMentat/2021.3.1/Marc_tools/include_linux64.patch
+++ b/install/MarcMentat/2021.3.1/Marc_tools/include_linux64.patch
@@ -12,17 +12,15 @@
  # AEM
  if test "$MARCDLLOUTDIR" = ""; then
  DLLOUTDIR="$MARC_LIB"
-@@ -439,8 +444,8 @@ if test "$MARC_INTEGER_SIZE" = "i4" ; then
+@@ -439,7 +444,7 @@ if test "$MARC_INTEGER_SIZE" = "i4" ; then
   I8DEFINES=
   I8CDEFINES=
  else
 -I8FFLAGS="-i8"
--I8DEFINES="-DI64"
 +I8FFLAGS="-i8 -integer-size 64"
-+I8DEFINES="-DI64 -DINT=8"
+ I8DEFINES="-DI64"
   I8CDEFINES="-U_DOUBLE -D_SINGLE"
  fi
- 
 @@ -556,7 +561,7 @@ then
  PROFILE=" $PROFILE -pg"
  fi
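[Context note, not part of the diff] The Marc patches above move the integer width into the compiler flags ("-i8 -integer-size 64") and drop the "-DINT=8" preprocessor define, i.e. the build now promotes the Fortran default integer to 64 bit instead of steering a source-level kind parameter. The source-side counterpart is the "integer(pInt)" -> "integer" cleanup in the Fortran hunks below. A minimal sketch of the flag's effect (hypothetical demo, not DAMASK code):

    ! default_int.f90 - print the width of the default integer kind
    ! ifort default_int.f90                 -> 32 bits
    ! ifort -i8 -integer-size 64 ...        -> 64 bits
    ! (gfortran equivalent: -fdefault-integer-8)
    program default_int
      implicit none
      integer :: i
      print '(i0,a)', storage_size(i), ' bits per default integer'
    end program default_int
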
diff --git a/src/CPFEM.f90 b/src/CPFEM.f90
index 8f1217a88..8ee26ffe7 100644
--- a/src/CPFEM.f90
+++ b/src/CPFEM.f90
@@ -32,18 +32,18 @@ module CPFEM
   real(pReal), dimension (:,:,:,:), allocatable, private :: &
     CPFEM_dcsdE_knownGood                                      !< known good tangent
 
-  integer(pInt), public :: &
-    cycleCounter = 0_pInt                                      !< needs description
+  integer, public :: &
+    cycleCounter = 0                                           !< needs description
 
-  integer(pInt), parameter, public :: &
-    CPFEM_CALCRESULTS     = 2_pInt**0_pInt, &
-    CPFEM_AGERESULTS      = 2_pInt**1_pInt, &
-    CPFEM_BACKUPJACOBIAN  = 2_pInt**2_pInt, &
-    CPFEM_RESTOREJACOBIAN = 2_pInt**3_pInt
+  integer, parameter, public :: &
+    CPFEM_CALCRESULTS     = 2**0, &
+    CPFEM_AGERESULTS      = 2**1, &
+    CPFEM_BACKUPJACOBIAN  = 2**2, &
+    CPFEM_RESTOREJACOBIAN = 2**3
 
   type, private :: tNumerics
     integer :: &
-      iJacoStiffness                                            !< frequency of stiffness update
+      iJacoStiffness                                           !< frequency of stiffness update
   end type tNumerics
 
   type(tNumerics), private :: num
@@ -134,12 +134,12 @@ end subroutine CPFEM_init
 !--------------------------------------------------------------------------------------------------
 subroutine CPFEM_general(mode, ffn, ffn1, temperature_inp, dt, elFE, ip, cauchyStress, jacobian)
 
-  integer(pInt), intent(in) :: elFE, &                         !< FE element number
+  integer, intent(in) :: elFE, &                               !< FE element number
                                ip                              !< integration point number
   real(pReal), intent(in) :: dt                                !< time increment
   real(pReal), dimension (3,3), intent(in) :: ffn, &           !< deformation gradient for t=t0
                                               ffn1             !< deformation gradient for t=t1
-  integer(pInt), intent(in) :: mode                            !< computation mode  1: regular computation plus aging of results
+  integer, intent(in) :: mode                                  !< computation mode  1: regular computation plus aging of results
   real(pReal), intent(in) :: temperature_inp                   !< temperature
   real(pReal), dimension(6), intent(out) :: cauchyStress       !< stress as 6 vector
   real(pReal), dimension(6,6), intent(out) :: jacobian         !< jacobian as 66 tensor (Consistent tangent dcs/dE)
@@ -150,7 +150,7 @@ subroutine CPFEM_general(mode, ffn, ffn1, temperature_inp, dt, elFE, ip, cauchyS
   real(pReal), dimension (3,3,3,3) :: H_sym, &
                                       H
 
-  integer(pInt) elCP, &                                        ! crystal plasticity element number
+  integer elCP, &                                              ! crystal plasticity element number
           i, j, k, l, m, n, ph, homog, mySource,ce
 
   real(pReal), parameter :: ODD_STRESS = 1e15_pReal, &         !< return value for stress if terminallyIll
@@ -171,17 +171,17 @@ subroutine CPFEM_general(mode, ffn, ffn1, temperature_inp, dt, elFE, ip, cauchyS
     print'(a,/)', '#############################################'; flush (6)
   endif
 
-  if (iand(mode, CPFEM_BACKUPJACOBIAN) /= 0_pInt) &
+  if (iand(mode, CPFEM_BACKUPJACOBIAN) /= 0) &
     CPFEM_dcsde_knownGood = CPFEM_dcsde
-  if (iand(mode, CPFEM_RESTOREJACOBIAN) /= 0_pInt) &
+  if (iand(mode, CPFEM_RESTOREJACOBIAN) /= 0) &
     CPFEM_dcsde = CPFEM_dcsde_knownGood
 
-  if (iand(mode, CPFEM_AGERESULTS) /= 0_pInt) call CPFEM_forward
+  if (iand(mode, CPFEM_AGERESULTS) /= 0) call CPFEM_forward
 
   homogenization_F0(1:3,1:3,ce) = ffn
   homogenization_F(1:3,1:3,ce) = ffn1
 
-  if (iand(mode, CPFEM_CALCRESULTS) /= 0_pInt) then
+  if (iand(mode, CPFEM_CALCRESULTS) /= 0) then
 
     validCalculation: if (terminallyIll) then
       call random_number(rnd)
@@ -264,8 +264,8 @@ end subroutine CPFEM_forward
 !--------------------------------------------------------------------------------------------------
 subroutine CPFEM_results(inc,time)
 
-  integer(pInt), intent(in) :: inc
-  real(pReal),   intent(in) :: time
+  integer,     intent(in) :: inc
+  real(pReal), intent(in) :: time
 
   call results_openJobFile
   call results_addIncrement(inc,time)
diff --git a/src/DAMASK_Marc.f90 b/src/DAMASK_Marc.f90
index 0525db323..6e969ced4 100644
--- a/src/DAMASK_Marc.f90
+++ b/src/DAMASK_Marc.f90
@@ -223,9 +223,9 @@ subroutine hypela2(d,g,e,de,s,t,dt,ngens,m,nn,kcus,matus,ndi,nshear,disp, &
   integer :: computationMode, i, node, CPnodeID
   integer(pI32) :: defaultNumThreadsInt                        !< default value set by Marc
 
-  integer(pInt), save :: &
-    theInc   = -1_pInt, &                                      !< needs description
-    lastLovl =  0_pInt                                         !< lovl in previous call to marc hypela2
+  integer, save :: &
+    theInc   = -1, &                                           !< needs description
+    lastLovl =  0                                              !< lovl in previous call to marc hypela2
   real(pReal), save :: &
     theTime  = 0.0_pReal, &                                    !< needs description
     theDelta = 0.0_pReal
diff --git a/src/HDF5_utilities.f90 b/src/HDF5_utilities.f90
index e064997e5..d2076c2cc 100644
--- a/src/HDF5_utilities.f90
+++ b/src/HDF5_utilities.f90
@@ -1857,13 +1857,13 @@ subroutine initialize_read(dset_id, filespace_id, memspace_id, plist_id, aplist_
     localShape
   integer(HSIZE_T), intent(out), dimension(size(localShape,1)):: &
     myStart, &
-    globalShape                                                 !< shape of the dataset (all processes)
+    globalShape                                                !< shape of the dataset (all processes)
   integer(HID_T), intent(out) :: dset_id, filespace_id, memspace_id, plist_id, aplist_id
 
   integer, dimension(worldsize) :: &
     readSize                                                   !< contribution of all processes
-  integer :: ierr
   integer :: hdferr
+  integer(MPI_INTEGER_KIND) :: err_MPI
 
 !-------------------------------------------------------------------------------------------------
 ! creating a property list for transfer properties (is collective for MPI)
@@ -1877,8 +1877,8 @@ subroutine initialize_read(dset_id, filespace_id, memspace_id, plist_id, aplist_
   if (parallel) then
     call H5Pset_dxpl_mpio_f(plist_id, H5FD_MPIO_COLLECTIVE_F, hdferr)
     if(hdferr < 0) error stop 'HDF5 error'
-    call MPI_allreduce(MPI_IN_PLACE,readSize,worldsize,MPI_INT,MPI_SUM,PETSC_COMM_WORLD,ierr)        ! get total output size over each process
-    if (ierr /= 0) error stop 'MPI error'
+    call MPI_allreduce(MPI_IN_PLACE,readSize,worldsize,MPI_INTEGER,MPI_SUM,PETSC_COMM_WORLD,err_MPI) ! get total output size over each process
+    if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
   end if
 #endif
   myStart = int(0,HSIZE_T)
@@ -1956,7 +1956,8 @@ subroutine initialize_write(dset_id, filespace_id, memspace_id, plist_id, &
   integer, dimension(worldsize) :: writeSize                   !< contribution of all processes
 
   integer(HID_T) :: dcpl
-  integer :: ierr, hdferr
+  integer :: hdferr
+  integer(MPI_INTEGER_KIND) :: err_MPI
 
   integer(HSIZE_T), parameter :: chunkSize = 1024_HSIZE_T**2/8_HSIZE_T
 
@@ -1977,8 +1978,8 @@ subroutine initialize_write(dset_id, filespace_id, memspace_id, plist_id, &
   writeSize(worldrank+1) = int(myShape(ubound(myShape,1)))
 #ifdef PETSC
   if (parallel) then
-    call MPI_allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INT,MPI_SUM,PETSC_COMM_WORLD,ierr)        ! get total output size over each process
-    if (ierr /= 0) error stop 'MPI error'
+    call MPI_allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INTEGER,MPI_SUM,PETSC_COMM_WORLD,err_MPI) ! get total output size over each process
+    if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
   end if
 #endif
   myStart = int(0,HSIZE_T)
diff --git a/src/base64.f90 b/src/base64.f90
index 7bc00a1f9..448f57d2a 100644
--- a/src/base64.f90
+++ b/src/base64.f90
@@ -151,7 +151,7 @@ pure logical function validBase64(base64_str)
   l = len(base64_str,pI64)
   validBase64 = .true.
 
-  if(mod(l,4_pI64)/=0_pI64 .or. l < 4_pInt)                                   validBase64 = .false.
+  if(mod(l,4_pI64)/=0_pI64 .or. l < 4_pI64)                                   validBase64 = .false.
   if(verify(base64_str(:l-2_pI64),base64_encoding, kind=pI64) /= 0_pI64)      validBase64 = .false.
   if(verify(base64_str(l-1_pI64:),base64_encoding//'=',kind=pI64) /= 0_pI64)  validBase64 = .false.
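[Context note, not part of the diff] The HDF5 hunks above show the error-handling pattern that every remaining file repeats: the catch-all "integer :: ierr" becomes "integer(MPI_INTEGER_KIND) :: err_MPI", the C datatype handle MPI_INT is replaced by the Fortran datatype MPI_INTEGER, count/root literals get the matching kind suffix, and every call is checked immediately. A reduced sketch of the pattern, assuming an MPI library whose "mpi" module provides the MPI-4 constant MPI_INTEGER_KIND:

    subroutine sum_over_ranks(x)
      use mpi                                        ! assumed to provide MPI_INTEGER_KIND (MPI >= 4.0)
      implicit none
      integer, dimension(:), intent(inout) :: x
      integer(MPI_INTEGER_KIND) :: err_MPI           ! kind matches the MPI ABI, not the default integer

      call MPI_Allreduce(MPI_IN_PLACE,x,size(x,kind=MPI_INTEGER_KIND),MPI_INTEGER, &
                         MPI_SUM,MPI_COMM_WORLD,err_MPI)
      if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'  ! fail fast instead of ignoring ierr
    end subroutine sum_over_ranks
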
diff --git a/src/grid/DAMASK_grid.f90 b/src/grid/DAMASK_grid.f90
index b1da0c2a2..6377748be 100644
--- a/src/grid/DAMASK_grid.f90
+++ b/src/grid/DAMASK_grid.f90
@@ -75,7 +75,6 @@ program DAMASK_grid
   integer :: &
     i, j, m, field, &
     errorID = 0, &
-    ierr,&
    cutBackLevel = 0, &                                         !< cut back level \f$ t = \frac{t_{inc}}{2^l} \f$
     stepFraction = 0, &                                        !< fraction of current time interval
     l = 0, &                                                   !< current load case
@@ -86,6 +85,7 @@ program DAMASK_grid
     nActiveFields = 0, &
     maxCutBack, &                                              !< max number of cut backs
     stagItMax                                                  !< max number of field level staggered iterations
+  integer(MPI_INTEGER_KIND) :: err_MPI
   character(len=pStringLen) :: &
     incInfo
 
@@ -455,23 +455,23 @@ program DAMASK_grid
               print'(/,1x,a,i0,a)', 'increment ', totalIncsCounter, ' NOT converged'
             endif; flush(IO_STDOUT)
 
-            call MPI_Allreduce(interface_SIGUSR1,signal,1,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,ierr)
-            if (ierr /= 0) error stop 'MPI error'
+            call MPI_Allreduce(interface_SIGUSR1,signal,1_MPI_INTEGER_KIND,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,err_MPI)
+            if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
             if (mod(inc,loadCases(l)%f_out) == 0 .or. signal) then
               print'(/,1x,a)', '... writing results to file ...............................................'
               flush(IO_STDOUT)
               call CPFEM_results(totalIncsCounter,t)
             endif
             if (signal) call interface_setSIGUSR1(.false.)
-            call MPI_Allreduce(interface_SIGUSR2,signal,1,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,ierr)
-            if (ierr /= 0) error stop 'MPI error'
+            call MPI_Allreduce(interface_SIGUSR2,signal,1_MPI_INTEGER_KIND,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,err_MPI)
+            if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
             if (mod(inc,loadCases(l)%f_restart) == 0 .or. signal) then
               call mechanical_restartWrite
               call CPFEM_restartWrite
             endif
             if (signal) call interface_setSIGUSR2(.false.)
-            call MPI_Allreduce(interface_SIGTERM,signal,1,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,ierr)
-            if (ierr /= 0) error stop 'MPI error'
+            call MPI_Allreduce(interface_SIGTERM,signal,1_MPI_INTEGER_KIND,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,err_MPI)
+            if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
             if (signal) exit loadCaseLooping
           endif skipping
diff --git a/src/grid/discretization_grid.f90 b/src/grid/discretization_grid.f90
index ddc9efd80..738206f9a 100644
--- a/src/grid/discretization_grid.f90
+++ b/src/grid/discretization_grid.f90
@@ -27,14 +27,14 @@ module discretization_grid
   private
 
   integer, dimension(3), public, protected :: &
-    grid                                                        !< (global) grid
+    grid                                                       !< (global) grid
   integer, public, protected :: &
-    grid3, &                                                    !< (local) grid in 3rd direction
+    grid3, &                                                   !< (local) grid in 3rd direction
     grid3Offset                                                !< (local) grid offset in 3rd direction
   real(pReal), dimension(3), public, protected :: &
-    geomSize                                                    !< (global) physical size
+    geomSize                                                   !< (global) physical size
   real(pReal), public, protected :: &
-    size3, &                                                    !< (local) size in 3rd direction
+    size3, &                                                   !< (local) size in 3rd direction
     size3offset                                                !< (local) size offset in 3rd direction
 
   public :: &
@@ -62,8 +62,8 @@ subroutine discretization_grid_init(restart)
 
   integer :: &
     j, &
-    debug_element, debug_ip, &
-    ierr
+    debug_element, debug_ip
+  integer(MPI_INTEGER_KIND) :: err_MPI
   integer(C_INTPTR_T) :: &
     devNull, z, z_offset
   integer, dimension(worldsize) :: &
@@ -88,13 +88,13 @@ subroutine discretization_grid_init(restart)
 
   end if
 
-  call MPI_Bcast(grid,3,MPI_INTEGER,0,MPI_COMM_WORLD, ierr)
-  if (ierr /= 0) error stop 'MPI error'
+  call MPI_Bcast(grid,3_MPI_INTEGER_KIND,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD, err_MPI)
+  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
   if (grid(1) < 2) call IO_error(844, ext_msg='cells(1) must be larger than 1')
-  call MPI_Bcast(geomSize,3,MPI_DOUBLE,0,MPI_COMM_WORLD, ierr)
-  if (ierr /= 0) error stop 'MPI error'
-  call MPI_Bcast(origin,3,MPI_DOUBLE,0,MPI_COMM_WORLD, ierr)
-  if (ierr /= 0) error stop 'MPI error'
+  call MPI_Bcast(geomSize,3_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD, err_MPI)
+  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+  call MPI_Bcast(origin,3_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD, err_MPI)
+  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
 
   print'(/,1x,a,3(i12,1x))', 'cells  a b c: ', grid
   print'(1x,a,3(es12.5,1x))', 'size   x y z: ', geomSize
@@ -118,14 +118,17 @@ subroutine discretization_grid_init(restart)
   myGrid = [grid(1:2),grid3]
   mySize = [geomSize(1:2),size3]
 
-  call MPI_Gather(product(grid(1:2))*grid3Offset,1,MPI_INTEGER,displs, 1,MPI_INTEGER,0,MPI_COMM_WORLD,ierr)
-  if (ierr /= 0) error stop 'MPI error'
-  call MPI_Gather(product(myGrid), 1,MPI_INTEGER,sendcounts,1,MPI_INTEGER,0,MPI_COMM_WORLD,ierr)
-  if (ierr /= 0) error stop 'MPI error'
+  call MPI_Gather(product(grid(1:2))*grid3Offset, 1_MPI_INTEGER_KIND,MPI_INTEGER,displs,&
+                  1_MPI_INTEGER_KIND,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
+  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+  call MPI_Gather(product(myGrid),                1_MPI_INTEGER_KIND,MPI_INTEGER,sendcounts,&
+                  1_MPI_INTEGER_KIND,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
+  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
 
   allocate(materialAt(product(myGrid)))
-  call MPI_Scatterv(materialAt_global,sendcounts,displs,MPI_INTEGER,materialAt,size(materialAt),MPI_INTEGER,0,MPI_COMM_WORLD,ierr)
-  if (ierr /= 0) error stop 'MPI error'
+  call MPI_Scatterv(materialAt_global,sendcounts,displs,MPI_INTEGER,materialAt,size(materialAt),&
+                    MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
+  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
 
   call discretization_init(materialAt, &
                            IPcoordinates0(myGrid,mySize,grid3Offset), &
@@ -307,7 +310,7 @@ subroutine readVTI(grid,geomSize,origin,material, &
         case('Float64')
           as_Int = int(prec_bytesToC_DOUBLE (asBytes(base64_str,headerType,compressed)))
         case default
-          call IO_error(844_pInt,ext_msg='unknown data type: '//trim(dataType))
+          call IO_error(844,ext_msg='unknown data type: '//trim(dataType))
       end select
 
   end function as_Int
@@ -335,7 +338,7 @@ subroutine readVTI(grid,geomSize,origin,material, &
         case('Float64')
           as_pReal = real(prec_bytesToC_DOUBLE (asBytes(base64_str,headerType,compressed)),pReal)
         case default
-          call IO_error(844_pInt,ext_msg='unknown data type: '//trim(dataType))
+          call IO_error(844,ext_msg='unknown data type: '//trim(dataType))
       end select
 
   end function as_pReal
diff --git a/src/grid/grid_damage_spectral.f90 b/src/grid/grid_damage_spectral.f90
index ef229368e..8955f9d66 100644
--- a/src/grid/grid_damage_spectral.f90
+++ b/src/grid/grid_damage_spectral.f90
@@ -69,7 +69,8 @@ subroutine grid_damage_spectral_init()
   PetscInt, dimension(0:worldsize-1) :: localK
   DM :: damage_grid
   Vec :: uBound, lBound
-  PetscErrorCode :: ierr
+  integer(MPI_INTEGER_KIND) :: err_MPI
+  PetscErrorCode :: err_PETSc
   class(tNode), pointer :: &
     num_grid, &
     num_generic
@@ -99,50 +100,51 @@ subroutine grid_damage_spectral_init()
 !--------------------------------------------------------------------------------------------------
 ! set default and user defined options for PETSc
   call PetscOptionsInsertString(PETSC_NULL_OPTIONS,'-damage_snes_type newtonls -damage_snes_mf &
-                               &-damage_snes_ksp_ew -damage_ksp_type fgmres',ierr)
-  CHKERRQ(ierr)
-  call PetscOptionsInsertString(PETSC_NULL_OPTIONS,num_grid%get_asString('petsc_options',defaultVal=''),ierr)
-  CHKERRQ(ierr)
+                               &-damage_snes_ksp_ew -damage_ksp_type fgmres',err_PETSc)
+  CHKERRQ(err_PETSc)
+  call PetscOptionsInsertString(PETSC_NULL_OPTIONS,num_grid%get_asString('petsc_options',defaultVal=''),err_PETSc)
+  CHKERRQ(err_PETSc)
 
 !--------------------------------------------------------------------------------------------------
 ! initialize solver specific parts of PETSc
-  call SNESCreate(PETSC_COMM_WORLD,damage_snes,ierr); CHKERRQ(ierr)
-  call SNESSetOptionsPrefix(damage_snes,'damage_',ierr);CHKERRQ(ierr)
-  localK            = 0
-  localK(worldrank) = grid3
-  call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,ierr)
+  call SNESCreate(PETSC_COMM_WORLD,damage_snes,err_PETSc); CHKERRQ(err_PETSc)
+  call SNESSetOptionsPrefix(damage_snes,'damage_',err_PETSc);CHKERRQ(err_PETSc)
+  localK            = 0_pPetscInt
+  localK(worldrank) = int(grid3,pPetscInt)
+  call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,err_MPI)
+  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
   call DMDACreate3D(PETSC_COMM_WORLD, &
          DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, &              ! cut off stencil at boundary
          DMDA_STENCIL_BOX, &                                                  ! Moore (26) neighborhood around central point
-         grid(1),grid(2),grid(3), &                                           ! global grid
-         1, 1, worldsize, &
-         1, 0, &                                                              ! #dof (damage phase field), ghost boundary width (domain overlap)
-         [grid(1)],[grid(2)],localK, &                                        ! local grid
-         damage_grid,ierr)                                                    ! handle, error
-  CHKERRQ(ierr)
-  call SNESSetDM(damage_snes,damage_grid,ierr); CHKERRQ(ierr)                 ! connect snes to da
-  call DMsetFromOptions(damage_grid,ierr); CHKERRQ(ierr)
-  call DMsetUp(damage_grid,ierr); CHKERRQ(ierr)
-  call DMCreateGlobalVector(damage_grid,solution_vec,ierr); CHKERRQ(ierr)     ! global solution vector (grid x 1, i.e. every def grad tensor)
-  call DMDASNESSetFunctionLocal(damage_grid,INSERT_VALUES,formResidual,PETSC_NULL_SNES,ierr) ! residual vector of same shape as solution vector
-  CHKERRQ(ierr)
-  call SNESSetFromOptions(damage_snes,ierr); CHKERRQ(ierr)                    ! pull it all together with additional CLI arguments
-  call SNESGetType(damage_snes,snes_type,ierr); CHKERRQ(ierr)
+         int(grid(1),pPetscInt),int(grid(2),pPetscInt),int(grid(3),pPetscInt), & ! global grid
+         1_pPetscInt, 1_pPetscInt, int(worldsize,pPetscInt), &
+         1_pPetscInt, 0_pPetscInt, &                                          ! #dof (phi, scalar), ghost boundary width (domain overlap)
+         [int(grid(1),pPetscInt)],[int(grid(2),pPetscInt)],localK, &          ! local grid
+         damage_grid,err_PETSc)                                               ! handle, error
+  CHKERRQ(err_PETSc)
+  call SNESSetDM(damage_snes,damage_grid,err_PETSc); CHKERRQ(err_PETSc)       ! connect snes to da
+  call DMsetFromOptions(damage_grid,err_PETSc); CHKERRQ(err_PETSc)
+  call DMsetUp(damage_grid,err_PETSc); CHKERRQ(err_PETSc)
+  call DMCreateGlobalVector(damage_grid,solution_vec,err_PETSc); CHKERRQ(err_PETSc) ! global solution vector (grid x 1, i.e. every def grad tensor)
+  call DMDASNESSetFunctionLocal(damage_grid,INSERT_VALUES,formResidual,PETSC_NULL_SNES,err_PETSc) ! residual vector of same shape as solution vector
+  CHKERRQ(err_PETSc)
+  call SNESSetFromOptions(damage_snes,err_PETSc); CHKERRQ(err_PETSc)          ! pull it all together with additional CLI arguments
+  call SNESGetType(damage_snes,snes_type,err_PETSc); CHKERRQ(err_PETSc)
   if (trim(snes_type) == 'vinewtonrsls' .or. &
       trim(snes_type) == 'vinewtonssls') then
-    call DMGetGlobalVector(damage_grid,lBound,ierr); CHKERRQ(ierr)
-    call DMGetGlobalVector(damage_grid,uBound,ierr); CHKERRQ(ierr)
-    call VecSet(lBound,0.0_pReal,ierr); CHKERRQ(ierr)
-    call VecSet(uBound,1.0_pReal,ierr); CHKERRQ(ierr)
-    call SNESVISetVariableBounds(damage_snes,lBound,uBound,ierr)              ! variable bounds for variational inequalities like contact mechanics, damage etc.
-    call DMRestoreGlobalVector(damage_grid,lBound,ierr); CHKERRQ(ierr)
-    call DMRestoreGlobalVector(damage_grid,uBound,ierr); CHKERRQ(ierr)
+    call DMGetGlobalVector(damage_grid,lBound,err_PETSc); CHKERRQ(err_PETSc)
+    call DMGetGlobalVector(damage_grid,uBound,err_PETSc); CHKERRQ(err_PETSc)
+    call VecSet(lBound,0.0_pReal,err_PETSc); CHKERRQ(err_PETSc)
+    call VecSet(uBound,1.0_pReal,err_PETSc); CHKERRQ(err_PETSc)
+    call SNESVISetVariableBounds(damage_snes,lBound,uBound,err_PETSc)         ! variable bounds for variational inequalities like contact mechanics, damage etc.
+    call DMRestoreGlobalVector(damage_grid,lBound,err_PETSc); CHKERRQ(err_PETSc)
+    call DMRestoreGlobalVector(damage_grid,uBound,err_PETSc); CHKERRQ(err_PETSc)
   end if
 
 !--------------------------------------------------------------------------------------------------
 ! init fields
-  call DMDAGetCorners(damage_grid,xstart,ystart,zstart,xend,yend,zend,ierr)
-  CHKERRQ(ierr)
+  call DMDAGetCorners(damage_grid,xstart,ystart,zstart,xend,yend,zend,err_PETSc)
+  CHKERRQ(err_PETSc)
   xend = xstart + xend - 1
   yend = ystart + yend - 1
   zend = zstart + zend - 1
@@ -150,7 +152,7 @@ subroutine grid_damage_spectral_init()
   allocate(phi_lastInc(grid(1),grid(2),grid3), source=1.0_pReal)
   allocate(phi_stagInc(grid(1),grid(2),grid3), source=1.0_pReal)
 
-  call VecSet(solution_vec,1.0_pReal,ierr); CHKERRQ(ierr)
+  call VecSet(solution_vec,1.0_pReal,err_PETSc); CHKERRQ(err_PETSc)
 
   call updateReference
 
@@ -169,7 +171,8 @@ function grid_damage_spectral_solution(Delta_t) result(solution)
 
   PetscInt  :: devNull
   PetscReal :: phi_min, phi_max, stagNorm
-  PetscErrorCode :: ierr
+  integer(MPI_INTEGER_KIND) :: err_MPI
+  PetscErrorCode :: err_PETSc
   SNESConvergedReason :: reason
 
   solution%converged =.false.
@@ -178,8 +181,10 @@ function grid_damage_spectral_solution(Delta_t) result(solution)
 ! set module wide availabe data
   params%Delta_t = Delta_t
 
-  call SNESSolve(damage_snes,PETSC_NULL_VEC,solution_vec,ierr); CHKERRQ(ierr)
-  call SNESGetConvergedReason(damage_snes,reason,ierr); CHKERRQ(ierr)
+  call SNESSolve(damage_snes,PETSC_NULL_VEC,solution_vec,err_PETSc)
+  CHKERRQ(err_PETSc)
+  call SNESGetConvergedReason(damage_snes,reason,err_PETSc)
+  CHKERRQ(err_PETSc)
 
   if (reason < 1) then
     solution%converged = .false.
@@ -189,9 +194,11 @@ function grid_damage_spectral_solution(Delta_t) result(solution)
     solution%iterationsNeeded = totalIter
   end if
   stagNorm = maxval(abs(phi_current - phi_stagInc))
-  call MPI_Allreduce(MPI_IN_PLACE,stagNorm,1,MPI_DOUBLE,MPI_MAX,MPI_COMM_WORLD,ierr)
+  call MPI_Allreduce(MPI_IN_PLACE,stagNorm,1_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_MAX,MPI_COMM_WORLD,err_MPI)
+  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
   solution%stagConverged = stagNorm < max(num%eps_damage_atol, num%eps_damage_rtol*maxval(phi_current))
-  call MPI_Allreduce(MPI_IN_PLACE,solution%stagConverged,1,MPI_LOGICAL,MPI_LAND,MPI_COMM_WORLD,ierr)
+  call MPI_Allreduce(MPI_IN_PLACE,solution%stagConverged,1_MPI_INTEGER_KIND,MPI_LOGICAL,MPI_LAND,MPI_COMM_WORLD,err_MPI)
+  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
   phi_stagInc = phi_current
 
 !--------------------------------------------------------------------------------------------------
@@ -202,8 +209,8 @@ function grid_damage_spectral_solution(Delta_t) result(solution)
     call homogenization_set_phi(phi_current(i,j,k),ce)
   end do; end do; end do
 
-  call VecMin(solution_vec,devNull,phi_min,ierr); CHKERRQ(ierr)
-  call VecMax(solution_vec,devNull,phi_max,ierr); CHKERRQ(ierr)
+  call VecMin(solution_vec,devNull,phi_min,err_PETSc); CHKERRQ(err_PETSc)
+  call VecMax(solution_vec,devNull,phi_max,err_PETSc); CHKERRQ(err_PETSc)
   if (solution%converged) &
     print'(/,1x,a)', '... nonlocal damage converged .....................................'
   print'(/,1x,a,f8.6,2x,f8.6,2x,e11.4)', 'Minimum|Maximum|Delta Damage      = ', phi_min, phi_max, stagNorm
@@ -222,17 +229,19 @@ subroutine grid_damage_spectral_forward(cutBack)
   integer :: i, j, k, ce
   DM :: dm_local
   PetscScalar, dimension(:,:,:), pointer :: x_scal
-  PetscErrorCode :: ierr
+  PetscErrorCode :: err_PETSc
 
   if (cutBack) then
     phi_current = phi_lastInc
     phi_stagInc = phi_lastInc
 !--------------------------------------------------------------------------------------------------
 ! reverting damage field state
-    call SNESGetDM(damage_snes,dm_local,ierr); CHKERRQ(ierr)
-    call DMDAVecGetArrayF90(dm_local,solution_vec,x_scal,ierr); CHKERRQ(ierr) !< get the data out of PETSc to work with
+    call SNESGetDM(damage_snes,dm_local,err_PETSc); CHKERRQ(err_PETSc)
+    call DMDAVecGetArrayF90(dm_local,solution_vec,x_scal,err_PETSc)           !< get the data out of PETSc to work with
+    CHKERRQ(err_PETSc)
     x_scal(xstart:xend,ystart:yend,zstart:zend) = phi_current
-    call DMDAVecRestoreArrayF90(dm_local,solution_vec,x_scal,ierr); CHKERRQ(ierr)
+    call DMDAVecRestoreArrayF90(dm_local,solution_vec,x_scal,err_PETSc)
+    CHKERRQ(err_PETSc)
     ce = 0
     do k = 1, grid3;  do j = 1, grid(2);  do i = 1,grid(1)
       ce = ce + 1
@@ -249,7 +258,7 @@ end subroutine grid_damage_spectral_forward
 !--------------------------------------------------------------------------------------------------
 !> @brief forms the spectral damage residual vector
 !--------------------------------------------------------------------------------------------------
-subroutine formResidual(in,x_scal,f_scal,dummy,ierr)
+subroutine formResidual(in,x_scal,f_scal,dummy,dummy_err)
 
   DMDALocalInfo, dimension(DMDA_LOCAL_INFO_SIZE) :: &
     in
@@ -260,7 +269,7 @@ subroutine formResidual(in,x_scal,f_scal,dummy,ierr)
     X_RANGE,Y_RANGE,Z_RANGE), intent(out) :: &
     f_scal
   PetscObject :: dummy
-  PetscErrorCode :: ierr
+  PetscErrorCode :: dummy_err
 
   integer :: i, j, k, ce
 
@@ -311,7 +320,8 @@ end subroutine formResidual
 !--------------------------------------------------------------------------------------------------
 subroutine updateReference()
 
-  integer :: ce,ierr
+  integer :: ce
+  integer(MPI_INTEGER_KIND) :: err_MPI
 
   K_ref = 0.0_pReal
@@ -322,9 +332,11 @@ subroutine updateReference()
   end do
 
   K_ref = K_ref*wgt
-  call MPI_Allreduce(MPI_IN_PLACE,K_ref,9,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr)
+  call MPI_Allreduce(MPI_IN_PLACE,K_ref,9_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI)
+  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
   mu_ref = mu_ref*wgt
-  call MPI_Allreduce(MPI_IN_PLACE,mu_ref,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr)
+  call MPI_Allreduce(MPI_IN_PLACE,mu_ref,1_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI)
+  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
 
 end subroutine updateReference
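[Context note, not part of the diff] In the PETSc-based solver modules the former catch-all "ierr" is split into two variables whose kinds match the respective API: "PetscErrorCode :: err_PETSc" for PETSc calls, checked with CHKERRQ, and "integer(MPI_INTEGER_KIND) :: err_MPI" for MPI calls, checked explicitly. The two libraries are free to define different integer kinds, so sharing one default-kind variable was only correct by accident. Shape of the pattern (a sketch only, assuming the standard PETSc Fortran bindings):

    PetscErrorCode            :: err_PETSc   ! PETSc's own integer kind
    integer(MPI_INTEGER_KIND) :: err_MPI     ! MPI's integer kind

    call SNESCreate(PETSC_COMM_WORLD,snes,err_PETSc)
    CHKERRQ(err_PETSc)                       ! PETSc macro: report the error and propagate it

    call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,err_MPI)
    if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
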
diff --git a/src/grid/grid_mech_FEM.f90 b/src/grid/grid_mech_FEM.f90
index e1dc3dabe..010f11cea 100644
--- a/src/grid/grid_mech_FEM.f90
+++ b/src/grid/grid_mech_FEM.f90
@@ -107,7 +107,8 @@ subroutine grid_mechanical_FEM_init
                      1.0_pReal,-1.0_pReal,-1.0_pReal,-1.0_pReal, &
                      1.0_pReal, 1.0_pReal, 1.0_pReal, 1.0_pReal], [4,8])
   real(pReal), dimension(3,3,3,3) :: devNull
-  PetscErrorCode :: ierr
+  PetscErrorCode :: err_PETSc
+  integer(MPI_INTEGER_KIND) :: err_MPI
   PetscScalar, pointer, dimension(:,:,:,:) :: &
     u_current,u_lastInc
   PetscInt, dimension(0:worldsize-1) :: localK
@@ -145,12 +146,11 @@ subroutine grid_mechanical_FEM_init
 ! set default and user defined options for PETSc
   call PetscOptionsInsertString(PETSC_NULL_OPTIONS, &
                                 '-mechanical_snes_type newtonls -mechanical_ksp_type fgmres &
-                                &-mechanical_ksp_max_it 25 -mechanical_pc_type ml &
-                                &-mechanical_mg_levels_ksp_type chebyshev', &
-                                ierr)
-  CHKERRQ(ierr)
-  call PetscOptionsInsertString(PETSC_NULL_OPTIONS,num_grid%get_asString('petsc_options',defaultVal=''),ierr)
-  CHKERRQ(ierr)
+                                &-mechanical_ksp_max_it 25 -mechanical_mg_levels_ksp_type chebyshev', &
+                                err_PETSc)
+  CHKERRQ(err_PETSc)
+  call PetscOptionsInsertString(PETSC_NULL_OPTIONS,num_grid%get_asString('petsc_options',defaultVal=''),err_PETSc)
+  CHKERRQ(err_PETSc)
 
 !--------------------------------------------------------------------------------------------------
 ! allocate global fields
@@ -160,59 +160,60 @@ subroutine grid_mechanical_FEM_init
 
 !--------------------------------------------------------------------------------------------------
 ! initialize solver specific parts of PETSc
-  call SNESCreate(PETSC_COMM_WORLD,mechanical_snes,ierr)
-  CHKERRQ(ierr)
-  call SNESSetOptionsPrefix(mechanical_snes,'mechanical_',ierr)
-  CHKERRQ(ierr)
-  localK            = 0
-  localK(worldrank) = grid3
-  call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,ierr)
+  call SNESCreate(PETSC_COMM_WORLD,mechanical_snes,err_PETSc)
+  CHKERRQ(err_PETSc)
+  call SNESSetOptionsPrefix(mechanical_snes,'mechanical_',err_PETSc)
+  CHKERRQ(err_PETSc)
+  localK            = 0_pPetscInt
+  localK(worldrank) = int(grid3,pPetscInt)
+  call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,err_MPI)
+  if(err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
   call DMDACreate3d(PETSC_COMM_WORLD, &
          DM_BOUNDARY_PERIODIC, DM_BOUNDARY_PERIODIC, DM_BOUNDARY_PERIODIC, &
          DMDA_STENCIL_BOX, &
-         grid(1),grid(2),grid(3), &
-         1, 1, worldsize, &
-         3, 1, &
-         [grid(1)],[grid(2)],localK, &
-         mechanical_grid,ierr)
-  CHKERRQ(ierr)
-  call SNESSetDM(mechanical_snes,mechanical_grid,ierr)
-  CHKERRQ(ierr)
-  call DMsetFromOptions(mechanical_grid,ierr)
-  CHKERRQ(ierr)
-  call DMsetUp(mechanical_grid,ierr)
-  CHKERRQ(ierr)
-  call DMDASetUniformCoordinates(mechanical_grid,0.0_pReal,geomSize(1),0.0_pReal,geomSize(2),0.0_pReal,geomSize(3),ierr)
-  CHKERRQ(ierr)
-  call DMCreateGlobalVector(mechanical_grid,solution_current,ierr)
-  CHKERRQ(ierr)
-  call DMCreateGlobalVector(mechanical_grid,solution_lastInc,ierr)
-  CHKERRQ(ierr)
-  call DMCreateGlobalVector(mechanical_grid,solution_rate   ,ierr)
-  CHKERRQ(ierr)
-  call DMSNESSetFunctionLocal(mechanical_grid,formResidual,PETSC_NULL_SNES,ierr)
-  CHKERRQ(ierr)
-  call DMSNESSetJacobianLocal(mechanical_grid,formJacobian,PETSC_NULL_SNES,ierr)
-  CHKERRQ(ierr)
-  call SNESSetConvergenceTest(mechanical_snes,converged,PETSC_NULL_SNES,PETSC_NULL_FUNCTION,ierr)   ! specify custom convergence check function "_converged"
-  CHKERRQ(ierr)
-  call SNESSetMaxLinearSolveFailures(mechanical_snes, huge(1), ierr)          ! ignore linear solve failures
-  CHKERRQ(ierr)
-  call SNESSetFromOptions(mechanical_snes,ierr)                               ! pull it all together with additional cli arguments
-  CHKERRQ(ierr)
+         int(grid(1),pPetscInt),int(grid(2),pPetscInt),int(grid(3),pPetscInt), & ! global grid
+         1_pPetscInt, 1_pPetscInt, int(worldsize,pPetscInt), &
+         3_pPetscInt, 1_pPetscInt, &                                          ! #dof (u, vector), ghost boundary width (domain overlap)
+         [int(grid(1),pPetscInt)],[int(grid(2),pPetscInt)],localK, &          ! local grid
+         mechanical_grid,err_PETSc)
+  CHKERRQ(err_PETSc)
+  call SNESSetDM(mechanical_snes,mechanical_grid,err_PETSc)
+  CHKERRQ(err_PETSc)
+  call DMsetFromOptions(mechanical_grid,err_PETSc)
+  CHKERRQ(err_PETSc)
+  call DMsetUp(mechanical_grid,err_PETSc)
+  CHKERRQ(err_PETSc)
+  call DMDASetUniformCoordinates(mechanical_grid,0.0_pReal,geomSize(1),0.0_pReal,geomSize(2),0.0_pReal,geomSize(3),err_PETSc)
+  CHKERRQ(err_PETSc)
+  call DMCreateGlobalVector(mechanical_grid,solution_current,err_PETSc)
+  CHKERRQ(err_PETSc)
+  call DMCreateGlobalVector(mechanical_grid,solution_lastInc,err_PETSc)
+  CHKERRQ(err_PETSc)
+  call DMCreateGlobalVector(mechanical_grid,solution_rate   ,err_PETSc)
+  CHKERRQ(err_PETSc)
+  call DMSNESSetFunctionLocal(mechanical_grid,formResidual,PETSC_NULL_SNES,err_PETSc)
+  CHKERRQ(err_PETSc)
+  call DMSNESSetJacobianLocal(mechanical_grid,formJacobian,PETSC_NULL_SNES,err_PETSc)
+  CHKERRQ(err_PETSc)
+  call SNESSetConvergenceTest(mechanical_snes,converged,PETSC_NULL_SNES,PETSC_NULL_FUNCTION,err_PETSc) ! specify custom convergence check function "_converged"
+  CHKERRQ(err_PETSc)
+  call SNESSetMaxLinearSolveFailures(mechanical_snes, huge(1_pPetscInt), err_PETSc) ! ignore linear solve failures
+  CHKERRQ(err_PETSc)
+  call SNESSetFromOptions(mechanical_snes,err_PETSc)                          ! pull it all together with additional cli arguments
+  CHKERRQ(err_PETSc)
 
 !--------------------------------------------------------------------------------------------------
 ! init fields
-  call VecSet(solution_current,0.0_pReal,ierr);CHKERRQ(ierr)
-  call VecSet(solution_lastInc,0.0_pReal,ierr);CHKERRQ(ierr)
-  call VecSet(solution_rate   ,0.0_pReal,ierr);CHKERRQ(ierr)
-  call DMDAVecGetArrayF90(mechanical_grid,solution_current,u_current,ierr)
-  CHKERRQ(ierr)
-  call DMDAVecGetArrayF90(mechanical_grid,solution_lastInc,u_lastInc,ierr)
-  CHKERRQ(ierr)
+  call VecSet(solution_current,0.0_pReal,err_PETSc);CHKERRQ(err_PETSc)
+  call VecSet(solution_lastInc,0.0_pReal,err_PETSc);CHKERRQ(err_PETSc)
+  call VecSet(solution_rate   ,0.0_pReal,err_PETSc);CHKERRQ(err_PETSc)
+  call DMDAVecGetArrayF90(mechanical_grid,solution_current,u_current,err_PETSc)
+  CHKERRQ(err_PETSc)
+  call DMDAVecGetArrayF90(mechanical_grid,solution_lastInc,u_lastInc,err_PETSc)
+  CHKERRQ(err_PETSc)
 
-  call DMDAGetCorners(mechanical_grid,xstart,ystart,zstart,xend,yend,zend,ierr) ! local grid extent
-  CHKERRQ(ierr)
+  call DMDAGetCorners(mechanical_grid,xstart,ystart,zstart,xend,yend,zend,err_PETSc) ! local grid extent
+  CHKERRQ(err_PETSc)
   xend = xstart+xend-1
   yend = ystart+yend-1
   zend = zstart+zend-1
@@ -240,17 +241,17 @@ subroutine grid_mechanical_FEM_init
     groupHandle = HDF5_openGroup(fileHandle,'solver')
 
     call HDF5_read(P_aim,groupHandle,'P_aim',.false.)
-    call MPI_Bcast(P_aim,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
-    if(ierr /=0) error stop 'MPI error'
+    call MPI_Bcast(P_aim,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
+    if(err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
     call HDF5_read(F_aim,groupHandle,'F_aim',.false.)
-    call MPI_Bcast(F_aim,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
-    if(ierr /=0) error stop 'MPI error'
+    call MPI_Bcast(F_aim,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
+    if(err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
     call HDF5_read(F_aim_lastInc,groupHandle,'F_aim_lastInc',.false.)
-    call MPI_Bcast(F_aim_lastInc,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
-    if(ierr /=0) error stop 'MPI error'
+    call MPI_Bcast(F_aim_lastInc,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
+    if(err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
     call HDF5_read(F_aimDot,groupHandle,'F_aimDot',.false.)
-    call MPI_Bcast(F_aimDot,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
-    if(ierr /=0) error stop 'MPI error'
+    call MPI_Bcast(F_aimDot,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
+    if(err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
     call HDF5_read(F,groupHandle,'F')
     call HDF5_read(F_lastInc,groupHandle,'F_lastInc')
     call HDF5_read(u_current,groupHandle,'u')
@@ -266,19 +267,19 @@ subroutine grid_mechanical_FEM_init
   call utilities_constitutiveResponse(P_current,P_av,C_volAvg,devNull, &      ! stress field, stress avg, global average of stiffness and (min+max)/2
                                       F, &                                    ! target F
                                       0.0_pReal)                              ! time increment
-  call DMDAVecRestoreArrayF90(mechanical_grid,solution_current,u_current,ierr)
-  CHKERRQ(ierr)
-  call DMDAVecRestoreArrayF90(mechanical_grid,solution_lastInc,u_lastInc,ierr)
-  CHKERRQ(ierr)
+  call DMDAVecRestoreArrayF90(mechanical_grid,solution_current,u_current,err_PETSc)
+  CHKERRQ(err_PETSc)
+  call DMDAVecRestoreArrayF90(mechanical_grid,solution_lastInc,u_lastInc,err_PETSc)
+  CHKERRQ(err_PETSc)
 
   restartRead2: if (interface_restartInc > 0) then
     print'(1x,a,i0,a)', 'reading more restart data of increment ', interface_restartInc, ' from file'
     call HDF5_read(C_volAvg,groupHandle,'C_volAvg',.false.)
-    call MPI_Bcast(C_volAvg,81,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
-    if(ierr /=0) error stop 'MPI error'
+    call MPI_Bcast(C_volAvg,81_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
+    if(err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
     call HDF5_read(C_volAvgLastInc,groupHandle,'C_volAvgLastInc',.false.)
-    call MPI_Bcast(C_volAvgLastInc,81,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
-    if(ierr /=0) error stop 'MPI error'
+    call MPI_Bcast(C_volAvgLastInc,81_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
+    if(err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
 
     call HDF5_closeGroup(groupHandle)
     call HDF5_closeFile(fileHandle)
@@ -301,7 +302,7 @@ function grid_mechanical_FEM_solution(incInfoIn) result(solution)
     solution
 !--------------------------------------------------------------------------------------------------
 ! PETSc Data
-  PetscErrorCode :: ierr
+  PetscErrorCode :: err_PETSc
   SNESConvergedReason :: reason
 
   incInfo = incInfoIn
@@ -312,13 +313,13 @@ function grid_mechanical_FEM_solution(incInfoIn) result(solution)
 
 !--------------------------------------------------------------------------------------------------
 ! solve BVP
-  call SNESsolve(mechanical_snes,PETSC_NULL_VEC,solution_current,ierr)
-  CHKERRQ(ierr)
+  call SNESsolve(mechanical_snes,PETSC_NULL_VEC,solution_current,err_PETSc)
+  CHKERRQ(err_PETSc)
 
 !--------------------------------------------------------------------------------------------------
 ! check convergence
-  call SNESGetConvergedReason(mechanical_snes,reason,ierr)
-  CHKERRQ(ierr)
+  call SNESGetConvergedReason(mechanical_snes,reason,err_PETSc)
+  CHKERRQ(err_PETSc)
 
   solution%converged = reason > 0
   solution%iterationsNeeded = totalIter
@@ -348,22 +349,22 @@ subroutine grid_mechanical_FEM_forward(cutBack,guess,Delta_t,Delta_t_old,t_remai
     deformation_BC
   type(rotation), intent(in) :: &
     rotation_BC
-  PetscErrorCode :: ierr
+  PetscErrorCode :: err_PETSc
   PetscScalar, pointer, dimension(:,:,:,:) :: &
     u_current,u_lastInc
 
-  call DMDAVecGetArrayF90(mechanical_grid,solution_current,u_current,ierr)
-  CHKERRQ(ierr)
-  call DMDAVecGetArrayF90(mechanical_grid,solution_lastInc,u_lastInc,ierr)
-  CHKERRQ(ierr)
+  call DMDAVecGetArrayF90(mechanical_grid,solution_current,u_current,err_PETSc)
+  CHKERRQ(err_PETSc)
+  call DMDAVecGetArrayF90(mechanical_grid,solution_lastInc,u_lastInc,err_PETSc)
+  CHKERRQ(err_PETSc)
 
   if (cutBack) then
     C_volAvg = C_volAvgLastInc
   else
     C_volAvgLastInc = C_volAvg
 
-    F_aimDot = merge(merge(.0_pReal,(F_aim-F_aim_lastInc)/Delta_t_old,stress_BC%mask),.0_pReal,guess)  ! estimate deformation rate for prescribed stress components
+    F_aimDot = merge(merge(.0_pReal,(F_aim-F_aim_lastInc)/Delta_t_old,stress_BC%mask),.0_pReal,guess) ! estimate deformation rate for prescribed stress components
     F_aim_lastInc = F_aim
 
 !-----------------------------------------------------------------------------------------------
@@ -380,13 +381,14 @@ subroutine grid_mechanical_FEM_forward(cutBack,guess,Delta_t,Delta_t_old,t_remai
     endif
 
     if (guess) then
-      call VecWAXPY(solution_rate,-1.0_pReal,solution_lastInc,solution_current,ierr)
-      CHKERRQ(ierr)
-      call VecScale(solution_rate,1.0_pReal/Delta_t_old,ierr); CHKERRQ(ierr)
+      call VecWAXPY(solution_rate,-1.0_pReal,solution_lastInc,solution_current,err_PETSc)
+      CHKERRQ(err_PETSc)
+      call VecScale(solution_rate,1.0_pReal/Delta_t_old,err_PETSc)
+      CHKERRQ(err_PETSc)
     else
-      call VecSet(solution_rate,0.0_pReal,ierr); CHKERRQ(ierr)
+      call VecSet(solution_rate,0.0_pReal,err_PETSc); CHKERRQ(err_PETSc)
    endif
-    call VecCopy(solution_current,solution_lastInc,ierr); CHKERRQ(ierr)
+    call VecCopy(solution_current,solution_lastInc,err_PETSc); CHKERRQ(err_PETSc)
 
     F_lastInc = F
 
@@ -401,12 +403,12 @@ subroutine grid_mechanical_FEM_forward(cutBack,guess,Delta_t,Delta_t_old,t_remai
   if (stress_BC%myType=='dot_P') P_aim = P_aim &
                                        + merge(.0_pReal,stress_BC%values,stress_BC%mask)*Delta_t
 
-  call VecAXPY(solution_current,Delta_t,solution_rate,ierr); CHKERRQ(ierr)
-
-  call DMDAVecRestoreArrayF90(mechanical_grid,solution_current,u_current,ierr)
-  CHKERRQ(ierr)
-  call DMDAVecRestoreArrayF90(mechanical_grid,solution_lastInc,u_lastInc,ierr)
-  CHKERRQ(ierr)
+  call VecAXPY(solution_current,Delta_t,solution_rate,err_PETSc)
+  CHKERRQ(err_PETSc)
+  call DMDAVecRestoreArrayF90(mechanical_grid,solution_current,u_current,err_PETSc)
+  CHKERRQ(err_PETSc)
+  call DMDAVecRestoreArrayF90(mechanical_grid,solution_lastInc,u_lastInc,err_PETSc)
+  CHKERRQ(err_PETSc)
 
 !--------------------------------------------------------------------------------------------------
 ! set module wide available data
@@ -432,15 +434,15 @@ end subroutine grid_mechanical_FEM_updateCoords
 !--------------------------------------------------------------------------------------------------
 subroutine grid_mechanical_FEM_restartWrite
 
-  PetscErrorCode :: ierr
+  PetscErrorCode :: err_PETSc
   integer(HID_T) :: fileHandle, groupHandle
   PetscScalar, dimension(:,:,:,:), pointer :: u_current,u_lastInc
 
-  call DMDAVecGetArrayF90(mechanical_grid,solution_current,u_current,ierr)
-  CHKERRQ(ierr)
-  call DMDAVecGetArrayF90(mechanical_grid,solution_lastInc,u_lastInc,ierr)
-  CHKERRQ(ierr)
+  call DMDAVecGetArrayF90(mechanical_grid,solution_current,u_current,err_PETSc)
+  CHKERRQ(err_PETSc)
+  call DMDAVecGetArrayF90(mechanical_grid,solution_lastInc,u_lastInc,err_PETSc)
+  CHKERRQ(err_PETSc)
 
   print'(1x,a)', 'writing solver data required for restart to file'; flush(IO_STDOUT)
 
@@ -466,10 +468,10 @@ subroutine grid_mechanical_FEM_restartWrite
     call HDF5_closeFile(fileHandle)
   endif
 
-  call DMDAVecRestoreArrayF90(mechanical_grid,solution_current,u_current,ierr)
-  CHKERRQ(ierr)
-  call DMDAVecRestoreArrayF90(mechanical_grid,solution_lastInc,u_lastInc,ierr)
-  CHKERRQ(ierr)
+  call DMDAVecRestoreArrayF90(mechanical_grid,solution_current,u_current,err_PETSc)
+  CHKERRQ(err_PETSc)
+  call DMDAVecRestoreArrayF90(mechanical_grid,solution_lastInc,u_lastInc,err_PETSc)
+  CHKERRQ(err_PETSc)
 
 end subroutine grid_mechanical_FEM_restartWrite
 
@@ -477,7 +479,7 @@ end subroutine grid_mechanical_FEM_restartWrite
 !--------------------------------------------------------------------------------------------------
 !> @brief convergence check
 !--------------------------------------------------------------------------------------------------
-subroutine converged(snes_local,PETScIter,devNull1,devNull2,fnorm,reason,dummy,ierr)
+subroutine converged(snes_local,PETScIter,devNull1,devNull2,fnorm,reason,dummy,err_PETSc)
 
   SNES :: snes_local
   PetscInt, intent(in) :: PETScIter
@@ -487,7 +489,7 @@ subroutine converged(snes_local,PETScIter,devNull1,devNull2,fnorm,reason,dummy,i
     fnorm
   SNESConvergedReason :: reason
   PetscObject :: dummy
-  PetscErrorCode :: ierr
+  PetscErrorCode :: err_PETSc
   real(pReal) :: &
     err_div, &
     divTol, &
@@ -521,7 +523,7 @@ end subroutine converged
 !> @brief forms the residual vector
 !--------------------------------------------------------------------------------------------------
 subroutine formResidual(da_local,x_local, &
-                        f_local,dummy,ierr)
+                        f_local,dummy,err_PETSc)
 
   DM :: da_local
   Vec :: x_local, f_local
@@ -532,13 +534,14 @@ subroutine formResidual(da_local,x_local, &
     PETScIter, &
     nfuncs
   PetscObject :: dummy
-  PetscErrorCode :: ierr
+  PetscErrorCode :: err_PETSc
+  integer(MPI_INTEGER_KIND) :: err_MPI
   real(pReal), dimension(3,3,3,3) :: devNull
 
-  call SNESGetNumberFunctionEvals(mechanical_snes,nfuncs,ierr)
-  CHKERRQ(ierr)
-  call SNESGetIterationNumber(mechanical_snes,PETScIter,ierr)
-  CHKERRQ(ierr)
+  call SNESGetNumberFunctionEvals(mechanical_snes,nfuncs,err_PETSc)
+  CHKERRQ(err_PETSc)
+  call SNESGetIterationNumber(mechanical_snes,PETScIter,err_PETSc)
+  CHKERRQ(err_PETSc)
 
   if (nfuncs == 0 .and. PETScIter == 0) totalIter = -1                        ! new increment
 
@@ -556,7 +559,7 @@ subroutine formResidual(da_local,x_local, &
 
 !--------------------------------------------------------------------------------------------------
 ! get deformation gradient
-  call DMDAVecGetArrayF90(da_local,x_local,x_scal,ierr);CHKERRQ(ierr)
+  call DMDAVecGetArrayF90(da_local,x_local,x_scal,err_PETSc);CHKERRQ(err_PETSc)
   do k = zstart, zend; do j = ystart, yend; do i = xstart, xend
     ctr = 0
     do kk = 0, 1; do jj = 0, 1; do ii = 0, 1
@@ -566,14 +569,15 @@ subroutine formResidual(da_local,x_local, &
     ii = i-xstart+1; jj = j-ystart+1; kk = k-zstart+1
     F(1:3,1:3,ii,jj,kk) = params%rotation_BC%rotate(F_aim,active=.true.) + transpose(matmul(BMat,x_elem))
   enddo; enddo; enddo
-  call DMDAVecRestoreArrayF90(da_local,x_local,x_scal,ierr);CHKERRQ(ierr)
+  call DMDAVecRestoreArrayF90(da_local,x_local,x_scal,err_PETSc);CHKERRQ(err_PETSc)
 
 !--------------------------------------------------------------------------------------------------
 ! evaluate constitutive response
   call utilities_constitutiveResponse(P_current,&
                                       P_av,C_volAvg,devNull, &
                                       F,params%Delta_t,params%rotation_BC)
-  call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,ierr)
+  call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1_MPI_INTEGER_KIND,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,err_MPI)
+  if(err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
 
 !--------------------------------------------------------------------------------------------------
 ! stress BC handling
@@ -582,9 +586,9 @@ subroutine formResidual(da_local,x_local, &
 
 !--------------------------------------------------------------------------------------------------
 ! constructing residual
-  call VecSet(f_local,0.0_pReal,ierr);CHKERRQ(ierr)
-  call DMDAVecGetArrayF90(da_local,f_local,f_scal,ierr);CHKERRQ(ierr)
-  call DMDAVecGetArrayF90(da_local,x_local,x_scal,ierr);CHKERRQ(ierr)
+  call VecSet(f_local,0.0_pReal,err_PETSc);CHKERRQ(err_PETSc)
+  call DMDAVecGetArrayF90(da_local,f_local,f_scal,err_PETSc);CHKERRQ(err_PETSc)
+  call DMDAVecGetArrayF90(da_local,x_local,x_scal,err_PETSc);CHKERRQ(err_PETSc)
   ele = 0
   do k = zstart, zend; do j = ystart, yend; do i = xstart, xend
     ctr = 0
@@ -604,12 +608,12 @@ subroutine formResidual(da_local,x_local, &
       f_scal(0:2,i+ii,j+jj,k+kk) = f_scal(0:2,i+ii,j+jj,k+kk) + f_elem(ctr,1:3)
     enddo; enddo; enddo
   enddo; enddo; enddo
-  call DMDAVecRestoreArrayF90(da_local,x_local,x_scal,ierr);CHKERRQ(ierr)
-  call DMDAVecRestoreArrayF90(da_local,f_local,f_scal,ierr);CHKERRQ(ierr)
+  call DMDAVecRestoreArrayF90(da_local,x_local,x_scal,err_PETSc);CHKERRQ(err_PETSc)
+  call DMDAVecRestoreArrayF90(da_local,f_local,f_scal,err_PETSc);CHKERRQ(err_PETSc)
 
 !--------------------------------------------------------------------------------------------------
 ! applying boundary conditions
-  call DMDAVecGetArrayF90(da_local,f_local,f_scal,ierr);CHKERRQ(ierr)
+  call DMDAVecGetArrayF90(da_local,f_local,f_scal,err_PETSc);CHKERRQ(err_PETSc)
   if (zstart == 0) then
     f_scal(0:2,xstart,ystart,zstart) = 0.0
     f_scal(0:2,xend+1,ystart,zstart) = 0.0
@@ -622,7 +626,7 @@ subroutine formResidual(da_local,x_local, &
     f_scal(0:2,xstart,yend+1,zend+1) = 0.0
     f_scal(0:2,xend+1,yend+1,zend+1) = 0.0
   endif
-  call DMDAVecRestoreArrayF90(da_local,f_local,f_scal,ierr);CHKERRQ(ierr)
+  call DMDAVecRestoreArrayF90(da_local,f_local,f_scal,err_PETSc);CHKERRQ(err_PETSc)
 
 end subroutine formResidual
 
@@ -630,7 +634,7 @@ end subroutine formResidual
 !--------------------------------------------------------------------------------------------------
 !> @brief forms the FEM stiffness matrix
 !--------------------------------------------------------------------------------------------------
-subroutine formJacobian(da_local,x_local,Jac_pre,Jac,dummy,ierr)
+subroutine formJacobian(da_local,x_local,Jac_pre,Jac,dummy,err_PETSc)
 
   DM :: da_local
   Vec :: x_local, coordinates
@@ -644,15 +648,17 @@ subroutine formJacobian(da_local,x_local,Jac_pre,Jac,dummy,err_PETSc)
   PetscScalar :: diag
   PetscObject :: dummy
   MatNullSpace :: matnull
-  PetscErrorCode :: ierr
+  PetscErrorCode :: err_PETSc
 
   BMatFull = 0.0
   BMatFull(1:3,1 :8 ) = BMat
   BMatFull(4:6,9 :16) = BMat
   BMatFull(7:9,17:24) = BMat
-  call MatSetOption(Jac,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE,ierr); CHKERRQ(ierr)
-  call MatSetOption(Jac,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_FALSE,ierr); CHKERRQ(ierr)
-  call MatZeroEntries(Jac,ierr); CHKERRQ(ierr)
+  call MatSetOption(Jac,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE,err_PETSc)
+  CHKERRQ(err_PETSc)
+  call MatSetOption(Jac,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_FALSE,err_PETSc)
+  CHKERRQ(err_PETSc)
+  call MatZeroEntries(Jac,err_PETSc); CHKERRQ(err_PETSc)
   ele = 0
   do k = zstart, zend; do j = ystart, yend; do i = xstart, xend
     ctr = 0
@@ -687,34 +693,42 @@ subroutine formJacobian(da_local,x_local,Jac_pre,Jac,dummy,err_PETSc)
              matmul(transpose(BMatFull), &
                     matmul(reshape(reshape(homogenization_dPdF(1:3,1:3,1:3,1:3,ele), &
                                            shape=[3,3,3,3], order=[2,1,4,3]),shape=[9,9]),BMatFull))*detJ
-    call MatSetValuesStencil(Jac,24,row,24,col,K_ele,ADD_VALUES,ierr)
-    CHKERRQ(ierr)
+    call MatSetValuesStencil(Jac,24_pPetscInt,row,24_pPetscInt,col,K_ele,ADD_VALUES,err_PETSc)
+    CHKERRQ(err_PETSc)
   enddo; enddo; enddo
-  call MatAssemblyBegin(Jac,MAT_FINAL_ASSEMBLY,ierr); CHKERRQ(ierr)
-  call MatAssemblyEnd(Jac,MAT_FINAL_ASSEMBLY,ierr); CHKERRQ(ierr)
-  call MatAssemblyBegin(Jac_pre,MAT_FINAL_ASSEMBLY,ierr); CHKERRQ(ierr)
-  call MatAssemblyEnd(Jac_pre,MAT_FINAL_ASSEMBLY,ierr); CHKERRQ(ierr)
+  call MatAssemblyBegin(Jac,MAT_FINAL_ASSEMBLY,err_PETSc); CHKERRQ(err_PETSc)
+  call MatAssemblyEnd(Jac,MAT_FINAL_ASSEMBLY,err_PETSc); CHKERRQ(err_PETSc)
+  call MatAssemblyBegin(Jac_pre,MAT_FINAL_ASSEMBLY,err_PETSc); CHKERRQ(err_PETSc)
+  call MatAssemblyEnd(Jac_pre,MAT_FINAL_ASSEMBLY,err_PETSc); CHKERRQ(err_PETSc)
 
 !--------------------------------------------------------------------------------------------------
 ! applying boundary conditions
   diag = (C_volAvg(1,1,1,1)/delta(1)**2 + &
           C_volAvg(2,2,2,2)/delta(2)**2 + &
          C_volAvg(3,3,3,3)/delta(3)**2)*detJ
-  call MatZeroRowsColumns(Jac,size(rows),rows,diag,PETSC_NULL_VEC,PETSC_NULL_VEC,ierr)
-  CHKERRQ(ierr)
-  call DMGetGlobalVector(da_local,coordinates,ierr); CHKERRQ(ierr)
-  call DMDAVecGetArrayF90(da_local,coordinates,x_scal,ierr); CHKERRQ(ierr)
+  call MatZeroRowsColumns(Jac,size(rows,kind=pPetscInt),rows,diag,PETSC_NULL_VEC,PETSC_NULL_VEC,err_PETSc)
+  CHKERRQ(err_PETSc)
+  call DMGetGlobalVector(da_local,coordinates,err_PETSc)
+  CHKERRQ(err_PETSc)
+  call DMDAVecGetArrayF90(da_local,coordinates,x_scal,err_PETSc)
+  CHKERRQ(err_PETSc)
   ele = 0
   do k = zstart, zend; do j = ystart, yend; do i = xstart, xend
     ele = ele + 1
     x_scal(0:2,i,j,k) = discretization_IPcoords(1:3,ele)
   enddo; enddo; enddo
-  call DMDAVecRestoreArrayF90(da_local,coordinates,x_scal,ierr); CHKERRQ(ierr) ! initialize to undeformed coordinates (ToDo: use ip coordinates)
-  call MatNullSpaceCreateRigidBody(coordinates,matnull,ierr); CHKERRQ(ierr)    ! get rigid body deformation modes
-  call DMRestoreGlobalVector(da_local,coordinates,ierr); CHKERRQ(ierr)
-  call MatSetNullSpace(Jac,matnull,ierr); CHKERRQ(ierr)
-  call MatSetNearNullSpace(Jac,matnull,ierr); CHKERRQ(ierr)
-  call MatNullSpaceDestroy(matnull,ierr); CHKERRQ(ierr)
+  call DMDAVecRestoreArrayF90(da_local,coordinates,x_scal,err_PETSc)
+  CHKERRQ(err_PETSc)                                                          ! initialize to undeformed coordinates (ToDo: use ip coordinates)
+  call MatNullSpaceCreateRigidBody(coordinates,matnull,err_PETSc)
+  CHKERRQ(err_PETSc)                                                          ! get rigid body deformation modes
+  call DMRestoreGlobalVector(da_local,coordinates,err_PETSc)
+  CHKERRQ(err_PETSc)
+  call MatSetNullSpace(Jac,matnull,err_PETSc)
+  CHKERRQ(err_PETSc)
+  call MatSetNearNullSpace(Jac,matnull,err_PETSc)
+  CHKERRQ(err_PETSc)
+  call MatNullSpaceDestroy(matnull,err_PETSc)
+  CHKERRQ(err_PETSc)
 
 end subroutine formJacobian
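[Context note, not part of the diff] The DMDACreate3d hunks above also show the second half of the cleanup: once the sources use default integers, every literal or variable handed to PETSc is cast explicitly to PETSc's index kind, written pPetscInt in this patch, e.g.

    localK            = 0_pPetscInt
    localK(worldrank) = int(grid3,pPetscInt)    ! default integer -> PetscInt
    call SNESSetMaxLinearSolveFailures(mechanical_snes, huge(1_pPetscInt), err_PETSc)

With a PETSc configured --with-64-bit-indices, PetscInt is 8 bytes while a non-promoted default integer is 4, so an uncast literal would silently mismatch the argument list.
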
diff --git a/src/grid/grid_mech_spectral_basic.f90 b/src/grid/grid_mech_spectral_basic.f90
index 66b703ed6..74400f160 100644
--- a/src/grid/grid_mech_spectral_basic.f90
+++ b/src/grid/grid_mech_spectral_basic.f90
@@ -97,7 +97,8 @@ contains
 subroutine grid_mechanical_spectral_basic_init
 
   real(pReal), dimension(3,3,grid(1),grid(2),grid3) :: P
-  PetscErrorCode :: ierr
+  PetscErrorCode :: err_PETSc
+  integer(MPI_INTEGER_KIND) :: err_MPI
   PetscScalar, pointer, dimension(:,:,:,:) :: &
     F                                                                         ! pointer to solution data
   PetscInt, dimension(0:worldsize-1) :: localK
@@ -145,10 +146,10 @@ subroutine grid_mechanical_spectral_basic_init
 
 !--------------------------------------------------------------------------------------------------
 ! set default and user defined options for PETSc
-  call PetscOptionsInsertString(PETSC_NULL_OPTIONS,'-mechanical_snes_type ngmres',ierr)
-  CHKERRQ(ierr)
-  call PetscOptionsInsertString(PETSC_NULL_OPTIONS,num_grid%get_asString('petsc_options',defaultVal=''),ierr)
-  CHKERRQ(ierr)
+  call PetscOptionsInsertString(PETSC_NULL_OPTIONS,'-mechanical_snes_type ngmres',err_PETSc)
+  CHKERRQ(err_PETSc)
+  call PetscOptionsInsertString(PETSC_NULL_OPTIONS,num_grid%get_asString('petsc_options',defaultVal=''),err_PETSc)
+  CHKERRQ(err_PETSc)
 
 !--------------------------------------------------------------------------------------------------
 ! allocate global fields
@@ -157,33 +158,34 @@ subroutine grid_mechanical_spectral_basic_init
 
 !--------------------------------------------------------------------------------------------------
 ! initialize solver specific parts of PETSc
-  call SNESCreate(PETSC_COMM_WORLD,snes,ierr); CHKERRQ(ierr)
-  call SNESSetOptionsPrefix(snes,'mechanical_',ierr);CHKERRQ(ierr)
-  localK            = 0
-  localK(worldrank) = grid3
-  call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,ierr)
+  call SNESCreate(PETSC_COMM_WORLD,snes,err_PETSc); CHKERRQ(err_PETSc)
+  call SNESSetOptionsPrefix(snes,'mechanical_',err_PETSc);CHKERRQ(err_PETSc)
+  localK            = 0_pPetscInt
+  localK(worldrank) = int(grid3,pPetscInt)
+  call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,err_MPI)
+  if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
   call DMDACreate3d(PETSC_COMM_WORLD, &
         DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, &              ! cut off stencil at boundary
         DMDA_STENCIL_BOX, &                                                  ! Moore (26) neighborhood around central point
-         grid(1),grid(2),grid(3), &                                           ! global grid
-         1 , 1, worldsize, &
-         9, 0, &                                                              ! #dof (F tensor), ghost boundary width (domain overlap)
-         [grid(1)],[grid(2)],localK, &                                        ! local grid
-         da,ierr)                                                             ! handle, error
-  CHKERRQ(ierr)
-  call SNESSetDM(snes,da,ierr); CHKERRQ(ierr)                                 ! connect snes to da
-  call DMsetFromOptions(da,ierr); CHKERRQ(ierr)
-  call DMsetUp(da,ierr); CHKERRQ(ierr)
-  call DMcreateGlobalVector(da,solution_vec,ierr); CHKERRQ(ierr)              ! global solution vector (grid x 9, i.e. every def grad tensor)
-  call DMDASNESsetFunctionLocal(da,INSERT_VALUES,formResidual,PETSC_NULL_SNES,ierr) ! residual vector of same shape as solution vector
-  CHKERRQ(ierr)
-  call SNESsetConvergenceTest(snes,converged,PETSC_NULL_SNES,PETSC_NULL_FUNCTION,ierr) ! specify custom convergence check function "converged"
-  CHKERRQ(ierr)
-  call SNESsetFromOptions(snes,ierr); CHKERRQ(ierr)                           ! pull it all together with additional CLI arguments
+         int(grid(1),pPetscInt),int(grid(2),pPetscInt),int(grid(3),pPetscInt), & ! global grid
+         1_pPetscInt, 1_pPetscInt, int(worldsize,pPetscInt), &
+         9_pPetscInt, 0_pPetscInt, &                                          ! #dof (F, tensor), ghost boundary width (domain overlap)
+         [int(grid(1),pPetscInt)],[int(grid(2),pPetscInt)],localK, &          ! local grid
+         da,err_PETSc)                                                        ! handle, error
+  CHKERRQ(err_PETSc)
+  call SNESSetDM(snes,da,err_PETSc); CHKERRQ(err_PETSc)                       ! connect snes to da
+  call DMsetFromOptions(da,err_PETSc); CHKERRQ(err_PETSc)
+  call DMsetUp(da,err_PETSc); CHKERRQ(err_PETSc)
+  call DMcreateGlobalVector(da,solution_vec,err_PETSc); CHKERRQ(err_PETSc)    ! global solution vector (grid x 9, i.e. every def grad tensor)
+  call DMDASNESsetFunctionLocal(da,INSERT_VALUES,formResidual,PETSC_NULL_SNES,err_PETSc) ! residual vector of same shape as solution vector
+  CHKERRQ(err_PETSc)
+  call SNESsetConvergenceTest(snes,converged,PETSC_NULL_SNES,PETSC_NULL_FUNCTION,err_PETSc) ! specify custom convergence check function "converged"
+  CHKERRQ(err_PETSc)
+  call SNESsetFromOptions(snes,err_PETSc); CHKERRQ(err_PETSc)                 ! pull it all together with additional CLI arguments
 
 !--------------------------------------------------------------------------------------------------
 ! init fields
-  call DMDAVecGetArrayF90(da,solution_vec,F,ierr); CHKERRQ(ierr)              ! places pointer on PETSc data
+  call DMDAVecGetArrayF90(da,solution_vec,F,err_PETSc); CHKERRQ(err_PETSc)    ! places pointer on PETSc data
 
   restartRead: if (interface_restartInc > 0) then
     print'(/,1x,a,i0,a)', 'reading restart data of increment ', interface_restartInc, ' from file'
@@ -192,17 +194,17 @@ subroutine grid_mechanical_spectral_basic_init
     groupHandle = HDF5_openGroup(fileHandle,'solver')
 
     call HDF5_read(P_aim,groupHandle,'P_aim',.false.)
-    call MPI_Bcast(P_aim,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
-    if (ierr /=0) error stop 'MPI error'
+    call MPI_Bcast(P_aim,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
+    if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
     call HDF5_read(F_aim,groupHandle,'F_aim',.false.)
-    call MPI_Bcast(F_aim,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
-    if (ierr /=0) error stop 'MPI error'
+    call MPI_Bcast(F_aim,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
+    if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
     call HDF5_read(F_aim_lastInc,groupHandle,'F_aim_lastInc',.false.)
-    call MPI_Bcast(F_aim_lastInc,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
-    if (ierr /=0) error stop 'MPI error'
+    call MPI_Bcast(F_aim_lastInc,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
+    if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
     call HDF5_read(F_aimDot,groupHandle,'F_aimDot',.false.)
-    call MPI_Bcast(F_aimDot,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
-    if (ierr /=0) error stop 'MPI error'
+    call MPI_Bcast(F_aimDot,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
+    if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
     call HDF5_read(F,groupHandle,'F')
     call HDF5_read(F_lastInc,groupHandle,'F_lastInc')
 
@@ -216,24 +218,27 @@ subroutine grid_mechanical_spectral_basic_init
   call utilities_constitutiveResponse(P,P_av,C_volAvg,C_minMaxAvg, &          ! stress field, stress avg, global average of stiffness and (min+max)/2
                                       reshape(F,shape(F_lastInc)), &          ! target F
                                       0.0_pReal)                              ! time increment
-  call DMDAVecRestoreArrayF90(da,solution_vec,F,ierr); CHKERRQ(ierr)          ! deassociate pointer
+  call DMDAVecRestoreArrayF90(da,solution_vec,F,err_PETSc); CHKERRQ(err_PETSc) ! deassociate pointer
 
   restartRead2: if (interface_restartInc > 0) then
     print'(1x,a,i0,a)', 'reading more restart data of increment ', interface_restartInc, ' from file'
     call HDF5_read(C_volAvg,groupHandle,'C_volAvg',.false.)
-    call MPI_Bcast(C_volAvg,81,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
-    if (ierr /=0) error stop 'MPI error'
+    call MPI_Bcast(C_volAvg,81_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
+    if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
     call HDF5_read(C_volAvgLastInc,groupHandle,'C_volAvgLastInc',.false.)
-    call MPI_Bcast(C_volAvgLastInc,81,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
-    if (ierr /=0) error stop 'MPI error'
+    call MPI_Bcast(C_volAvgLastInc,81_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
+    if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
 
     call HDF5_closeGroup(groupHandle)
     call HDF5_closeFile(fileHandle)
 
     call MPI_File_open(MPI_COMM_WORLD, trim(getSolverJobName())//'.C_ref', &
-                       MPI_MODE_RDONLY,MPI_INFO_NULL,fileUnit,ierr)
-    call MPI_File_read(fileUnit,C_minMaxAvg,81,MPI_DOUBLE,MPI_STATUS_IGNORE,ierr)
-    call MPI_File_close(fileUnit,ierr)
+                       MPI_MODE_RDONLY,MPI_INFO_NULL,fileUnit,err_MPI)
+    if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+    call MPI_File_read(fileUnit,C_minMaxAvg,81_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_STATUS_IGNORE,err_MPI)
+    if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+    call MPI_File_close(fileUnit,err_MPI)
+    if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
   end if restartRead2
 
   call utilities_updateGamma(C_minMaxAvg)
@@ -255,7 +260,7 @@ function grid_mechanical_spectral_basic_solution(incInfoIn) result(solution)
     solution
 !--------------------------------------------------------------------------------------------------
 ! PETSc Data
-  PetscErrorCode :: ierr
+  PetscErrorCode :: err_PETSc
   SNESConvergedReason :: reason
 
   incInfo = incInfoIn
@@ -267,11 +272,11 @@ function grid_mechanical_spectral_basic_solution(incInfoIn) result(solution)
 
 !--------------------------------------------------------------------------------------------------
 ! solve BVP
-  call SNESsolve(snes,PETSC_NULL_VEC,solution_vec,ierr); CHKERRQ(ierr)
+  call SNESsolve(snes,PETSC_NULL_VEC,solution_vec,err_PETSc); CHKERRQ(err_PETSc)
 
 !--------------------------------------------------------------------------------------------------
 ! check convergence
-  call SNESGetConvergedReason(snes,reason,ierr); CHKERRQ(ierr)
+  call SNESGetConvergedReason(snes,reason,err_PETSc); CHKERRQ(err_PETSc)
 
   solution%converged = reason > 0
   solution%iterationsNeeded = totalIter
@@ -301,11 +306,11 @@ subroutine grid_mechanical_spectral_basic_forward(cutBack,guess,Delta_t,Delta_t_
     deformation_BC
   type(rotation), intent(in) :: &
     rotation_BC
-  PetscErrorCode :: ierr
+  PetscErrorCode :: err_PETSc
   PetscScalar, pointer, dimension(:,:,:,:) :: F
 
 
-  call DMDAVecGetArrayF90(da,solution_vec,F,ierr); CHKERRQ(ierr)
+  call DMDAVecGetArrayF90(da,solution_vec,F,err_PETSc); CHKERRQ(err_PETSc)
 
   if (cutBack) then
     C_volAvg    = C_volAvgLastInc
@@ -314,7 +319,7 @@ subroutine grid_mechanical_spectral_basic_forward(cutBack,guess,Delta_t,Delta_t_
     C_volAvgLastInc    = C_volAvg
     C_minMaxAvgLastInc = C_minMaxAvg
 
-    F_aimDot = merge(merge(.0_pReal,(F_aim-F_aim_lastInc)/Delta_t_old,stress_BC%mask),.0_pReal,guess)  ! estimate deformation rate for prescribed stress components
+    F_aimDot = merge(merge(.0_pReal,(F_aim-F_aim_lastInc)/Delta_t_old,stress_BC%mask),.0_pReal,guess) ! estimate deformation rate for prescribed stress components
     F_aim_lastInc = F_aim
 
 !-----------------------------------------------------------------------------------------------
@@ -348,7 +353,7 @@ subroutine grid_mechanical_spectral_basic_forward(cutBack,guess,Delta_t,Delta_t_
 
   F = reshape(utilities_forwardField(Delta_t,F_lastInc,Fdot, &                ! estimate of F at end of time+Delta_t that matches rotated F_aim on average
               rotation_BC%rotate(F_aim,active=.true.)),[9,grid(1),grid(2),grid3])
-  call DMDAVecRestoreArrayF90(da,solution_vec,F,ierr); CHKERRQ(ierr)
+  call DMDAVecRestoreArrayF90(da,solution_vec,F,err_PETSc); CHKERRQ(err_PETSc)
 
 !--------------------------------------------------------------------------------------------------
 !
set module wide available data @@ -364,12 +369,12 @@ end subroutine grid_mechanical_spectral_basic_forward !-------------------------------------------------------------------------------------------------- subroutine grid_mechanical_spectral_basic_updateCoords - PetscErrorCode :: ierr + PetscErrorCode :: err_PETSc PetscScalar, dimension(:,:,:,:), pointer :: F - call DMDAVecGetArrayF90(da,solution_vec,F,ierr); CHKERRQ(ierr) + call DMDAVecGetArrayF90(da,solution_vec,F,err_PETSc); CHKERRQ(err_PETSc) call utilities_updateCoords(F) - call DMDAVecRestoreArrayF90(da,solution_vec,F,ierr); CHKERRQ(ierr) + call DMDAVecRestoreArrayF90(da,solution_vec,F,err_PETSc); CHKERRQ(err_PETSc) end subroutine grid_mechanical_spectral_basic_updateCoords @@ -379,11 +384,11 @@ end subroutine grid_mechanical_spectral_basic_updateCoords !-------------------------------------------------------------------------------------------------- subroutine grid_mechanical_spectral_basic_restartWrite - PetscErrorCode :: ierr + PetscErrorCode :: err_PETSc integer(HID_T) :: fileHandle, groupHandle PetscScalar, dimension(:,:,:,:), pointer :: F - call DMDAVecGetArrayF90(da,solution_vec,F,ierr); CHKERRQ(ierr) + call DMDAVecGetArrayF90(da,solution_vec,F,err_PETSc); CHKERRQ(err_PETSc) print'(1x,a)', 'writing solver data required for restart to file'; flush(IO_STDOUT) @@ -410,7 +415,7 @@ subroutine grid_mechanical_spectral_basic_restartWrite if (num%update_gamma) call utilities_saveReferenceStiffness - call DMDAVecRestoreArrayF90(da,solution_vec,F,ierr); CHKERRQ(ierr) + call DMDAVecRestoreArrayF90(da,solution_vec,F,err_PETSc); CHKERRQ(err_PETSc) end subroutine grid_mechanical_spectral_basic_restartWrite @@ -418,7 +423,7 @@ end subroutine grid_mechanical_spectral_basic_restartWrite !-------------------------------------------------------------------------------------------------- !> @brief convergence check !-------------------------------------------------------------------------------------------------- -subroutine converged(snes_local,PETScIter,devNull1,devNull2,devNull3,reason,dummy,ierr) +subroutine converged(snes_local,PETScIter,devNull1,devNull2,devNull3,reason,dummy,err_PETSc) SNES :: snes_local PetscInt, intent(in) :: PETScIter @@ -428,7 +433,7 @@ subroutine converged(snes_local,PETScIter,devNull1,devNull2,devNull3,reason,dumm devNull3 SNESConvergedReason :: reason PetscObject :: dummy - PetscErrorCode :: ierr + PetscErrorCode :: err_PETSc real(pReal) :: & divTol, & BCTol @@ -460,7 +465,7 @@ end subroutine converged !> @brief forms the residual vector !-------------------------------------------------------------------------------------------------- subroutine formResidual(in, F, & - residuum, dummy, ierr) + residuum, dummy, err_PETSc) DMDALocalInfo, dimension(DMDA_LOCAL_INFO_SIZE) :: in !< DMDA info (needs to be named "in" for macros like XRANGE to work) PetscScalar, dimension(3,3,XG_RANGE,YG_RANGE,ZG_RANGE), & @@ -473,10 +478,11 @@ subroutine formResidual(in, F, & PETScIter, & nfuncs PetscObject :: dummy - PetscErrorCode :: ierr + PetscErrorCode :: err_PETSc + integer(MPI_INTEGER_KIND) :: err_MPI - call SNESGetNumberFunctionEvals(snes,nfuncs,ierr); CHKERRQ(ierr) - call SNESGetIterationNumber(snes,PETScIter,ierr); CHKERRQ(ierr) + call SNESGetNumberFunctionEvals(snes,nfuncs,err_PETSc); CHKERRQ(err_PETSc) + call SNESGetIterationNumber(snes,PETScIter,err_PETSc); CHKERRQ(err_PETSc) if (nfuncs == 0 .and. PETScIter == 0) totalIter = -1 ! 
new increment @@ -497,7 +503,8 @@ subroutine formResidual(in, F, & call utilities_constitutiveResponse(residuum, & ! "residuum" gets field of first PK stress (to save memory) P_av,C_volAvg,C_minMaxAvg, & F,params%Delta_t,params%rotation_BC) - call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,ierr) + call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1_MPI_INTEGER_KIND,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' !-------------------------------------------------------------------------------------------------- ! stress BC handling diff --git a/src/grid/grid_mech_spectral_polarisation.f90 b/src/grid/grid_mech_spectral_polarisation.f90 index 3d524029c..765a1a5a7 100644 --- a/src/grid/grid_mech_spectral_polarisation.f90 +++ b/src/grid/grid_mech_spectral_polarisation.f90 @@ -108,7 +108,8 @@ contains subroutine grid_mechanical_spectral_polarisation_init real(pReal), dimension(3,3,grid(1),grid(2),grid3) :: P - PetscErrorCode :: ierr + PetscErrorCode :: err_PETSc + integer(MPI_INTEGER_KIND) :: err_MPI PetscScalar, pointer, dimension(:,:,:,:) :: & FandF_tau, & ! overall pointer to solution data F, & ! specific (sub)pointer @@ -163,10 +164,10 @@ subroutine grid_mechanical_spectral_polarisation_init !-------------------------------------------------------------------------------------------------- ! set default and user defined options for PETSc - call PetscOptionsInsertString(PETSC_NULL_OPTIONS,'-mechanical_snes_type ngmres',ierr) - CHKERRQ(ierr) - call PetscOptionsInsertString(PETSC_NULL_OPTIONS,num_grid%get_asString('petsc_options',defaultVal=''),ierr) - CHKERRQ(ierr) + call PetscOptionsInsertString(PETSC_NULL_OPTIONS,'-mechanical_snes_type ngmres',err_PETSc) + CHKERRQ(err_PETSc) + call PetscOptionsInsertString(PETSC_NULL_OPTIONS,num_grid%get_asString('petsc_options',defaultVal=''),err_PETSc) + CHKERRQ(err_PETSc) !-------------------------------------------------------------------------------------------------- ! allocate global fields @@ -177,33 +178,34 @@ subroutine grid_mechanical_spectral_polarisation_init !-------------------------------------------------------------------------------------------------- ! initialize solver specific parts of PETSc - call SNESCreate(PETSC_COMM_WORLD,snes,ierr); CHKERRQ(ierr) - call SNESSetOptionsPrefix(snes,'mechanical_',ierr);CHKERRQ(ierr) - localK = 0 - localK(worldrank) = grid3 - call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,ierr) + call SNESCreate(PETSC_COMM_WORLD,snes,err_PETSc); CHKERRQ(err_PETSc) + call SNESSetOptionsPrefix(snes,'mechanical_',err_PETSc);CHKERRQ(err_PETSc) + localK = 0_pPetscInt + localK(worldrank) = int(grid3,pPetscInt) + call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call DMDACreate3d(PETSC_COMM_WORLD, & DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, & ! cut off stencil at boundary DMDA_STENCIL_BOX, & ! Moore (26) neighborhood around central point - grid(1),grid(2),grid(3), & ! global grid - 1 , 1, worldsize, & - 18, 0, & ! #dof (F tensor), ghost boundary width (domain overlap) - [grid(1)],[grid(2)],localK, & ! local grid - da,ierr) ! handle, error - CHKERRQ(ierr) - call SNESSetDM(snes,da,ierr); CHKERRQ(ierr) ! connect snes to da - call DMsetFromOptions(da,ierr); CHKERRQ(ierr) - call DMsetUp(da,ierr); CHKERRQ(ierr) - call DMcreateGlobalVector(da,solution_vec,ierr); CHKERRQ(ierr) ! 
global solution vector (grid x 18, i.e. every def grad tensor) - call DMDASNESsetFunctionLocal(da,INSERT_VALUES,formResidual,PETSC_NULL_SNES,ierr) ! residual vector of same shape as solution vector - CHKERRQ(ierr) - call SNESsetConvergenceTest(snes,converged,PETSC_NULL_SNES,PETSC_NULL_FUNCTION,ierr) ! specify custom convergence check function "converged" - CHKERRQ(ierr) - call SNESsetFromOptions(snes,ierr); CHKERRQ(ierr) ! pull it all together with additional CLI arguments + int(grid(1),pPetscInt),int(grid(2),pPetscInt),int(grid(3),pPetscInt), & ! global grid + 1_pPetscInt, 1_pPetscInt, int(worldsize,pPetscInt), & + 18_pPetscInt, 0_pPetscInt, & ! #dof (2xtensor), ghost boundary width (domain overlap) + [int(grid(1),pPetscInt)],[int(grid(2),pPetscInt)],localK, & ! local grid + da,err_PETSc) ! handle, error + CHKERRQ(err_PETSc) + call SNESSetDM(snes,da,err_PETSc); CHKERRQ(err_PETSc) ! connect snes to da + call DMsetFromOptions(da,err_PETSc); CHKERRQ(err_PETSc) + call DMsetUp(da,err_PETSc); CHKERRQ(err_PETSc) + call DMcreateGlobalVector(da,solution_vec,err_PETSc); CHKERRQ(err_PETSc) ! global solution vector (grid x 18, i.e. every def grad tensor) + call DMDASNESsetFunctionLocal(da,INSERT_VALUES,formResidual,PETSC_NULL_SNES,err_PETSc) ! residual vector of same shape as solution vector + CHKERRQ(err_PETSc) + call SNESsetConvergenceTest(snes,converged,PETSC_NULL_SNES,PETSC_NULL_FUNCTION,err_PETSc) ! specify custom convergence check function "converged" + CHKERRQ(err_PETSc) + call SNESsetFromOptions(snes,err_PETSc); CHKERRQ(err_PETSc) ! pull it all together with additional CLI arguments !-------------------------------------------------------------------------------------------------- ! init fields - call DMDAVecGetArrayF90(da,solution_vec,FandF_tau,ierr); CHKERRQ(ierr) ! places pointer on PETSc data + call DMDAVecGetArrayF90(da,solution_vec,FandF_tau,err_PETSc); CHKERRQ(err_PETSc) ! places pointer on PETSc data F => FandF_tau(0: 8,:,:,:) F_tau => FandF_tau(9:17,:,:,:) @@ -214,17 +216,17 @@ subroutine grid_mechanical_spectral_polarisation_init groupHandle = HDF5_openGroup(fileHandle,'solver') call HDF5_read(P_aim,groupHandle,'P_aim',.false.) - call MPI_Bcast(P_aim,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr) - if (ierr /=0) error stop 'MPI error' + call MPI_Bcast(P_aim,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call HDF5_read(F_aim,groupHandle,'F_aim',.false.) - call MPI_Bcast(F_aim,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr) - if (ierr /=0) error stop 'MPI error' + call MPI_Bcast(F_aim,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call HDF5_read(F_aim_lastInc,groupHandle,'F_aim_lastInc',.false.) - call MPI_Bcast(F_aim_lastInc,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr) - if (ierr /=0) error stop 'MPI error' + call MPI_Bcast(F_aim_lastInc,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call HDF5_read(F_aimDot,groupHandle,'F_aimDot',.false.) 
- call MPI_Bcast(F_aimDot,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr) - if (ierr /=0) error stop 'MPI error' + call MPI_Bcast(F_aimDot,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call HDF5_read(F,groupHandle,'F') call HDF5_read(F_lastInc,groupHandle,'F_lastInc') call HDF5_read(F_tau,groupHandle,'F_tau') @@ -242,24 +244,28 @@ subroutine grid_mechanical_spectral_polarisation_init call utilities_constitutiveResponse(P,P_av,C_volAvg,C_minMaxAvg, & ! stress field, stress avg, global average of stiffness and (min+max)/2 reshape(F,shape(F_lastInc)), & ! target F 0.0_pReal) ! time increment - call DMDAVecRestoreArrayF90(da,solution_vec,FandF_tau,ierr); CHKERRQ(ierr) ! deassociate pointer + call DMDAVecRestoreArrayF90(da,solution_vec,FandF_tau,err_PETSc) ! deassociate pointer + CHKERRQ(err_PETSc) restartRead2: if (interface_restartInc > 0) then print'(1x,a,i0,a)', 'reading more restart data of increment ', interface_restartInc, ' from file' call HDF5_read(C_volAvg,groupHandle,'C_volAvg',.false.) - call MPI_Bcast(C_volAvg,81,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr) - if (ierr /=0) error stop 'MPI error' + call MPI_Bcast(C_volAvg,81_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call HDF5_read(C_volAvgLastInc,groupHandle,'C_volAvgLastInc',.false.) - call MPI_Bcast(C_volAvgLastInc,81,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr) - if (ierr /=0) error stop 'MPI error' + call MPI_Bcast(C_volAvgLastInc,81_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call HDF5_closeGroup(groupHandle) call HDF5_closeFile(fileHandle) call MPI_File_open(MPI_COMM_WORLD, trim(getSolverJobName())//'.C_ref', & - MPI_MODE_RDONLY,MPI_INFO_NULL,fileUnit,ierr) - call MPI_File_read(fileUnit,C_minMaxAvg,81,MPI_DOUBLE,MPI_STATUS_IGNORE,ierr) - call MPI_File_close(fileUnit,ierr) + MPI_MODE_RDONLY,MPI_INFO_NULL,fileUnit,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' + call MPI_File_read(fileUnit,C_minMaxAvg,81_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_STATUS_IGNORE,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' + call MPI_File_close(fileUnit,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' end if restartRead2 call utilities_updateGamma(C_minMaxAvg) @@ -283,7 +289,7 @@ function grid_mechanical_spectral_polarisation_solution(incInfoIn) result(soluti solution !-------------------------------------------------------------------------------------------------- ! PETSc Data - PetscErrorCode :: ierr + PetscErrorCode :: err_PETSc SNESConvergedReason :: reason incInfo = incInfoIn @@ -299,11 +305,11 @@ function grid_mechanical_spectral_polarisation_solution(incInfoIn) result(soluti !-------------------------------------------------------------------------------------------------- ! solve BVP - call SNESsolve(snes,PETSC_NULL_VEC,solution_vec,ierr); CHKERRQ(ierr) + call SNESsolve(snes,PETSC_NULL_VEC,solution_vec,err_PETSc); CHKERRQ(err_PETSc) !-------------------------------------------------------------------------------------------------- ! 
check convergence - call SNESGetConvergedReason(snes,reason,ierr); CHKERRQ(ierr) + call SNESGetConvergedReason(snes,reason,err_PETSc); CHKERRQ(err_PETSc) solution%converged = reason > 0 solution%iterationsNeeded = totalIter @@ -333,13 +339,13 @@ subroutine grid_mechanical_spectral_polarisation_forward(cutBack,guess,Delta_t,D deformation_BC type(rotation), intent(in) :: & rotation_BC - PetscErrorCode :: ierr + PetscErrorCode :: err_PETSc PetscScalar, pointer, dimension(:,:,:,:) :: FandF_tau, F, F_tau integer :: i, j, k real(pReal), dimension(3,3) :: F_lambda33 - call DMDAVecGetArrayF90(da,solution_vec,FandF_tau,ierr); CHKERRQ(ierr) + call DMDAVecGetArrayF90(da,solution_vec,FandF_tau,err_PETSc); CHKERRQ(err_PETSc) F => FandF_tau(0: 8,:,:,:) F_tau => FandF_tau(9:17,:,:,:) @@ -402,7 +408,8 @@ subroutine grid_mechanical_spectral_polarisation_forward(cutBack,guess,Delta_t,D end do; end do; end do end if - call DMDAVecRestoreArrayF90(da,solution_vec,FandF_tau,ierr); CHKERRQ(ierr) + call DMDAVecRestoreArrayF90(da,solution_vec,FandF_tau,err_PETSc) + CHKERRQ(err_PETSc) !-------------------------------------------------------------------------------------------------- ! set module wide available data @@ -418,12 +425,14 @@ end subroutine grid_mechanical_spectral_polarisation_forward !-------------------------------------------------------------------------------------------------- subroutine grid_mechanical_spectral_polarisation_updateCoords - PetscErrorCode :: ierr + PetscErrorCode :: err_PETSc PetscScalar, dimension(:,:,:,:), pointer :: FandF_tau - call DMDAVecGetArrayF90(da,solution_vec,FandF_tau,ierr); CHKERRQ(ierr) + call DMDAVecGetArrayF90(da,solution_vec,FandF_tau,err_PETSc) + CHKERRQ(err_PETSc) call utilities_updateCoords(FandF_tau(0:8,:,:,:)) - call DMDAVecRestoreArrayF90(da,solution_vec,FandF_tau,ierr); CHKERRQ(ierr) + call DMDAVecRestoreArrayF90(da,solution_vec,FandF_tau,err_PETSc) + CHKERRQ(err_PETSc) end subroutine grid_mechanical_spectral_polarisation_updateCoords @@ -433,11 +442,11 @@ end subroutine grid_mechanical_spectral_polarisation_updateCoords !-------------------------------------------------------------------------------------------------- subroutine grid_mechanical_spectral_polarisation_restartWrite - PetscErrorCode :: ierr + PetscErrorCode :: err_PETSc integer(HID_T) :: fileHandle, groupHandle PetscScalar, dimension(:,:,:,:), pointer :: FandF_tau, F, F_tau - call DMDAVecGetArrayF90(da,solution_vec,FandF_tau,ierr); CHKERRQ(ierr) + call DMDAVecGetArrayF90(da,solution_vec,FandF_tau,err_PETSc); CHKERRQ(err_PETSc) F => FandF_tau(0: 8,:,:,:) F_tau => FandF_tau(9:17,:,:,:) @@ -467,7 +476,8 @@ subroutine grid_mechanical_spectral_polarisation_restartWrite if (num%update_gamma) call utilities_saveReferenceStiffness - call DMDAVecRestoreArrayF90(da,solution_vec,FandF_tau,ierr); CHKERRQ(ierr) + call DMDAVecRestoreArrayF90(da,solution_vec,FandF_tau,err_PETSc) + CHKERRQ(err_PETSc) end subroutine grid_mechanical_spectral_polarisation_restartWrite @@ -475,7 +485,7 @@ end subroutine grid_mechanical_spectral_polarisation_restartWrite !-------------------------------------------------------------------------------------------------- !> @brief convergence check !-------------------------------------------------------------------------------------------------- -subroutine converged(snes_local,PETScIter,devNull1,devNull2,devNull3,reason,dummy,ierr) +subroutine converged(snes_local,PETScIter,devNull1,devNull2,devNull3,reason,dummy,err_PETSc) SNES :: snes_local PetscInt, intent(in) :: PETScIter @@ 
-485,7 +495,7 @@ subroutine converged(snes_local,PETScIter,devNull1,devNull2,devNull3,reason,dumm devNull3 SNESConvergedReason :: reason PetscObject :: dummy - PetscErrorCode :: ierr + PetscErrorCode :: err_PETSc real(pReal) :: & curlTol, & divTol, & @@ -521,7 +531,7 @@ end subroutine converged !> @brief forms the residual vector !-------------------------------------------------------------------------------------------------- subroutine formResidual(in, FandF_tau, & - residuum, dummy,ierr) + residuum, dummy,err_PETSc) DMDALocalInfo, dimension(DMDA_LOCAL_INFO_SIZE) :: in !< DMDA info (needs to be named "in" for macros like XRANGE to work) PetscScalar, dimension(3,3,2,XG_RANGE,YG_RANGE,ZG_RANGE), & @@ -537,8 +547,9 @@ subroutine formResidual(in, FandF_tau, & PETScIter, & nfuncs PetscObject :: dummy - PetscErrorCode :: ierr - integer :: & + PetscErrorCode :: err_PETSc + integer(MPI_INTEGER_KIND) :: err_MPI + integer :: & i, j, k, e !--------------------------------------------------------------------------------------------------- @@ -553,10 +564,11 @@ subroutine formResidual(in, FandF_tau, & X_RANGE, Y_RANGE, Z_RANGE) F_av = sum(sum(sum(F,dim=5),dim=4),dim=3) * wgt - call MPI_Allreduce(MPI_IN_PLACE,F_av,9,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr) + call MPI_Allreduce(MPI_IN_PLACE,F_av,9_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' - call SNESGetNumberFunctionEvals(snes,nfuncs,ierr); CHKERRQ(ierr) - call SNESGetIterationNumber(snes,PETScIter,ierr); CHKERRQ(ierr) + call SNESGetNumberFunctionEvals(snes,nfuncs,err_PETSc); CHKERRQ(err_PETSc) + call SNESGetIterationNumber(snes,PETScIter,err_PETSc); CHKERRQ(err_PETSc) if (nfuncs == 0 .and. PETScIter == 0) totalIter = -1 ! new increment @@ -597,7 +609,7 @@ subroutine formResidual(in, FandF_tau, & call utilities_constitutiveResponse(residual_F, & ! "residuum" gets field of first PK stress (to save memory) P_av,C_volAvg,C_minMaxAvg, & F - residual_F_tau/num%beta,params%Delta_t,params%rotation_BC) - call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,ierr) + call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1_MPI_INTEGER_KIND,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,err_MPI) !-------------------------------------------------------------------------------------------------- ! stress BC handling diff --git a/src/grid/grid_thermal_spectral.f90 b/src/grid/grid_thermal_spectral.f90 index 67c7ba1c3..8862df725 100644 --- a/src/grid/grid_thermal_spectral.f90 +++ b/src/grid/grid_thermal_spectral.f90 @@ -71,7 +71,8 @@ subroutine grid_thermal_spectral_init(T_0) integer :: i, j, k, ce DM :: thermal_grid PetscScalar, dimension(:,:,:), pointer :: T_PETSc - PetscErrorCode :: ierr + integer(MPI_INTEGER_KIND) :: err_MPI + PetscErrorCode :: err_PETSc class(tNode), pointer :: & num_grid @@ -94,39 +95,41 @@ subroutine grid_thermal_spectral_init(T_0) !-------------------------------------------------------------------------------------------------- ! 
set default and user defined options for PETSc call PetscOptionsInsertString(PETSC_NULL_OPTIONS,'-thermal_snes_type newtonls -thermal_snes_mf & - &-thermal_snes_ksp_ew -thermal_ksp_type fgmres',ierr) - CHKERRQ(ierr) - call PetscOptionsInsertString(PETSC_NULL_OPTIONS,num_grid%get_asString('petsc_options',defaultVal=''),ierr) - CHKERRQ(ierr) + &-thermal_snes_ksp_ew -thermal_ksp_type fgmres',err_PETSc) + CHKERRQ(err_PETSc) + call PetscOptionsInsertString(PETSC_NULL_OPTIONS,num_grid%get_asString('petsc_options',defaultVal=''),err_PETSc) + CHKERRQ(err_PETSc) !-------------------------------------------------------------------------------------------------- ! initialize solver specific parts of PETSc - call SNESCreate(PETSC_COMM_WORLD,thermal_snes,ierr); CHKERRQ(ierr) - call SNESSetOptionsPrefix(thermal_snes,'thermal_',ierr);CHKERRQ(ierr) - localK = 0 - localK(worldrank) = grid3 - call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,ierr) + call SNESCreate(PETSC_COMM_WORLD,thermal_snes,err_PETSc); CHKERRQ(err_PETSc) + call SNESSetOptionsPrefix(thermal_snes,'thermal_',err_PETSc);CHKERRQ(err_PETSc) + localK = 0_pPetscInt + localK(worldrank) = int(grid3,pPetscInt) + call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call DMDACreate3D(PETSC_COMM_WORLD, & DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, & ! cut off stencil at boundary DMDA_STENCIL_BOX, & ! Moore (26) neighborhood around central point - grid(1),grid(2),grid(3), & ! global grid - 1, 1, worldsize, & - 1, 0, & ! #dof (T field), ghost boundary width (domain overlap) - [grid(1)],[grid(2)],localK, & ! local grid - thermal_grid,ierr) ! handle, error - CHKERRQ(ierr) - call SNESSetDM(thermal_snes,thermal_grid,ierr); CHKERRQ(ierr) ! connect snes to da - call DMsetFromOptions(thermal_grid,ierr); CHKERRQ(ierr) - call DMsetUp(thermal_grid,ierr); CHKERRQ(ierr) - call DMCreateGlobalVector(thermal_grid,solution_vec,ierr); CHKERRQ(ierr) ! global solution vector (grid x 1, i.e. every def grad tensor) - call DMDASNESSetFunctionLocal(thermal_grid,INSERT_VALUES,formResidual,PETSC_NULL_SNES,ierr) ! residual vector of same shape as solution vector - CHKERRQ(ierr) - call SNESSetFromOptions(thermal_snes,ierr); CHKERRQ(ierr) ! pull it all together with additional CLI arguments + int(grid(1),pPetscInt),int(grid(2),pPetscInt),int(grid(3),pPetscInt), & ! global grid + 1_pPetscInt, 1_pPetscInt, int(worldsize,pPetscInt), & + 1_pPetscInt, 0_pPetscInt, & ! #dof (T, scalar), ghost boundary width (domain overlap) + [int(grid(1),pPetscInt)],[int(grid(2),pPetscInt)],localK, & ! local grid + thermal_grid,err_PETSc) ! handle, error + CHKERRQ(err_PETSc) + call SNESSetDM(thermal_snes,thermal_grid,err_PETSc); CHKERRQ(err_PETSc) ! connect snes to da + call DMsetFromOptions(thermal_grid,err_PETSc); CHKERRQ(err_PETSc) + call DMsetUp(thermal_grid,err_PETSc); CHKERRQ(err_PETSc) + call DMCreateGlobalVector(thermal_grid,solution_vec,err_PETSc) ! global solution vector (grid x 1, i.e. every def grad tensor) + CHKERRQ(err_PETSc) + call DMDASNESSetFunctionLocal(thermal_grid,INSERT_VALUES,formResidual,PETSC_NULL_SNES,err_PETSc) ! residual vector of same shape as solution vector + CHKERRQ(err_PETSc) + call SNESSetFromOptions(thermal_snes,err_PETSc); CHKERRQ(err_PETSc) ! pull it all together with additional CLI arguments !-------------------------------------------------------------------------------------------------- ! 
init fields - call DMDAGetCorners(thermal_grid,xstart,ystart,zstart,xend,yend,zend,ierr) - CHKERRQ(ierr) + call DMDAGetCorners(thermal_grid,xstart,ystart,zstart,xend,yend,zend,err_PETSc) + CHKERRQ(err_PETSc) xend = xstart + xend - 1 yend = ystart + yend - 1 zend = zstart + zend - 1 @@ -143,9 +146,11 @@ subroutine grid_thermal_spectral_init(T_0) call homogenization_thermal_setField(T_0,0.0_pReal,ce) end do; end do; end do - call DMDAVecGetArrayF90(thermal_grid,solution_vec,T_PETSc,ierr); CHKERRQ(ierr) + call DMDAVecGetArrayF90(thermal_grid,solution_vec,T_PETSc,err_PETSc) + CHKERRQ(err_PETSc) T_PETSc(xstart:xend,ystart:yend,zstart:zend) = T_current - call DMDAVecRestoreArrayF90(thermal_grid,solution_vec,T_PETSc,ierr); CHKERRQ(ierr) + call DMDAVecRestoreArrayF90(thermal_grid,solution_vec,T_PETSc,err_PETSc) + CHKERRQ(err_PETSc) call updateReference @@ -164,7 +169,8 @@ function grid_thermal_spectral_solution(Delta_t) result(solution) PetscInt :: devNull PetscReal :: T_min, T_max, stagNorm - PetscErrorCode :: ierr + integer(MPI_INTEGER_KIND) :: err_MPI + PetscErrorCode :: err_PETSc SNESConvergedReason :: reason solution%converged =.false. @@ -173,8 +179,10 @@ function grid_thermal_spectral_solution(Delta_t) result(solution) ! set module wide availabe data params%Delta_t = Delta_t - call SNESSolve(thermal_snes,PETSC_NULL_VEC,solution_vec,ierr); CHKERRQ(ierr) - call SNESGetConvergedReason(thermal_snes,reason,ierr); CHKERRQ(ierr) + call SNESSolve(thermal_snes,PETSC_NULL_VEC,solution_vec,err_PETSc) + CHKERRQ(err_PETSc) + call SNESGetConvergedReason(thermal_snes,reason,err_PETSc) + CHKERRQ(err_PETSc) if (reason < 1) then solution%converged = .false. @@ -184,9 +192,11 @@ function grid_thermal_spectral_solution(Delta_t) result(solution) solution%iterationsNeeded = totalIter end if stagNorm = maxval(abs(T_current - T_stagInc)) - call MPI_Allreduce(MPI_IN_PLACE,stagNorm,1,MPI_DOUBLE,MPI_MAX,MPI_COMM_WORLD,ierr) + call MPI_Allreduce(MPI_IN_PLACE,stagNorm,1_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_MAX,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' solution%stagConverged = stagNorm < max(num%eps_thermal_atol, num%eps_thermal_rtol*maxval(T_current)) - call MPI_Allreduce(MPI_IN_PLACE,solution%stagConverged,1,MPI_LOGICAL,MPI_LAND,MPI_COMM_WORLD,ierr) + call MPI_Allreduce(MPI_IN_PLACE,solution%stagConverged,1_MPI_INTEGER_KIND,MPI_LOGICAL,MPI_LAND,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' T_stagInc = T_current !-------------------------------------------------------------------------------------------------- @@ -197,8 +207,8 @@ function grid_thermal_spectral_solution(Delta_t) result(solution) call homogenization_thermal_setField(T_current(i,j,k),(T_current(i,j,k)-T_lastInc(i,j,k))/params%Delta_t,ce) end do; end do; end do - call VecMin(solution_vec,devNull,T_min,ierr); CHKERRQ(ierr) - call VecMax(solution_vec,devNull,T_max,ierr); CHKERRQ(ierr) + call VecMin(solution_vec,devNull,T_min,err_PETSc); CHKERRQ(err_PETSc) + call VecMax(solution_vec,devNull,T_max,err_PETSc); CHKERRQ(err_PETSc) if (solution%converged) & print'(/,1x,a)', '... thermal conduction converged ..................................' 
print'(/,1x,a,f8.4,2x,f8.4,2x,f8.4)', 'Minimum|Maximum|Delta Temperature / K = ', T_min, T_max, stagNorm @@ -217,7 +227,7 @@ subroutine grid_thermal_spectral_forward(cutBack) integer :: i, j, k, ce DM :: dm_local PetscScalar, dimension(:,:,:), pointer :: x_scal - PetscErrorCode :: ierr + PetscErrorCode :: err_PETSc if (cutBack) then T_current = T_lastInc @@ -225,10 +235,13 @@ subroutine grid_thermal_spectral_forward(cutBack) !-------------------------------------------------------------------------------------------------- ! reverting thermal field state - call SNESGetDM(thermal_snes,dm_local,ierr); CHKERRQ(ierr) - call DMDAVecGetArrayF90(dm_local,solution_vec,x_scal,ierr); CHKERRQ(ierr) !< get the data out of PETSc to work with + call SNESGetDM(thermal_snes,dm_local,err_PETSc) + CHKERRQ(err_PETSc) + call DMDAVecGetArrayF90(dm_local,solution_vec,x_scal,err_PETSc) !< get the data out of PETSc to work with + CHKERRQ(err_PETSc) x_scal(xstart:xend,ystart:yend,zstart:zend) = T_current - call DMDAVecRestoreArrayF90(dm_local,solution_vec,x_scal,ierr); CHKERRQ(ierr) + call DMDAVecRestoreArrayF90(dm_local,solution_vec,x_scal,err_PETSc) + CHKERRQ(err_PETSc) ce = 0 do k = 1, grid3; do j = 1, grid(2); do i = 1,grid(1) ce = ce + 1 @@ -245,7 +258,7 @@ end subroutine grid_thermal_spectral_forward !-------------------------------------------------------------------------------------------------- !> @brief forms the spectral thermal residual vector !-------------------------------------------------------------------------------------------------- -subroutine formResidual(in,x_scal,f_scal,dummy,ierr) +subroutine formResidual(in,x_scal,f_scal,dummy,dummy_err) DMDALocalInfo, dimension(DMDA_LOCAL_INFO_SIZE) :: & in @@ -256,7 +269,7 @@ subroutine formResidual(in,x_scal,f_scal,dummy,ierr) X_RANGE,Y_RANGE,Z_RANGE), intent(out) :: & f_scal PetscObject :: dummy - PetscErrorCode :: ierr + PetscErrorCode :: dummy_err integer :: i, j, k, ce T_current = x_scal @@ -301,7 +314,8 @@ end subroutine formResidual !-------------------------------------------------------------------------------------------------- subroutine updateReference() - integer :: ce,ierr + integer :: ce + integer(MPI_INTEGER_KIND) :: err_MPI K_ref = 0.0_pReal @@ -312,9 +326,11 @@ subroutine updateReference() end do K_ref = K_ref*wgt - call MPI_Allreduce(MPI_IN_PLACE,K_ref,9,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr) + call MPI_Allreduce(MPI_IN_PLACE,K_ref,9_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' mu_ref = mu_ref*wgt - call MPI_Allreduce(MPI_IN_PLACE,mu_ref,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr) + call MPI_Allreduce(MPI_IN_PLACE,mu_ref,1_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' end subroutine updateReference diff --git a/src/grid/spectral_utilities.f90 b/src/grid/spectral_utilities.f90 index f40025039..80cde388f 100644 --- a/src/grid/spectral_utilities.f90 +++ b/src/grid/spectral_utilities.f90 @@ -144,7 +144,7 @@ contains !-------------------------------------------------------------------------------------------------- subroutine spectral_utilities_init - PetscErrorCode :: ierr + PetscErrorCode :: err_PETSc integer :: i, j, k, & FFTW_planner_flag integer, dimension(3) :: k_s @@ -193,13 +193,13 @@ subroutine spectral_utilities_init 'add more using the "PETSc_options" keyword in numerics.yaml' flush(IO_STDOUT) - call PetscOptionsClear(PETSC_NULL_OPTIONS,ierr) - CHKERRQ(ierr) - if (debugPETSc) call 
PetscOptionsInsertString(PETSC_NULL_OPTIONS,trim(PETSCDEBUG),ierr) - CHKERRQ(ierr) + call PetscOptionsClear(PETSC_NULL_OPTIONS,err_PETSc) + CHKERRQ(err_PETSc) + if (debugPETSc) call PetscOptionsInsertString(PETSC_NULL_OPTIONS,trim(PETSCDEBUG),err_PETSc) + CHKERRQ(err_PETSc) call PetscOptionsInsertString(PETSC_NULL_OPTIONS,& - num_grid%get_asString('PETSc_options',defaultVal=''),ierr) - CHKERRQ(ierr) + num_grid%get_asString('PETSc_options',defaultVal=''),err_PETSc) + CHKERRQ(err_PETSc) grid1Red = grid(1)/2 + 1 wgt = 1.0/real(product(grid),pReal) @@ -292,12 +292,12 @@ subroutine spectral_utilities_init tensorSize, FFTW_MPI_DEFAULT_BLOCK, FFTW_MPI_DEFAULT_BLOCK, &! no. of transforms, default iblock and oblock tensorField_real, tensorField_fourier, & ! input data, output data PETSC_COMM_WORLD, FFTW_planner_flag) ! use all processors, planer precision - if (.not. C_ASSOCIATED(planTensorForth)) error stop 'FFTW error' + if (.not. c_associated(planTensorForth)) error stop 'FFTW error' planTensorBack = fftw_mpi_plan_many_dft_c2r(3, [gridFFTW(3),gridFFTW(2),gridFFTW(1)], & ! dimension, logical length in each dimension in reversed order tensorSize, FFTW_MPI_DEFAULT_BLOCK, FFTW_MPI_DEFAULT_BLOCK, &! no. of transforms, default iblock and oblock tensorField_fourier,tensorField_real, & ! input data, output data PETSC_COMM_WORLD, FFTW_planner_flag) ! all processors, planer precision - if (.not. C_ASSOCIATED(planTensorBack)) error stop 'FFTW error' + if (.not. c_associated(planTensorBack)) error stop 'FFTW error' !-------------------------------------------------------------------------------------------------- ! vector MPI fftw plans @@ -559,8 +559,9 @@ end subroutine utilities_fourierGreenConvolution !-------------------------------------------------------------------------------------------------- real(pReal) function utilities_divergenceRMS() - integer :: i, j, k, ierr - complex(pReal), dimension(3) :: rescaledGeom + integer :: i, j, k + integer(MPI_INTEGER_KIND) :: err_MPI + complex(pReal), dimension(3) :: rescaledGeom print'(/,1x,a)', '... calculating divergence ................................................' flush(IO_STDOUT) @@ -589,8 +590,8 @@ real(pReal) function utilities_divergenceRMS() conjg(-xi1st(1:3,grid1Red,j,k))*rescaledGeom))**2) enddo; enddo if (grid(1) == 1) utilities_divergenceRMS = utilities_divergenceRMS * 0.5_pReal ! counted twice in case of grid(1) == 1 - call MPI_Allreduce(MPI_IN_PLACE,utilities_divergenceRMS,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr) - if (ierr /=0) error stop 'MPI error' + call MPI_Allreduce(MPI_IN_PLACE,utilities_divergenceRMS,1_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' utilities_divergenceRMS = sqrt(utilities_divergenceRMS) * wgt ! RMS in real space calculated with Parsevals theorem from Fourier space end function utilities_divergenceRMS @@ -601,7 +602,8 @@ end function utilities_divergenceRMS !-------------------------------------------------------------------------------------------------- real(pReal) function utilities_curlRMS() - integer :: i, j, k, l, ierr + integer :: i, j, k, l + integer(MPI_INTEGER_KIND) :: err_MPI complex(pReal), dimension(3,3) :: curl_fourier complex(pReal), dimension(3) :: rescaledGeom @@ -649,8 +651,8 @@ real(pReal) function utilities_curlRMS() + sum(curl_fourier%re**2 + curl_fourier%im**2) ! 
this layer (Nyquist) does not have a conjugate complex counterpart (if grid(1) /= 1) enddo; enddo - call MPI_Allreduce(MPI_IN_PLACE,utilities_curlRMS,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr) - if (ierr /=0) error stop 'MPI error' + call MPI_Allreduce(MPI_IN_PLACE,utilities_curlRMS,1_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' utilities_curlRMS = sqrt(utilities_curlRMS) * wgt if (grid(1) == 1) utilities_curlRMS = utilities_curlRMS * 0.5_pReal ! counted twice in case of grid(1) == 1 @@ -799,8 +801,8 @@ subroutine utilities_constitutiveResponse(P,P_av,C_volAvg,C_minmaxAvg,& type(rotation), intent(in), optional :: rotation_BC !< rotation of load frame - integer :: & - i,ierr + integer :: i + integer(MPI_INTEGER_KIND) :: err_MPI real(pReal), dimension(3,3,3,3) :: dPdF_max, dPdF_min real(pReal) :: dPdF_norm_max, dPdF_norm_min real(pReal), dimension(2) :: valueAndRank !< pair of min/max norm of dPdF to synchronize min/max of dPdF @@ -818,7 +820,8 @@ subroutine utilities_constitutiveResponse(P,P_av,C_volAvg,C_minmaxAvg,& P = reshape(homogenization_P, [3,3,grid(1),grid(2),grid3]) P_av = sum(sum(sum(P,dim=5),dim=4),dim=3) * wgt - call MPI_Allreduce(MPI_IN_PLACE,P_av,9,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr) + call MPI_Allreduce(MPI_IN_PLACE,P_av,9_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' if (debugRotation) print'(/,1x,a,/,2(3(2x,f12.4,1x)/),3(2x,f12.4,1x))', & 'Piola--Kirchhoff stress (lab) / MPa =', transpose(P_av)*1.e-6_pReal if (present(rotation_BC)) P_av = rotation_BC%rotate(P_av) @@ -842,22 +845,22 @@ subroutine utilities_constitutiveResponse(P,P_av,C_volAvg,C_minmaxAvg,& enddo valueAndRank = [dPdF_norm_max,real(worldrank,pReal)] - call MPI_Allreduce(MPI_IN_PLACE,valueAndRank,1, MPI_2DOUBLE_PRECISION, MPI_MAXLOC, MPI_COMM_WORLD, ierr) - if (ierr /= 0) error stop 'MPI error' - call MPI_Bcast(dPdF_max,81,MPI_DOUBLE,int(valueAndRank(2)),MPI_COMM_WORLD, ierr) - if (ierr /= 0) error stop 'MPI error' + call MPI_Allreduce(MPI_IN_PLACE,valueAndRank,1_MPI_INTEGER_KIND,MPI_2DOUBLE_PRECISION,MPI_MAXLOC,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' + call MPI_Bcast(dPdF_max,81_MPI_INTEGER_KIND,MPI_DOUBLE,int(valueAndRank(2),MPI_INTEGER_KIND),MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' valueAndRank = [dPdF_norm_min,real(worldrank,pReal)] - call MPI_Allreduce(MPI_IN_PLACE,valueAndRank,1, MPI_2DOUBLE_PRECISION, MPI_MINLOC, MPI_COMM_WORLD, ierr) - if (ierr /= 0) error stop 'MPI error' - call MPI_Bcast(dPdF_min,81,MPI_DOUBLE,int(valueAndRank(2)),MPI_COMM_WORLD, ierr) - if (ierr /= 0) error stop 'MPI error' + call MPI_Allreduce(MPI_IN_PLACE,valueAndRank,1_MPI_INTEGER_KIND,MPI_2DOUBLE_PRECISION,MPI_MINLOC,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' + call MPI_Bcast(dPdF_min,81_MPI_INTEGER_KIND,MPI_DOUBLE,int(valueAndRank(2),MPI_INTEGER_KIND),MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' C_minmaxAvg = 0.5_pReal*(dPdF_max + dPdF_min) C_volAvg = sum(homogenization_dPdF,dim=5) - call MPI_Allreduce(MPI_IN_PLACE,C_volAvg,81,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr) - if (ierr /= 0) error stop 'MPI error' + call MPI_Allreduce(MPI_IN_PLACE,C_volAvg,81_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' C_volAvg = C_volAvg * wgt @@ -906,12 +909,13 @@ 
function utilities_forwardField(Delta_t,field_lastInc,rate,aim) real(pReal), dimension(3,3,grid(1),grid(2),grid3) :: & utilities_forwardField real(pReal), dimension(3,3) :: fieldDiff !< - aim - PetscErrorCode :: ierr + integer(MPI_INTEGER_KIND) :: err_MPI utilities_forwardField = field_lastInc + rate*Delta_t if (present(aim)) then !< correct to match average fieldDiff = sum(sum(sum(utilities_forwardField,dim=5),dim=4),dim=3)*wgt - call MPI_Allreduce(MPI_IN_PLACE,fieldDiff,9,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr) + call MPI_Allreduce(MPI_IN_PLACE,fieldDiff,9_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' fieldDiff = fieldDiff - aim utilities_forwardField = utilities_forwardField - & spread(spread(spread(fieldDiff,3,grid(1)),4,grid(2)),5,grid3) @@ -981,9 +985,10 @@ subroutine utilities_updateCoords(F) real(pReal), dimension(3, grid(1)+1,grid(2)+1,grid3+1) :: nodeCoords integer :: & i,j,k,n, & - rank_t, rank_b, & - c, & - ierr + c + integer(MPI_INTEGER_KIND) :: & + rank_t, rank_b + integer(MPI_INTEGER_KIND) :: err_MPI #if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY) type(MPI_Request), dimension(4) :: request type(MPI_Status), dimension(4) :: status @@ -1025,30 +1030,30 @@ subroutine utilities_updateCoords(F) !-------------------------------------------------------------------------------------------------- ! average F if (grid3Offset == 0) Favg = real(tensorField_fourier(1:3,1:3,1,1,1),pReal)*wgt - call MPI_Bcast(Favg,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr) - if (ierr /=0) error stop 'MPI error' + call MPI_Bcast(Favg,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' !-------------------------------------------------------------------------------------------------- ! pad cell center fluctuations along z-direction (needed when running MPI simulation) IPfluct_padded(1:3,1:grid(1),1:grid(2),2:grid3+1) = vectorField_real(1:3,1:grid(1),1:grid(2),1:grid3) c = product(shape(IPfluct_padded(:,:,:,1))) !< amount of data to transfer - rank_t = modulo(worldrank+1,worldsize) - rank_b = modulo(worldrank-1,worldsize) + rank_t = modulo(worldrank+1_MPI_INTEGER_KIND,worldsize) + rank_b = modulo(worldrank-1_MPI_INTEGER_KIND,worldsize) ! send bottom layer to process below - call MPI_Isend(IPfluct_padded(:,:,:,2), c,MPI_DOUBLE,rank_b,0,MPI_COMM_WORLD,request(1),ierr) - if (ierr /=0) error stop 'MPI error' - call MPI_Irecv(IPfluct_padded(:,:,:,grid3+2),c,MPI_DOUBLE,rank_t,0,MPI_COMM_WORLD,request(2),ierr) - if (ierr /=0) error stop 'MPI error' + call MPI_Isend(IPfluct_padded(:,:,:,2), c,MPI_DOUBLE,rank_b,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,request(1),err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' + call MPI_Irecv(IPfluct_padded(:,:,:,grid3+2),c,MPI_DOUBLE,rank_t,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,request(2),err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' ! 
send top layer to process above - call MPI_Isend(IPfluct_padded(:,:,:,grid3+1),c,MPI_DOUBLE,rank_t,1,MPI_COMM_WORLD,request(3),ierr) - if (ierr /=0) error stop 'MPI error' - call MPI_Irecv(IPfluct_padded(:,:,:,1), c,MPI_DOUBLE,rank_b,1,MPI_COMM_WORLD,request(4),ierr) - if (ierr /=0) error stop 'MPI error' + call MPI_Isend(IPfluct_padded(:,:,:,grid3+1),c,MPI_DOUBLE,rank_t,1_MPI_INTEGER_KIND,MPI_COMM_WORLD,request(3),err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' + call MPI_Irecv(IPfluct_padded(:,:,:,1), c,MPI_DOUBLE,rank_b,1_MPI_INTEGER_KIND,MPI_COMM_WORLD,request(4),err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' - call MPI_Waitall(4,request,status,ierr) - if (ierr /=0) error stop 'MPI error' + call MPI_Waitall(4,request,status,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' #if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY) ! ToDo #else diff --git a/src/mesh/DAMASK_mesh.f90 b/src/mesh/DAMASK_mesh.f90 index 14872bf15..aa156ec2d 100644 --- a/src/mesh/DAMASK_mesh.f90 +++ b/src/mesh/DAMASK_mesh.f90 @@ -78,7 +78,7 @@ program DAMASK_mesh type(tLoadCase), allocatable, dimension(:) :: loadCases !< array of all load cases type(tSolutionState), allocatable, dimension(:) :: solres PetscInt :: faceSet, currentFaceSet, dimPlex - PetscErrorCode :: ierr + PetscErrorCode :: err_PETSc integer(kind(COMPONENT_UNDEFINED_ID)) :: ID external :: & quit @@ -98,8 +98,8 @@ program DAMASK_mesh if (maxCutBack < 0) call IO_error(301,ext_msg='maxCutBack') ! reading basic information from load case file and allocate data structure containing load cases - call DMGetDimension(geomMesh,dimPlex,ierr) !< dimension of mesh (2D or 3D) - CHKERRA(ierr) + call DMGetDimension(geomMesh,dimPlex,err_PETSc) !< dimension of mesh (2D or 3D) + CHKERRA(err_PETSc) allocate(solres(1)) !-------------------------------------------------------------------------------------------------- diff --git a/src/mesh/FEM_utilities.f90 b/src/mesh/FEM_utilities.f90 index df16fa8e2..703c6c818 100644 --- a/src/mesh/FEM_utilities.f90 +++ b/src/mesh/FEM_utilities.f90 @@ -50,7 +50,7 @@ module FEM_utilities type, public :: tSolutionState !< return type of solution from FEM solver variants logical :: converged = .true. logical :: stagConverged = .true. - integer :: iterationsNeeded = 0 + PetscInt :: iterationsNeeded = 0_pPETSCINT end type tSolutionState type, public :: tComponentBC @@ -92,7 +92,7 @@ subroutine FEM_utilities_init p_i !< integration order (quadrature rule) character(len=*), parameter :: & PETSCDEBUG = ' -snes_view -snes_monitor ' - PetscErrorCode :: ierr + PetscErrorCode :: err_PETSc logical :: debugPETSc !< use some in debug defined options for more verbose PETSc solution @@ -103,9 +103,9 @@ subroutine FEM_utilities_init p_s = num_mesh%get_asInt('p_s',defaultVal = 2) p_i = num_mesh%get_asInt('p_i',defaultVal = p_s) - if (p_s < 1_pInt .or. p_s > size(FEM_nQuadrature,2)) & + if (p_s < 1 .or. p_s > size(FEM_nQuadrature,2)) & call IO_error(821,ext_msg='shape function order (p_s) out of bounds') - if (p_i < max(1_pInt,p_s-1_pInt) .or. p_i > p_s) & + if (p_i < max(1,p_s-1) .or. 
p_i > p_s) & call IO_error(821,ext_msg='integration order (p_i) out of bounds') debug_mesh => config_debug%get('mesh',defaultVal=emptyList) @@ -116,20 +116,20 @@ subroutine FEM_utilities_init trim(PETScDebug), & 'add more using the "PETSc_options" keyword in numerics.yaml' flush(IO_STDOUT) - call PetscOptionsClear(PETSC_NULL_OPTIONS,ierr) - CHKERRQ(ierr) - if(debugPETSc) call PetscOptionsInsertString(PETSC_NULL_OPTIONS,trim(PETSCDEBUG),ierr) - CHKERRQ(ierr) + call PetscOptionsClear(PETSC_NULL_OPTIONS,err_PETSc) + CHKERRQ(err_PETSc) + if(debugPETSc) call PetscOptionsInsertString(PETSC_NULL_OPTIONS,trim(PETSCDEBUG),err_PETSc) + CHKERRQ(err_PETSc) call PetscOptionsInsertString(PETSC_NULL_OPTIONS,'-mechanical_snes_type newtonls & &-mechanical_snes_linesearch_type cp -mechanical_snes_ksp_ew & &-mechanical_snes_ksp_ew_rtol0 0.01 -mechanical_snes_ksp_ew_rtolmax 0.01 & - &-mechanical_ksp_type fgmres -mechanical_ksp_max_it 25', ierr) - CHKERRQ(ierr) - call PetscOptionsInsertString(PETSC_NULL_OPTIONS,num_mesh%get_asString('PETSc_options',defaultVal=''),ierr) - CHKERRQ(ierr) + &-mechanical_ksp_type fgmres -mechanical_ksp_max_it 25', err_PETSc) + CHKERRQ(err_PETSc) + call PetscOptionsInsertString(PETSC_NULL_OPTIONS,num_mesh%get_asString('PETSc_options',defaultVal=''),err_PETSc) + CHKERRQ(err_PETSc) write(petsc_optionsOrder,'(a,i0)') '-mechFE_petscspace_degree ', p_s - call PetscOptionsInsertString(PETSC_NULL_OPTIONS,trim(petsc_optionsOrder),ierr) - CHKERRQ(ierr) + call PetscOptionsInsertString(PETSC_NULL_OPTIONS,trim(petsc_optionsOrder),err_PETSc) + CHKERRQ(err_PETSc) wgt = 1.0/real(mesh_maxNips*mesh_NcpElemsGlobal,pReal) @@ -144,10 +144,9 @@ subroutine utilities_constitutiveResponse(timeinc,P_av,forwardData) real(pReal), intent(in) :: timeinc !< loading time logical, intent(in) :: forwardData !< age results - real(pReal),intent(out), dimension(3,3) :: P_av !< average PK stress - PetscErrorCode :: ierr + integer(MPI_INTEGER_KIND) :: err_MPI print'(/,1x,a)', '... evaluating constitutive response ......................................' @@ -157,7 +156,9 @@ subroutine utilities_constitutiveResponse(timeinc,P_av,forwardData) cutBack = .false. 
P_av = sum(homogenization_P,dim=3) * wgt - call MPI_Allreduce(MPI_IN_PLACE,P_av,9,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr) + call MPI_Allreduce(MPI_IN_PLACE,P_av,9_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' + end subroutine utilities_constitutiveResponse @@ -174,26 +175,29 @@ subroutine utilities_projectBCValues(localVec,section,field,comp,bcPointsIS,BCVa PetscInt, pointer :: bcPoints(:) PetscScalar, pointer :: localArray(:) PetscScalar :: BCValue,BCDotValue,timeinc - PetscErrorCode :: ierr + PetscErrorCode :: err_PETSc - call PetscSectionGetFieldComponents(section,field,numComp,ierr); CHKERRQ(ierr) - call ISGetSize(bcPointsIS,nBcPoints,ierr); CHKERRQ(ierr) - if (nBcPoints > 0) call ISGetIndicesF90(bcPointsIS,bcPoints,ierr) - call VecGetArrayF90(localVec,localArray,ierr); CHKERRQ(ierr) + call PetscSectionGetFieldComponents(section,field,numComp,err_PETSc) + CHKERRQ(err_PETSc) + call ISGetSize(bcPointsIS,nBcPoints,err_PETSc) + CHKERRQ(err_PETSc) + if (nBcPoints > 0) call ISGetIndicesF90(bcPointsIS,bcPoints,err_PETSc) + call VecGetArrayF90(localVec,localArray,err_PETSc); CHKERRQ(err_PETSc) do point = 1, nBcPoints - call PetscSectionGetFieldDof(section,bcPoints(point),field,numDof,ierr) - CHKERRQ(ierr) - call PetscSectionGetFieldOffset(section,bcPoints(point),field,offset,ierr) - CHKERRQ(ierr) + call PetscSectionGetFieldDof(section,bcPoints(point),field,numDof,err_PETSc) + CHKERRQ(err_PETSc) + call PetscSectionGetFieldOffset(section,bcPoints(point),field,offset,err_PETSc) + CHKERRQ(err_PETSc) do dof = offset+comp+1, offset+numDof, numComp localArray(dof) = localArray(dof) + BCValue + BCDotValue*timeinc end do end do - call VecRestoreArrayF90(localVec,localArray,ierr); CHKERRQ(ierr) - call VecAssemblyBegin(localVec, ierr); CHKERRQ(ierr) - call VecAssemblyEnd (localVec, ierr); CHKERRQ(ierr) - if (nBcPoints > 0) call ISRestoreIndicesF90(bcPointsIS,bcPoints,ierr) + call VecRestoreArrayF90(localVec,localArray,err_PETSc); CHKERRQ(err_PETSc) + call VecAssemblyBegin(localVec, err_PETSc); CHKERRQ(err_PETSc) + call VecAssemblyEnd (localVec, err_PETSc); CHKERRQ(err_PETSc) + if (nBcPoints > 0) call ISRestoreIndicesF90(bcPointsIS,bcPoints,err_PETSc) + CHKERRQ(err_PETSc) end subroutine utilities_projectBCValues diff --git a/src/mesh/discretization_mesh.f90 b/src/mesh/discretization_mesh.f90 index 2446bc0de..8623ebee4 100644 --- a/src/mesh/discretization_mesh.f90 +++ b/src/mesh/discretization_mesh.f90 @@ -71,21 +71,22 @@ subroutine discretization_mesh_init(restart) logical, intent(in) :: restart - integer :: dimPlex, & + PetscInt :: dimPlex, & mesh_Nnodes, & !< total number of nodes in mesh j, & debug_element, debug_ip PetscSF :: sf DM :: globalMesh - PetscInt :: nFaceSets + PetscInt :: nFaceSets, Nboundaries, NelemsGlobal, Nelems PetscInt, pointer, dimension(:) :: pFaceSets IS :: faceSetIS - PetscErrorCode :: ierr - integer, dimension(:), allocatable :: & + PetscErrorCode :: err_PETSc + integer(MPI_INTEGER_KIND) :: err_MPI + PetscInt, dimension(:), allocatable :: & materialAt class(tNode), pointer :: & num_mesh - integer :: p_i !< integration order (quadrature rule) + integer :: p_i, dim !< integration order (quadrature rule) type(tvec) :: coords_node0 print'(/,1x,a)', '<<<+- discretization_mesh init -+>>>' @@ -100,56 +101,64 @@ subroutine discretization_mesh_init(restart) debug_element = config_debug%get_asInt('element',defaultVal=1) debug_ip = config_debug%get_asInt('integrationpoint',defaultVal=1) - call 
DMPlexCreateFromFile(PETSC_COMM_WORLD,interface_geomFile,PETSC_TRUE,globalMesh,ierr) - CHKERRQ(ierr) - call DMGetDimension(globalMesh,dimPlex,ierr) - CHKERRQ(ierr) - call DMGetStratumSize(globalMesh,'depth',dimPlex,mesh_NcpElemsGlobal,ierr) - CHKERRQ(ierr) - print'()' - call DMView(globalMesh, PETSC_VIEWER_STDOUT_WORLD,ierr) - CHKERRQ(ierr) + call DMPlexCreateFromFile(PETSC_COMM_WORLD,interface_geomFile,PETSC_TRUE,globalMesh,err_PETSc) + CHKERRQ(err_PETSc) + call DMGetDimension(globalMesh,dimPlex,err_PETSc) + CHKERRQ(err_PETSc) + call DMGetStratumSize(globalMesh,'depth',dimPlex,NelemsGlobal,err_PETSc) + CHKERRQ(err_PETSc) + mesh_NcpElemsGlobal = int(NelemsGlobal) + call DMView(globalMesh, PETSC_VIEWER_STDOUT_WORLD,err_PETSc) + CHKERRQ(err_PETSc) ! get number of IDs in face sets (for boundary conditions?) - call DMGetLabelSize(globalMesh,'Face Sets',mesh_Nboundaries,ierr) - CHKERRQ(ierr) - call MPI_Bcast(mesh_Nboundaries,1,MPI_INTEGER,0,MPI_COMM_WORLD,ierr) - call MPI_Bcast(mesh_NcpElemsGlobal,1,MPI_INTEGER,0,MPI_COMM_WORLD,ierr) - call MPI_Bcast(dimPlex,1,MPI_INTEGER,0,MPI_COMM_WORLD,ierr) + call DMGetLabelSize(globalMesh,'Face Sets',Nboundaries,err_PETSc) + CHKERRQ(err_PETSc) + mesh_Nboundaries = int(Nboundaries) + call MPI_Bcast(mesh_Nboundaries,1_MPI_INTEGER_KIND,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' + call MPI_Bcast(mesh_NcpElemsGlobal,1_MPI_INTEGER_KIND,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' + dim = int(dimPlex) + call MPI_Bcast(dim,1_MPI_INTEGER_KIND,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) + dimPlex = int(dim,pPETSCINT) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' if (worldrank == 0) then - call DMClone(globalMesh,geomMesh,ierr) + call DMClone(globalMesh,geomMesh,err_PETSc) else - call DMPlexDistribute(globalMesh,0,sf,geomMesh,ierr) + call DMPlexDistribute(globalMesh,0_pPETSCINT,sf,geomMesh,err_PETSc) endif - CHKERRQ(ierr) + CHKERRQ(err_PETSc) - allocate(mesh_boundaries(mesh_Nboundaries), source = 0) - call DMGetLabelSize(globalMesh,'Face Sets',nFaceSets,ierr) - CHKERRQ(ierr) - call DMGetLabelIdIS(globalMesh,'Face Sets',faceSetIS,ierr) - CHKERRQ(ierr) + allocate(mesh_boundaries(mesh_Nboundaries), source = 0_pPETSCINT) + call DMGetLabelSize(globalMesh,'Face Sets',nFaceSets,err_PETSc) + CHKERRQ(err_PETSc) + call DMGetLabelIdIS(globalMesh,'Face Sets',faceSetIS,err_PETSc) + CHKERRQ(err_PETSc) if (nFaceSets > 0) then - call ISGetIndicesF90(faceSetIS,pFaceSets,ierr) - CHKERRQ(ierr) + call ISGetIndicesF90(faceSetIS,pFaceSets,err_PETSc) + CHKERRQ(err_PETSc) mesh_boundaries(1:nFaceSets) = pFaceSets - CHKERRQ(ierr) - call ISRestoreIndicesF90(faceSetIS,pFaceSets,ierr) + CHKERRQ(err_PETSc) + call ISRestoreIndicesF90(faceSetIS,pFaceSets,err_PETSc) endif - call MPI_Bcast(mesh_boundaries,mesh_Nboundaries,MPI_INTEGER,0,MPI_COMM_WORLD,ierr) + call MPI_Bcast(mesh_boundaries,mesh_Nboundaries,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' - call DMDestroy(globalMesh,ierr); CHKERRQ(ierr) + call DMDestroy(globalMesh,err_PETSc); CHKERRQ(err_PETSc) - call DMGetStratumSize(geomMesh,'depth',dimPlex,mesh_NcpElems,ierr) - CHKERRQ(ierr) - call DMGetStratumSize(geomMesh,'depth',0,mesh_Nnodes,ierr) - CHKERRQ(ierr) + call DMGetStratumSize(geomMesh,'depth',dimPlex,Nelems,err_PETSc) + CHKERRQ(err_PETSc) + mesh_NcpElems = int(Nelems) + call 
DMGetStratumSize(geomMesh,'depth',0_pPETSCINT,mesh_Nnodes,err_PETSc) + CHKERRQ(err_PETSc) ! Get initial nodal coordinates - call DMGetCoordinates(geomMesh,coords_node0,ierr) - CHKERRQ(ierr) - call VecGetArrayF90(coords_node0, mesh_node0_temp,ierr) - CHKERRQ(ierr) + call DMGetCoordinates(geomMesh,coords_node0,err_PETSc) + CHKERRQ(err_PETSc) + call VecGetArrayF90(coords_node0, mesh_node0_temp,err_PETSc) + CHKERRQ(err_PETSc) mesh_maxNips = FEM_nQuadrature(dimPlex,p_i) @@ -158,10 +167,10 @@ subroutine discretization_mesh_init(restart) allocate(materialAt(mesh_NcpElems)) do j = 1, mesh_NcpElems - call DMGetLabelValue(geomMesh,'Cell Sets',j-1,materialAt(j),ierr) - CHKERRQ(ierr) + call DMGetLabelValue(geomMesh,'Cell Sets',j-1,materialAt(j),err_PETSc) + CHKERRQ(err_PETSc) enddo - materialAt = materialAt + 1 + materialAt = materialAt + 1_pPETSCINT if (debug_element < 1 .or. debug_element > mesh_NcpElems) call IO_error(602,ext_msg='element') if (debug_ip < 1 .or. debug_ip > mesh_maxNips) call IO_error(602,ext_msg='IP') @@ -170,7 +179,7 @@ subroutine discretization_mesh_init(restart) mesh_node0(1:dimPlex,:) = reshape(mesh_node0_temp,[dimPlex,mesh_Nnodes]) - call discretization_init(materialAt,& + call discretization_init(int(materialAt),& reshape(mesh_ipCoordinates,[3,mesh_maxNips*mesh_NcpElems]), & mesh_node0) @@ -188,16 +197,17 @@ subroutine mesh_FEM_build_ipVolumes(dimPlex) PetscReal :: vol PetscReal, pointer,dimension(:) :: pCent, pNorm PetscInt :: cellStart, cellEnd, cell - PetscErrorCode :: ierr + PetscErrorCode :: err_PETSc allocate(mesh_ipVolume(mesh_maxNips,mesh_NcpElems),source=0.0_pReal) - call DMPlexGetHeightStratum(geomMesh,0,cellStart,cellEnd,ierr); CHKERRQ(ierr) + call DMPlexGetHeightStratum(geomMesh,0_pPETSCINT,cellStart,cellEnd,err_PETSc) + CHKERRQ(err_PETSc) allocate(pCent(dimPlex)) allocate(pNorm(dimPlex)) do cell = cellStart, cellEnd-1 - call DMPlexComputeCellGeometryFVM(geomMesh,cell,vol,pCent,pNorm,ierr) - CHKERRQ(ierr) + call DMPlexComputeCellGeometryFVM(geomMesh,cell,vol,pCent,pNorm,err_PETSc) + CHKERRQ(err_PETSc) mesh_ipVolume(:,cell+1) = vol/real(mesh_maxNips,pReal) enddo @@ -215,7 +225,7 @@ subroutine mesh_FEM_build_ipCoordinates(dimPlex,qPoints) PetscReal, pointer,dimension(:) :: pV0, pCellJ, pInvcellJ PetscReal :: detJ PetscInt :: cellStart, cellEnd, cell, qPt, dirI, dirJ, qOffset - PetscErrorCode :: ierr + PetscErrorCode :: err_PETSc allocate(mesh_ipCoordinates(3,mesh_maxNips,mesh_NcpElems),source=0.0_pReal) @@ -223,10 +233,11 @@ subroutine mesh_FEM_build_ipCoordinates(dimPlex,qPoints) allocate(pV0(dimPlex)) allocatE(pCellJ(dimPlex**2)) allocatE(pinvCellJ(dimPlex**2)) - call DMPlexGetHeightStratum(geomMesh,0,cellStart,cellEnd,ierr); CHKERRQ(ierr) + call DMPlexGetHeightStratum(geomMesh,0_pPETSCINT,cellStart,cellEnd,err_PETSc) + CHKERRQ(err_PETSc) do cell = cellStart, cellEnd-1 !< loop over all elements - call DMPlexComputeCellGeometryAffineFEM(geomMesh,cell,pV0,pCellJ,pInvcellJ,detJ,ierr) - CHKERRQ(ierr) + call DMPlexComputeCellGeometryAffineFEM(geomMesh,cell,pV0,pCellJ,pInvcellJ,detJ,err_PETSc) + CHKERRQ(err_PETSc) qOffset = 0 do qPt = 1, mesh_maxNips do dirI = 1, dimPlex diff --git a/src/mesh/mesh_mech_FEM.f90 b/src/mesh/mesh_mech_FEM.f90 index c1c0d8e3d..8cf9dbfa1 100644 --- a/src/mesh/mesh_mech_FEM.f90 +++ b/src/mesh/mesh_mech_FEM.f90 @@ -40,7 +40,7 @@ module mesh_mechanical_FEM type(tSolutionParams) :: params type, private :: tNumerics - integer :: & + PetscInt :: & p_i, & !< integration order (quadrature rule) itmax logical :: & @@ -55,7 +55,8 @@ module 
mesh_mechanical_FEM ! PETSc data SNES :: mechanical_snes Vec :: solution, solution_rate, solution_local - PetscInt :: dimPlex, cellDof, nQuadrature, nBasis + PetscInt :: dimPlex, cellDof, nBasis + integer :: nQuadrature PetscReal, allocatable, target :: qPoints(:), qWeights(:) MatNullSpace :: matnull @@ -104,11 +105,11 @@ subroutine FEM_mechanical_init(fieldBC) PetscReal :: detJ PetscReal, allocatable, target :: cellJMat(:,:) - PetscScalar, pointer :: px_scal(:) - PetscScalar, allocatable, target :: x_scal(:) + PetscScalar, pointer, dimension(:) :: px_scal + PetscScalar, allocatable, target, dimension(:) :: x_scal character(len=*), parameter :: prefix = 'mechFE_' - PetscErrorCode :: ierr + PetscErrorCode :: err_PETSc real(pReal), dimension(3,3) :: devNull class(tNode), pointer :: & num_mesh @@ -118,8 +119,8 @@ subroutine FEM_mechanical_init(fieldBC) !----------------------------------------------------------------------------- ! read numerical parametes and do sanity checks num_mesh => config_numerics%get('mesh',defaultVal=emptyDict) - num%p_i = num_mesh%get_asInt('p_i',defaultVal = 2) - num%itmax = num_mesh%get_asInt('itmax',defaultVal=250) + num%p_i = int(num_mesh%get_asInt('p_i',defaultVal = 2),pPETSCINT) + num%itmax = int(num_mesh%get_asInt('itmax',defaultVal=250),pPETSCINT) num%BBarStabilisation = num_mesh%get_asBool('bbarstabilisation',defaultVal = .false.) num%eps_struct_atol = num_mesh%get_asFloat('eps_struct_atol', defaultVal = 1.0e-10_pReal) num%eps_struct_rtol = num_mesh%get_asFloat('eps_struct_rtol', defaultVal = 1.0e-4_pReal) @@ -130,8 +131,8 @@ subroutine FEM_mechanical_init(fieldBC) !-------------------------------------------------------------------------------------------------- ! Setup FEM mech mesh - call DMClone(geomMesh,mechanical_mesh,ierr); CHKERRQ(ierr) - call DMGetDimension(mechanical_mesh,dimPlex,ierr); CHKERRQ(ierr) + call DMClone(geomMesh,mechanical_mesh,err_PETSc); CHKERRQ(err_PETSc) + call DMGetDimension(mechanical_mesh,dimPlex,err_PETSc); CHKERRQ(err_PETSc) !-------------------------------------------------------------------------------------------------- ! 
Setup FEM mech discretization @@ -140,96 +141,104 @@ subroutine FEM_mechanical_init(fieldBC) nQuadrature = FEM_nQuadrature( dimPlex,num%p_i) qPointsP => qPoints qWeightsP => qWeights - call PetscQuadratureCreate(PETSC_COMM_SELF,mechQuad,ierr); CHKERRQ(ierr) - CHKERRQ(ierr) + call PetscQuadratureCreate(PETSC_COMM_SELF,mechQuad,err_PETSc) + CHKERRQ(err_PETSc) nc = dimPlex - call PetscQuadratureSetData(mechQuad,dimPlex,nc,nQuadrature,qPointsP,qWeightsP,ierr) - CHKERRQ(ierr) + call PetscQuadratureSetData(mechQuad,dimPlex,nc,int(nQuadrature,pPETSCINT),qPointsP,qWeightsP,err_PETSc) + CHKERRQ(err_PETSc) call PetscFECreateDefault(PETSC_COMM_SELF,dimPlex,nc,PETSC_TRUE,prefix, & - num%p_i,mechFE,ierr); CHKERRQ(ierr) - call PetscFESetQuadrature(mechFE,mechQuad,ierr); CHKERRQ(ierr) - call PetscFEGetDimension(mechFE,nBasis,ierr); CHKERRQ(ierr) + num%p_i,mechFE,err_PETSc); CHKERRQ(err_PETSc) + call PetscFESetQuadrature(mechFE,mechQuad,err_PETSc); CHKERRQ(err_PETSc) + call PetscFEGetDimension(mechFE,nBasis,err_PETSc); CHKERRQ(err_PETSc) nBasis = nBasis/nc - call DMAddField(mechanical_mesh,PETSC_NULL_DMLABEL,mechFE,ierr); CHKERRQ(ierr) - call DMCreateDS(mechanical_mesh,ierr); CHKERRQ(ierr) - call DMGetDS(mechanical_mesh,mechDS,ierr); CHKERRQ(ierr) - call PetscDSGetTotalDimension(mechDS,cellDof,ierr); CHKERRQ(ierr) - call PetscFEDestroy(mechFE,ierr); CHKERRQ(ierr) - call PetscQuadratureDestroy(mechQuad,ierr); CHKERRQ(ierr) + call DMAddField(mechanical_mesh,PETSC_NULL_DMLABEL,mechFE,err_PETSc) + CHKERRQ(err_PETSc) + call DMCreateDS(mechanical_mesh,err_PETSc); CHKERRQ(err_PETSc) + call DMGetDS(mechanical_mesh,mechDS,err_PETSc); CHKERRQ(err_PETSc) + call PetscDSGetTotalDimension(mechDS,cellDof,err_PETSc); CHKERRQ(err_PETSc) + call PetscFEDestroy(mechFE,err_PETSc); CHKERRQ(err_PETSc) + call PetscQuadratureDestroy(mechQuad,err_PETSc); CHKERRQ(err_PETSc) !-------------------------------------------------------------------------------------------------- ! 
Setup FEM mech boundary conditions - call DMGetLabel(mechanical_mesh,'Face Sets',BCLabel,ierr); CHKERRQ(ierr) - call DMPlexLabelComplete(mechanical_mesh,BCLabel,ierr); CHKERRQ(ierr) - call DMGetLocalSection(mechanical_mesh,section,ierr); CHKERRQ(ierr) + call DMGetLabel(mechanical_mesh,'Face Sets',BCLabel,err_PETSc) + CHKERRQ(err_PETSc) + call DMPlexLabelComplete(mechanical_mesh,BCLabel,err_PETSc); CHKERRQ(err_PETSc) + call DMGetLocalSection(mechanical_mesh,section,err_PETSc); CHKERRQ(err_PETSc) allocate(pnumComp(1), source=dimPlex) - allocate(pnumDof(0:dimPlex), source = 0) + allocate(pnumDof(0:dimPlex), source = 0_pPETSCINT) do topologDim = 0, dimPlex - call DMPlexGetDepthStratum(mechanical_mesh,topologDim,cellStart,cellEnd,ierr) - CHKERRQ(ierr) - call PetscSectionGetDof(section,cellStart,pnumDof(topologDim),ierr) - CHKERRQ(ierr) + call DMPlexGetDepthStratum(mechanical_mesh,topologDim,cellStart,cellEnd,err_PETSc) + CHKERRQ(err_PETSc) + call PetscSectionGetDof(section,cellStart,pnumDof(topologDim),err_PETSc) + CHKERRQ(err_PETSc) enddo numBC = 0 do field = 1, dimPlex; do faceSet = 1, mesh_Nboundaries if (fieldBC%componentBC(field)%Mask(faceSet)) numBC = numBC + 1 enddo; enddo - allocate(pbcField(numBC), source=0) + allocate(pbcField(numBC), source=0_pPETSCINT) allocate(pbcComps(numBC)) allocate(pbcPoints(numBC)) numBC = 0 do field = 1, dimPlex; do faceSet = 1, mesh_Nboundaries if (fieldBC%componentBC(field)%Mask(faceSet)) then numBC = numBC + 1 - call ISCreateGeneral(PETSC_COMM_WORLD,1,[field-1],PETSC_COPY_VALUES,pbcComps(numBC),ierr) - CHKERRQ(ierr) - call DMGetStratumSize(mechanical_mesh,'Face Sets',mesh_boundaries(faceSet),bcSize,ierr) - CHKERRQ(ierr) + call ISCreateGeneral(PETSC_COMM_WORLD,1_pPETSCINT,[field-1],PETSC_COPY_VALUES,pbcComps(numBC),err_PETSc) + CHKERRQ(err_PETSc) + call DMGetStratumSize(mechanical_mesh,'Face Sets',mesh_boundaries(faceSet),bcSize,err_PETSc) + CHKERRQ(err_PETSc) if (bcSize > 0) then - call DMGetStratumIS(mechanical_mesh,'Face Sets',mesh_boundaries(faceSet),bcPoint,ierr) - CHKERRQ(ierr) - call ISGetIndicesF90(bcPoint,pBcPoint,ierr); CHKERRQ(ierr) - call ISCreateGeneral(PETSC_COMM_WORLD,bcSize,pBcPoint,PETSC_COPY_VALUES,pbcPoints(numBC),ierr) - CHKERRQ(ierr) - call ISRestoreIndicesF90(bcPoint,pBcPoint,ierr); CHKERRQ(ierr) - call ISDestroy(bcPoint,ierr); CHKERRQ(ierr) + call DMGetStratumIS(mechanical_mesh,'Face Sets',mesh_boundaries(faceSet),bcPoint,err_PETSc) + CHKERRQ(err_PETSc) + call ISGetIndicesF90(bcPoint,pBcPoint,err_PETSc); CHKERRQ(err_PETSc) + call ISCreateGeneral(PETSC_COMM_WORLD,bcSize,pBcPoint,PETSC_COPY_VALUES,pbcPoints(numBC),err_PETSc) + CHKERRQ(err_PETSc) + call ISRestoreIndicesF90(bcPoint,pBcPoint,err_PETSc); CHKERRQ(err_PETSc) + call ISDestroy(bcPoint,err_PETSc); CHKERRQ(err_PETSc) else - call ISCreateGeneral(PETSC_COMM_WORLD,0,[0],PETSC_COPY_VALUES,pbcPoints(numBC),ierr) - CHKERRQ(ierr) + call ISCreateGeneral(PETSC_COMM_WORLD,0_pPETSCINT,[0_pPETSCINT],PETSC_COPY_VALUES,pbcPoints(numBC),err_PETSc) + CHKERRQ(err_PETSc) endif endif enddo; enddo call DMPlexCreateSection(mechanical_mesh,nolabel,pNumComp,pNumDof, & - numBC,pBcField,pBcComps,pBcPoints,PETSC_NULL_IS,section,ierr) - CHKERRQ(ierr) - call DMSetSection(mechanical_mesh,section,ierr); CHKERRQ(ierr) + numBC,pBcField,pBcComps,pBcPoints,PETSC_NULL_IS,section,err_PETSc) + CHKERRQ(err_PETSc) + call DMSetSection(mechanical_mesh,section,err_PETSc); CHKERRQ(err_PETSc) do faceSet = 1, numBC - call ISDestroy(pbcPoints(faceSet),ierr); CHKERRQ(ierr) + call ISDestroy(pbcPoints(faceSet),err_PETSc); 
CHKERRQ(err_PETSc) enddo !-------------------------------------------------------------------------------------------------- ! initialize solver specific parts of PETSc - call SNESCreate(PETSC_COMM_WORLD,mechanical_snes,ierr);CHKERRQ(ierr) - call SNESSetOptionsPrefix(mechanical_snes,'mechanical_',ierr);CHKERRQ(ierr) - call SNESSetDM(mechanical_snes,mechanical_mesh,ierr); CHKERRQ(ierr) !< set the mesh for non-linear solver - call DMCreateGlobalVector(mechanical_mesh,solution ,ierr); CHKERRQ(ierr) !< locally owned displacement Dofs - call DMCreateGlobalVector(mechanical_mesh,solution_rate ,ierr); CHKERRQ(ierr) !< locally owned velocity Dofs to guess solution at next load step - call DMCreateLocalVector (mechanical_mesh,solution_local ,ierr); CHKERRQ(ierr) !< locally owned velocity Dofs to guess solution at next load step - call DMSNESSetFunctionLocal(mechanical_mesh,FEM_mechanical_formResidual,PETSC_NULL_VEC,ierr) !< function to evaluate residual forces - CHKERRQ(ierr) - call DMSNESSetJacobianLocal(mechanical_mesh,FEM_mechanical_formJacobian,PETSC_NULL_VEC,ierr) !< function to evaluate stiffness matrix - CHKERRQ(ierr) - call SNESSetMaxLinearSolveFailures(mechanical_snes, huge(1), ierr); CHKERRQ(ierr) !< ignore linear solve failures - call SNESSetConvergenceTest(mechanical_snes,FEM_mechanical_converged,PETSC_NULL_VEC,PETSC_NULL_FUNCTION,ierr) - CHKERRQ(ierr) - call SNESSetTolerances(mechanical_snes,1.0,0.0,0.0,num%itmax,num%itmax,ierr) - CHKERRQ(ierr) - call SNESSetFromOptions(mechanical_snes,ierr); CHKERRQ(ierr) + call SNESCreate(PETSC_COMM_WORLD,mechanical_snes,err_PETSc);CHKERRQ(err_PETSc) + call SNESSetOptionsPrefix(mechanical_snes,'mechanical_',err_PETSc) + CHKERRQ(err_PETSc) + call SNESSetDM(mechanical_snes,mechanical_mesh,err_PETSc) ! set the mesh for non-linear solver + CHKERRQ(err_PETSc) + call DMCreateGlobalVector(mechanical_mesh,solution, err_PETSc) ! locally owned displacement Dofs + CHKERRQ(err_PETSc) + call DMCreateGlobalVector(mechanical_mesh,solution_rate, err_PETSc) ! locally owned velocity Dofs to guess solution at next load step + CHKERRQ(err_PETSc) + call DMCreateLocalVector (mechanical_mesh,solution_local,err_PETSc) ! locally owned velocity Dofs to guess solution at next load step + CHKERRQ(err_PETSc) + call DMSNESSetFunctionLocal(mechanical_mesh,FEM_mechanical_formResidual,PETSC_NULL_VEC,err_PETSc) ! function to evaluate residual forces + CHKERRQ(err_PETSc) + call DMSNESSetJacobianLocal(mechanical_mesh,FEM_mechanical_formJacobian,PETSC_NULL_VEC,err_PETSc) ! function to evaluate stiffness matrix + CHKERRQ(err_PETSc) + call SNESSetMaxLinearSolveFailures(mechanical_snes, huge(1_pPETSCINT), err_PETSc) ! ignore linear solve failures + CHKERRQ(err_PETSc) + call SNESSetConvergenceTest(mechanical_snes,FEM_mechanical_converged,PETSC_NULL_VEC,PETSC_NULL_FUNCTION,err_PETSc) + CHKERRQ(err_PETSc) + call SNESSetTolerances(mechanical_snes,1.0,0.0,0.0,num%itmax,num%itmax,err_PETSc) + CHKERRQ(err_PETSc) + call SNESSetFromOptions(mechanical_snes,err_PETSc); CHKERRQ(err_PETSc) !-------------------------------------------------------------------------------------------------- ! 
init fields - call VecSet(solution ,0.0,ierr); CHKERRQ(ierr) - call VecSet(solution_rate ,0.0,ierr); CHKERRQ(ierr) + call VecSet(solution ,0.0,err_PETSc); CHKERRQ(err_PETSc) + call VecSet(solution_rate ,0.0,err_PETSc); CHKERRQ(err_PETSc) allocate(x_scal(cellDof)) allocate(nodalWeightsP(1)) allocate(nodalPointsP(dimPlex)) @@ -237,26 +246,26 @@ subroutine FEM_mechanical_init(fieldBC) allocate(pcellJ(dimPlex**2)) allocate(pinvcellJ(dimPlex**2)) allocate(cellJMat(dimPlex,dimPlex)) - call PetscDSGetDiscretization(mechDS,0,mechFE,ierr) - CHKERRQ(ierr) - call PetscFEGetDualSpace(mechFE,mechDualSpace,ierr); CHKERRQ(ierr) - call DMPlexGetHeightStratum(mechanical_mesh,0,cellStart,cellEnd,ierr) - CHKERRQ(ierr) + call PetscDSGetDiscretization(mechDS,0_pPETSCINT,mechFE,err_PETSc) + CHKERRQ(err_PETSc) + call PetscFEGetDualSpace(mechFE,mechDualSpace,err_PETSc); CHKERRQ(err_PETSc) + call DMPlexGetHeightStratum(mechanical_mesh,0_pPETSCINT,cellStart,cellEnd,err_PETSc) + CHKERRQ(err_PETSc) do cell = cellStart, cellEnd-1 !< loop over all elements x_scal = 0.0_pReal - call DMPlexComputeCellGeometryAffineFEM(mechanical_mesh,cell,pV0,pCellJ,pInvcellJ,detJ,ierr) - CHKERRQ(ierr) + call DMPlexComputeCellGeometryAffineFEM(mechanical_mesh,cell,pV0,pCellJ,pInvcellJ,detJ,err_PETSc) + CHKERRQ(err_PETSc) cellJMat = reshape(pCellJ,shape=[dimPlex,dimPlex]) do basis = 0, nBasis*dimPlex-1, dimPlex - call PetscDualSpaceGetFunctional(mechDualSpace,basis,functional,ierr) - CHKERRQ(ierr) - call PetscQuadratureGetData(functional,dimPlex,nc,nNodalPoints,nodalPointsP,nodalWeightsP,ierr) - CHKERRQ(ierr) + call PetscDualSpaceGetFunctional(mechDualSpace,basis,functional,err_PETSc) + CHKERRQ(err_PETSc) + call PetscQuadratureGetData(functional,dimPlex,nc,nNodalPoints,nodalPointsP,nodalWeightsP,err_PETSc) + CHKERRQ(err_PETSc) x_scal(basis+1:basis+dimPlex) = pV0 + matmul(transpose(cellJMat),nodalPointsP + 1.0_pReal) enddo px_scal => x_scal - call DMPlexVecSetClosure(mechanical_mesh,section,solution_local,cell,px_scal,5,ierr) - CHKERRQ(ierr) + call DMPlexVecSetClosure(mechanical_mesh,section,solution_local,cell,px_scal,5,err_PETSc) + CHKERRQ(err_PETSc) enddo call utilities_constitutiveResponse(0.0_pReal,devNull,.true.) @@ -279,7 +288,7 @@ type(tSolutionState) function FEM_mechanical_solution( & character(len=*), intent(in) :: & incInfoIn - PetscErrorCode :: ierr + PetscErrorCode :: err_PETSc SNESConvergedReason :: reason incInfo = incInfoIn @@ -289,8 +298,10 @@ type(tSolutionState) function FEM_mechanical_solution( & params%timeinc = timeinc params%fieldBC = fieldBC - call SNESSolve(mechanical_snes,PETSC_NULL_VEC,solution,ierr); CHKERRQ(ierr) ! solve mechanical_snes based on solution guess (result in solution) - call SNESGetConvergedReason(mechanical_snes,reason,ierr); CHKERRQ(ierr) ! solution converged? + call SNESSolve(mechanical_snes,PETSC_NULL_VEC,solution,err_PETSc) ! solve mechanical_snes based on solution guess (result in solution) + CHKERRQ(err_PETSc) + call SNESGetConvergedReason(mechanical_snes,reason,err_PETSc) ! solution converged? + CHKERRQ(err_PETSc) terminallyIll = .false. if (reason < 1) then ! 0: still iterating (will not occur), negative -> convergence error @@ -298,8 +309,8 @@ type(tSolutionState) function FEM_mechanical_solution( & FEM_mechanical_solution%iterationsNeeded = num%itmax else ! >= 1 proper convergence (or terminally ill) FEM_mechanical_solution%converged = .true. 
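! Annotation, not part of the patch: PETSc encodes the SNES outcome in
! SNESConvergedReason; SNES_CONVERGED_* values are positive, SNES_DIVERGED_*
! values negative, and 0 means still iterating, so 'reason < 1' above is the
! failure branch. A hedged sketch of the same test in isolation
! (solutionOK is a hypothetical local; reason and err_PETSc as declared here):
!   call SNESGetConvergedReason(mechanical_snes,reason,err_PETSc)
!   CHKERRQ(err_PETSc)
!   solutionOK = reason > 0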
- call SNESGetIterationNumber(mechanical_snes,FEM_mechanical_solution%iterationsNeeded,ierr) - CHKERRQ(ierr) + call SNESGetIterationNumber(mechanical_snes,FEM_mechanical_solution%iterationsNeeded,err_PETSc) + CHKERRQ(err_PETSc) endif print'(/,1x,a)', '===========================================================================' @@ -311,11 +322,12 @@ end function FEM_mechanical_solution !-------------------------------------------------------------------------------------------------- !> @brief forms the FEM residual vector !-------------------------------------------------------------------------------------------------- -subroutine FEM_mechanical_formResidual(dm_local,xx_local,f_local,dummy,ierr) +subroutine FEM_mechanical_formResidual(dm_local,xx_local,f_local,dummy,err_PETSc) DM :: dm_local PetscObject,intent(in) :: dummy - PetscErrorCode :: ierr + PetscErrorCode :: err_PETSc + integer(MPI_INTEGER_KIND) :: err_MPI PetscDS :: prob Vec :: x_local, f_local, xx_local @@ -339,22 +351,25 @@ subroutine FEM_mechanical_formResidual(dm_local,xx_local,f_local,dummy,ierr) allocate(pinvcellJ(dimPlex**2)) allocate(x_scal(cellDof)) - call DMGetLocalSection(dm_local,section,ierr); CHKERRQ(ierr) - call DMGetDS(dm_local,prob,ierr); CHKERRQ(ierr) - call PetscDSGetTabulation(prob,0,basisField,basisFieldDer,ierr) - CHKERRQ(ierr) - call DMPlexGetHeightStratum(dm_local,0,cellStart,cellEnd,ierr); CHKERRQ(ierr) - call DMGetLocalVector(dm_local,x_local,ierr); CHKERRQ(ierr) - call VecWAXPY(x_local,1.0,xx_local,solution_local,ierr); CHKERRQ(ierr) + call DMGetLocalSection(dm_local,section,err_PETSc); CHKERRQ(err_PETSc) + call DMGetDS(dm_local,prob,err_PETSc); CHKERRQ(err_PETSc) + call PetscDSGetTabulation(prob,0_pPETSCINT,basisField,basisFieldDer,err_PETSc) + CHKERRQ(err_PETSc) + call DMPlexGetHeightStratum(dm_local,0_pPETSCINT,cellStart,cellEnd,err_PETSc) + CHKERRQ(err_PETSc) + call DMGetLocalVector(dm_local,x_local,err_PETSc) + CHKERRQ(err_PETSc) + call VecWAXPY(x_local,1.0,xx_local,solution_local,err_PETSc) + CHKERRQ(err_PETSc) do field = 1, dimPlex; do face = 1, mesh_Nboundaries if (params%fieldBC%componentBC(field)%Mask(face)) then - call DMGetStratumSize(dm_local,'Face Sets',mesh_boundaries(face),bcSize,ierr) + call DMGetStratumSize(dm_local,'Face Sets',mesh_boundaries(face),bcSize,err_PETSc) if (bcSize > 0) then - call DMGetStratumIS(dm_local,'Face Sets',mesh_boundaries(face),bcPoints,ierr) - CHKERRQ(ierr) - call utilities_projectBCValues(x_local,section,0,field-1,bcPoints, & + call DMGetStratumIS(dm_local,'Face Sets',mesh_boundaries(face),bcPoints,err_PETSc) + CHKERRQ(err_PETSc) + call utilities_projectBCValues(x_local,section,0_pPETSCINT,field-1,bcPoints, & 0.0_pReal,params%fieldBC%componentBC(field)%Value(face),params%timeinc) - call ISDestroy(bcPoints,ierr); CHKERRQ(ierr) + call ISDestroy(bcPoints,err_PETSc); CHKERRQ(err_PETSc) endif endif enddo; enddo @@ -363,12 +378,12 @@ subroutine FEM_mechanical_formResidual(dm_local,xx_local,f_local,dummy,ierr) ! 
evaluate field derivatives do cell = cellStart, cellEnd-1 !< loop over all elements - call PetscSectionGetNumFields(section,numFields,ierr) - CHKERRQ(ierr) - call DMPlexVecGetClosure(dm_local,section,x_local,cell,x_scal,ierr) !< get Dofs belonging to element - CHKERRQ(ierr) - call DMPlexComputeCellGeometryAffineFEM(dm_local,cell,pV0,pCellJ,pInvcellJ,detJ,ierr) - CHKERRQ(ierr) + call PetscSectionGetNumFields(section,numFields,err_PETSc) + CHKERRQ(err_PETSc) + call DMPlexVecGetClosure(dm_local,section,x_local,cell,x_scal,err_PETSc) !< get Dofs belonging to element + CHKERRQ(err_PETSc) + call DMPlexComputeCellGeometryAffineFEM(dm_local,cell,pV0,pCellJ,pInvcellJ,detJ,err_PETSc) + CHKERRQ(err_PETSc) IcellJMat = reshape(pInvcellJ,shape=[dimPlex,dimPlex]) do qPt = 0, nQuadrature-1 m = cell*nQuadrature + qPt+1 @@ -392,23 +407,24 @@ subroutine FEM_mechanical_formResidual(dm_local,xx_local,f_local,dummy,ierr) enddo endif - call DMPlexVecRestoreClosure(dm_local,section,x_local,cell,x_scal,ierr) - CHKERRQ(ierr) + call DMPlexVecRestoreClosure(dm_local,section,x_local,cell,x_scal,err_PETSc) + CHKERRQ(err_PETSc) enddo !-------------------------------------------------------------------------------------------------- ! evaluate constitutive response call utilities_constitutiveResponse(params%timeinc,P_av,ForwardData) - call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,ierr) + call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1_MPI_INTEGER_KIND,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' ForwardData = .false. !-------------------------------------------------------------------------------------------------- ! integrating residual do cell = cellStart, cellEnd-1 !< loop over all elements - call DMPlexVecGetClosure(dm_local,section,x_local,cell,x_scal,ierr) !< get Dofs belonging to element - CHKERRQ(ierr) - call DMPlexComputeCellGeometryAffineFEM(dm_local,cell,pV0,pCellJ,pInvcellJ,detJ,ierr) - CHKERRQ(ierr) + call DMPlexVecGetClosure(dm_local,section,x_local,cell,x_scal,err_PETSc) !< get Dofs belonging to element + CHKERRQ(err_PETSc) + call DMPlexComputeCellGeometryAffineFEM(dm_local,cell,pV0,pCellJ,pInvcellJ,detJ,err_PETSc) + CHKERRQ(err_PETSc) IcellJMat = reshape(pInvcellJ,shape=[dimPlex,dimPlex]) f_scal = 0.0 do qPt = 0, nQuadrature-1 @@ -429,12 +445,12 @@ subroutine FEM_mechanical_formResidual(dm_local,xx_local,f_local,dummy,ierr) enddo f_scal = f_scal*abs(detJ) pf_scal => f_scal - call DMPlexVecSetClosure(dm_local,section,f_local,cell,pf_scal,ADD_VALUES,ierr) - CHKERRQ(ierr) - call DMPlexVecRestoreClosure(dm_local,section,x_local,cell,x_scal,ierr) - CHKERRQ(ierr) + call DMPlexVecSetClosure(dm_local,section,f_local,cell,pf_scal,ADD_VALUES,err_PETSc) + CHKERRQ(err_PETSc) + call DMPlexVecRestoreClosure(dm_local,section,x_local,cell,x_scal,err_PETSc) + CHKERRQ(err_PETSc) enddo - call DMRestoreLocalVector(dm_local,x_local,ierr); CHKERRQ(ierr) + call DMRestoreLocalVector(dm_local,x_local,err_PETSc); CHKERRQ(err_PETSc) end subroutine FEM_mechanical_formResidual @@ -442,13 +458,13 @@ end subroutine FEM_mechanical_formResidual !-------------------------------------------------------------------------------------------------- !> @brief forms the FEM stiffness matrix !-------------------------------------------------------------------------------------------------- -subroutine FEM_mechanical_formJacobian(dm_local,xx_local,Jac_pre,Jac,dummy,ierr) +subroutine 
FEM_mechanical_formJacobian(dm_local,xx_local,Jac_pre,Jac,dummy,err_PETSc) DM :: dm_local Mat :: Jac_pre, Jac PetscObject, intent(in) :: dummy - PetscErrorCode :: ierr + PetscErrorCode :: err_PETSc PetscDS :: prob Vec :: x_local, xx_local @@ -478,34 +494,43 @@ subroutine FEM_mechanical_formJacobian(dm_local,xx_local,Jac_pre,Jac,dummy,ierr) allocate(pcellJ(dimPlex**2)) allocate(pinvcellJ(dimPlex**2)) - call MatSetOption(Jac,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE,ierr); CHKERRQ(ierr) - call MatSetOption(Jac,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_FALSE,ierr); CHKERRQ(ierr) - call MatZeroEntries(Jac,ierr); CHKERRQ(ierr) - call DMGetDS(dm_local,prob,ierr); CHKERRQ(ierr) - call PetscDSGetTabulation(prob,0,basisField,basisFieldDer,ierr) - call DMGetLocalSection(dm_local,section,ierr); CHKERRQ(ierr) - call DMGetGlobalSection(dm_local,gSection,ierr); CHKERRQ(ierr) + call MatSetOption(Jac,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE,err_PETSc) + CHKERRQ(err_PETSc) + call MatSetOption(Jac,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_FALSE,err_PETSc) + CHKERRQ(err_PETSc) + call MatZeroEntries(Jac,err_PETSc) + CHKERRQ(err_PETSc) + call DMGetDS(dm_local,prob,err_PETSc) + CHKERRQ(err_PETSc) + call PetscDSGetTabulation(prob,0_pPETSCINT,basisField,basisFieldDer,err_PETSc) + call DMGetLocalSection(dm_local,section,err_PETSc) + CHKERRQ(err_PETSc) + call DMGetGlobalSection(dm_local,gSection,err_PETSc) + CHKERRQ(err_PETSc) - call DMGetLocalVector(dm_local,x_local,ierr); CHKERRQ(ierr) - call VecWAXPY(x_local,1.0_pReal,xx_local,solution_local,ierr); CHKERRQ(ierr) + call DMGetLocalVector(dm_local,x_local,err_PETSc) + CHKERRQ(err_PETSc) + call VecWAXPY(x_local,1.0_pReal,xx_local,solution_local,err_PETSc) + CHKERRQ(err_PETSc) do field = 1, dimPlex; do face = 1, mesh_Nboundaries if (params%fieldBC%componentBC(field)%Mask(face)) then - call DMGetStratumSize(dm_local,'Face Sets',mesh_boundaries(face),bcSize,ierr) + call DMGetStratumSize(dm_local,'Face Sets',mesh_boundaries(face),bcSize,err_PETSc) if (bcSize > 0) then - call DMGetStratumIS(dm_local,'Face Sets',mesh_boundaries(face),bcPoints,ierr) - CHKERRQ(ierr) - call utilities_projectBCValues(x_local,section,0,field-1,bcPoints, & + call DMGetStratumIS(dm_local,'Face Sets',mesh_boundaries(face),bcPoints,err_PETSc) + CHKERRQ(err_PETSc) + call utilities_projectBCValues(x_local,section,0_pPETSCINT,field-1,bcPoints, & 0.0_pReal,params%fieldBC%componentBC(field)%Value(face),params%timeinc) - call ISDestroy(bcPoints,ierr); CHKERRQ(ierr) + call ISDestroy(bcPoints,err_PETSc); CHKERRQ(err_PETSc) endif endif enddo; enddo - call DMPlexGetHeightStratum(dm_local,0,cellStart,cellEnd,ierr); CHKERRQ(ierr) + call DMPlexGetHeightStratum(dm_local,0_pPETSCINT,cellStart,cellEnd,err_PETSc) + CHKERRQ(err_PETSc) do cell = cellStart, cellEnd-1 !< loop over all elements - call DMPlexVecGetClosure(dm_local,section,x_local,cell,x_scal,ierr) !< get Dofs belonging to element - CHKERRQ(ierr) - call DMPlexComputeCellGeometryAffineFEM(dm_local,cell,pV0,pCellJ,pInvcellJ,detJ,ierr) - CHKERRQ(ierr) + call DMPlexVecGetClosure(dm_local,section,x_local,cell,x_scal,err_PETSc) !< get Dofs belonging to element + CHKERRQ(err_PETSc) + call DMPlexComputeCellGeometryAffineFEM(dm_local,cell,pV0,pCellJ,pInvcellJ,detJ,err_PETSc) + CHKERRQ(err_PETSc) K_eA = 0.0 K_eB = 0.0 MatB = 0.0 @@ -531,11 +556,11 @@ subroutine FEM_mechanical_formJacobian(dm_local,xx_local,Jac_pre,Jac,dummy,ierr) FInv = math_inv33(F) K_eA = K_eA + matmul(transpose(BMat),MatA)*math_det33(FInv)**(1.0/real(dimPlex)) K_eB = K_eB - & - 
matmul(transpose(matmul(reshape(homogenization_F(1:dimPlex,1:dimPlex,m),shape=[dimPlex*dimPlex,1]), & + matmul(transpose(matmul(reshape(homogenization_F(1:dimPlex,1:dimPlex,m),shape=[dimPlex**2,1_pPETSCINT]), & matmul(reshape(FInv(1:dimPlex,1:dimPlex), & - shape=[1,dimPlex*dimPlex],order=[2,1]),BMat))),MatA) + shape=[1_pPETSCINT,dimPlex**2],order=[2,1]),BMat))),MatA) MatB = MatB & - + matmul(reshape(homogenization_F(1:dimPlex,1:dimPlex,m),shape=[1,dimPlex*dimPlex]),MatA) + + matmul(reshape(homogenization_F(1:dimPlex,1:dimPlex,m),shape=[1_pPETSCINT,dimPlex**2]),MatA) FAvg = FAvg + F BMatAvg = BMatAvg + BMat else @@ -546,39 +571,40 @@ subroutine FEM_mechanical_formJacobian(dm_local,xx_local,Jac_pre,Jac,dummy,ierr) FInv = math_inv33(FAvg) K_e = K_eA*math_det33(FAvg/real(nQuadrature))**(1.0/real(dimPlex)) + & (matmul(matmul(transpose(BMatAvg), & - reshape(FInv(1:dimPlex,1:dimPlex),shape=[dimPlex*dimPlex,1],order=[2,1])),MatB) + & + reshape(FInv(1:dimPlex,1:dimPlex),shape=[dimPlex**2,1_pPETSCINT],order=[2,1])),MatB) + & K_eB)/real(dimPlex) else K_e = K_eA endif - K_e = (K_e + eps*math_eye(cellDof)) * abs(detJ) + K_e = (K_e + eps*math_eye(int(cellDof))) * abs(detJ) #ifndef __INTEL_COMPILER pK_e(1:cellDOF**2) => K_e #else ! https://software.intel.com/en-us/forums/intel-fortran-compiler/topic/782230 (bug) allocate(pK_e(cellDOF**2),source = reshape(K_e,[cellDOF**2])) #endif - call DMPlexMatSetClosure(dm_local,section,gSection,Jac,cell,pK_e,ADD_VALUES,ierr) - CHKERRQ(ierr) - call DMPlexVecRestoreClosure(dm_local,section,x_local,cell,x_scal,ierr) - CHKERRQ(ierr) + call DMPlexMatSetClosure(dm_local,section,gSection,Jac,cell,pK_e,ADD_VALUES,err_PETSc) + CHKERRQ(err_PETSc) + call DMPlexVecRestoreClosure(dm_local,section,x_local,cell,x_scal,err_PETSc) + CHKERRQ(err_PETSc) enddo - call MatAssemblyBegin(Jac,MAT_FINAL_ASSEMBLY,ierr); CHKERRQ(ierr) - call MatAssemblyEnd(Jac,MAT_FINAL_ASSEMBLY,ierr); CHKERRQ(ierr) - call MatAssemblyBegin(Jac_pre,MAT_FINAL_ASSEMBLY,ierr); CHKERRQ(ierr) - call MatAssemblyEnd(Jac_pre,MAT_FINAL_ASSEMBLY,ierr); CHKERRQ(ierr) - call DMRestoreLocalVector(dm_local,x_local,ierr); CHKERRQ(ierr) + call MatAssemblyBegin(Jac,MAT_FINAL_ASSEMBLY,err_PETSc); CHKERRQ(err_PETSc) + call MatAssemblyEnd(Jac,MAT_FINAL_ASSEMBLY,err_PETSc); CHKERRQ(err_PETSc) + call MatAssemblyBegin(Jac_pre,MAT_FINAL_ASSEMBLY,err_PETSc); CHKERRQ(err_PETSc) + call MatAssemblyEnd(Jac_pre,MAT_FINAL_ASSEMBLY,err_PETSc); CHKERRQ(err_PETSc) + call DMRestoreLocalVector(dm_local,x_local,err_PETSc); CHKERRQ(err_PETSc) !-------------------------------------------------------------------------------------------------- ! 
apply boundary conditions #if (PETSC_VERSION_MINOR < 14) - call DMPlexCreateRigidBody(dm_local,matnull,ierr); CHKERRQ(ierr) + call DMPlexCreateRigidBody(dm_local,matnull,err_PETSc); CHKERRQ(err_PETSc) #else - call DMPlexCreateRigidBody(dm_local,0,matnull,ierr); CHKERRQ(ierr) + call DMPlexCreateRigidBody(dm_local,0_pPETSCINT,matnull,err_PETSc) + CHKERRQ(err_PETSc) #endif - call MatSetNullSpace(Jac,matnull,ierr); CHKERRQ(ierr) - call MatSetNearNullSpace(Jac,matnull,ierr); CHKERRQ(ierr) - call MatNullSpaceDestroy(matnull,ierr); CHKERRQ(ierr) + call MatSetNullSpace(Jac,matnull,err_PETSc); CHKERRQ(err_PETSc) + call MatSetNearNullSpace(Jac,matnull,err_PETSc); CHKERRQ(err_PETSc) + call MatNullSpaceDestroy(matnull,err_PETSc); CHKERRQ(err_PETSc) end subroutine FEM_mechanical_formJacobian @@ -601,43 +627,43 @@ subroutine FEM_mechanical_forward(guess,timeinc,timeinc_old,fieldBC) Vec :: x_local PetscSection :: section IS :: bcPoints - PetscErrorCode :: ierr + PetscErrorCode :: err_PETSc !-------------------------------------------------------------------------------------------------- ! forward last inc if (guess .and. .not. cutBack) then ForwardData = .True. homogenization_F0 = homogenization_F - call SNESGetDM(mechanical_snes,dm_local,ierr); CHKERRQ(ierr) !< retrieve mesh info from mechanical_snes into dm_local - call DMGetSection(dm_local,section,ierr); CHKERRQ(ierr) - call DMGetLocalVector(dm_local,x_local,ierr); CHKERRQ(ierr) - call VecSet(x_local,0.0_pReal,ierr); CHKERRQ(ierr) - call DMGlobalToLocalBegin(dm_local,solution,INSERT_VALUES,x_local,ierr) !< retrieve my partition of global solution vector - CHKERRQ(ierr) - call DMGlobalToLocalEnd(dm_local,solution,INSERT_VALUES,x_local,ierr) - CHKERRQ(ierr) - call VecAXPY(solution_local,1.0,x_local,ierr); CHKERRQ(ierr) + call SNESGetDM(mechanical_snes,dm_local,err_PETSc); CHKERRQ(err_PETSc) !< retrieve mesh info from mechanical_snes into dm_local + call DMGetSection(dm_local,section,err_PETSc); CHKERRQ(err_PETSc) + call DMGetLocalVector(dm_local,x_local,err_PETSc); CHKERRQ(err_PETSc) + call VecSet(x_local,0.0_pReal,err_PETSc); CHKERRQ(err_PETSc) + call DMGlobalToLocalBegin(dm_local,solution,INSERT_VALUES,x_local,err_PETSc) !< retrieve my partition of global solution vector + CHKERRQ(err_PETSc) + call DMGlobalToLocalEnd(dm_local,solution,INSERT_VALUES,x_local,err_PETSc) + CHKERRQ(err_PETSc) + call VecAXPY(solution_local,1.0,x_local,err_PETSc); CHKERRQ(err_PETSc) do field = 1, dimPlex; do face = 1, mesh_Nboundaries if (fieldBC%componentBC(field)%Mask(face)) then - call DMGetStratumSize(dm_local,'Face Sets',mesh_boundaries(face),bcSize,ierr) + call DMGetStratumSize(dm_local,'Face Sets',mesh_boundaries(face),bcSize,err_PETSc) if (bcSize > 0) then - call DMGetStratumIS(dm_local,'Face Sets',mesh_boundaries(face),bcPoints,ierr) - CHKERRQ(ierr) - call utilities_projectBCValues(solution_local,section,0,field-1,bcPoints, & + call DMGetStratumIS(dm_local,'Face Sets',mesh_boundaries(face),bcPoints,err_PETSc) + CHKERRQ(err_PETSc) + call utilities_projectBCValues(solution_local,section,0_pPETSCINT,field-1,bcPoints, & 0.0_pReal,fieldBC%componentBC(field)%Value(face),timeinc_old) - call ISDestroy(bcPoints,ierr); CHKERRQ(ierr) + call ISDestroy(bcPoints,err_PETSc); CHKERRQ(err_PETSc) endif endif enddo; enddo - call DMRestoreLocalVector(dm_local,x_local,ierr); CHKERRQ(ierr) + call DMRestoreLocalVector(dm_local,x_local,err_PETSc); CHKERRQ(err_PETSc) !-------------------------------------------------------------------------------------------------- ! 
update rate and forward last inc - call VecCopy(solution,solution_rate,ierr); CHKERRQ(ierr) - call VecScale(solution_rate,1.0/timeinc_old,ierr); CHKERRQ(ierr) + call VecCopy(solution,solution_rate,err_PETSc); CHKERRQ(err_PETSc) + call VecScale(solution_rate,1.0/timeinc_old,err_PETSc); CHKERRQ(err_PETSc) endif - call VecCopy(solution_rate,solution,ierr); CHKERRQ(ierr) - call VecScale(solution,timeinc,ierr); CHKERRQ(ierr) + call VecCopy(solution_rate,solution,err_PETSc); CHKERRQ(err_PETSc) + call VecScale(solution,timeinc,err_PETSc); CHKERRQ(err_PETSc) end subroutine FEM_mechanical_forward @@ -645,24 +671,24 @@ end subroutine FEM_mechanical_forward !-------------------------------------------------------------------------------------------------- !> @brief reporting !-------------------------------------------------------------------------------------------------- -subroutine FEM_mechanical_converged(snes_local,PETScIter,xnorm,snorm,fnorm,reason,dummy,ierr) +subroutine FEM_mechanical_converged(snes_local,PETScIter,xnorm,snorm,fnorm,reason,dummy,err_PETSc) SNES :: snes_local PetscInt :: PETScIter PetscReal :: xnorm,snorm,fnorm,divTol SNESConvergedReason :: reason PetscObject :: dummy - PetscErrorCode :: ierr + PetscErrorCode :: err_PETSc !-------------------------------------------------------------------------------------------------- ! report divTol = max(maxval(abs(P_av(1:dimPlex,1:dimPlex)))*num%eps_struct_rtol,num%eps_struct_atol) - call SNESConvergedDefault(snes_local,PETScIter,xnorm,snorm,fnorm/divTol,reason,dummy,ierr) - CHKERRQ(ierr) + call SNESConvergedDefault(snes_local,PETScIter,xnorm,snorm,fnorm/divTol,reason,dummy,err_PETSc) + CHKERRQ(err_PETSc) if (terminallyIll) reason = SNES_DIVERGED_FUNCTION_DOMAIN print'(/,1x,a,a,i0,a,i0,f0.3)', trim(incInfo), & ' @ Iteration ',PETScIter,' mechanical residual norm = ', & - int(fnorm/divTol),fnorm/divTol-int(fnorm/divTol) + int(fnorm/divTol),fnorm/divTol-int(fnorm/divTol) ! ToDo: int casting? 
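! Annotation, not part of the patch: the residual-norm print above splits the
! real value fnorm/divTol into int(x) for the i0 edit descriptor and x-int(x)
! for f0.3, which joins into one decimal number. Minimal illustration
! (x is a made-up local):
!   real :: x = 12.3456
!   print '(i0,f0.3)', int(x), x - int(x)   ! prints 12.346 with gfortran (f0.3 omits the leading 0)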
print'(/,1x,a,/,2(3(2x,f12.4,1x)/),3(2x,f12.4,1x))', & 'Piola--Kirchhoff stress / MPa =',transpose(P_av)*1.e-6_pReal flush(IO_STDOUT) @@ -675,9 +701,7 @@ end subroutine FEM_mechanical_converged !-------------------------------------------------------------------------------------------------- subroutine FEM_mechanical_updateCoords() - real(pReal), pointer, dimension(:) :: & - nodeCoords_linear !< nodal coordinates (dimPlex*Nnodes) - real(pReal), pointer, dimension(:,:) :: & + PetscReal, pointer, dimension(:,:) :: & nodeCoords !< nodal coordinates (3,Nnodes) real(pReal), pointer, dimension(:,:,:) :: & ipCoords !< ip coordinates (3,nQuadrature,mesh_NcpElems) @@ -690,42 +714,47 @@ subroutine FEM_mechanical_updateCoords() DM :: dm_local Vec :: x_local - PetscErrorCode :: ierr + PetscErrorCode :: err_PETSc PetscInt :: pStart, pEnd, p, s, e, q, & cellStart, cellEnd, c, n PetscSection :: section PetscQuadrature :: mechQuad - PetscReal, dimension(:), pointer :: basisField, basisFieldDer - PetscScalar, dimension(:), pointer :: x_scal + PetscReal, dimension(:), pointer :: basisField, basisFieldDer, & + nodeCoords_linear !< nodal coordinates (dimPlex*Nnodes) + PetscScalar, dimension(:), pointer :: x_scal - call SNESGetDM(mechanical_snes,dm_local,ierr); CHKERRQ(ierr) - call DMGetDS(dm_local,mechQuad,ierr); CHKERRQ(ierr) - call DMGetLocalSection(dm_local,section,ierr); CHKERRQ(ierr) - call DMGetLocalVector(dm_local,x_local,ierr); CHKERRQ(ierr) - call DMGetDimension(dm_local,dimPlex,ierr); CHKERRQ(ierr) + call SNESGetDM(mechanical_snes,dm_local,err_PETSc); CHKERRQ(err_PETSc) + call DMGetDS(dm_local,mechQuad,err_PETSc); CHKERRQ(err_PETSc) + call DMGetLocalSection(dm_local,section,err_PETSc); CHKERRQ(err_PETSc) + call DMGetLocalVector(dm_local,x_local,err_PETSc); CHKERRQ(err_PETSc) + call DMGetDimension(dm_local,dimPlex,err_PETSc); CHKERRQ(err_PETSc) ! write cell vertex displacements - call DMPlexGetDepthStratum(dm_local,0,pStart,pEnd,ierr); CHKERRQ(ierr) + call DMPlexGetDepthStratum(dm_local,0_pPETSCINT,pStart,pEnd,err_PETSc) + CHKERRQ(err_PETSc) allocate(nodeCoords(3,pStart:pEnd-1),source=0.0_pReal) - call VecGetArrayF90(x_local,nodeCoords_linear,ierr); CHKERRQ(ierr) + call VecGetArrayF90(x_local,nodeCoords_linear,err_PETSc); CHKERRQ(err_PETSc) do p=pStart, pEnd-1 - call DMPlexGetPointLocal(dm_local, p, s, e, ierr); CHKERRQ(ierr) + call DMPlexGetPointLocal(dm_local, p, s, e, err_PETSc); CHKERRQ(err_PETSc) nodeCoords(1:dimPlex,p)=nodeCoords_linear(s+1:e) enddo call discretization_setNodeCoords(nodeCoords) - call VecRestoreArrayF90(x_local,nodeCoords_linear,ierr); CHKERRQ(ierr) + call VecRestoreArrayF90(x_local,nodeCoords_linear,err_PETSc); CHKERRQ(err_PETSc) ! 
write ip displacements - call DMPlexGetHeightStratum(dm_local,0,cellStart,cellEnd,ierr); CHKERRQ(ierr) - call PetscDSGetTabulation(mechQuad,0,basisField,basisFieldDer,ierr); CHKERRQ(ierr) + call DMPlexGetHeightStratum(dm_local,0_pPETSCINT,cellStart,cellEnd,err_PETSc) + CHKERRQ(err_PETSc) + call PetscDSGetTabulation(mechQuad,0_pPETSCINT,basisField,basisFieldDer,err_PETSc) + CHKERRQ(err_PETSc) allocate(ipCoords(3,nQuadrature,mesh_NcpElems),source=0.0_pReal) do c=cellStart,cellEnd-1 qOffset=0 - call DMPlexVecGetClosure(dm_local,section,x_local,c,x_scal,ierr); CHKERRQ(ierr) !< get nodal coordinates of each element + call DMPlexVecGetClosure(dm_local,section,x_local,c,x_scal,err_PETSc) !< get nodal coordinates of each element + CHKERRQ(err_PETSc) do qPt=0,nQuadrature-1 qOffset= qPt * (size(basisField)/nQuadrature) - do comp=0,dimPlex-1 !< loop over components + do comp=0,dimPlex-1 !< loop over components nOffset=0 q = comp do n=0,nBasis-1 @@ -737,10 +766,11 @@ subroutine FEM_mechanical_updateCoords() enddo enddo enddo - call DMPlexVecRestoreClosure(dm_local,section,x_local,c,x_scal,ierr); CHKERRQ(ierr) + call DMPlexVecRestoreClosure(dm_local,section,x_local,c,x_scal,err_PETSc) + CHKERRQ(err_PETSc) end do call discretization_setIPcoords(reshape(ipCoords,[3,mesh_NcpElems*nQuadrature])) - call DMRestoreLocalVector(dm_local,x_local,ierr); CHKERRQ(ierr) + call DMRestoreLocalVector(dm_local,x_local,err_PETSc); CHKERRQ(err_PETSc) end subroutine FEM_mechanical_updateCoords diff --git a/src/parallelization.f90 b/src/parallelization.f90 index ebb9573d4..d9ca15981 100644 --- a/src/parallelization.f90 +++ b/src/parallelization.f90 @@ -20,9 +20,17 @@ module parallelization implicit none private - integer, protected, public :: & - worldrank = 0, & !< MPI worldrank (/=0 for MPI simulations only) - worldsize = 1 !< MPI worldsize (/=1 for MPI simulations only) +#ifndef PETSC + integer, parameter, public :: & + MPI_INTEGER_KIND = pI64 + integer(MPI_INTEGER_KIND), parameter, public :: & + worldrank = 0_MPI_INTEGER_KIND, & !< MPI dummy worldrank + worldsize = 1_MPI_INTEGER_KIND !< MPI dummy worldsize +#else + integer(MPI_INTEGER_KIND), protected, public :: & + worldrank = 0_MPI_INTEGER_KIND, & !< MPI worldrank (/=0 for MPI simulations only) + worldsize = 1_MPI_INTEGER_KIND !< MPI worldsize (/=1 for MPI simulations only) +#endif #ifndef PETSC public :: parallelization_bcast_str @@ -44,51 +52,63 @@ contains !-------------------------------------------------------------------------------------------------- subroutine parallelization_init - integer :: err, typeSize + integer(MPI_INTEGER_KIND) :: err_MPI, typeSize !$ integer :: got_env, threadLevel !$ integer(pI32) :: OMP_NUM_THREADS !$ character(len=6) NumThreadsString - PetscErrorCode :: petsc_err + PetscErrorCode :: err_PETSc #ifdef _OPENMP ! If openMP is enabled, check if the MPI libary supports it and initialize accordingly. ! Otherwise, the first call to PETSc will do the initialization. 
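! Annotation, not part of the patch: MPI_THREAD_FUNNELED is the weakest
! threading level that still lets an OpenMP program issue MPI calls from its
! main thread, hence the request plus capability check in the hunk below,
! roughly (names as in the patch):
!   call MPI_Init_thread(MPI_THREAD_FUNNELED,threadLevel,err_MPI)
!   if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI init failed'
!   if (threadLevel < MPI_THREAD_FUNNELED) error stop 'MPI library does not support OpenMP'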
- call MPI_Init_Thread(MPI_THREAD_FUNNELED,threadLevel,err) - if (err /= 0) error stop 'MPI init failed' + call MPI_Init_Thread(MPI_THREAD_FUNNELED,threadLevel,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI init failed' if (threadLevel<MPI_THREAD_FUNNELED) error stop 'MPI library does not support OpenMP' #endif - call PetscInitializeNoArguments(petsc_err) - CHKERRQ(petsc_err) + call PetscInitializeNoArguments(err_PETSc) + CHKERRQ(err_PETSc) - call MPI_Comm_rank(MPI_COMM_WORLD,worldrank,err) - if (err /= 0) error stop 'Could not determine worldrank' + call MPI_Comm_rank(MPI_COMM_WORLD,worldrank,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) & + error stop 'Could not determine worldrank' if (worldrank == 0) print'(/,1x,a)', '<<<+- parallelization init -+>>>' - call MPI_Comm_size(MPI_COMM_WORLD,worldsize,err) - if (err /= 0) error stop 'Could not determine worldsize' + call MPI_Comm_size(MPI_COMM_WORLD,worldsize,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) & + error stop 'Could not determine worldsize' if (worldrank == 0) print'(/,1x,a,i3)', 'MPI processes: ',worldsize - call MPI_Type_size(MPI_INTEGER,typeSize,err) - if (err /= 0) error stop 'Could not determine MPI integer size' - if (typeSize*8 /= bit_size(0)) error stop 'Mismatch between MPI and DAMASK integer' + call MPI_Type_size(MPI_INTEGER,typeSize,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) & + error stop 'Could not determine size of MPI_INTEGER' + if (typeSize*8_MPI_INTEGER_KIND /= int(bit_size(0),MPI_INTEGER_KIND)) & + error stop 'Mismatch between MPI_INTEGER and DAMASK default integer' - call MPI_Type_size(MPI_DOUBLE,typeSize,err) - if (err /= 0) error stop 'Could not determine MPI real size' - if (typeSize*8 /= storage_size(0.0_pReal)) error stop 'Mismatch between MPI and DAMASK real' + call MPI_Type_size(MPI_INTEGER8,typeSize,err_MPI) + if (err_MPI /= 0) & + error stop 'Could not determine size of MPI_INTEGER8' + if (typeSize*8_MPI_INTEGER_KIND /= int(bit_size(0_pI64),MPI_INTEGER_KIND)) & + error stop 'Mismatch between MPI_INTEGER8 and DAMASK pI64' + + call MPI_Type_size(MPI_DOUBLE,typeSize,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) & + error stop 'Could not determine size of MPI_DOUBLE' + if (typeSize*8_MPI_INTEGER_KIND /= int(storage_size(0.0_pReal),MPI_INTEGER_KIND)) & + error stop 'Mismatch between MPI_DOUBLE and DAMASK pReal' if (worldrank /= 0) then close(OUTPUT_UNIT) ! disable output @@ -119,14 +139,14 @@ subroutine parallelization_bcast_str(string) character(len=:), allocatable, intent(inout) :: string - integer :: strlen, ierr !
pI64 for strlen not supported by MPI + integer(MPI_INTEGER_KIND) :: strlen, err_MPI - if (worldrank == 0) strlen = len(string) - call MPI_Bcast(strlen,1,MPI_INTEGER,0,MPI_COMM_WORLD, ierr) + if (worldrank == 0) strlen = len(string,MPI_INTEGER_KIND) + call MPI_Bcast(strlen,1_MPI_INTEGER_KIND,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD, err_MPI) if (worldrank /= 0) allocate(character(len=strlen)::string) - call MPI_Bcast(string,strlen,MPI_CHARACTER,0,MPI_COMM_WORLD, ierr) + call MPI_Bcast(string,strlen,MPI_CHARACTER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD, err_MPI) end subroutine parallelization_bcast_str diff --git a/src/prec.f90 b/src/prec.f90 index 843033219..8de82fee8 100644 --- a/src/prec.f90 +++ b/src/prec.f90 @@ -10,6 +10,11 @@ module prec use, intrinsic :: IEEE_arithmetic use, intrinsic :: ISO_C_binding +#ifdef PETSC +#include <petsc/finclude/petscsys.h> + use PETScSys +#endif + implicit none public @@ -17,13 +22,12 @@ module prec integer, parameter :: pReal = IEEE_selected_real_kind(15,307) !< number with 15 significant digits, up to 1e+-307 (typically 64 bit) integer, parameter :: pI32 = selected_int_kind(9) !< number with at least up to +-1e9 (typically 32 bit) integer, parameter :: pI64 = selected_int_kind(18) !< number with at least up to +-1e18 (typically 64 bit) -#if(INT==8) - integer, parameter :: pInt = pI64 -#else - integer, parameter :: pInt = pI32 +#ifdef PETSC + PetscInt, private :: dummy + integer, parameter :: pPETSCINT = kind(dummy) #endif - integer, parameter :: pStringLen = 256 !< default string length - integer, parameter :: pPathLen = 4096 !< maximum length of a path name on linux + integer, parameter :: pSTRINGLEN = 256 !< default string length + integer, parameter :: pPATHLEN = 4096 !< maximum length of a path name on linux real(pReal), parameter :: tol_math_check = 1.0e-8_pReal !< tolerance for internal math self-checks (rotation) @@ -268,7 +272,7 @@ subroutine selfTest integer, allocatable, dimension(:) :: realloc_lhs_test real(pReal), dimension(1) :: f - integer(pInt), dimension(1) :: i + integer(pI64), dimension(1) :: i real(pReal), dimension(2) :: r @@ -289,11 +293,11 @@ subroutine selfTest f = real(prec_bytesToC_DOUBLE(int([0,0,0,-32,+119,+65,+115,65],C_SIGNED_CHAR)),pReal) if (dNeq(f(1),20191102.0_pReal,0.0_pReal)) error stop 'prec_bytesToC_DOUBLE' - i = int(prec_bytesToC_INT32_T(int([+126,+23,+52,+1],C_SIGNED_CHAR)),pInt) - if (i(1) /= 20191102_pInt) error stop 'prec_bytesToC_INT32_T' + i = int(prec_bytesToC_INT32_T(int([+126,+23,+52,+1],C_SIGNED_CHAR)),pI64) + if (i(1) /= 20191102_pI64) error stop 'prec_bytesToC_INT32_T' - i = int(prec_bytesToC_INT64_T(int([+126,+23,+52,+1,0,0,0,0],C_SIGNED_CHAR)),pInt) - if (i(1) /= 20191102_pInt) error stop 'prec_bytesToC_INT64_T' + i = int(prec_bytesToC_INT64_T(int([+126,+23,+52,+1,0,0,0,0],C_SIGNED_CHAR)),pI64) + if (i(1) /= 20191102_pI64) error stop 'prec_bytesToC_INT64_T' end subroutine selfTest diff --git a/src/quit.f90 b/src/quit.f90 index 6361ff783..3c5f5e6f2 100644 --- a/src/quit.f90 +++ b/src/quit.f90 @@ -15,20 +15,23 @@ subroutine quit(stop_id) implicit none integer, intent(in) :: stop_id integer, dimension(8) :: dateAndTime - integer :: error - PetscErrorCode :: ierr = 0 + integer :: err_HDF5 + integer(MPI_INTEGER_KIND) :: err_MPI + PetscErrorCode :: err_PETSc - call h5open_f(error) - if (error /= 0) write(6,'(a,i5)') ' Error in h5open_f ',error !
prevents error if not opened yet - call h5close_f(error) - if (error /= 0) write(6,'(a,i5)') ' Error in h5close_f ',error + call h5open_f(err_HDF5) + if (err_HDF5 /= 0_MPI_INTEGER_KIND) write(6,'(a,i5)') ' Error in h5open_f ',err_HDF5 ! prevents error if not opened yet + call h5close_f(err_HDF5) + if (err_HDF5 /= 0_MPI_INTEGER_KIND) write(6,'(a,i5)') ' Error in h5close_f ',err_HDF5 - call PetscFinalize(ierr) - CHKERRQ(ierr) + call PetscFinalize(err_PETSc) + CHKERRQ(err_PETSc) #ifdef _OPENMP - call MPI_finalize(error) - if (error /= 0) write(6,'(a,i5)') ' Error in MPI_finalize',error + call MPI_finalize(err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) write(6,'(a,i5)') ' Error in MPI_finalize',err_MPI +#else + err_MPI = 0_MPI_INTEGER_KIND #endif call date_and_time(values = dateAndTime) @@ -40,7 +43,10 @@ subroutine quit(stop_id) dateAndTime(6),':',& dateAndTime(7) - if (stop_id == 0 .and. ierr == 0 .and. error == 0) stop 0 ! normal termination + if (stop_id == 0 .and. & + err_HDF5 == 0 .and. & + err_MPI == 0_MPI_INTEGER_KIND .and. & + err_PETSC == 0) stop 0 ! normal termination stop 1 ! error (message from IO_error) end subroutine quit diff --git a/src/results.f90 b/src/results.f90 index 7c2346095..dc77d5a7f 100644 --- a/src/results.f90 +++ b/src/results.f90 @@ -497,16 +497,17 @@ subroutine results_mapping_phase(ID,entry,label) integer, dimension(:,:), intent(in) :: entry !< phase entry at (co,ce) character(len=*), dimension(:), intent(in) :: label !< label of each phase section - integer, dimension(size(entry,1),size(entry,2)) :: & + integer(pI64), dimension(size(entry,1),size(entry,2)) :: & entryGlobal - integer, dimension(size(label),0:worldsize-1) :: entryOffset !< offset in entry counting per process - integer, dimension(0:worldsize-1) :: writeSize !< amount of data written per process + integer(pI64), dimension(size(label),0:worldsize-1) :: entryOffset !< offset in entry counting per process + integer, dimension(0:worldsize-1) :: writeSize !< amount of data written per process integer(HSIZE_T), dimension(2) :: & myShape, & !< shape of the dataset (this process) myOffset, & totalShape !< shape of the dataset (all processes) integer(HID_T) :: & + pI64_t, & !< HDF5 type for pI64 (8 bit integer) loc_id, & !< identifier of group in file dtype_id, & !< identifier of compound data type label_id, & !< identifier of label (string) in compound data type @@ -518,7 +519,8 @@ subroutine results_mapping_phase(ID,entry,label) dt_id integer(SIZE_T) :: type_size_string, type_size_int - integer :: hdferr, ierr, ce, co + integer :: hdferr, ce, co + integer(MPI_INTEGER_KIND) :: err_MPI writeSize = 0 @@ -528,28 +530,28 @@ subroutine results_mapping_phase(ID,entry,label) if(hdferr < 0) error stop 'HDF5 error' #ifndef PETSC - entryGlobal = entry -1 ! 0-based + entryGlobal = int(entry -1,pI64) ! 0-based #else !-------------------------------------------------------------------------------------------------- ! MPI settings and communication call h5pset_dxpl_mpio_f(plist_id, H5FD_MPIO_COLLECTIVE_F, hdferr) if(hdferr < 0) error stop 'HDF5 error' - call MPI_Allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INT,MPI_SUM,MPI_COMM_WORLD,ierr) ! get output at each process - if(ierr /= 0) error stop 'MPI error' + call MPI_Allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,err_MPI) ! 
get output at each process + if(err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' - entryOffset = 0 + entryOffset = 0_pI64 do co = 1, size(ID,1) do ce = 1, size(ID,2) - entryOffset(ID(co,ce),worldrank) = entryOffset(ID(co,ce),worldrank) +1 + entryOffset(ID(co,ce),worldrank) = entryOffset(ID(co,ce),worldrank) +1_pI64 end do end do - call MPI_Allreduce(MPI_IN_PLACE,entryOffset,size(entryOffset),MPI_INT,MPI_SUM,MPI_COMM_WORLD,ierr)! get offset at each process - if(ierr /= 0) error stop 'MPI error' + call MPI_Allreduce(MPI_IN_PLACE,entryOffset,size(entryOffset),MPI_INTEGER8,MPI_SUM,MPI_COMM_WORLD,err_MPI)! get offset at each process + if(err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' entryOffset(:,worldrank) = sum(entryOffset(:,0:worldrank-1),2) do co = 1, size(ID,1) do ce = 1, size(ID,2) - entryGlobal(co,ce) = entry(co,ce) -1 + entryOffset(ID(co,ce),worldrank) + entryGlobal(co,ce) = int(entry(co,ce),pI64) -1_pI64 + entryOffset(ID(co,ce),worldrank) end do end do #endif @@ -567,14 +569,15 @@ subroutine results_mapping_phase(ID,entry,label) call h5tget_size_f(dt_id, type_size_string, hdferr) if(hdferr < 0) error stop 'HDF5 error' - call h5tget_size_f(H5T_NATIVE_INTEGER, type_size_int, hdferr) + pI64_t = h5kind_to_type(kind(entryGlobal),H5_INTEGER_KIND) + call h5tget_size_f(pI64_t, type_size_int, hdferr) if(hdferr < 0) error stop 'HDF5 error' call h5tcreate_f(H5T_COMPOUND_F, type_size_string + type_size_int, dtype_id, hdferr) if(hdferr < 0) error stop 'HDF5 error' call h5tinsert_f(dtype_id, 'label', 0_SIZE_T, dt_id,hdferr) if(hdferr < 0) error stop 'HDF5 error' - call h5tinsert_f(dtype_id, 'entry', type_size_string, H5T_NATIVE_INTEGER, hdferr) + call h5tinsert_f(dtype_id, 'entry', type_size_string, pI64_t, hdferr) if(hdferr < 0) error stop 'HDF5 error' !-------------------------------------------------------------------------------------------------- @@ -586,7 +589,7 @@ subroutine results_mapping_phase(ID,entry,label) call h5tcreate_f(H5T_COMPOUND_F, type_size_int, entry_id, hdferr) if(hdferr < 0) error stop 'HDF5 error' - call h5tinsert_f(entry_id, 'entry', 0_SIZE_T, H5T_NATIVE_INTEGER, hdferr) + call h5tinsert_f(entry_id, 'entry', 0_SIZE_T, pI64_t, hdferr) if(hdferr < 0) error stop 'HDF5 error' call h5tclose_f(dt_id, hdferr) @@ -650,16 +653,17 @@ subroutine results_mapping_homogenization(ID,entry,label) integer, dimension(:), intent(in) :: entry !< homogenization entry at (ce) character(len=*), dimension(:), intent(in) :: label !< label of each homogenization section - integer, dimension(size(entry,1)) :: & + integer(pI64), dimension(size(entry,1)) :: & entryGlobal - integer, dimension(size(label),0:worldsize-1) :: entryOffset !< offset in entry counting per process - integer, dimension(0:worldsize-1) :: writeSize !< amount of data written per process + integer(pI64), dimension(size(label),0:worldsize-1) :: entryOffset !< offset in entry counting per process + integer, dimension(0:worldsize-1) :: writeSize !< amount of data written per process integer(HSIZE_T), dimension(1) :: & myShape, & !< shape of the dataset (this process) myOffset, & totalShape !< shape of the dataset (all processes) integer(HID_T) :: & + pI64_t, & !< HDF5 type for pI64 (8 bit integer) loc_id, & !< identifier of group in file dtype_id, & !< identifier of compound data type label_id, & !< identifier of label (string) in compound data type @@ -671,7 +675,8 @@ subroutine results_mapping_homogenization(ID,entry,label) dt_id integer(SIZE_T) :: type_size_string, type_size_int - integer :: hdferr, ierr, ce + integer 
:: hdferr, ce + integer(MPI_INTEGER_KIND) :: err_MPI writeSize = 0 @@ -681,25 +686,25 @@ subroutine results_mapping_homogenization(ID,entry,label) if(hdferr < 0) error stop 'HDF5 error' #ifndef PETSC - entryGlobal = entry -1 ! 0-based + entryGlobal = int(entry -1,pI64) ! 0-based #else !-------------------------------------------------------------------------------------------------- ! MPI settings and communication call h5pset_dxpl_mpio_f(plist_id, H5FD_MPIO_COLLECTIVE_F, hdferr) if(hdferr < 0) error stop 'HDF5 error' - call MPI_Allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INT,MPI_SUM,MPI_COMM_WORLD,ierr) ! get output at each process - if(ierr /= 0) error stop 'MPI error' + call MPI_Allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,err_MPI) ! get output at each process + if(err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' - entryOffset = 0 + entryOffset = 0_pI64 do ce = 1, size(ID,1) - entryOffset(ID(ce),worldrank) = entryOffset(ID(ce),worldrank) +1 + entryOffset(ID(ce),worldrank) = entryOffset(ID(ce),worldrank) +1_pI64 end do - call MPI_Allreduce(MPI_IN_PLACE,entryOffset,size(entryOffset),MPI_INT,MPI_SUM,MPI_COMM_WORLD,ierr)! get offset at each process - if(ierr /= 0) error stop 'MPI error' + call MPI_Allreduce(MPI_IN_PLACE,entryOffset,size(entryOffset),MPI_INTEGER8,MPI_SUM,MPI_COMM_WORLD,err_MPI)! get offset at each process + if(err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' entryOffset(:,worldrank) = sum(entryOffset(:,0:worldrank-1),2) do ce = 1, size(ID,1) - entryGlobal(ce) = entry(ce) -1 + entryOffset(ID(ce),worldrank) + entryGlobal(ce) = int(entry(ce),pI64) -1_pI64 + entryOffset(ID(ce),worldrank) end do #endif @@ -716,14 +721,15 @@ subroutine results_mapping_homogenization(ID,entry,label) call h5tget_size_f(dt_id, type_size_string, hdferr) if(hdferr < 0) error stop 'HDF5 error' - call h5tget_size_f(H5T_NATIVE_INTEGER, type_size_int, hdferr) + pI64_t = h5kind_to_type(kind(entryGlobal),H5_INTEGER_KIND) + call h5tget_size_f(pI64_t, type_size_int, hdferr) if(hdferr < 0) error stop 'HDF5 error' call h5tcreate_f(H5T_COMPOUND_F, type_size_string + type_size_int, dtype_id, hdferr) if(hdferr < 0) error stop 'HDF5 error' call h5tinsert_f(dtype_id, 'label', 0_SIZE_T, dt_id,hdferr) if(hdferr < 0) error stop 'HDF5 error' - call h5tinsert_f(dtype_id, 'entry', type_size_string, H5T_NATIVE_INTEGER, hdferr) + call h5tinsert_f(dtype_id, 'entry', type_size_string, pI64_t, hdferr) if(hdferr < 0) error stop 'HDF5 error' !-------------------------------------------------------------------------------------------------- @@ -735,7 +741,7 @@ subroutine results_mapping_homogenization(ID,entry,label) call h5tcreate_f(H5T_COMPOUND_F, type_size_int, entry_id, hdferr) if(hdferr < 0) error stop 'HDF5 error' - call h5tinsert_f(entry_id, 'entry', 0_SIZE_T, H5T_NATIVE_INTEGER, hdferr) + call h5tinsert_f(entry_id, 'entry', 0_SIZE_T, pI64_t, hdferr) if(hdferr < 0) error stop 'HDF5 error' call h5tclose_f(dt_id, hdferr)
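A note on the datatype changes above: h5kind_to_type is the HDF5 Fortran helper that maps a Fortran integer kind to the matching HDF5 datatype handle, so the 'entry' member of the compound type is now stored as a 64-bit integer rather than the default integer. A minimal sketch of the lookup (assuming only the HDF5 Fortran module and the pI64 kind from prec.f90):

  use HDF5
  integer(HID_T) :: pI64_t
  pI64_t = h5kind_to_type(kind(0_pI64),H5_INTEGER_KIND)   ! same call as in both mapping routines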