Merge branch 'MPI_F08' into polishing-for-beta

This commit is contained in:
Martin Diehl 2021-07-09 11:32:17 +02:00
commit 8e75e87ad9
20 changed files with 194 additions and 116 deletions


@@ -29,7 +29,7 @@ file (STRINGS ${PROJECT_SOURCE_DIR}/VERSION DAMASK_VERSION)
 message ("\nBuilding ${CMAKE_PROJECT_NAME} ${DAMASK_VERSION}\n")
-add_definitions (-DPETSc)
+add_definitions (-DPETSC)
 add_definitions (-DDAMASKVERSION="${DAMASK_VERSION}")
 if (CMAKE_BUILD_TYPE STREQUAL "")


@@ -5,6 +5,7 @@
 !--------------------------------------------------------------------------------------------------
 module CPFEM2
 use prec
+use parallelization
 use config
 use math
 use rotations
@@ -15,9 +16,10 @@ module CPFEM2
 use IO
 use base64
 use DAMASK_interface
-use results
 use discretization
+use HDF5
 use HDF5_utilities
+use results
 use homogenization
 use phase
 #if defined(MESH)


@@ -88,7 +88,7 @@ subroutine DAMASK_interface_init
 print*, ' _/_/_/ _/ _/ _/ _/ _/ _/ _/_/_/ _/ _/ _/_/_/'
 #if defined(GRID)
 print*, ' Grid solver'
-#elif defined(Mesh)
+#elif defined(MESH)
 print*, ' Mesh solver'
 #endif
 #ifdef DEBUG


@@ -6,15 +6,18 @@
 !--------------------------------------------------------------------------------------------------
 module HDF5_utilities
 use HDF5
-#ifdef PETSc
-use PETSC
+#ifdef PETSC
+use PETSc
+#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
+use MPI
+#endif
 #endif
 use prec
 use parallelization
 implicit none
-public
+private
 !--------------------------------------------------------------------------------------------------
 !> @brief reads integer or float data of defined shape from file
@@ -71,11 +74,23 @@ module HDF5_utilities
 module procedure HDF5_addAttribute_real_array
 end interface HDF5_addAttribute
-#ifdef PETSc
+#ifdef PETSC
 logical, parameter, private :: parallel_default = .true.
 #else
 logical, parameter, private :: parallel_default = .false.
 #endif
+public :: &
+HDF5_utilities_init, &
+HDF5_read, &
+HDF5_write, &
+HDF5_addAttribute, &
+HDF5_addGroup, &
+HDF5_openGroup, &
+HDF5_closeGroup, &
+HDF5_openFile, &
+HDF5_closeFile, &
+HDF5_objectExists, &
+HDF5_setLink
 contains
@@ -130,7 +145,7 @@ integer(HID_T) function HDF5_openFile(fileName,mode)
 call h5pcreate_f(H5P_FILE_ACCESS_F, plist_id, hdferr)
 if(hdferr < 0) error stop 'HDF5 error'
-#ifdef PETSc
+#ifdef PETSC
 call h5pset_fapl_mpio_f(plist_id, PETSC_COMM_WORLD, MPI_INFO_NULL, hdferr)
 if(hdferr < 0) error stop 'HDF5 error'
 #endif
@@ -187,7 +202,7 @@ integer(HID_T) function HDF5_addGroup(fileHandle,groupName)
 !-------------------------------------------------------------------------------------------------
 ! setting I/O mode to collective
-#ifdef PETSc
+#ifdef PETSC
 call h5pset_all_coll_metadata_ops_f(aplist_id, .true., hdferr)
 if(hdferr < 0) error stop 'HDF5 error'
 #endif
@@ -223,7 +238,7 @@ integer(HID_T) function HDF5_openGroup(fileHandle,groupName)
 !-------------------------------------------------------------------------------------------------
 ! setting I/O mode to collective
-#ifdef PETSc
+#ifdef PETSC
 call h5pget_all_coll_metadata_ops_f(aplist_id, is_collective, hdferr)
 if(hdferr < 0) error stop 'HDF5 error'
 #endif
@@ -1692,7 +1707,7 @@ subroutine initialize_read(dset_id, filespace_id, memspace_id, plist_id, aplist_
 !--------------------------------------------------------------------------------------------------
 readSize = 0
 readSize(worldrank+1) = int(localShape(ubound(localShape,1)))
-#ifdef PETSc
+#ifdef PETSC
 if (parallel) then
 call h5pset_dxpl_mpio_f(plist_id, H5FD_MPIO_COLLECTIVE_F, hdferr)
 if(hdferr < 0) error stop 'HDF5 error'
@@ -1713,7 +1728,7 @@ subroutine initialize_read(dset_id, filespace_id, memspace_id, plist_id, aplist_
 ! creating a property list for IO and set it to collective
 call h5pcreate_f(H5P_DATASET_ACCESS_F, aplist_id, hdferr)
 if(hdferr < 0) error stop 'HDF5 error'
-#ifdef PETSc
+#ifdef PETSC
 call h5pset_all_coll_metadata_ops_f(aplist_id, .true., hdferr)
 if(hdferr < 0) error stop 'HDF5 error'
 #endif
@@ -1782,7 +1797,7 @@ subroutine initialize_write(dset_id, filespace_id, memspace_id, plist_id, &
 ! creating a property list for transfer properties (is collective when reading in parallel)
 call h5pcreate_f(H5P_DATASET_XFER_F, plist_id, hdferr)
 if(hdferr < 0) error stop 'HDF5 error'
-#ifdef PETSc
+#ifdef PETSC
 if (parallel) then
 call h5pset_dxpl_mpio_f(plist_id, H5FD_MPIO_COLLECTIVE_F, hdferr)
 if(hdferr < 0) error stop 'HDF5 error'
@@ -1793,7 +1808,7 @@ subroutine initialize_write(dset_id, filespace_id, memspace_id, plist_id, &
 ! determine the global data layout among all processes
 writeSize = 0
 writeSize(worldrank+1) = int(myShape(ubound(myShape,1)))
-#ifdef PETSc
+#ifdef PETSC
 if (parallel) then
 call MPI_allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INT,MPI_SUM,PETSC_COMM_WORLD,ierr) ! get total output size over each process
 if (ierr /= 0) error stop 'MPI error'
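For reference, a minimal sketch of the guard pattern applied throughout this commit (not part of the changeset; module and routine names are hypothetical): an explicit MPI module is pulled in only when the PETSc Fortran modules do not already expose the MPI symbols, i.e. when PETSC_HAVE_MPI_F90MODULE_VISIBILITY is undefined. HDF5_utilities falls back to the older 'use MPI'; most other modules in this commit use MPI_f08.

module demo_mpi_guard
#include <petsc/finclude/petscsys.h>
  use PETScSys
#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
  use MPI_f08                                               ! typed MPI handles; otherwise PETScSys already provides MPI
#endif

  implicit none
  private
  public :: demo_report_rank

contains

  subroutine demo_report_rank()

    integer :: rank, err

    call MPI_Comm_rank(MPI_COMM_WORLD,rank,err)             ! valid with both the mpi_f08 and the legacy interface
    if (err /= 0) error stop 'MPI error'
    print'(a,i0)', ' MPI rank: ', rank

  end subroutine demo_report_rank

end module demo_mpi_guard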


@@ -8,7 +8,11 @@
 !--------------------------------------------------------------------------------------------------
 program DAMASK_grid
 #include <petsc/finclude/petscsys.h>
-use PETScsys
+use PETScSys
+#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
+use MPI_f08
+#endif
 use prec
 use parallelization
 use DAMASK_interface
@@ -432,7 +436,7 @@ program DAMASK_grid
 print'(/,a,i0,a)', ' increment ', totalIncsCounter, ' NOT converged'
 endif; flush(IO_STDOUT)
-call MPI_Allreduce(interface_SIGUSR1,signal,1,MPI_LOGICAL,MPI_LOR,PETSC_COMM_WORLD,ierr)
+call MPI_Allreduce(interface_SIGUSR1,signal,1,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,ierr)
 if (ierr /= 0) error stop 'MPI error'
 if (mod(inc,loadCases(l)%f_out) == 0 .or. signal) then
 print'(1/,a)', ' ... writing results to file ......................................'
@@ -440,14 +444,14 @@ program DAMASK_grid
 call CPFEM_results(totalIncsCounter,time)
 endif
 if(signal) call interface_setSIGUSR1(.false.)
-call MPI_Allreduce(interface_SIGUSR2,signal,1,MPI_LOGICAL,MPI_LOR,PETSC_COMM_WORLD,ierr)
+call MPI_Allreduce(interface_SIGUSR2,signal,1,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,ierr)
 if (ierr /= 0) error stop 'MPI error'
 if (mod(inc,loadCases(l)%f_restart) == 0 .or. signal) then
 call mechanical_restartWrite
 call CPFEM_restartWrite
 endif
 if(signal) call interface_setSIGUSR2(.false.)
-call MPI_Allreduce(interface_SIGTERM,signal,1,MPI_LOGICAL,MPI_LOR,PETSC_COMM_WORLD,ierr)
+call MPI_Allreduce(interface_SIGTERM,signal,1,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,ierr)
 if (ierr /= 0) error stop 'MPI error'
 if (signal) exit loadCaseLooping
 endif skipping


@@ -6,7 +6,10 @@
 !--------------------------------------------------------------------------------------------------
 module discretization_grid
 #include <petsc/finclude/petscsys.h>
-use PETScsys
+use PETScSys
+#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
+use MPI_f08
+#endif
 use prec
 use parallelization
@@ -75,12 +78,12 @@ subroutine discretization_grid_init(restart)
 endif
-call MPI_Bcast(grid,3,MPI_INTEGER,0,PETSC_COMM_WORLD, ierr)
+call MPI_Bcast(grid,3,MPI_INTEGER,0,MPI_COMM_WORLD, ierr)
 if (ierr /= 0) error stop 'MPI error'
 if (grid(1) < 2) call IO_error(844, ext_msg='cells(1) must be larger than 1')
-call MPI_Bcast(geomSize,3,MPI_DOUBLE,0,PETSC_COMM_WORLD, ierr)
+call MPI_Bcast(geomSize,3,MPI_DOUBLE,0,MPI_COMM_WORLD, ierr)
 if (ierr /= 0) error stop 'MPI error'
-call MPI_Bcast(origin,3,MPI_DOUBLE,0,PETSC_COMM_WORLD, ierr)
+call MPI_Bcast(origin,3,MPI_DOUBLE,0,MPI_COMM_WORLD, ierr)
 if (ierr /= 0) error stop 'MPI error'
 print'(/,a,3(i12 ))', ' cells a b c: ', grid
@@ -105,13 +108,13 @@ subroutine discretization_grid_init(restart)
 myGrid = [grid(1:2),grid3]
 mySize = [geomSize(1:2),size3]
-call MPI_Gather(product(grid(1:2))*grid3Offset,1,MPI_INTEGER,displs, 1,MPI_INTEGER,0,PETSC_COMM_WORLD,ierr)
+call MPI_Gather(product(grid(1:2))*grid3Offset,1,MPI_INTEGER,displs, 1,MPI_INTEGER,0,MPI_COMM_WORLD,ierr)
 if (ierr /= 0) error stop 'MPI error'
-call MPI_Gather(product(myGrid), 1,MPI_INTEGER,sendcounts,1,MPI_INTEGER,0,PETSC_COMM_WORLD,ierr)
+call MPI_Gather(product(myGrid), 1,MPI_INTEGER,sendcounts,1,MPI_INTEGER,0,MPI_COMM_WORLD,ierr)
 if (ierr /= 0) error stop 'MPI error'
 allocate(materialAt(product(myGrid)))
-call MPI_scatterv(materialAt_global,sendcounts,displs,MPI_INTEGER,materialAt,size(materialAt),MPI_INTEGER,0,PETSC_COMM_WORLD,ierr)
+call MPI_Scatterv(materialAt_global,sendcounts,displs,MPI_INTEGER,materialAt,size(materialAt),MPI_INTEGER,0,MPI_COMM_WORLD,ierr)
 if (ierr /= 0) error stop 'MPI error'
 call discretization_init(materialAt, &


@@ -7,8 +7,11 @@
 module grid_damage_spectral
 #include <petsc/finclude/petscsnes.h>
 #include <petsc/finclude/petscdmda.h>
-use PETScdmda
-use PETScsnes
+use PETScDMDA
+use PETScSNES
+#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
+use MPI_f08
+#endif
 use prec
 use parallelization
@@ -107,7 +110,7 @@ subroutine grid_damage_spectral_init()
 call SNESSetOptionsPrefix(damage_snes,'damage_',ierr);CHKERRQ(ierr)
 localK = 0
 localK(worldrank) = grid3
-call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,PETSC_COMM_WORLD,ierr)
+call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,ierr)
 call DMDACreate3D(PETSC_COMM_WORLD, &
 DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, & ! cut off stencil at boundary
 DMDA_STENCIL_BOX, & ! Moore (26) neighborhood around central point
@@ -187,9 +190,9 @@ function grid_damage_spectral_solution(timeinc) result(solution)
 endif
 stagNorm = maxval(abs(phi_current - phi_stagInc))
 solnNorm = maxval(abs(phi_current))
-call MPI_Allreduce(MPI_IN_PLACE,stagNorm,1,MPI_DOUBLE,MPI_MAX,PETSC_COMM_WORLD,ierr)
+call MPI_Allreduce(MPI_IN_PLACE,stagNorm,1,MPI_DOUBLE,MPI_MAX,MPI_COMM_WORLD,ierr)
 solution%stagConverged = stagNorm < max(num%eps_damage_atol, num%eps_damage_rtol*solnNorm)
-call MPI_Allreduce(MPI_IN_PLACE,solution%stagConverged,1,MPI_LOGICAL,MPI_LAND,PETSC_COMM_WORLD,ierr)
+call MPI_Allreduce(MPI_IN_PLACE,solution%stagConverged,1,MPI_LOGICAL,MPI_LAND,MPI_COMM_WORLD,ierr)
 phi_stagInc = phi_current
 !--------------------------------------------------------------------------------------------------
@@ -320,9 +323,9 @@ subroutine updateReference()
 enddo
 K_ref = K_ref*wgt
-call MPI_Allreduce(MPI_IN_PLACE,K_ref,9,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
+call MPI_Allreduce(MPI_IN_PLACE,K_ref,9,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr)
 mu_ref = mu_ref*wgt
-call MPI_Allreduce(MPI_IN_PLACE,mu_ref,1,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
+call MPI_Allreduce(MPI_IN_PLACE,mu_ref,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr)
 end subroutine updateReference


@@ -7,13 +7,17 @@
 module grid_mechanical_FEM
 #include <petsc/finclude/petscsnes.h>
 #include <petsc/finclude/petscdmda.h>
-use PETScdmda
-use PETScsnes
+use PETScDMDA
+use PETScSNES
+#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
+use MPI_f08
+#endif
 use prec
 use parallelization
 use DAMASK_interface
 use IO
+use HDF5
 use HDF5_utilities
 use math
 use rotations
@@ -162,7 +166,7 @@ subroutine grid_mechanical_FEM_init
 CHKERRQ(ierr)
 localK = 0
 localK(worldrank) = grid3
-call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,PETSC_COMM_WORLD,ierr)
+call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,ierr)
 call DMDACreate3d(PETSC_COMM_WORLD, &
 DM_BOUNDARY_PERIODIC, DM_BOUNDARY_PERIODIC, DM_BOUNDARY_PERIODIC, &
 DMDA_STENCIL_BOX, &
@@ -236,16 +240,16 @@ subroutine grid_mechanical_FEM_init
 groupHandle = HDF5_openGroup(fileHandle,'solver')
 call HDF5_read(P_aim,groupHandle,'P_aim',.false.)
-call MPI_Bcast(P_aim,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+call MPI_Bcast(P_aim,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
 if(ierr /=0) error stop 'MPI error'
 call HDF5_read(F_aim,groupHandle,'F_aim',.false.)
-call MPI_Bcast(F_aim,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+call MPI_Bcast(F_aim,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
 if(ierr /=0) error stop 'MPI error'
 call HDF5_read(F_aim_lastInc,groupHandle,'F_aim_lastInc',.false.)
-call MPI_Bcast(F_aim_lastInc,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+call MPI_Bcast(F_aim_lastInc,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
 if(ierr /=0) error stop 'MPI error'
 call HDF5_read(F_aimDot,groupHandle,'F_aimDot',.false.)
-call MPI_Bcast(F_aimDot,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+call MPI_Bcast(F_aimDot,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
 if(ierr /=0) error stop 'MPI error'
 call HDF5_read(F,groupHandle,'F')
 call HDF5_read(F_lastInc,groupHandle,'F_lastInc')
@@ -270,10 +274,10 @@ subroutine grid_mechanical_FEM_init
 restartRead2: if (interface_restartInc > 0) then
 print'(a,i0,a)', ' reading more restart data of increment ', interface_restartInc, ' from file'
 call HDF5_read(C_volAvg,groupHandle,'C_volAvg',.false.)
-call MPI_Bcast(C_volAvg,81,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+call MPI_Bcast(C_volAvg,81,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
 if(ierr /=0) error stop 'MPI error'
 call HDF5_read(C_volAvgLastInc,groupHandle,'C_volAvgLastInc',.false.)
-call MPI_Bcast(C_volAvgLastInc,81,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+call MPI_Bcast(C_volAvgLastInc,81,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
 if(ierr /=0) error stop 'MPI error'
 call HDF5_closeGroup(groupHandle)
@@ -567,7 +571,7 @@ subroutine formResidual(da_local,x_local, &
 call utilities_constitutiveResponse(P_current,&
 P_av,C_volAvg,devNull, &
 F,params%timeinc,params%rotation_BC)
-call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,PETSC_COMM_WORLD,ierr)
+call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,ierr)
 !--------------------------------------------------------------------------------------------------
 ! stress BC handling


@@ -7,13 +7,17 @@
 module grid_mechanical_spectral_basic
 #include <petsc/finclude/petscsnes.h>
 #include <petsc/finclude/petscdmda.h>
-use PETScdmda
-use PETScsnes
+use PETScDMDA
+use PETScSNES
+#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
+use MPI_f08
+#endif
 use prec
 use parallelization
 use DAMASK_interface
 use IO
+use HDF5
 use HDF5_utilities
 use math
 use rotations
@@ -98,7 +102,11 @@ subroutine grid_mechanical_spectral_basic_init
 F ! pointer to solution data
 PetscInt, dimension(0:worldsize-1) :: localK
 integer(HID_T) :: fileHandle, groupHandle
-integer :: fileUnit
+#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
+type(MPI_File) :: fileUnit
+#else
+integer :: fileUnit
+#endif
 class (tNode), pointer :: &
 num_grid, &
 debug_grid
@@ -153,7 +161,7 @@ subroutine grid_mechanical_spectral_basic_init
 call SNESSetOptionsPrefix(snes,'mechanical_',ierr);CHKERRQ(ierr)
 localK = 0
 localK(worldrank) = grid3
-call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,PETSC_COMM_WORLD,ierr)
+call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,ierr)
 call DMDACreate3d(PETSC_COMM_WORLD, &
 DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, & ! cut off stencil at boundary
 DMDA_STENCIL_BOX, & ! Moore (26) neighborhood around central point
@@ -184,16 +192,16 @@ subroutine grid_mechanical_spectral_basic_init
 groupHandle = HDF5_openGroup(fileHandle,'solver')
 call HDF5_read(P_aim,groupHandle,'P_aim',.false.)
-call MPI_Bcast(P_aim,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+call MPI_Bcast(P_aim,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
 if(ierr /=0) error stop 'MPI error'
 call HDF5_read(F_aim,groupHandle,'F_aim',.false.)
-call MPI_Bcast(F_aim,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+call MPI_Bcast(F_aim,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
 if(ierr /=0) error stop 'MPI error'
 call HDF5_read(F_aim_lastInc,groupHandle,'F_aim_lastInc',.false.)
-call MPI_Bcast(F_aim_lastInc,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+call MPI_Bcast(F_aim_lastInc,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
 if(ierr /=0) error stop 'MPI error'
 call HDF5_read(F_aimDot,groupHandle,'F_aimDot',.false.)
-call MPI_Bcast(F_aimDot,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+call MPI_Bcast(F_aimDot,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
 if(ierr /=0) error stop 'MPI error'
 call HDF5_read(F,groupHandle,'F')
 call HDF5_read(F_lastInc,groupHandle,'F_lastInc')
@@ -213,16 +221,16 @@ subroutine grid_mechanical_spectral_basic_init
 restartRead2: if (interface_restartInc > 0) then
 print'(a,i0,a)', ' reading more restart data of increment ', interface_restartInc, ' from file'
 call HDF5_read(C_volAvg,groupHandle,'C_volAvg',.false.)
-call MPI_Bcast(C_volAvg,81,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+call MPI_Bcast(C_volAvg,81,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
 if(ierr /=0) error stop 'MPI error'
 call HDF5_read(C_volAvgLastInc,groupHandle,'C_volAvgLastInc',.false.)
-call MPI_Bcast(C_volAvgLastInc,81,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+call MPI_Bcast(C_volAvgLastInc,81,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
 if(ierr /=0) error stop 'MPI error'
 call HDF5_closeGroup(groupHandle)
 call HDF5_closeFile(fileHandle)
-call MPI_File_open(PETSC_COMM_WORLD, trim(getSolverJobName())//'.C_ref', &
+call MPI_File_open(MPI_COMM_WORLD, trim(getSolverJobName())//'.C_ref', &
 MPI_MODE_RDONLY,MPI_INFO_NULL,fileUnit,ierr)
 call MPI_File_read(fileUnit,C_minMaxAvg,81,MPI_DOUBLE,MPI_STATUS_IGNORE,ierr)
 call MPI_File_close(fileUnit,ierr)
@@ -487,7 +495,7 @@ subroutine formResidual(in, F, &
 call utilities_constitutiveResponse(residuum, & ! "residuum" gets field of first PK stress (to save memory)
 P_av,C_volAvg,C_minMaxAvg, &
 F,params%timeinc,params%rotation_BC)
-call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,PETSC_COMM_WORLD,ierr)
+call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,ierr)
 !--------------------------------------------------------------------------------------------------
 ! stress BC handling
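For reference, a minimal sketch of why fileUnit changes its declaration above (not part of the changeset; routine and file names are hypothetical): with MPI_f08 the file handle returned by MPI_File_open is the derived type MPI_File, whereas the legacy interface uses a plain integer, so the declaration follows the same PETSC_HAVE_MPI_F90MODULE_VISIBILITY guard as the use statements.

subroutine demo_read_C_ref(C_ref)
#include <petsc/finclude/petscsys.h>
  use PETScSys
#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
  use MPI_f08
#endif
  use prec

  real(pReal), dimension(81), intent(out) :: C_ref
#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
  type(MPI_File) :: fileUnit                                ! typed handle (mpi_f08)
#else
  integer :: fileUnit                                       ! integer handle (legacy mpi/mpif.h)
#endif
  integer :: ierr

  call MPI_File_open(MPI_COMM_WORLD,'demo.C_ref',MPI_MODE_RDONLY,MPI_INFO_NULL,fileUnit,ierr)
  if (ierr /= 0) error stop 'MPI error'
  call MPI_File_read(fileUnit,C_ref,81,MPI_DOUBLE,MPI_STATUS_IGNORE,ierr)
  if (ierr /= 0) error stop 'MPI error'
  call MPI_File_close(fileUnit,ierr)

end subroutine demo_read_C_ref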


@@ -7,13 +7,17 @@
 module grid_mechanical_spectral_polarisation
 #include <petsc/finclude/petscsnes.h>
 #include <petsc/finclude/petscdmda.h>
-use PETScdmda
-use PETScsnes
+use PETScDMDA
+use PETScSNES
+#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
+use MPI_f08
+#endif
 use prec
 use parallelization
 use DAMASK_interface
 use IO
+use HDF5
 use HDF5_utilities
 use math
 use rotations
@@ -111,7 +115,11 @@ subroutine grid_mechanical_spectral_polarisation_init
 F_tau ! specific (sub)pointer
 PetscInt, dimension(0:worldsize-1) :: localK
 integer(HID_T) :: fileHandle, groupHandle
-integer :: fileUnit
+#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
+type(MPI_File) :: fileUnit
+#else
+integer :: fileUnit
+#endif
 class (tNode), pointer :: &
 num_grid, &
 debug_grid
@@ -173,7 +181,7 @@ subroutine grid_mechanical_spectral_polarisation_init
 call SNESSetOptionsPrefix(snes,'mechanical_',ierr);CHKERRQ(ierr)
 localK = 0
 localK(worldrank) = grid3
-call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,PETSC_COMM_WORLD,ierr)
+call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,ierr)
 call DMDACreate3d(PETSC_COMM_WORLD, &
 DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, & ! cut off stencil at boundary
 DMDA_STENCIL_BOX, & ! Moore (26) neighborhood around central point
@@ -206,16 +214,16 @@ subroutine grid_mechanical_spectral_polarisation_init
 groupHandle = HDF5_openGroup(fileHandle,'solver')
 call HDF5_read(P_aim,groupHandle,'P_aim',.false.)
-call MPI_Bcast(P_aim,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+call MPI_Bcast(P_aim,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
 if(ierr /=0) error stop 'MPI error'
 call HDF5_read(F_aim,groupHandle,'F_aim',.false.)
-call MPI_Bcast(F_aim,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+call MPI_Bcast(F_aim,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
 if(ierr /=0) error stop 'MPI error'
 call HDF5_read(F_aim_lastInc,groupHandle,'F_aim_lastInc',.false.)
-call MPI_Bcast(F_aim_lastInc,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+call MPI_Bcast(F_aim_lastInc,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
 if(ierr /=0) error stop 'MPI error'
 call HDF5_read(F_aimDot,groupHandle,'F_aimDot',.false.)
-call MPI_Bcast(F_aimDot,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+call MPI_Bcast(F_aimDot,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
 if(ierr /=0) error stop 'MPI error'
 call HDF5_read(F,groupHandle,'F')
 call HDF5_read(F_lastInc,groupHandle,'F_lastInc')
@@ -239,16 +247,16 @@ subroutine grid_mechanical_spectral_polarisation_init
 restartRead2: if (interface_restartInc > 0) then
 print'(a,i0,a)', ' reading more restart data of increment ', interface_restartInc, ' from file'
 call HDF5_read(C_volAvg,groupHandle,'C_volAvg',.false.)
-call MPI_Bcast(C_volAvg,81,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+call MPI_Bcast(C_volAvg,81,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
 if(ierr /=0) error stop 'MPI error'
 call HDF5_read(C_volAvgLastInc,groupHandle,'C_volAvgLastInc',.false.)
-call MPI_Bcast(C_volAvgLastInc,81,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+call MPI_Bcast(C_volAvgLastInc,81,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
 if(ierr /=0) error stop 'MPI error'
 call HDF5_closeGroup(groupHandle)
 call HDF5_closeFile(fileHandle)
-call MPI_File_open(PETSC_COMM_WORLD, trim(getSolverJobName())//'.C_ref', &
+call MPI_File_open(MPI_COMM_WORLD, trim(getSolverJobName())//'.C_ref', &
 MPI_MODE_RDONLY,MPI_INFO_NULL,fileUnit,ierr)
 call MPI_File_read(fileUnit,C_minMaxAvg,81,MPI_DOUBLE,MPI_STATUS_IGNORE,ierr)
 call MPI_File_close(fileUnit,ierr)
@@ -543,7 +551,7 @@ subroutine formResidual(in, FandF_tau, &
 X_RANGE, Y_RANGE, Z_RANGE)
 F_av = sum(sum(sum(F,dim=5),dim=4),dim=3) * wgt
-call MPI_Allreduce(MPI_IN_PLACE,F_av,9,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
+call MPI_Allreduce(MPI_IN_PLACE,F_av,9,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr)
 call SNESGetNumberFunctionEvals(snes,nfuncs,ierr); CHKERRQ(ierr)
 call SNESGetIterationNumber(snes,PETScIter,ierr); CHKERRQ(ierr)
@@ -587,7 +595,7 @@ subroutine formResidual(in, FandF_tau, &
 call utilities_constitutiveResponse(residual_F, & ! "residuum" gets field of first PK stress (to save memory)
 P_av,C_volAvg,C_minMaxAvg, &
 F - residual_F_tau/num%beta,params%timeinc,params%rotation_BC)
-call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,PETSC_COMM_WORLD,ierr)
+call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,ierr)
 !--------------------------------------------------------------------------------------------------
 ! stress BC handling


@@ -7,8 +7,11 @@
 module grid_thermal_spectral
 #include <petsc/finclude/petscsnes.h>
 #include <petsc/finclude/petscdmda.h>
-use PETScdmda
-use PETScsnes
+use PETScDMDA
+use PETScSNES
+#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
+use MPI_f08
+#endif
 use prec
 use parallelization
@@ -102,7 +105,7 @@ subroutine grid_thermal_spectral_init(T_0)
 call SNESSetOptionsPrefix(thermal_snes,'thermal_',ierr);CHKERRQ(ierr)
 localK = 0
 localK(worldrank) = grid3
-call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,PETSC_COMM_WORLD,ierr)
+call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,ierr)
 call DMDACreate3D(PETSC_COMM_WORLD, &
 DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, & ! cut off stencil at boundary
 DMDA_STENCIL_BOX, & ! Moore (26) neighborhood around central point
@@ -182,9 +185,9 @@ function grid_thermal_spectral_solution(timeinc) result(solution)
 endif
 stagNorm = maxval(abs(T_current - T_stagInc))
 solnNorm = maxval(abs(T_current))
-call MPI_Allreduce(MPI_IN_PLACE,stagNorm,1,MPI_DOUBLE,MPI_MAX,PETSC_COMM_WORLD,ierr)
+call MPI_Allreduce(MPI_IN_PLACE,stagNorm,1,MPI_DOUBLE,MPI_MAX,MPI_COMM_WORLD,ierr)
 solution%stagConverged = stagNorm < max(num%eps_thermal_atol, num%eps_thermal_rtol*solnNorm)
-call MPI_Allreduce(MPI_IN_PLACE,solution%stagConverged,1,MPI_LOGICAL,MPI_LAND,PETSC_COMM_WORLD,ierr)
+call MPI_Allreduce(MPI_IN_PLACE,solution%stagConverged,1,MPI_LOGICAL,MPI_LAND,MPI_COMM_WORLD,ierr)
 T_stagInc = T_current
 !--------------------------------------------------------------------------------------------------
@@ -310,9 +313,9 @@ subroutine updateReference()
 enddo
 K_ref = K_ref*wgt
-call MPI_Allreduce(MPI_IN_PLACE,K_ref,9,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
+call MPI_Allreduce(MPI_IN_PLACE,K_ref,9,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr)
 mu_ref = mu_ref*wgt
-call MPI_Allreduce(MPI_IN_PLACE,mu_ref,1,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
+call MPI_Allreduce(MPI_IN_PLACE,mu_ref,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr)
 end subroutine updateReference


@@ -8,6 +8,9 @@ module spectral_utilities
 #include <petsc/finclude/petscsys.h>
 use PETScSys
+#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
+use MPI_f08
+#endif
 use prec
 use DAMASK_interface
@@ -591,7 +594,7 @@ real(pReal) function utilities_divergenceRMS()
 conjg(-xi1st(1:3,grid1Red,j,k))*rescaledGeom))**2.0_pReal)
 enddo; enddo
 if(grid(1) == 1) utilities_divergenceRMS = utilities_divergenceRMS * 0.5_pReal ! counted twice in case of grid(1) == 1
-call MPI_Allreduce(MPI_IN_PLACE,utilities_divergenceRMS,1,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
+call MPI_Allreduce(MPI_IN_PLACE,utilities_divergenceRMS,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr)
 if(ierr /=0) error stop 'MPI error'
 utilities_divergenceRMS = sqrt(utilities_divergenceRMS) * wgt ! RMS in real space calculated with Parsevals theorem from Fourier space
@@ -651,7 +654,7 @@ real(pReal) function utilities_curlRMS()
 + sum(real(curl_fourier)**2.0_pReal + aimag(curl_fourier)**2.0_pReal) ! this layer (Nyquist) does not have a conjugate complex counterpart (if grid(1) /= 1)
 enddo; enddo
-call MPI_Allreduce(MPI_IN_PLACE,utilities_curlRMS,1,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
+call MPI_Allreduce(MPI_IN_PLACE,utilities_curlRMS,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr)
 if(ierr /=0) error stop 'MPI error'
 utilities_curlRMS = sqrt(utilities_curlRMS) * wgt
 if(grid(1) == 1) utilities_curlRMS = utilities_curlRMS * 0.5_pReal ! counted twice in case of grid(1) == 1
@@ -816,7 +819,7 @@ subroutine utilities_constitutiveResponse(P,P_av,C_volAvg,C_minmaxAvg,&
 P = reshape(homogenization_P, [3,3,grid(1),grid(2),grid3])
 P_av = sum(sum(sum(P,dim=5),dim=4),dim=3) * wgt ! average of P
-call MPI_Allreduce(MPI_IN_PLACE,P_av,9,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
+call MPI_Allreduce(MPI_IN_PLACE,P_av,9,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr)
 if (debugRotation) print'(/,a,/,2(3(2x,f12.4,1x)/),3(2x,f12.4,1x))', &
 ' Piola--Kirchhoff stress (lab) / MPa =', transpose(P_av)*1.e-6_pReal
 if(present(rotation_BC)) P_av = rotation_BC%rotate(P_av)
@@ -840,21 +843,21 @@ subroutine utilities_constitutiveResponse(P,P_av,C_volAvg,C_minmaxAvg,&
 end do
 valueAndRank = [dPdF_norm_max,real(worldrank,pReal)]
-call MPI_Allreduce(MPI_IN_PLACE,valueAndRank,1, MPI_2DOUBLE_PRECISION, MPI_MAXLOC, PETSC_COMM_WORLD, ierr)
+call MPI_Allreduce(MPI_IN_PLACE,valueAndRank,1, MPI_2DOUBLE_PRECISION, MPI_MAXLOC, MPI_COMM_WORLD, ierr)
 if (ierr /= 0) error stop 'MPI error'
-call MPI_Bcast(dPdF_max,81,MPI_DOUBLE,int(valueAndRank(2)),PETSC_COMM_WORLD, ierr)
+call MPI_Bcast(dPdF_max,81,MPI_DOUBLE,int(valueAndRank(2)),MPI_COMM_WORLD, ierr)
 if (ierr /= 0) error stop 'MPI error'
 valueAndRank = [dPdF_norm_min,real(worldrank,pReal)]
-call MPI_Allreduce(MPI_IN_PLACE,valueAndRank,1, MPI_2DOUBLE_PRECISION, MPI_MINLOC, PETSC_COMM_WORLD, ierr)
+call MPI_Allreduce(MPI_IN_PLACE,valueAndRank,1, MPI_2DOUBLE_PRECISION, MPI_MINLOC, MPI_COMM_WORLD, ierr)
 if (ierr /= 0) error stop 'MPI error'
-call MPI_Bcast(dPdF_min,81,MPI_DOUBLE,int(valueAndRank(2)),PETSC_COMM_WORLD, ierr)
+call MPI_Bcast(dPdF_min,81,MPI_DOUBLE,int(valueAndRank(2)),MPI_COMM_WORLD, ierr)
 if (ierr /= 0) error stop 'MPI error'
 C_minmaxAvg = 0.5_pReal*(dPdF_max + dPdF_min)
 C_volAvg = sum(homogenization_dPdF,dim=5)
-call MPI_Allreduce(MPI_IN_PLACE,C_volAvg,81,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
+call MPI_Allreduce(MPI_IN_PLACE,C_volAvg,81,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr)
 if (ierr /= 0) error stop 'MPI error'
 C_volAvg = C_volAvg * wgt
@@ -909,7 +912,7 @@ function utilities_forwardField(timeinc,field_lastInc,rate,aim)
 utilities_forwardField = field_lastInc + rate*timeinc
 if (present(aim)) then !< correct to match average
 fieldDiff = sum(sum(sum(utilities_forwardField,dim=5),dim=4),dim=3)*wgt
-call MPI_Allreduce(MPI_IN_PLACE,fieldDiff,9,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
+call MPI_Allreduce(MPI_IN_PLACE,fieldDiff,9,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr)
 fieldDiff = fieldDiff - aim
 utilities_forwardField = utilities_forwardField - &
 spread(spread(spread(fieldDiff,3,grid(1)),4,grid(2)),5,grid3)
@@ -982,8 +985,13 @@ subroutine utilities_updateCoords(F)
 rank_t, rank_b, &
 c, &
 ierr
+#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
+type(MPI_Request), dimension(4) :: request
+type(MPI_Status), dimension(4) :: status
+#else
 integer, dimension(4) :: request
 integer, dimension(MPI_STATUS_SIZE,4) :: status
+#endif
 real(pReal), dimension(3) :: step
 real(pReal), dimension(3,3) :: Favg
 integer, dimension(3) :: me
@@ -1018,7 +1026,7 @@ subroutine utilities_updateCoords(F)
 !--------------------------------------------------------------------------------------------------
 ! average F
 if (grid3Offset == 0) Favg = real(tensorField_fourier(1:3,1:3,1,1,1),pReal)*wgt
-call MPI_Bcast(Favg,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+call MPI_Bcast(Favg,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
 if(ierr /=0) error stop 'MPI error'
 !--------------------------------------------------------------------------------------------------
@@ -1029,20 +1037,24 @@ subroutine utilities_updateCoords(F)
 rank_b = modulo(worldrank-1,worldsize)
 ! send bottom layer to process below
-call MPI_Isend(IPfluct_padded(:,:,:,2), c,MPI_DOUBLE,rank_b,0,PETSC_COMM_WORLD,request(1),ierr)
+call MPI_Isend(IPfluct_padded(:,:,:,2), c,MPI_DOUBLE,rank_b,0,MPI_COMM_WORLD,request(1),ierr)
 if(ierr /=0) error stop 'MPI error'
-call MPI_Irecv(IPfluct_padded(:,:,:,grid3+2),c,MPI_DOUBLE,rank_t,0,PETSC_COMM_WORLD,request(2),ierr)
+call MPI_Irecv(IPfluct_padded(:,:,:,grid3+2),c,MPI_DOUBLE,rank_t,0,MPI_COMM_WORLD,request(2),ierr)
 if(ierr /=0) error stop 'MPI error'
 ! send top layer to process above
-call MPI_Isend(IPfluct_padded(:,:,:,grid3+1),c,MPI_DOUBLE,rank_t,1,PETSC_COMM_WORLD,request(3),ierr)
+call MPI_Isend(IPfluct_padded(:,:,:,grid3+1),c,MPI_DOUBLE,rank_t,1,MPI_COMM_WORLD,request(3),ierr)
 if(ierr /=0) error stop 'MPI error'
-call MPI_Irecv(IPfluct_padded(:,:,:,1), c,MPI_DOUBLE,rank_b,1,PETSC_COMM_WORLD,request(4),ierr)
+call MPI_Irecv(IPfluct_padded(:,:,:,1), c,MPI_DOUBLE,rank_b,1,MPI_COMM_WORLD,request(4),ierr)
 if(ierr /=0) error stop 'MPI error'
 call MPI_Waitall(4,request,status,ierr)
 if(ierr /=0) error stop 'MPI error'
+#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
+! ToDo
+#else
 if(any(status(MPI_ERROR,:) /= 0)) error stop 'MPI error'
+#endif
 !--------------------------------------------------------------------------------------------------
 ! calculate nodal displacements
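For reference, a minimal sketch of the request/status declarations the hunk above switches between (not part of the changeset; routine and variable names are hypothetical): MPI_f08 replaces integer request handles and status(MPI_STATUS_SIZE,:) arrays with the derived types MPI_Request and MPI_Status, which is why utilities_updateCoords guards these declarations and leaves the status(MPI_ERROR,:) check as a ToDo in the MPI_f08 branch.

subroutine demo_halo_exchange(lower,upper,worldrank,worldsize)
#include <petsc/finclude/petscsys.h>
  use PETScSys
#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
  use MPI_f08
#endif
  use prec

  real(pReal), intent(in)  :: upper                         ! top layer sent to the neighbor above
  real(pReal), intent(out) :: lower                         ! bottom halo received from the neighbor below
  integer,     intent(in)  :: worldrank, worldsize

#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
  type(MPI_Request), dimension(2) :: request                ! typed handles, no MPI_STATUS_SIZE needed
  type(MPI_Status),  dimension(2) :: status
#else
  integer, dimension(2)                 :: request
  integer, dimension(MPI_STATUS_SIZE,2) :: status
#endif
  integer :: rank_t, rank_b, ierr

  rank_t = modulo(worldrank+1,worldsize)                    ! periodic neighbors, as in utilities_updateCoords
  rank_b = modulo(worldrank-1,worldsize)
  call MPI_Isend(upper,1,MPI_DOUBLE,rank_t,0,MPI_COMM_WORLD,request(1),ierr)
  if (ierr /= 0) error stop 'MPI error'
  call MPI_Irecv(lower,1,MPI_DOUBLE,rank_b,0,MPI_COMM_WORLD,request(2),ierr)
  if (ierr /= 0) error stop 'MPI error'
  call MPI_Waitall(2,request,status,ierr)
  if (ierr /= 0) error stop 'MPI error'

end subroutine demo_halo_exchange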


@@ -12,6 +12,7 @@ module homogenization
 use material
 use phase
 use discretization
+use HDF5
 use HDF5_utilities
 use results
 use lattice


@@ -7,10 +7,13 @@ module FEM_utilities
 #include <petsc/finclude/petscdmda.h>
 #include <petsc/finclude/petscis.h>
-use PETScdmplex
-use PETScdmda
-use PETScis
+use PETScDMplex
+use PETScDMDA
+use PETScIS
+#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
+use MPI_f08
+#endif
 use prec
 use config
 use math
@@ -165,7 +168,7 @@ subroutine utilities_constitutiveResponse(timeinc,P_av,forwardData)
 cutBack = .false. ! reset cutBack status
 P_av = sum(homogenization_P,dim=3) * wgt
-call MPI_Allreduce(MPI_IN_PLACE,P_av,9,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
+call MPI_Allreduce(MPI_IN_PLACE,P_av,9,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr)
 end subroutine utilities_constitutiveResponse


@@ -8,9 +8,12 @@ module discretization_mesh
 #include <petsc/finclude/petscdmplex.h>
 #include <petsc/finclude/petscis.h>
 #include <petsc/finclude/petscdmda.h>
-use PETScdmplex
-use PETScdmda
-use PETScis
+use PETScDMplex
+use PETScDMDA
+use PETScIS
+#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
+use MPI_f08
+#endif
 use DAMASK_interface
 use parallelization
@@ -111,9 +114,9 @@ subroutine discretization_mesh_init(restart)
 ! get number of IDs in face sets (for boundary conditions?)
 call DMGetLabelSize(globalMesh,'Face Sets',mesh_Nboundaries,ierr)
 CHKERRQ(ierr)
-call MPI_Bcast(mesh_Nboundaries,1,MPI_INTEGER,0,PETSC_COMM_WORLD,ierr)
-call MPI_Bcast(mesh_NcpElemsGlobal,1,MPI_INTEGER,0,PETSC_COMM_WORLD,ierr)
-call MPI_Bcast(dimPlex,1,MPI_INTEGER,0,PETSC_COMM_WORLD,ierr)
+call MPI_Bcast(mesh_Nboundaries,1,MPI_INTEGER,0,MPI_COMM_WORLD,ierr)
+call MPI_Bcast(mesh_NcpElemsGlobal,1,MPI_INTEGER,0,MPI_COMM_WORLD,ierr)
+call MPI_Bcast(dimPlex,1,MPI_INTEGER,0,MPI_COMM_WORLD,ierr)
 if (worldrank == 0) then
 call DMClone(globalMesh,geomMesh,ierr)
@@ -134,7 +137,7 @@ subroutine discretization_mesh_init(restart)
 CHKERRQ(ierr)
 call ISRestoreIndicesF90(faceSetIS,pFaceSets,ierr)
 endif
-call MPI_Bcast(mesh_boundaries,mesh_Nboundaries,MPI_INTEGER,0,PETSC_COMM_WORLD,ierr)
+call MPI_Bcast(mesh_boundaries,mesh_Nboundaries,MPI_INTEGER,0,MPI_COMM_WORLD,ierr)
 call DMDestroy(globalMesh,ierr); CHKERRQ(ierr)


@@ -9,10 +9,13 @@ module mesh_mechanical_FEM
 #include <petsc/finclude/petscdm.h>
 #include <petsc/finclude/petsc.h>
-use PETScsnes
+use PETScSNES
 use PETScDM
 use PETScDMplex
 use PETScDT
+#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
+use MPI_f08
+#endif
 use prec
 use FEM_utilities
@@ -396,7 +399,7 @@ subroutine FEM_mechanical_formResidual(dm_local,xx_local,f_local,dummy,ierr)
 !--------------------------------------------------------------------------------------------------
 ! evaluate constitutive response
 call Utilities_constitutiveResponse(params%timeinc,P_av,ForwardData)
-call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,PETSC_COMM_WORLD,ierr)
+call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,ierr)
 ForwardData = .false.
 !--------------------------------------------------------------------------------------------------


@@ -6,9 +6,12 @@ module parallelization
 use, intrinsic :: ISO_fortran_env, only: &
 OUTPUT_UNIT
-#ifdef PETSc
+#ifdef PETSC
 #include <petsc/finclude/petscsys.h>
-use petscsys
+use PETScSys
+#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
+use MPI_f08
+#endif
 !$ use OMP_LIB
 #endif
 use prec
@@ -20,7 +23,7 @@ module parallelization
 worldrank = 0, & !< MPI worldrank (/=0 for MPI simulations only)
 worldsize = 1 !< MPI worldsize (/=1 for MPI simulations only)
-#ifdef PETSc
+#ifdef PETSC
 public :: &
 parallelization_init
@@ -60,12 +63,12 @@ subroutine parallelization_init
 #endif
 CHKERRQ(petsc_err)
-call MPI_Comm_rank(PETSC_COMM_WORLD,worldrank,err)
+call MPI_Comm_rank(MPI_COMM_WORLD,worldrank,err)
 if (err /= 0) error stop 'Could not determine worldrank'
 if (worldrank == 0) print'(/,a)', ' <<<+- parallelization init -+>>>'
-call MPI_Comm_size(PETSC_COMM_WORLD,worldsize,err)
+call MPI_Comm_size(MPI_COMM_WORLD,worldsize,err)
 if (err /= 0) error stop 'Could not determine worldsize'
 if (worldrank == 0) print'(a,i3)', ' MPI processes: ',worldsize


@@ -14,6 +14,7 @@ module phase
 use lattice
 use discretization
 use parallelization
+use HDF5
 use HDF5_utilities
 implicit none


@@ -7,9 +7,6 @@
 subroutine quit(stop_id)
 #include <petsc/finclude/petscsys.h>
 use PetscSys
-#ifdef _OPENMP
-use MPI
-#endif
 use HDF5
 implicit none


@@ -5,12 +5,17 @@
 !> @author Martin Diehl, Max-Planck-Institut für Eisenforschung GmbH
 !--------------------------------------------------------------------------------------------------
 module results
+use prec
 use DAMASK_interface
 use parallelization
 use IO
 use HDF5_utilities
-#ifdef PETSc
-use PETSC
+use HDF5
+#ifdef PETSC
+use PETSc
+#ifndef PETSC_HAVE_MPI_F90MODULE_VISIBILITY
+use MPI_f08
+#endif
 #endif
 implicit none
@@ -459,7 +464,7 @@ subroutine results_mapping_phase(ID,entry,label)
 call h5pset_dxpl_mpio_f(plist_id, H5FD_MPIO_COLLECTIVE_F, hdferr)
 if(hdferr < 0) error stop 'HDF5 error'
-call MPI_allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INT,MPI_SUM,PETSC_COMM_WORLD,ierr) ! get output at each process
+call MPI_Allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INT,MPI_SUM,MPI_COMM_WORLD,ierr) ! get output at each process
 if(ierr /= 0) error stop 'MPI error'
 entryOffset = 0
@@ -468,7 +473,7 @@ subroutine results_mapping_phase(ID,entry,label)
 entryOffset(ID(co,ce),worldrank) = entryOffset(ID(co,ce),worldrank) +1
 enddo
 enddo
-call MPI_allreduce(MPI_IN_PLACE,entryOffset,size(entryOffset),MPI_INT,MPI_SUM,PETSC_COMM_WORLD,ierr)! get offset at each process
+call MPI_Allreduce(MPI_IN_PLACE,entryOffset,size(entryOffset),MPI_INT,MPI_SUM,MPI_COMM_WORLD,ierr)! get offset at each process
 if(ierr /= 0) error stop 'MPI error'
 entryOffset(:,worldrank) = sum(entryOffset(:,0:worldrank-1),2)
 do co = 1, size(ID,1)
@@ -612,14 +617,14 @@ subroutine results_mapping_homogenization(ID,entry,label)
 call h5pset_dxpl_mpio_f(plist_id, H5FD_MPIO_COLLECTIVE_F, hdferr)
 if(hdferr < 0) error stop 'HDF5 error'
-call MPI_allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INT,MPI_SUM,PETSC_COMM_WORLD,ierr) ! get output at each process
+call MPI_Allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INT,MPI_SUM,MPI_COMM_WORLD,ierr) ! get output at each process
 if(ierr /= 0) error stop 'MPI error'
 entryOffset = 0
 do ce = 1, size(ID,1)
 entryOffset(ID(ce),worldrank) = entryOffset(ID(ce),worldrank) +1
 enddo
-call MPI_allreduce(MPI_IN_PLACE,entryOffset,size(entryOffset),MPI_INT,MPI_SUM,PETSC_COMM_WORLD,ierr) ! get offset at each process
+call MPI_Allreduce(MPI_IN_PLACE,entryOffset,size(entryOffset),MPI_INT,MPI_SUM,MPI_COMM_WORLD,ierr)! get offset at each process
 if(ierr /= 0) error stop 'MPI error'
 entryOffset(:,worldrank) = sum(entryOffset(:,0:worldrank-1),2)
 do ce = 1, size(ID,1)