Merge remote-tracking branch 'origin/development' into nonlocal-compatibility-in-dependent-state
commit c6dae711c5
@@ -17,17 +17,14 @@ pkg_get_variable(CMAKE_Fortran_COMPILER PETSc fcompiler)
 pkg_get_variable(CMAKE_C_COMPILER PETSc ccompiler)

 # Solver determines name of project
-string(TOUPPER "${DAMASK_SOLVER}" DAMASK_SOLVER)
-if(DAMASK_SOLVER STREQUAL "GRID")
-project(damask-grid HOMEPAGE_URL https://damask.mpie.de LANGUAGES Fortran C)
-elseif(DAMASK_SOLVER STREQUAL "MESH")
-project(damask-mesh HOMEPAGE_URL https://damask.mpie.de LANGUAGES Fortran C)
-elseif(DAMASK_SOLVER STREQUAL "TEST")
-project(damask-test HOMEPAGE_URL https://damask.mpie.de LANGUAGES Fortran C)
+string(TOUPPER "${DAMASK_SOLVER}" DAMASK_SOLVER_UPPER)
+string(TOLOWER "${DAMASK_SOLVER}" DAMASK_SOLVER_LOWER)
+if("${DAMASK_SOLVER_UPPER}" MATCHES "^(GRID|MESH|TEST)$")
+project("damask-${DAMASK_SOLVER_LOWER}" HOMEPAGE_URL https://damask.mpie.de LANGUAGES Fortran C)
 else()
 message(FATAL_ERROR "Invalid solver: DAMASK_SOLVER=${DAMASK_SOLVER}")
 endif()
-add_definitions("-D${DAMASK_SOLVER}")
+add_definitions("-D${DAMASK_SOLVER_UPPER}")

 set(CMAKE_Fortran_PREPROCESS "ON") # works only for CMake >= 3.18

@@ -1,16 +1,7 @@
 file(GLOB damask-sources CONFIGURE_DEPENDS *.f90 *.c)

-if(PROJECT_NAME STREQUAL "damask-grid")
-set(executable-name "DAMASK_grid")
-file(GLOB solver-sources CONFIGURE_DEPENDS grid/*.f90)
-elseif(PROJECT_NAME STREQUAL "damask-mesh")
-set(executable-name "DAMASK_mesh")
-file(GLOB solver-sources CONFIGURE_DEPENDS mesh/*.f90)
-elseif(PROJECT_NAME STREQUAL "damask-test")
-set(executable-name "DAMASK_test")
-file(GLOB solver-sources CONFIGURE_DEPENDS test/*.f90)
-endif()
-
+set(executable-name "DAMASK_${DAMASK_SOLVER_LOWER}")
+file(GLOB solver-sources CONFIGURE_DEPENDS ${DAMASK_SOLVER_LOWER}/*.f90)

 if(NOT CMAKE_BUILD_TYPE STREQUAL "SYNTAXONLY")
 add_executable(${executable-name} ${damask-sources} ${solver-sources})
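
Note on the CMake change above: the solver name is now upper-cased once and handed to the preprocessor via add_definitions("-D${DAMASK_SOLVER_UPPER}"), with CMAKE_Fortran_PREPROCESS enabled, so the Fortran sources see exactly one of the macros GRID, MESH or TEST. The snippet below is an illustrative sketch only and is not part of this merge; it just shows the kind of conditional compilation such a macro enables.

! Illustrative sketch only (not from this merge): the build defines exactly one of
! GRID, MESH or TEST, so preprocessed Fortran sources can select solver-specific code.
#if defined(GRID)
  print '(a)', 'compiled for the grid solver (DAMASK_grid)'
#elif defined(MESH)
  print '(a)', 'compiled for the mesh solver (DAMASK_mesh)'
#elif defined(TEST)
  print '(a)', 'compiled for the test build (DAMASK_test)'
#endif
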
@@ -1560,7 +1560,7 @@ subroutine initialize_read(dset_id, filespace_id, memspace_id, plist_id, aplist_
 call HDF5_chkerr(hdferr)
 call MPI_Allgather(int(localShape(ubound(localShape,1)),MPI_INTEGER_KIND),1_MPI_INTEGER_KIND,MPI_INTEGER,&
 readSize,1_MPI_INTEGER_KIND,MPI_INTEGER,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 end if
 #endif
 myStart = int(0,HSIZE_T)

@@ -1667,7 +1667,7 @@ subroutine initialize_write(dset_id, filespace_id, memspace_id, plist_id, &
 if (parallel) then
 call MPI_Allgather(int(localShape(ubound(localShape,1)),MPI_INTEGER_KIND),1_MPI_INTEGER_KIND,MPI_INTEGER,&
 writeSize,1_MPI_INTEGER_KIND,MPI_INTEGER,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 end if
 #endif
 myStart = int(0,HSIZE_T)
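
The recurring change throughout this merge replaces the inline check if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' with call parallelization_chkerr(err_MPI). The wrapper itself lives in the parallelization module, which is not shown in this diff; a minimal sketch of the assumed interface and behaviour follows, and the actual implementation may differ.

! Hedged sketch only: parallelization_chkerr is defined in the parallelization module,
! outside this diff. Assumed behaviour: abort on any nonzero MPI return code.
subroutine parallelization_chkerr(e)

  integer(MPI_INTEGER_KIND), intent(in) :: e                         !< MPI error code

  if (e /= 0_MPI_INTEGER_KIND) error stop 'MPI error'                ! same hard stop as the old inline checks

end subroutine parallelization_chkerr

Centralizing the check keeps every call site to a single line and leaves one place to later report the failing rank or MPI error string.
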
@@ -158,9 +158,6 @@ subroutine materialpoint_general(mode, ffn, ffn1, temperature_inp, dt, elFE, ip,
 else validCalculation
 call homogenization_mechanical_response(dt,(elCP-1)*discretization_nIPs + ip, &
 (elCP-1)*discretization_nIPs + ip)
-if (.not. terminallyIll) &
-call homogenization_mechanical_response2(dt,(elCP-1)*discretization_nIPs + ip, &
-(elCP-1)*discretization_nIPs + ip)

 terminalIllness: if (terminallyIll) then

@@ -363,7 +363,7 @@ program DAMASK_grid
 end if; flush(IO_STDOUT)

 call MPI_Allreduce(signal_SIGUSR1,sig,1_MPI_INTEGER_KIND,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 if (mod(inc,loadCases(l)%f_out) == 0 .or. sig) then
 print'(/,1x,a)', '... saving results ........................................................'
 flush(IO_STDOUT)

@@ -371,7 +371,7 @@ program DAMASK_grid
 end if
 if (sig) call signal_setSIGUSR1(.false.)
 call MPI_Allreduce(signal_SIGUSR2,sig,1_MPI_INTEGER_KIND,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 if (mod(inc,loadCases(l)%f_restart) == 0 .or. sig) then
 do field = 1, nActiveFields
 select case (ID(field))

@@ -387,7 +387,7 @@ program DAMASK_grid
 end if
 if (sig) call signal_setSIGUSR2(.false.)
 call MPI_Allreduce(signal_SIGINT,sig,1_MPI_INTEGER_KIND,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 if (sig) exit loadCaseLooping
 end if skipping

@@ -97,12 +97,12 @@ subroutine discretization_grid_init(restart)


 call MPI_Bcast(cells,3_MPI_INTEGER_KIND,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD, err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 if (cells(1) < 2) call IO_error(844, ext_msg='cells(1) must be larger than 1')
 call MPI_Bcast(geomSize,3_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD, err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 call MPI_Bcast(origin,3_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD, err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)

 print'(/,1x,a,i0,a,i0,a,i0)', 'cells: ', cells(1), ' × ', cells(2), ' × ', cells(3)
 print '(1x,a,es8.2,a,es8.2,a,es8.2,a)', 'size: ', geomSize(1), ' × ', geomSize(2), ' × ', geomSize(3), ' m³'

@@ -126,15 +126,15 @@ subroutine discretization_grid_init(restart)

 call MPI_Gather(product(cells(1:2))*cells3Offset,1_MPI_INTEGER_KIND,MPI_INTEGER,displs,&
 1_MPI_INTEGER_KIND,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 call MPI_Gather(product(myGrid), 1_MPI_INTEGER_KIND,MPI_INTEGER,sendcounts,&
 1_MPI_INTEGER_KIND,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)

 allocate(materialAt(product(myGrid)))
 call MPI_Scatterv(materialAt_global,sendcounts,displs,MPI_INTEGER,materialAt,size(materialAt),&
 MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)

 call discretization_init(materialAt, &
 IPcoordinates0(myGrid,mySize,cells3Offset), &

@@ -318,10 +318,10 @@ function discretization_grid_getInitialCondition(label) result(ic)

 real(pREAL), dimension(:), allocatable :: ic_global, ic_local
 integer(MPI_INTEGER_KIND) :: err_MPI

 integer, dimension(worldsize) :: &
 displs, sendcounts

 if (worldrank == 0) then
 ic_global = VTI_readDataset_real(IO_read(CLI_geomFile),label)
 else

@@ -330,15 +330,15 @@ function discretization_grid_getInitialCondition(label) result(ic)

 call MPI_Gather(product(cells(1:2))*cells3Offset, 1_MPI_INTEGER_KIND,MPI_INTEGER,displs,&
 1_MPI_INTEGER_KIND,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 call MPI_Gather(product(cells(1:2))*cells3, 1_MPI_INTEGER_KIND,MPI_INTEGER,sendcounts,&
 1_MPI_INTEGER_KIND,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)

 allocate(ic_local(product(cells(1:2))*cells3))
 call MPI_Scatterv(ic_global,sendcounts,displs,MPI_DOUBLE,ic_local,size(ic_local),&
 MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)

 ic = reshape(ic_local,[cells(1),cells(2),cells3])

@@ -129,7 +129,7 @@ subroutine grid_damage_spectral_init(num_grid)
 CHKERRQ(err_PETSc)
 call MPI_Allgather(int(cells3,MPI_INTEGER_KIND),1_MPI_INTEGER_KIND,MPI_INTEGER,&
 cells3_global,1_MPI_INTEGER_KIND,MPI_INTEGER,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 call DMDACreate3D(PETSC_COMM_WORLD, &
 DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, & ! cut off stencil at boundary
 DMDA_STENCIL_BOX, & ! Moore (26) neighborhood around central point

@@ -239,10 +239,10 @@ function grid_damage_spectral_solution(Delta_t) result(solution)
 phi_max = maxval(phi)
 stagNorm = maxval(abs(phi - phi_stagInc))
 call MPI_Allreduce(MPI_IN_PLACE,stagNorm,1_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_MAX,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 solution%stagConverged = stagNorm < max(num%eps_damage_atol, num%eps_damage_rtol*phi_max)
 call MPI_Allreduce(MPI_IN_PLACE,solution%stagConverged,1_MPI_INTEGER_KIND,MPI_LOGICAL,MPI_LAND,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 phi_stagInc = phi

 call homogenization_set_phi(reshape(phi,[product(cells(1:2))*cells3]))

@@ -379,10 +379,10 @@ subroutine updateReference()

 K_ref = K_ref*wgt
 call MPI_Allreduce(MPI_IN_PLACE,K_ref,9_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 mu_ref = mu_ref*wgt
 call MPI_Allreduce(MPI_IN_PLACE,mu_ref,1_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)

 end subroutine updateReference

@@ -173,7 +173,7 @@ subroutine grid_mechanical_FEM_init(num_grid)
 CHKERRQ(err_PETSc)
 call MPI_Allgather(int(cells3,MPI_INTEGER_KIND),1_MPI_INTEGER_KIND,MPI_INTEGER,&
 cells3_global,1_MPI_INTEGER_KIND,MPI_INTEGER,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 call DMDACreate3d(PETSC_COMM_WORLD, &
 DM_BOUNDARY_PERIODIC, DM_BOUNDARY_PERIODIC, DM_BOUNDARY_PERIODIC, &
 DMDA_STENCIL_BOX, &

@@ -246,16 +246,16 @@ subroutine grid_mechanical_FEM_init(num_grid)

 call HDF5_read(P_aim,groupHandle,'P_aim',.false.)
 call MPI_Bcast(P_aim,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 call HDF5_read(F_aim,groupHandle,'F_aim',.false.)
 call MPI_Bcast(F_aim,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 call HDF5_read(F_aim_lastInc,groupHandle,'F_aim_lastInc',.false.)
 call MPI_Bcast(F_aim_lastInc,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 call HDF5_read(F_aimDot,groupHandle,'F_aimDot',.false.)
 call MPI_Bcast(F_aimDot,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 call HDF5_read(temp33n,groupHandle,'F')
 F = reshape(temp33n,[3,3,cells(1),cells(2),cells3])
 call HDF5_read(temp33n,groupHandle,'F_lastInc')

@@ -283,10 +283,10 @@ subroutine grid_mechanical_FEM_init(num_grid)
 print'(1x,a,1x,i0)', 'loading additional restart data of increment', CLI_restartInc
 call HDF5_read(C_volAvg,groupHandle,'C_volAvg',.false.)
 call MPI_Bcast(C_volAvg,81_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 call HDF5_read(C_volAvgLastInc,groupHandle,'C_volAvgLastInc',.false.)
 call MPI_Bcast(C_volAvgLastInc,81_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)

 call HDF5_closeGroup(groupHandle)
 call HDF5_closeFile(fileHandle)

@@ -575,7 +575,7 @@ subroutine formResidual(da_local,x_local, &
 P_av,C_volAvg,devNull, &
 F,params%Delta_t,params%rotation_BC)
 call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1_MPI_INTEGER_KIND,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)

 !--------------------------------------------------------------------------------------------------
 ! stress BC handling
@@ -169,7 +169,7 @@ subroutine grid_mechanical_spectral_basic_init(num_grid)
 CHKERRQ(err_PETSc)
 call MPI_Allgather(int(cells3,MPI_INTEGER_KIND),1_MPI_INTEGER_KIND,MPI_INTEGER,&
 cells3_global,1_MPI_INTEGER_KIND,MPI_INTEGER,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 call DMDACreate3d(PETSC_COMM_WORLD, &
 DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, & ! cut off stencil at boundary
 DMDA_STENCIL_BOX, & ! Moore (26) neighborhood around central point

@@ -207,16 +207,16 @@ subroutine grid_mechanical_spectral_basic_init(num_grid)

 call HDF5_read(P_aim,groupHandle,'P_aim',.false.)
 call MPI_Bcast(P_aim,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 call HDF5_read(F_aim,groupHandle,'F_aim',.false.)
 call MPI_Bcast(F_aim,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 call HDF5_read(F_aim_lastInc,groupHandle,'F_aim_lastInc',.false.)
 call MPI_Bcast(F_aim_lastInc,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 call HDF5_read(F_aimDot,groupHandle,'F_aimDot',.false.)
 call MPI_Bcast(F_aimDot,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 call HDF5_read(temp33n,groupHandle,'F')
 F = reshape(temp33n,[9,cells(1),cells(2),cells3])
 call HDF5_read(temp33n,groupHandle,'F_lastInc')

@@ -238,13 +238,13 @@ subroutine grid_mechanical_spectral_basic_init(num_grid)
 print'(1x,a,1x,i0)', 'loading additional restart data of increment', CLI_restartInc
 call HDF5_read(C_volAvg,groupHandle,'C_volAvg',.false.)
 call MPI_Bcast(C_volAvg,81_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 call HDF5_read(C_volAvgLastInc,groupHandle,'C_volAvgLastInc',.false.)
 call MPI_Bcast(C_volAvgLastInc,81_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 call HDF5_read(C_minMaxAvg,groupHandle,'C_minMaxAvg',.false.)
 call MPI_Bcast(C_minMaxAvg,81_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)

 call HDF5_closeGroup(groupHandle)
 call HDF5_closeFile(fileHandle)

@@ -519,7 +519,7 @@ subroutine formResidual(residual_subdomain, F, &
 P_av,C_volAvg,C_minMaxAvg, &
 F,params%Delta_t,params%rotation_BC)
 call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1_MPI_INTEGER_KIND,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 err_div = utilities_divergenceRMS(P)
 end associate

@@ -190,7 +190,7 @@ subroutine grid_mechanical_spectral_polarization_init(num_grid)
 CHKERRQ(err_PETSc)
 call MPI_Allgather(int(cells3,pPetscInt),1_MPI_INTEGER_KIND,MPI_INTEGER,&
 cells3_global,1_MPI_INTEGER_KIND,MPI_INTEGER,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 call DMDACreate3d(PETSC_COMM_WORLD, &
 DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, & ! cut off stencil at boundary
 DMDA_STENCIL_BOX, & ! Moore (26) neighborhood around central point

@@ -230,16 +230,16 @@ subroutine grid_mechanical_spectral_polarization_init(num_grid)

 call HDF5_read(P_aim,groupHandle,'P_aim',.false.)
 call MPI_Bcast(P_aim,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 call HDF5_read(F_aim,groupHandle,'F_aim',.false.)
 call MPI_Bcast(F_aim,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 call HDF5_read(F_aim_lastInc,groupHandle,'F_aim_lastInc',.false.)
 call MPI_Bcast(F_aim_lastInc,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 call HDF5_read(F_aimDot,groupHandle,'F_aimDot',.false.)
 call MPI_Bcast(F_aimDot,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 call HDF5_read(temp33n,groupHandle,'F')
 F = reshape(temp33n,[9,cells(1),cells(2),cells3])
 call HDF5_read(temp33n,groupHandle,'F_lastInc')

@@ -267,13 +267,13 @@ subroutine grid_mechanical_spectral_polarization_init(num_grid)
 print '(1x,a,1x,i0)', 'loading additional restart data of increment', CLI_restartInc
 call HDF5_read(C_volAvg,groupHandle,'C_volAvg',.false.)
 call MPI_Bcast(C_volAvg,81_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 call HDF5_read(C_volAvgLastInc,groupHandle,'C_volAvgLastInc',.false.)
 call MPI_Bcast(C_volAvgLastInc,81_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 call HDF5_read(C_minMaxAvg,groupHandle,'C_minMaxAvg',.false.)
 call MPI_Bcast(C_minMaxAvg,81_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)

 call HDF5_closeGroup(groupHandle)
 call HDF5_closeFile(fileHandle)

@@ -573,7 +573,7 @@ subroutine formResidual(residual_subdomain, FandF_tau, &

 F_av = sum(sum(sum(F,dim=5),dim=4),dim=3) * wgt
 call MPI_Allreduce(MPI_IN_PLACE,F_av,9_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)

 call SNESGetNumberFunctionEvals(SNES_mech,nfuncs,err_PETSc)
 CHKERRQ(err_PETSc)
@@ -136,15 +136,11 @@ subroutine utilities_constitutiveResponse(P,P_av,C_volAvg,C_minmaxAvg,&
 homogenization_F = reshape(F,[3,3,product(cells(1:2))*cells3]) ! set materialpoint target F to estimated field

 call homogenization_mechanical_response(Delta_t,1,product(cells(1:2))*cells3) ! calculate P field
-if (.not. terminallyIll) &
-call homogenization_thermal_response(Delta_t,1,product(cells(1:2))*cells3)
-if (.not. terminallyIll) &
-call homogenization_mechanical_response2(Delta_t,1,product(cells(1:2))*cells3)

 P = reshape(homogenization_P, [3,3,cells(1),cells(2),cells3])
 P_av = sum(sum(sum(P,dim=5),dim=4),dim=3) * wgt
 call MPI_Allreduce(MPI_IN_PLACE,P_av,9_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 if (present(rotation_BC)) then
 if (any(dNeq(rotation_BC%asQuaternion(), real([1.0, 0.0, 0.0, 0.0],pREAL)))) &
 print'(/,1x,a,/,2(3(2x,f12.4,1x)/),3(2x,f12.4,1x))', &

@@ -172,21 +168,21 @@ subroutine utilities_constitutiveResponse(P,P_av,C_volAvg,C_minmaxAvg,&

 valueAndRank = [dPdF_norm_max,real(worldrank,pREAL)]
 call MPI_Allreduce(MPI_IN_PLACE,valueAndRank,1_MPI_INTEGER_KIND,MPI_2DOUBLE_PRECISION,MPI_MAXLOC,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 call MPI_Bcast(dPdF_max,81_MPI_INTEGER_KIND,MPI_DOUBLE,int(valueAndRank(2),MPI_INTEGER_KIND),MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)

 valueAndRank = [dPdF_norm_min,real(worldrank,pREAL)]
 call MPI_Allreduce(MPI_IN_PLACE,valueAndRank,1_MPI_INTEGER_KIND,MPI_2DOUBLE_PRECISION,MPI_MINLOC,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 call MPI_Bcast(dPdF_min,81_MPI_INTEGER_KIND,MPI_DOUBLE,int(valueAndRank(2),MPI_INTEGER_KIND),MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)

 C_minmaxAvg = 0.5_pREAL*(dPdF_max + dPdF_min)

 C_volAvg = sum(homogenization_dPdF,dim=5)
 call MPI_Allreduce(MPI_IN_PLACE,C_volAvg,81_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 C_volAvg = C_volAvg * wgt


@@ -242,7 +238,7 @@ function utilities_forwardTensorField(Delta_t,field_lastInc,rate,aim)
 if (present(aim)) then !< correct to match average
 fieldDiff = sum(sum(sum(utilities_forwardTensorField,dim=5),dim=4),dim=3)*wgt
 call MPI_Allreduce(MPI_IN_PLACE,fieldDiff,9_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 fieldDiff = fieldDiff - aim
 utilities_forwardTensorField = utilities_forwardTensorField &
 - spread(spread(spread(fieldDiff,3,cells(1)),4,cells(2)),5,cells3)
@@ -123,7 +123,7 @@ subroutine grid_thermal_spectral_init(num_grid)
 CHKERRQ(err_PETSc)
 call MPI_Allgather(int(cells3,pPETSCINT),1_MPI_INTEGER_KIND,MPI_INTEGER,&
 cells3_global,1_MPI_INTEGER_KIND,MPI_INTEGER,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 call DMDACreate3D(PETSC_COMM_WORLD, &
 DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, & ! cut off stencil at boundary
 DMDA_STENCIL_BOX, & ! Moore (26) neighborhood around central point

@@ -218,10 +218,10 @@ function grid_thermal_spectral_solution(Delta_t) result(solution)
 T_max = maxval(T)
 stagNorm = maxval(abs(T - T_stagInc))
 call MPI_Allreduce(MPI_IN_PLACE,stagNorm,1_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_MAX,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 solution%stagConverged = stagNorm < max(num%eps_thermal_atol, num%eps_thermal_rtol*T_max)
 call MPI_Allreduce(MPI_IN_PLACE,solution%stagConverged,1_MPI_INTEGER_KIND,MPI_LOGICAL,MPI_LAND,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 T_stagInc = T

 call homogenization_thermal_setField(reshape(T,[product(cells(1:2))*cells3]), &

@@ -323,6 +323,8 @@ subroutine formResidual(residual_subdomain,x_scal,r,dummy,err_PETSc)
 real(pREAL), dimension(3,cells(1),cells(2),cells3) :: vectorField


+call homogenization_thermal_response(Delta_t_,1,product(cells(1:2))*cells3)
+
 associate(T => x_scal)
 vectorField = utilities_ScalarGradient(T)
 ce = 0

@@ -365,10 +367,10 @@ subroutine updateReference()

 K_ref = K_ref*wgt
 call MPI_Allreduce(MPI_IN_PLACE,K_ref,9_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 mu_ref = mu_ref*wgt
 call MPI_Allreduce(MPI_IN_PLACE,mu_ref,1_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)

 end subroutine updateReference

@@ -77,7 +77,7 @@ module spectral_utilities

 type :: tNumerics
 integer :: &
-divergence_correction !< scale divergence/curl calculation: [0: no correction, 1: size scaled to 1, 2: size scaled to Npoints]
+divergence_correction !< scale divergence/curl calculation
 logical :: &
 memory_efficient !< calculate gamma operator on the fly
 end type tNumerics

@@ -563,7 +563,7 @@ real(pREAL) function utilities_divergenceRMS(tensorField)
 conjg(-xi1st(1:3,cells1Red,k,j))*rescaledGeom))**2)
 end do; end do
 call MPI_Allreduce(MPI_IN_PLACE,utilities_divergenceRMS,1_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 utilities_divergenceRMS = sqrt(utilities_divergenceRMS) * wgt ! RMS in real space calculated with Parsevals theorem from Fourier space
 if (cells(1) == 1) utilities_divergenceRMS = utilities_divergenceRMS * 0.5_pREAL ! counted twice in case of cells(1) == 1

@@ -629,7 +629,7 @@ real(pREAL) function utilities_curlRMS(tensorField)
 end do; end do

 call MPI_Allreduce(MPI_IN_PLACE,utilities_curlRMS,1_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 utilities_curlRMS = sqrt(utilities_curlRMS) * wgt ! RMS in real space calculated with Parsevals theorem from Fourier space
 if (cells(1) == 1) utilities_curlRMS = utilities_curlRMS * 0.5_pREAL ! counted twice in case of cells(1) == 1

@@ -778,7 +778,7 @@ subroutine utilities_updateCoords(F)
 ! average F
 if (cells3Offset == 0) Favg = tensorField_fourier(1:3,1:3,1,1,1)%re*wgt
 call MPI_Bcast(Favg,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)

 !--------------------------------------------------------------------------------------------------
 ! integration in Fourier space to get fluctuations of cell center displacements

@@ -798,24 +798,24 @@ subroutine utilities_updateCoords(F)

 !--------------------------------------------------------------------------------------------------
 ! pad cell center fluctuations along z-direction (needed when running MPI simulation)
 c = product(shape(u_tilde_p_padded(:,:,:,1))) !< amount of data to transfer
 rank_t = modulo(worldrank+1_MPI_INTEGER_KIND,worldsize)
 rank_b = modulo(worldrank-1_MPI_INTEGER_KIND,worldsize)

 ! send bottom layer to process below
 call MPI_Isend(u_tilde_p_padded(:,:,:,1), c,MPI_DOUBLE,rank_b,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,request(1),err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 call MPI_Irecv(u_tilde_p_padded(:,:,:,cells3+1),c,MPI_DOUBLE,rank_t,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,request(2),err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)

 ! send top layer to process above
 call MPI_Isend(u_tilde_p_padded(:,:,:,cells3) ,c,MPI_DOUBLE,rank_t,1_MPI_INTEGER_KIND,MPI_COMM_WORLD,request(3),err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 call MPI_Irecv(u_tilde_p_padded(:,:,:,0), c,MPI_DOUBLE,rank_b,1_MPI_INTEGER_KIND,MPI_COMM_WORLD,request(4),err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)

 call MPI_Waitall(4,request,status,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 #if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY)
 ! ToDo
 #else

@@ -868,7 +868,7 @@ subroutine selfTest()
 call fftw_mpi_execute_dft_r2c(planTensorForth,tensorField_real,tensorField_fourier)
 call MPI_Allreduce(sum(sum(sum(tensorField_real_,dim=5),dim=4),dim=3),tensorSum,9_MPI_INTEGER_KIND, &
 MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 if (worldrank==0) then
 if (any(dNeq(tensorSum/tensorField_fourier(:,:,1,1,1)%re,1.0_pREAL,1.0e-12_pREAL))) &
 error stop 'mismatch avg tensorField FFT <-> real'

@@ -884,7 +884,7 @@ subroutine selfTest()
 call fftw_mpi_execute_dft_r2c(planVectorForth,vectorField_real,vectorField_fourier)
 call MPI_Allreduce(sum(sum(sum(vectorField_real_,dim=4),dim=3),dim=2),vectorSum,3_MPI_INTEGER_KIND, &
 MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 if (worldrank==0) then
 if (any(dNeq(vectorSum/vectorField_fourier(:,1,1,1)%re,1.0_pREAL,1.0e-12_pREAL))) &
 error stop 'mismatch avg vectorField FFT <-> real'

@@ -900,7 +900,7 @@ subroutine selfTest()
 call fftw_mpi_execute_dft_r2c(planScalarForth,scalarField_real,scalarField_fourier)
 call MPI_Allreduce(sum(sum(sum(scalarField_real_,dim=3),dim=2),dim=1),scalarSum,1_MPI_INTEGER_KIND, &
 MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 if (worldrank==0) then
 if (dNeq(scalarSum/scalarField_fourier(1,1,1)%re,1.0_pREAL,1.0e-12_pREAL)) &
 error stop 'mismatch avg scalarField FFT <-> real'

@@ -912,7 +912,7 @@ subroutine selfTest()

 call random_number(r)
 call MPI_Bcast(r,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)

 scalarField_real_ = r(1,1)
 if (maxval(abs(utilities_scalarGradient(scalarField_real_)))>5.0e-9_pREAL) error stop 'non-zero grad(const)'
@@ -168,7 +168,6 @@ module homogenization
 public :: &
 homogenization_init, &
 homogenization_mechanical_response, &
-homogenization_mechanical_response2, &
 homogenization_thermal_response, &
 homogenization_thermal_active, &
 homogenization_mu_T, &

@@ -227,7 +226,8 @@ subroutine homogenization_mechanical_response(Delta_t,cell_start,cell_end)
 doneAndHappy


-!$OMP PARALLEL DO PRIVATE(en,ho,co,converged,doneAndHappy)
+!$OMP PARALLEL
+!$OMP DO PRIVATE(en,ho,co,converged,doneAndHappy)
 do ce = cell_start, cell_end

 en = material_entry_homogenization(ce)

@@ -260,7 +260,18 @@ subroutine homogenization_mechanical_response(Delta_t,cell_start,cell_end)
 terminallyIll = .true.
 end if
 end do
-!$OMP END PARALLEL DO
+!$OMP END DO

+!$OMP DO PRIVATE(ho)
+do ce = cell_start, cell_end
+ho = material_ID_homogenization(ce)
+do co = 1, homogenization_Nconstituents(ho)
+call crystallite_orientations(co,ce)
+end do
+call mechanical_homogenize(Delta_t,ce)
+end do
+!$OMP END DO
+!$OMP END PARALLEL

 end subroutine homogenization_mechanical_response

@@ -294,32 +305,6 @@ subroutine homogenization_thermal_response(Delta_t,cell_start,cell_end)
 end subroutine homogenization_thermal_response


-!--------------------------------------------------------------------------------------------------
-!> @brief
-!--------------------------------------------------------------------------------------------------
-subroutine homogenization_mechanical_response2(Delta_t,cell_start,cell_end)
-
-real(pREAL), intent(in) :: Delta_t !< time increment
-integer, intent(in) :: &
-cell_start, cell_end
-
-integer :: &
-co, ce, ho
-
-
-!$OMP PARALLEL DO PRIVATE(ho)
-do ce = cell_start, cell_end
-ho = material_ID_homogenization(ce)
-do co = 1, homogenization_Nconstituents(ho)
-call crystallite_orientations(co,ce)
-end do
-call mechanical_homogenize(Delta_t,ce)
-end do
-!$OMP END PARALLEL DO
-
-end subroutine homogenization_mechanical_response2
-
-
 !--------------------------------------------------------------------------------------------------
 !> @brief writes homogenization results to HDF5 output file
 !--------------------------------------------------------------------------------------------------
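
Note on the homogenization change above: the former homogenization_mechanical_response2 pass (orientation update and homogenization of the constituent responses) is folded into homogenization_mechanical_response as a second worksharing loop inside a single parallel region, and the separate public symbol is dropped; callers in the grid and mesh utilities lose their guarded second call accordingly. A condensed skeleton of the resulting structure, with the constitutive/convergence handling of the first loop omitted, looks like this:

!$OMP PARALLEL
!$OMP DO PRIVATE(en,ho,co,converged,doneAndHappy)
do ce = cell_start, cell_end
  ! ... constitutive update and convergence handling as before ...
end do
!$OMP END DO                                       ! implicit barrier before the second pass

!$OMP DO PRIVATE(ho)
do ce = cell_start, cell_end
  ho = material_ID_homogenization(ce)
  do co = 1, homogenization_Nconstituents(ho)
    call crystallite_orientations(co,ce)
  end do
  call mechanical_homogenize(Delta_t,ce)
end do
!$OMP END DO
!$OMP END PARALLEL

Keeping both passes in one !$OMP PARALLEL region avoids spawning the thread team twice per call, while the implicit barrier at the first !$OMP END DO preserves the original ordering: all cells finish the constitutive update before any orientation/homogenization work starts.
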
@@ -18,6 +18,7 @@ module FEM_utilities
 use math
 use misc
 use IO
+use parallelization
 use discretization_mesh
 use homogenization
 use FEM_quadrature

@@ -144,16 +145,15 @@ subroutine utilities_constitutiveResponse(Delta_t,P_av,forwardData)

 integer(MPI_INTEGER_KIND) :: err_MPI


 print'(/,1x,a)', '... evaluating constitutive response ......................................'

 call homogenization_mechanical_response(Delta_t,1,mesh_maxNips*mesh_NcpElems) ! calculate P field
-if (.not. terminallyIll) &
-call homogenization_mechanical_response2(Delta_t,1,mesh_maxNips*mesh_NcpElems)
 cutBack = .false.

 P_av = sum(homogenization_P,dim=3) * wgt
 call MPI_Allreduce(MPI_IN_PLACE,P_av,9_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)


 end subroutine utilities_constitutiveResponse
@@ -121,13 +121,13 @@ subroutine discretization_mesh_init(restart)
 CHKERRQ(err_PETSc)
 mesh_Nboundaries = int(Nboundaries)
 call MPI_Bcast(mesh_Nboundaries,1_MPI_INTEGER_KIND,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 call MPI_Bcast(mesh_NcpElemsGlobal,1_MPI_INTEGER_KIND,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 dim = int(dimPlex)
 call MPI_Bcast(dim,1_MPI_INTEGER_KIND,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
 dimPlex = int(dim,pPETSCINT)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)

 if (worldsize == 1) then
 call DMClone(globalMesh,geomMesh,err_PETSc)

@@ -149,7 +149,7 @@ subroutine discretization_mesh_init(restart)
 call ISRestoreIndicesF90(faceSetIS,pFaceSets,err_PETSc)
 end if
 call MPI_Bcast(mesh_boundaries,mesh_Nboundaries,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)

 call DMDestroy(globalMesh,err_PETSc)
 CHKERRQ(err_PETSc)
@@ -461,7 +461,7 @@ subroutine FEM_mechanical_formResidual(dm_local,xx_local,f_local,dummy,err_PETSc
 ! evaluate constitutive response
 call utilities_constitutiveResponse(params%Delta_t,P_av,ForwardData)
 call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1_MPI_INTEGER_KIND,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 ForwardData = .false.

 !--------------------------------------------------------------------------------------------------