Merge remote-tracking branch 'remotes/origin/MiscImprovements' into development

This commit is contained in: development
Franz Roters 2020-02-27 10:16:16 +01:00
commit 303ab40793
15 changed files with 686 additions and 760 deletions

View File

@ -104,7 +104,7 @@ checkout:
- release
###################################################################################################
Pytest:
Pytest_python:
stage: python
script:
- cd $DAMASKROOT/python
@ -385,6 +385,15 @@ Phenopowerlaw_singleSlip:
- master
- release
Pytest_grid:
stage: grid
script:
- cd pytest
- pytest
except:
- master
- release
###################################################################################################
Marc_compileIfort:
stage: compileMarc

@ -1 +1 @@
Subproject commit 6db5f4666fc651b4de3b44ceaed3f2b848170ac9
Subproject commit 9573ce7bd2c1a7188c1aac5b83aa76d480c2bdb0

View File

@ -1,49 +0,0 @@
[Aluminum]
elasticity hooke
plasticity phenopowerlaw
(output) resistance_slip
(output) shearrate_slip
(output) resolvedstress_slip
(output) accumulated_shear_slip
(output) resistance_twin
(output) shearrate_twin
(output) resolvedstress_twin
(output) accumulated_shear_twin
lattice_structure fcc
Nslip 12 # per family
Ntwin 0 # per family
c11 106.75e9
c12 60.41e9
c44 28.34e9
gdot0_slip 0.001
n_slip 20
tau0_slip 31e6 # per family
tausat_slip 63e6 # per family
a_slip 2.25
gdot0_twin 0.001
n_twin 20
tau0_twin 31e6 # per family
h0_slipslip 75e6
interaction_slipslip 1 1 1.4 1.4 1.4 1.4
atol_resistance 1
(stiffness_degradation) damage
(stiffness_degradation) porosity
{./Phase_Damage.config}
{./Phase_Thermal.config}
{./Phase_Vacancy.config}
{./Phase_Porosity.config}
{./Phase_Hydrogen.config}
{./Source_Damage_IsoBrittle.config}
{./Source_Thermal_Dissipation.config}
{./Source_Vacancy_PhenoPlasticity.config}
{./Source_Vacancy_Irradiation.config}
{./Kinematics_Thermal_Expansion.config}
{./Kinematics_Vacancy_Strain.config}
{./Kinematics_Hydrogen_Strain.config}

View File

@ -83,7 +83,7 @@ for name in filenames:
# ------------------------------------------ assemble header ---------------------------------------
randomSeed = int(os.urandom(4).encode('hex'), 16) if options.randomSeed is None else options.randomSeed # random seed per file
randomSeed = int(os.urandom(4).hex(), 16) if options.randomSeed is None else options.randomSeed # random seed per file
np.random.seed(randomSeed)
table.info_append([scriptID + '\t' + ' '.join(sys.argv[1:]),
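This hunk swaps the Python 2 idiom os.urandom(4).encode('hex') for the Python 3 bytes.hex() method (the same change recurs in a later script). A minimal sketch of the new seeding idiom, with hypothetical variable names:

    import os
    import numpy as np

    # four random bytes -> eight hex digits -> integer in [0, 2**32 - 1]
    random_seed = int(os.urandom(4).hex(), 16)
    np.random.seed(random_seed)   # numpy accepts 32-bit seeds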

View File

@ -1,11 +1,9 @@
#!/usr/bin/env python3
# -*- coding: UTF-8 no BOM -*-
import threading,time,os,sys,random
import numpy as np
from optparse import OptionParser
from io import StringIO
import binascii
import damask
scriptName = os.path.splitext(os.path.basename(__file__))[0]
@ -16,9 +14,10 @@ currentSeedsName = None
#---------------------------------------------------------------------------------------------------
class myThread (threading.Thread):
"""perturbes seed in seed file, performes Voronoi tessellation, evaluates, and updates best match"""
"""Perturb seed in seed file, performes Voronoi tessellation, evaluates, and updates best match."""
def __init__(self, threadID):
"""Threading class with thread ID."""
threading.Thread.__init__(self)
self.threadID = threadID
@ -215,7 +214,7 @@ options = parser.parse_args()[0]
damask.util.report(scriptName,options.seedFile)
if options.randomSeed is None:
options.randomSeed = int(binascii.hexlify(os.urandom(4)),16)
options.randomSeed = int(os.urandom(4).hex(),16)
damask.util.croak(options.randomSeed)
delta = (options.scale/options.grid[0],options.scale/options.grid[1],options.scale/options.grid[2])
baseFile=os.path.splitext(os.path.basename(options.seedFile))[0]

View File

@ -400,6 +400,8 @@ subroutine IO_error(error_ID,el,ip,g,instance,ext_msg)
msg = 'number of values does not match'
case (147)
msg = 'not supported anymore'
case (148)
msg = 'Nconstituents mismatch between homogenization and microstructure'
!--------------------------------------------------------------------------------------------------
! material error messages and related messages in mesh

View File

@ -729,7 +729,7 @@ subroutine crystallite_results
real(pReal), allocatable, dimension(:,:,:) :: select_tensors
integer :: e,i,c,j
allocate(select_tensors(3,3,count(material_phaseAt==instance)*homogenization_maxNgrains*discretization_nIP))
allocate(select_tensors(3,3,count(material_phaseAt==instance)*discretization_nIP))
j=0
do e = 1, size(material_phaseAt,2)

View File

@ -21,10 +21,10 @@ module grid_mech_FEM
use discretization
use mesh_grid
use debug
implicit none
private
!--------------------------------------------------------------------------------------------------
! derived types
type(tSolutionParams), private :: params
@ -52,20 +52,20 @@ module grid_mech_FEM
F_aim_lastIter = math_I3, &
F_aim_lastInc = math_I3, & !< previous average deformation gradient
P_av = 0.0_pReal !< average 1st Piola--Kirchhoff stress
character(len=pStringLen), private :: incInfo !< time and increment information
real(pReal), private, dimension(3,3,3,3) :: &
C_volAvg = 0.0_pReal, & !< current volume average stiffness
C_volAvgLastInc = 0.0_pReal, & !< previous volume average stiffness
S = 0.0_pReal !< current compliance (filled up with zeros)
real(pReal), private :: &
err_BC !< deviation from stress BC
integer, private :: &
totalIter = 0 !< total iteration in current increment
public :: &
grid_mech_FEM_init, &
grid_mech_FEM_solution, &
@ -79,7 +79,7 @@ contains
!> @brief allocates all necessary fields and fills them with data, potentially from restart info
!--------------------------------------------------------------------------------------------------
subroutine grid_mech_FEM_init
real(pReal) :: HGCoeff = 0.0e-2_pReal
PetscInt, dimension(0:worldsize-1) :: localK
real(pReal), dimension(3,3) :: &
@ -99,9 +99,9 @@ subroutine grid_mech_FEM_init
real(pReal), dimension(3,3,3,3) :: devNull
PetscScalar, pointer, dimension(:,:,:,:) :: &
u_current,u_lastInc
write(6,'(/,a)') ' <<<+- grid_mech_FEM init -+>>>'; flush(6)
!--------------------------------------------------------------------------------------------------
! set default and user defined options for PETSc
call PETScOptionsInsertString(PETSC_NULL_OPTIONS,'-mech_snes_type newtonls -mech_ksp_type fgmres &
@ -115,11 +115,11 @@ subroutine grid_mech_FEM_init
allocate(F (3,3,grid(1),grid(2),grid3),source = 0.0_pReal)
allocate(P_current (3,3,grid(1),grid(2),grid3),source = 0.0_pReal)
allocate(F_lastInc (3,3,grid(1),grid(2),grid3),source = 0.0_pReal)
!--------------------------------------------------------------------------------------------------
! initialize solver specific parts of PETSc
call SNESCreate(PETSC_COMM_WORLD,mech_snes,ierr); CHKERRQ(ierr)
call SNESSetOptionsPrefix(mech_snes,'mech_',ierr);CHKERRQ(ierr)
localK = 0
localK(worldrank) = grid3
call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,PETSC_COMM_WORLD,ierr)
@ -141,12 +141,12 @@ subroutine grid_mech_FEM_init
call DMCreateGlobalVector(mech_grid,solution_lastInc,ierr); CHKERRQ(ierr)
call DMCreateGlobalVector(mech_grid,solution_rate ,ierr); CHKERRQ(ierr)
call DMSNESSetFunctionLocal(mech_grid,formResidual,PETSC_NULL_SNES,ierr)
CHKERRQ(ierr)
call DMSNESSetJacobianLocal(mech_grid,formJacobian,PETSC_NULL_SNES,ierr)
CHKERRQ(ierr)
call SNESSetConvergenceTest(mech_snes,converged,PETSC_NULL_SNES,PETSC_NULL_FUNCTION,ierr) ! specify custom convergence check function "_converged"
CHKERRQ(ierr)
call SNESSetMaxLinearSolveFailures(mech_snes, huge(1), ierr); CHKERRQ(ierr) ! ignore linear solve failures
call SNESSetFromOptions(mech_snes,ierr); CHKERRQ(ierr) ! pull it all together with additional cli arguments
!--------------------------------------------------------------------------------------------------
@ -156,15 +156,15 @@ subroutine grid_mech_FEM_init
call VecSet(solution_rate ,0.0_pReal,ierr);CHKERRQ(ierr)
call DMDAVecGetArrayF90(mech_grid,solution_current,u_current,ierr); CHKERRQ(ierr)
call DMDAVecGetArrayF90(mech_grid,solution_lastInc,u_lastInc,ierr); CHKERRQ(ierr)
call DMDAGetCorners(mech_grid,xstart,ystart,zstart,xend,yend,zend,ierr) ! local grid extent
CHKERRQ(ierr)
xend = xstart+xend-1
yend = ystart+yend-1
zend = zstart+zend-1
delta = geomSize/real(grid,pReal) ! grid spacing
detJ = product(delta) ! cell volume
BMat = reshape(real([-1.0_pReal/delta(1),-1.0_pReal/delta(2),-1.0_pReal/delta(3), &
1.0_pReal/delta(1),-1.0_pReal/delta(2),-1.0_pReal/delta(3), &
-1.0_pReal/delta(1), 1.0_pReal/delta(2),-1.0_pReal/delta(3), &
@ -173,7 +173,7 @@ subroutine grid_mech_FEM_init
1.0_pReal/delta(1),-1.0_pReal/delta(2), 1.0_pReal/delta(3), &
-1.0_pReal/delta(1), 1.0_pReal/delta(2), 1.0_pReal/delta(3), &
1.0_pReal/delta(1), 1.0_pReal/delta(2), 1.0_pReal/delta(3)],pReal), [3,8])/4.0_pReal ! shape function derivative matrix
HGMat = matmul(transpose(HGcomp),HGcomp) &
* HGCoeff*(delta(1)*delta(2) + delta(2)*delta(3) + delta(3)*delta(1))/16.0_pReal ! hourglass stabilization matrix
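BMat above is the shape-function derivative matrix of a trilinear hexahedral element on a regular grid; each column holds one node's (±1/Δx, ±1/Δy, ±1/Δz)/4, with the x index varying fastest. A NumPy sketch that reproduces the same 3×8 matrix, assuming hypothetical unit spacing:

    import numpy as np

    delta = np.array([1.0, 1.0, 1.0])              # hypothetical grid spacing
    signs = np.array([[x, y, z] for z in (-1, 1)
                                for y in (-1, 1)
                                for x in (-1, 1)], dtype=float)  # x fastest, as in the reshape above
    BMat = (signs / delta).T / 4.0                 # 3 x 8 shape-function derivative matrix
    assert np.allclose(BMat.sum(axis=1), 0.0)      # a constant field has zero gradient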
@ -181,11 +181,11 @@ subroutine grid_mech_FEM_init
! init fields
restartRead: if (interface_restartInc > 0) then
write(6,'(/,a,i0,a)') ' reading restart data of increment ', interface_restartInc, ' from file'
write(fileName,'(a,a,i0,a)') trim(getSolverJobName()),'_',worldrank,'.hdf5'
fileHandle = HDF5_openFile(fileName)
groupHandle = HDF5_openGroup(fileHandle,'solver')
call HDF5_read(groupHandle,F_aim, 'F_aim')
call HDF5_read(groupHandle,F_aim_lastInc,'F_aim_lastInc')
call HDF5_read(groupHandle,F_aimDot, 'F_aimDot')
@ -193,7 +193,7 @@ subroutine grid_mech_FEM_init
call HDF5_read(groupHandle,F_lastInc, 'F_lastInc')
call HDF5_read(groupHandle,u_current, 'u')
call HDF5_read(groupHandle,u_lastInc, 'u_lastInc')
elseif (interface_restartInc == 0) then restartRead
F_lastInc = spread(spread(spread(math_I3,3,grid(1)),4,grid(2)),5,grid3) ! initialize to identity
F = spread(spread(spread(math_I3,3,grid(1)),4,grid(2)),5,grid3)
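The nested spread calls tile the 3×3 identity math_I3 over every grid point. A NumPy analogue, with hypothetical grid dimensions and the axis order reversed relative to Fortran:

    import numpy as np

    nx, ny, nz = 16, 16, 16                        # hypothetical local grid
    F_lastInc = np.broadcast_to(np.eye(3), (nz, ny, nx, 3, 3)).copy()
    F = F_lastInc.copy()                           # start from an undeformed state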
@ -207,12 +207,12 @@ subroutine grid_mech_FEM_init
CHKERRQ(ierr)
call DMDAVecRestoreArrayF90(mech_grid,solution_lastInc,u_lastInc,ierr)
CHKERRQ(ierr)
restartRead2: if (interface_restartInc > 0) then
write(6,'(/,a,i0,a)') ' reading more restart data of increment ', interface_restartInc, ' from file'
call HDF5_read(groupHandle,C_volAvg, 'C_volAvg')
call HDF5_read(groupHandle,C_volAvgLastInc,'C_volAvgLastInc')
call HDF5_closeGroup(groupHandle)
call HDF5_closeFile(fileHandle)
@ -243,14 +243,14 @@ function grid_mech_FEM_solution(incInfoIn,timeinc,timeinc_old,stress_BC,rotation
! PETSc Data
PetscErrorCode :: ierr
SNESConvergedReason :: reason
incInfo = incInfoIn
!--------------------------------------------------------------------------------------------------
! update stiffness (and gamma operator)
S = utilities_maskedCompliance(rotation_BC,stress_BC%maskLogical,C_volAvg)
!--------------------------------------------------------------------------------------------------
! set module wide available data
params%stress_mask = stress_BC%maskFloat
params%stress_BC = stress_BC%values
params%rotation_BC = rotation_BC
@ -258,13 +258,13 @@ function grid_mech_FEM_solution(incInfoIn,timeinc,timeinc_old,stress_BC,rotation
params%timeincOld = timeinc_old
!--------------------------------------------------------------------------------------------------
! solve BVP
call SNESsolve(mech_snes,PETSC_NULL_VEC,solution_current,ierr);CHKERRQ(ierr)
!--------------------------------------------------------------------------------------------------
! check convergence
call SNESGetConvergedReason(mech_snes,reason,ierr);CHKERRQ(ierr)
solution%converged = reason > 0
solution%iterationsNeeded = totalIter
solution%termIll = terminallyIll
@ -296,15 +296,15 @@ subroutine grid_mech_FEM_forward(cutBack,guess,timeinc,timeinc_old,loadCaseTime,
PetscErrorCode :: ierr
PetscScalar, pointer, dimension(:,:,:,:) :: &
u_current,u_lastInc
call DMDAVecGetArrayF90(mech_grid,solution_current,u_current,ierr); CHKERRQ(ierr)
call DMDAVecGetArrayF90(mech_grid,solution_lastInc,u_lastInc,ierr); CHKERRQ(ierr)
if (cutBack) then
C_volAvg = C_volAvgLastInc
else
C_volAvgLastInc = C_volAvg
F_aimDot = merge(stress_BC%maskFloat*(F_aim-F_aim_lastInc)/timeinc_old, 0.0_pReal, guess)
F_aim_lastInc = F_aim
@ -329,10 +329,10 @@ subroutine grid_mech_FEM_forward(cutBack,guess,timeinc,timeinc_old,loadCaseTime,
call VecSet(solution_rate,0.0_pReal,ierr); CHKERRQ(ierr)
endif
call VecCopy(solution_current,solution_lastInc,ierr); CHKERRQ(ierr)
F_lastInc = F
materialpoint_F0 = reshape(F, [3,3,1,product(grid(1:2))*grid3])
endif
!--------------------------------------------------------------------------------------------------
@ -365,12 +365,12 @@ subroutine grid_mech_FEM_restartWrite
integer(HID_T) :: fileHandle, groupHandle
PetscScalar, dimension(:,:,:,:), pointer :: u_current,u_lastInc
character(len=pStringLen) :: fileName
call DMDAVecGetArrayF90(mech_grid,solution_current,u_current,ierr); CHKERRQ(ierr)
call DMDAVecGetArrayF90(mech_grid,solution_lastInc,u_lastInc,ierr); CHKERRQ(ierr)
write(6,'(a)') ' writing solver data required for restart to file'; flush(6)
write(fileName,'(a,a,i0,a)') trim(getSolverJobName()),'_',worldrank,'.hdf5'
fileHandle = HDF5_openFile(fileName,'w')
groupHandle = HDF5_addGroup(fileHandle,'solver')
@ -388,7 +388,7 @@ subroutine grid_mech_FEM_restartWrite
call HDF5_closeGroup(groupHandle)
call HDF5_closeFile(fileHandle)
call DMDAVecRestoreArrayF90(mech_grid,solution_current,u_current,ierr);CHKERRQ(ierr)
call DMDAVecRestoreArrayF90(mech_grid,solution_lastInc,u_lastInc,ierr);CHKERRQ(ierr)
@ -405,7 +405,7 @@ subroutine converged(snes_local,PETScIter,devNull1,devNull2,fnorm,reason,dummy,i
PetscReal, intent(in) :: &
devNull1, &
devNull2, &
fnorm
SNESConvergedReason :: reason
PetscObject :: dummy
PetscErrorCode :: ierr
@ -421,7 +421,7 @@ subroutine converged(snes_local,PETScIter,devNull1,devNull2,fnorm,reason,dummy,i
if ((totalIter >= itmin .and. &
all([ err_div/divTol, &
err_BC /BCTol ] < 1.0_pReal)) &
.or. terminallyIll) then
reason = 1
elseif (totalIter >= itmax) then
reason = -1
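The custom check reports convergence once both normalized errors fall below one after at least itmin iterations (or the state is terminally ill) and gives up at itmax. A minimal sketch of this logic, with hypothetical default iteration bounds:

    def converged(total_iter, err_div, err_bc, div_tol, bc_tol,
                  itmin=1, itmax=250, terminally_ill=False):
        """Mimic the SNES reason codes: 1 converged, -1 iteration limit, 0 keep going."""
        if terminally_ill or (total_iter >= itmin
                              and err_div / div_tol < 1.0
                              and err_bc / bc_tol < 1.0):
            return 1
        if total_iter >= itmax:
            return -1
        return 0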
@ -435,10 +435,10 @@ subroutine converged(snes_local,PETScIter,devNull1,devNull2,fnorm,reason,dummy,i
write(6,'(1/,a,f12.2,a,es8.2,a,es9.2,a)') ' error divergence = ', &
err_div/divTol, ' (',err_div,' / m, tol = ',divTol,')'
write(6,'(a,f12.2,a,es8.2,a,es9.2,a)') ' error stress BC = ', &
err_BC/BCTol, ' (',err_BC, ' Pa, tol = ',BCTol,')'
write(6,'(/,a)') ' ==========================================================================='
flush(6)
end subroutine converged
@ -475,7 +475,7 @@ subroutine formResidual(da_local,x_local, &
write(6,'(1x,a,3(a,i0))') trim(incInfo), ' @ Iteration ', itmin, '≤',totalIter+1, '≤', itmax
if (iand(debug_level(debug_spectral),debug_spectralRotation) /= 0) &
write(6,'(/,a,/,3(3(f12.7,1x)/))',advance='no') &
' deformation gradient aim (lab) =', transpose(params%rotation_BC%rotTensor2(F_aim,active=.true.))
' deformation gradient aim (lab) =', transpose(params%rotation_BC%rotate(F_aim,active=.true.))
write(6,'(/,a,/,3(3(f12.7,1x)/))',advance='no') &
' deformation gradient aim =', transpose(F_aim)
flush(6)
@ -491,7 +491,7 @@ subroutine formResidual(da_local,x_local, &
x_elem(ctr,1:3) = x_scal(0:2,i+ii,j+jj,k+kk)
enddo; enddo; enddo
ii = i-xstart+1; jj = j-ystart+1; kk = k-zstart+1
F(1:3,1:3,ii,jj,kk) = params%rotation_BC%rotTensor2(F_aim,active=.true.) + transpose(matmul(BMat,x_elem))
F(1:3,1:3,ii,jj,kk) = params%rotation_BC%rotate(F_aim,active=.true.) + transpose(matmul(BMat,x_elem))
enddo; enddo; enddo
call DMDAVecRestoreArrayF90(da_local,x_local,x_scal,ierr);CHKERRQ(ierr)
@ -501,7 +501,7 @@ subroutine formResidual(da_local,x_local, &
P_av,C_volAvg,devNull, &
F,params%timeinc,params%rotation_BC)
call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,PETSC_COMM_WORLD,ierr)
!--------------------------------------------------------------------------------------------------
! stress BC handling
F_aim_lastIter = F_aim
@ -535,24 +535,24 @@ subroutine formResidual(da_local,x_local, &
enddo; enddo; enddo
call DMDAVecRestoreArrayF90(da_local,x_local,x_scal,ierr);CHKERRQ(ierr)
call DMDAVecRestoreArrayF90(da_local,f_local,f_scal,ierr);CHKERRQ(ierr)
!--------------------------------------------------------------------------------------------------
! applying boundary conditions
call DMDAVecGetArrayF90(da_local,f_local,f_scal,ierr);CHKERRQ(ierr)
if (zstart == 0) then
f_scal(0:2,xstart,ystart,zstart) = 0.0
f_scal(0:2,xend+1,ystart,zstart) = 0.0
f_scal(0:2,xstart,yend+1,zstart) = 0.0
f_scal(0:2,xend+1,yend+1,zstart) = 0.0
endif
if (zend + 1 == grid(3)) then
f_scal(0:2,xstart,ystart,zend+1) = 0.0
f_scal(0:2,xend+1,ystart,zend+1) = 0.0
f_scal(0:2,xstart,yend+1,zend+1) = 0.0
f_scal(0:2,xend+1,yend+1,zend+1) = 0.0
endif
call DMDAVecRestoreArrayF90(da_local,f_local,f_scal,ierr);CHKERRQ(ierr)
end subroutine formResidual
@ -574,7 +574,7 @@ subroutine formJacobian(da_local,x_local,Jac_pre,Jac,dummy,ierr)
PetscObject :: dummy
MatNullSpace :: matnull
PetscErrorCode :: ierr
BMatFull = 0.0
BMatFull(1:3,1 :8 ) = BMat
BMatFull(4:6,9 :16) = BMat
@ -623,7 +623,7 @@ subroutine formJacobian(da_local,x_local,Jac_pre,Jac,dummy,ierr)
call MatAssemblyEnd(Jac,MAT_FINAL_ASSEMBLY,ierr); CHKERRQ(ierr)
call MatAssemblyBegin(Jac_pre,MAT_FINAL_ASSEMBLY,ierr); CHKERRQ(ierr)
call MatAssemblyEnd(Jac_pre,MAT_FINAL_ASSEMBLY,ierr); CHKERRQ(ierr)
!--------------------------------------------------------------------------------------------------
! applying boundary conditions
diag = (C_volAvg(1,1,1,1)/delta(1)**2.0_pReal + &

View File

@ -28,11 +28,11 @@ module grid_mech_spectral_basic
!--------------------------------------------------------------------------------------------------
! derived types
type(tSolutionParams), private :: params
type, private :: tNumerics
logical :: update_gamma !< update gamma operator with current stiffness
end type tNumerics
type(tNumerics) :: num ! numerics parameters. Better name?
!--------------------------------------------------------------------------------------------------
@ -55,21 +55,21 @@ module grid_mech_spectral_basic
F_aim_lastInc = math_I3, & !< previous average deformation gradient
P_av = 0.0_pReal !< average 1st Piola--Kirchhoff stress
character(len=pStringLen), private :: incInfo !< time and increment information
real(pReal), private, dimension(3,3,3,3) :: &
C_volAvg = 0.0_pReal, & !< current volume average stiffness
C_volAvgLastInc = 0.0_pReal, & !< previous volume average stiffness
C_minMaxAvg = 0.0_pReal, & !< current (min+max)/2 stiffness
C_minMaxAvgLastInc = 0.0_pReal, & !< previous (min+max)/2 stiffness
S = 0.0_pReal !< current compliance (filled up with zeros)
real(pReal), private :: &
err_BC, & !< deviation from stress BC
err_div !< RMS of div of P
integer, private :: &
totalIter = 0 !< total iteration in current increment
public :: &
grid_mech_spectral_basic_init, &
grid_mech_spectral_basic_solution, &
@ -83,27 +83,27 @@ contains
!> @brief allocates all necessary fields and fills them with data, potentially from restart info
!--------------------------------------------------------------------------------------------------
subroutine grid_mech_spectral_basic_init
real(pReal), dimension(3,3,grid(1),grid(2),grid3) :: P
real(pReal), dimension(3,3) :: &
temp33_Real = 0.0_pReal
PetscErrorCode :: ierr
PetscScalar, pointer, dimension(:,:,:,:) :: &
F ! pointer to solution data
PetscInt, dimension(worldsize) :: localK
integer(HID_T) :: fileHandle, groupHandle
integer :: fileUnit
character(len=pStringLen) :: fileName
write(6,'(/,a)') ' <<<+- grid_mech_spectral_basic init -+>>>'; flush(6)
write(6,'(/,a)') ' Eisenlohr et al., International Journal of Plasticity 46:37–53, 2013'
write(6,'(a)') ' https://doi.org/10.1016/j.ijplas.2012.09.012'
write(6,'(/,a)') ' Shanthraj et al., International Journal of Plasticity 66:31–45, 2015'
write(6,'(a)') ' https://doi.org/10.1016/j.ijplas.2014.02.006'
num%update_gamma = config_numerics%getInt('update_gamma',defaultVal=0) > 0
!--------------------------------------------------------------------------------------------------
@ -117,11 +117,11 @@ subroutine grid_mech_spectral_basic_init
! allocate global fields
allocate (F_lastInc(3,3,grid(1),grid(2),grid3),source = 0.0_pReal)
allocate (Fdot (3,3,grid(1),grid(2),grid3),source = 0.0_pReal)
!--------------------------------------------------------------------------------------------------
! initialize solver specific parts of PETSc
call SNESCreate(PETSC_COMM_WORLD,snes,ierr); CHKERRQ(ierr)
call SNESSetOptionsPrefix(snes,'mech_',ierr);CHKERRQ(ierr)
localK = 0
localK(worldrank+1) = grid3
call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,PETSC_COMM_WORLD,ierr)
@ -139,45 +139,45 @@ subroutine grid_mech_spectral_basic_init
call DMsetUp(da,ierr); CHKERRQ(ierr)
call DMcreateGlobalVector(da,solution_vec,ierr); CHKERRQ(ierr) ! global solution vector (grid x 9, i.e. every def grad tensor)
call DMDASNESsetFunctionLocal(da,INSERT_VALUES,formResidual,PETSC_NULL_SNES,ierr) ! residual vector of same shape as solution vector
CHKERRQ(ierr)
call SNESsetConvergenceTest(snes,converged,PETSC_NULL_SNES,PETSC_NULL_FUNCTION,ierr) ! specify custom convergence check function "converged"
CHKERRQ(ierr)
call SNESsetFromOptions(snes,ierr); CHKERRQ(ierr) ! pull it all together with additional CLI arguments
!--------------------------------------------------------------------------------------------------
! init fields
call DMDAVecGetArrayF90(da,solution_vec,F,ierr); CHKERRQ(ierr) ! places pointer on PETSc data
restartRead: if (interface_restartInc > 0) then
write(6,'(/,a,i0,a)') ' reading restart data of increment ', interface_restartInc, ' from file'
write(fileName,'(a,a,i0,a)') trim(getSolverJobName()),'_',worldrank,'.hdf5'
fileHandle = HDF5_openFile(fileName)
groupHandle = HDF5_openGroup(fileHandle,'solver')
call HDF5_read(groupHandle,F_aim, 'F_aim')
call HDF5_read(groupHandle,F_aim_lastInc,'F_aim_lastInc')
call HDF5_read(groupHandle,F_aimDot, 'F_aimDot')
call HDF5_read(groupHandle,F, 'F')
call HDF5_read(groupHandle,F_lastInc, 'F_lastInc')
elseif (interface_restartInc == 0) then restartRead
F_lastInc = spread(spread(spread(math_I3,3,grid(1)),4,grid(2)),5,grid3) ! initialize to identity
F = reshape(F_lastInc,[9,grid(1),grid(2),grid3])
endif restartRead
materialpoint_F0 = reshape(F_lastInc, [3,3,1,product(grid(1:2))*grid3]) ! set starting condition for materialpoint_stressAndItsTangent
call Utilities_updateCoords(reshape(F,shape(F_lastInc)))
call Utilities_constitutiveResponse(P,temp33_Real,C_volAvg,C_minMaxAvg, & ! stress field, stress avg, global average of stiffness and (min+max)/2
reshape(F,shape(F_lastInc)), & ! target F
0.0_pReal) ! time increment
call DMDAVecRestoreArrayF90(da,solution_vec,F,ierr); CHKERRQ(ierr) ! deassociate pointer
restartRead2: if (interface_restartInc > 0) then
write(6,'(/,a,i0,a)') ' reading more restart data of increment ', interface_restartInc, ' from file'
call HDF5_read(groupHandle,C_volAvg, 'C_volAvg')
call HDF5_read(groupHandle,C_volAvgLastInc,'C_volAvgLastInc')
call HDF5_closeGroup(groupHandle)
call HDF5_closeFile(fileHandle)
@ -197,7 +197,7 @@ end subroutine grid_mech_spectral_basic_init
!> @brief solution for the basic scheme with internal iterations
!--------------------------------------------------------------------------------------------------
function grid_mech_spectral_basic_solution(incInfoIn,timeinc,timeinc_old,stress_BC,rotation_BC) result(solution)
!--------------------------------------------------------------------------------------------------
! input data for solution
character(len=*), intent(in) :: &
@ -215,7 +215,7 @@ function grid_mech_spectral_basic_solution(incInfoIn,timeinc,timeinc_old,stress_
! PETSc Data
PetscErrorCode :: ierr
SNESConvergedReason :: reason
incInfo = incInfoIn
!--------------------------------------------------------------------------------------------------
@ -224,7 +224,7 @@ function grid_mech_spectral_basic_solution(incInfoIn,timeinc,timeinc_old,stress_
if(num%update_gamma) call utilities_updateGamma(C_minMaxAvg)
!--------------------------------------------------------------------------------------------------
! set module wide available data
params%stress_mask = stress_BC%maskFloat
params%stress_BC = stress_BC%values
params%rotation_BC = rotation_BC
@ -232,13 +232,13 @@ function grid_mech_spectral_basic_solution(incInfoIn,timeinc,timeinc_old,stress_
params%timeincOld = timeinc_old
!--------------------------------------------------------------------------------------------------
! solve BVP
call SNESsolve(snes,PETSC_NULL_VEC,solution_vec,ierr); CHKERRQ(ierr)
!--------------------------------------------------------------------------------------------------
! check convergence
call SNESGetConvergedReason(snes,reason,ierr); CHKERRQ(ierr)
solution%converged = reason > 0
solution%iterationsNeeded = totalIter
solution%termIll = terminallyIll
@ -271,14 +271,14 @@ subroutine grid_mech_spectral_basic_forward(cutBack,guess,timeinc,timeinc_old,lo
PetscScalar, dimension(:,:,:,:), pointer :: F
call DMDAVecGetArrayF90(da,solution_vec,F,ierr); CHKERRQ(ierr)
if (cutBack) then
C_volAvg = C_volAvgLastInc
C_minMaxAvg = C_minMaxAvgLastInc
else
C_volAvgLastInc = C_volAvg
C_minMaxAvgLastInc = C_minMaxAvg
F_aimDot = merge(stress_BC%maskFloat*(F_aim-F_aim_lastInc)/timeinc_old, 0.0_pReal, guess)
F_aim_lastInc = F_aim
@ -297,9 +297,9 @@ subroutine grid_mech_spectral_basic_forward(cutBack,guess,timeinc,timeinc_old,lo
Fdot = utilities_calculateRate(guess, &
F_lastInc,reshape(F,[3,3,grid(1),grid(2),grid3]),timeinc_old, &
rotation_BC%rotTensor2(F_aimDot,active=.true.))
rotation_BC%rotate(F_aimDot,active=.true.))
F_lastInc = reshape(F,[3,3,grid(1),grid(2),grid3])
materialpoint_F0 = reshape(F, [3,3,1,product(grid(1:2))*grid3])
endif
@ -307,9 +307,9 @@ subroutine grid_mech_spectral_basic_forward(cutBack,guess,timeinc,timeinc_old,lo
! update average and local deformation gradients
F_aim = F_aim_lastInc + F_aimDot * timeinc
F = reshape(Utilities_forwardField(timeinc,F_lastInc,Fdot, & ! estimate of F at end of time+timeinc that matches rotated F_aim on average
rotation_BC%rotTensor2(F_aim,active=.true.)),[9,grid(1),grid(2),grid3])
rotation_BC%rotate(F_aim,active=.true.)),[9,grid(1),grid(2),grid3])
call DMDAVecRestoreArrayF90(da,solution_vec,F,ierr); CHKERRQ(ierr)
end subroutine grid_mech_spectral_basic_forward
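The forward step extrapolates the deformation gradient field with the assumed rate and shifts it so that its volume average matches the rotated F_aim. A sketch of that estimate, with hypothetical array shapes and names:

    import numpy as np

    def forward_field(dt, F_last, Fdot, F_aim_rot):
        """F_last, Fdot: (nz, ny, nx, 3, 3) fields; F_aim_rot: (3, 3) target average."""
        F = F_last + Fdot * dt                     # explicit extrapolation
        F += F_aim_rot - F.mean(axis=(0, 1, 2))    # enforce the prescribed average
        return F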
@ -341,11 +341,11 @@ subroutine grid_mech_spectral_basic_restartWrite
call DMDAVecGetArrayF90(da,solution_vec,F,ierr); CHKERRQ(ierr)
write(6,'(a)') ' writing solver data required for restart to file'; flush(6)
write(fileName,'(a,a,i0,a)') trim(getSolverJobName()),'_',worldrank,'.hdf5'
fileHandle = HDF5_openFile(fileName,'w')
groupHandle = HDF5_addGroup(fileHandle,'solver')
call HDF5_write(groupHandle,F_aim, 'F_aim')
call HDF5_write(groupHandle,F_aim_lastInc,'F_aim_lastInc')
call HDF5_write(groupHandle,F_aimDot, 'F_aimDot')
@ -358,7 +358,7 @@ subroutine grid_mech_spectral_basic_restartWrite
call HDF5_closeGroup(groupHandle)
call HDF5_closeFile(fileHandle)
if (num%update_gamma) call utilities_saveReferenceStiffness
call DMDAVecRestoreArrayF90(da,solution_vec,F,ierr); CHKERRQ(ierr)
@ -376,7 +376,7 @@ subroutine converged(snes_local,PETScIter,devNull1,devNull2,devNull3,reason,dumm
PetscReal, intent(in) :: &
devNull1, &
devNull2, &
devNull3
SNESConvergedReason :: reason
PetscObject :: dummy
PetscErrorCode :: ierr
@ -390,7 +390,7 @@ subroutine converged(snes_local,PETScIter,devNull1,devNull2,devNull3,reason,dumm
if ((totalIter >= itmin .and. &
all([ err_div/divTol, &
err_BC /BCTol ] < 1.0_pReal)) &
.or. terminallyIll) then
reason = 1
elseif (totalIter >= itmax) then
reason = -1
@ -404,10 +404,10 @@ subroutine converged(snes_local,PETScIter,devNull1,devNull2,devNull3,reason,dumm
write(6,'(1/,a,f12.2,a,es8.2,a,es9.2,a)') ' error divergence = ', &
err_div/divTol, ' (',err_div,' / m, tol = ',divTol,')'
write(6,'(a,f12.2,a,es8.2,a,es9.2,a)') ' error stress BC = ', &
err_BC/BCTol, ' (',err_BC, ' Pa, tol = ',BCTol,')'
write(6,'(/,a)') ' ==========================================================================='
flush(6)
end subroutine converged
@ -441,7 +441,7 @@ subroutine formResidual(in, F, &
write(6,'(1x,a,3(a,i0))') trim(incInfo), ' @ Iteration ', itmin, '≤',totalIter, '≤', itmax
if (iand(debug_level(debug_spectral),debug_spectralRotation) /= 0) &
write(6,'(/,a,/,3(3(f12.7,1x)/))',advance='no') &
' deformation gradient aim (lab) =', transpose(params%rotation_BC%rotTensor2(F_aim,active=.true.))
' deformation gradient aim (lab) =', transpose(params%rotation_BC%rotate(F_aim,active=.true.))
write(6,'(/,a,/,3(3(f12.7,1x)/))',advance='no') &
' deformation gradient aim =', transpose(F_aim)
flush(6)
@ -453,7 +453,7 @@ subroutine formResidual(in, F, &
P_av,C_volAvg,C_minMaxAvg, &
F,params%timeinc,params%rotation_BC)
call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,PETSC_COMM_WORLD,ierr)
!--------------------------------------------------------------------------------------------------
! stress BC handling
deltaF_aim = math_mul3333xx33(S, P_av - params%stress_BC)
@ -466,9 +466,9 @@ subroutine formResidual(in, F, &
tensorField_real(1:3,1:3,1:grid(1),1:grid(2),1:grid3) = residuum ! store fPK field for subsequent FFT forward transform
call utilities_FFTtensorForward ! FFT forward of global "tensorField_real"
err_div = Utilities_divergenceRMS() ! divRMS of tensorField_fourier for later use
call utilities_fourierGammaConvolution(params%rotation_BC%rotTensor2(deltaF_aim,active=.true.)) ! convolution of Gamma and tensorField_fourier
call utilities_fourierGammaConvolution(params%rotation_BC%rotate(deltaF_aim,active=.true.)) ! convolution of Gamma and tensorField_fourier
call utilities_FFTtensorBackward ! FFT backward of global tensorField_fourier
!--------------------------------------------------------------------------------------------------
! constructing residual
residuum = tensorField_real(1:3,1:3,1:grid(1),1:grid(2),1:grid3) ! Gamma*P gives correction towards div(P) = 0, so needs to be zero, too
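The gamma convolution applied above is a pointwise double contraction of the gamma operator with the Fourier transform of the stress field. A sketch with a placeholder (random) operator, assuming NumPy's r2c transform conventions:

    import numpy as np

    nz, ny, nx = 8, 8, 8
    P = np.random.rand(nz, ny, nx, 3, 3)                       # first Piola--Kirchhoff stress field
    gamma_hat = np.random.rand(nz, ny, nx//2 + 1, 3, 3, 3, 3)  # placeholder operator
    P_hat = np.fft.rfftn(P, axes=(0, 1, 2))
    dF_hat = np.einsum('...lmno,...no->...lm', gamma_hat, P_hat)
    dF = np.fft.irfftn(dF_hat, s=(nz, ny, nx), axes=(0, 1, 2)) # correction towards div(P) = 0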

View File

@ -22,20 +22,20 @@ module grid_mech_spectral_polarisation
use homogenization
use mesh_grid
use debug
implicit none
private
!--------------------------------------------------------------------------------------------------
! derived types
type(tSolutionParams), private :: params
type, private :: tNumerics
logical :: update_gamma !< update gamma operator with current stiffness
end type tNumerics
type(tNumerics) :: num ! numerics parameters. Better name?
!--------------------------------------------------------------------------------------------------
! PETSc data
DM, private :: da
@ -46,7 +46,7 @@ module grid_mech_spectral_polarisation
! common pointwise data
real(pReal), private, dimension(:,:,:,:,:), allocatable :: &
F_lastInc, & !< field of previous compatible deformation gradients
F_tau_lastInc, & !< field of previous incompatible deformation gradient
Fdot, & !< field of assumed rate of compatible deformation gradient
F_tauDot !< field of assumed rate of incompatible deformation gradient
@ -58,25 +58,25 @@ module grid_mech_spectral_polarisation
F_aim_lastInc = math_I3, & !< previous average deformation gradient
F_av = 0.0_pReal, & !< average incompatible def grad field
P_av = 0.0_pReal !< average 1st Piola--Kirchhoff stress
character(len=pStringLen), private :: incInfo !< time and increment information
real(pReal), private, dimension(3,3,3,3) :: &
C_volAvg = 0.0_pReal, & !< current volume average stiffness
C_volAvgLastInc = 0.0_pReal, & !< previous volume average stiffness
C_minMaxAvg = 0.0_pReal, & !< current (min+max)/2 stiffness
C_minMaxAvgLastInc = 0.0_pReal, & !< previous (min+max)/2 stiffness
S = 0.0_pReal, & !< current compliance (filled up with zeros)
C_scale = 0.0_pReal, &
S_scale = 0.0_pReal
real(pReal), private :: &
err_BC, & !< deviation from stress BC
err_curl, & !< RMS of curl of F
err_div !< RMS of div of P
integer, private :: &
totalIter = 0 !< total iteration in current increment
public :: &
grid_mech_spectral_polarisation_init, &
grid_mech_spectral_polarisation_solution, &
@ -90,26 +90,26 @@ contains
!> @brief allocates all necessary fields and fills them with data, potentially from restart info
!--------------------------------------------------------------------------------------------------
subroutine grid_mech_spectral_polarisation_init
real(pReal), dimension(3,3,grid(1),grid(2),grid3) :: P
real(pReal), dimension(3,3) :: &
temp33_Real = 0.0_pReal
PetscErrorCode :: ierr
PetscScalar, pointer, dimension(:,:,:,:) :: &
FandF_tau, & ! overall pointer to solution data
F, & ! specific (sub)pointer
F_tau ! specific (sub)pointer
PetscInt, dimension(0:worldsize-1) :: localK
integer(HID_T) :: fileHandle, groupHandle
integer :: fileUnit
character(len=pStringLen) :: fileName
write(6,'(/,a)') ' <<<+- grid_mech_spectral_polarisation init -+>>>'; flush(6)
write(6,'(/,a)') ' Shanthraj et al., International Journal of Plasticity 66:31–45, 2015'
write(6,'(a)') ' https://doi.org/10.1016/j.ijplas.2014.02.006'
num%update_gamma = config_numerics%getInt('update_gamma',defaultVal=0) > 0
!--------------------------------------------------------------------------------------------------
@ -125,11 +125,11 @@ subroutine grid_mech_spectral_polarisation_init
allocate(Fdot (3,3,grid(1),grid(2),grid3),source = 0.0_pReal)
allocate(F_tau_lastInc(3,3,grid(1),grid(2),grid3),source = 0.0_pReal)
allocate(F_tauDot (3,3,grid(1),grid(2),grid3),source = 0.0_pReal)
!--------------------------------------------------------------------------------------------------
! initialize solver specific parts of PETSc
call SNESCreate(PETSC_COMM_WORLD,snes,ierr); CHKERRQ(ierr)
call SNESSetOptionsPrefix(snes,'mech_',ierr);CHKERRQ(ierr)
localK = 0
localK(worldrank) = grid3
call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,PETSC_COMM_WORLD,ierr)
@ -147,24 +147,24 @@ subroutine grid_mech_spectral_polarisation_init
call DMsetUp(da,ierr); CHKERRQ(ierr)
call DMcreateGlobalVector(da,solution_vec,ierr); CHKERRQ(ierr) ! global solution vector (grid x 18, i.e. every def grad tensor)
call DMDASNESsetFunctionLocal(da,INSERT_VALUES,formResidual,PETSC_NULL_SNES,ierr) ! residual vector of same shape as solution vector
CHKERRQ(ierr)
call SNESsetConvergenceTest(snes,converged,PETSC_NULL_SNES,PETSC_NULL_FUNCTION,ierr) ! specify custom convergence check function "converged"
CHKERRQ(ierr)
call SNESsetFromOptions(snes,ierr); CHKERRQ(ierr) ! pull it all together with additional CLI arguments
!--------------------------------------------------------------------------------------------------
! init fields
call DMDAVecGetArrayF90(da,solution_vec,FandF_tau,ierr); CHKERRQ(ierr) ! places pointer on PETSc data
F => FandF_tau(0: 8,:,:,:)
F_tau => FandF_tau(9:17,:,:,:)
restartRead: if (interface_restartInc > 0) then
write(6,'(/,a,i0,a)') ' reading restart data of increment ', interface_restartInc, ' from file'
write(fileName,'(a,a,i0,a)') trim(getSolverJobName()),'_',worldrank,'.hdf5'
fileHandle = HDF5_openFile(fileName)
groupHandle = HDF5_openGroup(fileHandle,'solver')
call HDF5_read(groupHandle,F_aim, 'F_aim')
call HDF5_read(groupHandle,F_aim_lastInc,'F_aim_lastInc')
call HDF5_read(groupHandle,F_aimDot, 'F_aimDot')
@ -172,21 +172,21 @@ subroutine grid_mech_spectral_polarisation_init
call HDF5_read(groupHandle,F_lastInc, 'F_lastInc')
call HDF5_read(groupHandle,F_tau, 'F_tau')
call HDF5_read(groupHandle,F_tau_lastInc,'F_tau_lastInc')
elseif (interface_restartInc == 0) then restartRead
F_lastInc = spread(spread(spread(math_I3,3,grid(1)),4,grid(2)),5,grid3) ! initialize to identity
F = reshape(F_lastInc,[9,grid(1),grid(2),grid3])
F_tau = 2.0_pReal*F
F_tau_lastInc = 2.0_pReal*F_lastInc
endif restartRead
materialpoint_F0 = reshape(F_lastInc, [3,3,1,product(grid(1:2))*grid3]) ! set starting condition for materialpoint_stressAndItsTangent
call Utilities_updateCoords(reshape(F,shape(F_lastInc)))
call Utilities_constitutiveResponse(P,temp33_Real,C_volAvg,C_minMaxAvg, & ! stress field, stress avg, global average of stiffness and (min+max)/2
reshape(F,shape(F_lastInc)), & ! target F
0.0_pReal) ! time increment
call DMDAVecRestoreArrayF90(da,solution_vec,FandF_tau,ierr); CHKERRQ(ierr) ! deassociate pointer
restartRead2: if (interface_restartInc > 0) then
write(6,'(/,a,i0,a)') ' reading more restart data of increment ', interface_restartInc, ' from file'
call HDF5_read(groupHandle,C_volAvg, 'C_volAvg')
@ -200,12 +200,12 @@ subroutine grid_mech_spectral_polarisation_init
call MPI_File_read(fileUnit,C_minMaxAvg,81,MPI_DOUBLE,MPI_STATUS_IGNORE,ierr)
call MPI_File_close(fileUnit,ierr)
endif restartRead2
call utilities_updateGamma(C_minMaxAvg)
call utilities_saveReferenceStiffness
C_scale = C_minMaxAvg
S_scale = math_invSym3333(C_minMaxAvg)
end subroutine grid_mech_spectral_polarisation_init
@ -229,9 +229,9 @@ function grid_mech_spectral_polarisation_solution(incInfoIn,timeinc,timeinc_old,
solution
!--------------------------------------------------------------------------------------------------
! PETSc Data
PetscErrorCode :: ierr
SNESConvergedReason :: reason
incInfo = incInfoIn
!--------------------------------------------------------------------------------------------------
@ -241,10 +241,10 @@ function grid_mech_spectral_polarisation_solution(incInfoIn,timeinc,timeinc_old,
call utilities_updateGamma(C_minMaxAvg)
C_scale = C_minMaxAvg
S_scale = math_invSym3333(C_minMaxAvg)
endif
!--------------------------------------------------------------------------------------------------
! set module wide available data
params%stress_mask = stress_BC%maskFloat
params%stress_BC = stress_BC%values
params%rotation_BC = rotation_BC
@ -252,13 +252,13 @@ function grid_mech_spectral_polarisation_solution(incInfoIn,timeinc,timeinc_old,
params%timeincOld = timeinc_old
!--------------------------------------------------------------------------------------------------
! solve BVP
call SNESsolve(snes,PETSC_NULL_VEC,solution_vec,ierr); CHKERRQ(ierr)
!--------------------------------------------------------------------------------------------------
! check convergence
call SNESGetConvergedReason(snes,reason,ierr); CHKERRQ(ierr)
solution%converged = reason > 0
solution%iterationsNeeded = totalIter
solution%termIll = terminallyIll
@ -299,7 +299,7 @@ subroutine grid_mech_spectral_polarisation_forward(cutBack,guess,timeinc,timeinc
if (cutBack) then
C_volAvg = C_volAvgLastInc
C_minMaxAvg = C_minMaxAvgLastInc
else
C_volAvgLastInc = C_volAvg
C_minMaxAvgLastInc = C_minMaxAvg
@ -321,13 +321,13 @@ subroutine grid_mech_spectral_polarisation_forward(cutBack,guess,timeinc,timeinc
Fdot = utilities_calculateRate(guess, &
F_lastInc,reshape(F,[3,3,grid(1),grid(2),grid3]),timeinc_old, &
rotation_BC%rotTensor2(F_aimDot,active=.true.))
rotation_BC%rotate(F_aimDot,active=.true.))
F_tauDot = utilities_calculateRate(guess, &
F_tau_lastInc,reshape(F_tau,[3,3,grid(1),grid(2),grid3]), timeinc_old, &
rotation_BC%rotTensor2(F_aimDot,active=.true.))
rotation_BC%rotate(F_aimDot,active=.true.))
F_lastInc = reshape(F, [3,3,grid(1),grid(2),grid3])
F_tau_lastInc = reshape(F_tau,[3,3,grid(1),grid(2),grid3])
materialpoint_F0 = reshape(F,[3,3,1,product(grid(1:2))*grid3])
endif
@ -335,7 +335,7 @@ subroutine grid_mech_spectral_polarisation_forward(cutBack,guess,timeinc,timeinc
! update average and local deformation gradients
F_aim = F_aim_lastInc + F_aimDot * timeinc
F = reshape(utilities_forwardField(timeinc,F_lastInc,Fdot, & ! estimate of F at end of time+timeinc that matches rotated F_aim on average
rotation_BC%rotTensor2(F_aim,active=.true.)),&
rotation_BC%rotate(F_aim,active=.true.)),&
[9,grid(1),grid(2),grid3])
if (guess) then
F_tau = reshape(Utilities_forwardField(timeinc,F_tau_lastInc,F_taudot), &
@ -351,7 +351,7 @@ subroutine grid_mech_spectral_polarisation_forward(cutBack,guess,timeinc,timeinc
F_tau(1:9,i,j,k) = reshape(F_lambda33,[9])+F(1:9,i,j,k)
enddo; enddo; enddo
endif
call DMDAVecRestoreArrayF90(da,solution_vec,FandF_tau,ierr); CHKERRQ(ierr)
end subroutine grid_mech_spectral_polarisation_forward
@ -364,7 +364,7 @@ subroutine grid_mech_spectral_polarisation_updateCoords
PetscErrorCode :: ierr
PetscScalar, dimension(:,:,:,:), pointer :: FandF_tau
call DMDAVecGetArrayF90(da,solution_vec,FandF_tau,ierr); CHKERRQ(ierr)
call utilities_updateCoords(FandF_tau(0:8,:,:,:))
call DMDAVecRestoreArrayF90(da,solution_vec,FandF_tau,ierr); CHKERRQ(ierr)
@ -391,7 +391,7 @@ subroutine grid_mech_spectral_polarisation_restartWrite
write(fileName,'(a,a,i0,a)') trim(getSolverJobName()),'_',worldrank,'.hdf5'
fileHandle = HDF5_openFile(fileName,'w')
groupHandle = HDF5_addGroup(fileHandle,'solver')
call HDF5_write(groupHandle,F_aim, 'F_aim')
call HDF5_write(groupHandle,F_aim_lastInc,'F_aim_lastInc')
call HDF5_write(groupHandle,F_aimDot, 'F_aimDot')
@ -405,9 +405,9 @@ subroutine grid_mech_spectral_polarisation_restartWrite
call HDF5_closeGroup(groupHandle)
call HDF5_closeFile(fileHandle)
if(num%update_gamma) call utilities_saveReferenceStiffness
call DMDAVecRestoreArrayF90(da,solution_vec,FandF_tau,ierr); CHKERRQ(ierr)
end subroutine grid_mech_spectral_polarisation_restartWrite
@ -417,13 +417,13 @@ end subroutine grid_mech_spectral_polarisation_restartWrite
!> @brief convergence check
!--------------------------------------------------------------------------------------------------
subroutine converged(snes_local,PETScIter,devNull1,devNull2,devNull3,reason,dummy,ierr)
SNES :: snes_local
PetscInt, intent(in) :: PETScIter
PetscReal, intent(in) :: &
devNull1, &
devNull2, &
devNull3
SNESConvergedReason :: reason
PetscObject :: dummy
PetscErrorCode :: ierr
@ -431,11 +431,11 @@ subroutine converged(snes_local,PETScIter,devNull1,devNull2,devNull3,reason,dumm
curlTol, &
divTol, &
BCTol
curlTol = max(maxval(abs(F_aim-math_I3))*err_curl_tolRel ,err_curl_tolAbs)
divTol = max(maxval(abs(P_av)) *err_div_tolRel ,err_div_tolAbs)
BCTol = max(maxval(abs(P_av)) *err_stress_tolRel,err_stress_tolAbs)
if ((totalIter >= itmin .and. &
all([ err_div /divTol, &
err_curl/curlTol, &
@ -456,9 +456,9 @@ subroutine converged(snes_local,PETScIter,devNull1,devNull2,devNull3,reason,dumm
write(6, '(a,f12.2,a,es8.2,a,es9.2,a)') ' error curl = ', &
err_curl/curlTol,' (',err_curl,' -, tol = ',curlTol,')'
write(6, '(a,f12.2,a,es8.2,a,es9.2,a)') ' error BC = ', &
err_BC/BCTol, ' (',err_BC, ' Pa, tol = ',BCTol,')'
write(6,'(/,a)') ' ==========================================================================='
flush(6)
end subroutine converged
@ -498,7 +498,7 @@ subroutine formResidual(in, FandF_tau, &
F_av = sum(sum(sum(F,dim=5),dim=4),dim=3) * wgt
call MPI_Allreduce(MPI_IN_PLACE,F_av,9,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
call SNESGetNumberFunctionEvals(snes,nfuncs,ierr); CHKERRQ(ierr)
call SNESGetIterationNumber(snes,PETScIter,ierr); CHKERRQ(ierr)
@ -510,14 +510,14 @@ subroutine formResidual(in, FandF_tau, &
write(6,'(1x,a,3(a,i0))') trim(incInfo), ' @ Iteration ', itmin, '≤',totalIter, '≤', itmax
if (iand(debug_level(debug_spectral),debug_spectralRotation) /= 0) &
write(6,'(/,a,/,3(3(f12.7,1x)/))',advance='no') &
' deformation gradient aim (lab) =', transpose(params%rotation_BC%rotTensor2(F_aim,active=.true.))
' deformation gradient aim (lab) =', transpose(params%rotation_BC%rotate(F_aim,active=.true.))
write(6,'(/,a,/,3(3(f12.7,1x)/))',advance='no') &
' deformation gradient aim =', transpose(F_aim)
flush(6)
endif newIteration
!--------------------------------------------------------------------------------------------------
!
tensorField_real = 0.0_pReal
do k = 1, grid3; do j = 1, grid(2); do i = 1, grid(1)
tensorField_real(1:3,1:3,i,j,k) = &
@ -525,15 +525,15 @@ subroutine formResidual(in, FandF_tau, &
polarAlpha*matmul(F(1:3,1:3,i,j,k), &
math_mul3333xx33(C_scale,F_tau(1:3,1:3,i,j,k) - F(1:3,1:3,i,j,k) - math_I3))
enddo; enddo; enddo
!--------------------------------------------------------------------------------------------------
! doing convolution in Fourier space
call utilities_FFTtensorForward
call utilities_fourierGammaConvolution(params%rotation_BC%rotTensor2(polarBeta*F_aim,active=.true.))
call utilities_fourierGammaConvolution(params%rotation_BC%rotate(polarBeta*F_aim,active=.true.))
call utilities_FFTtensorBackward
!--------------------------------------------------------------------------------------------------
! constructing residual
residual_F_tau = polarBeta*F - tensorField_real(1:3,1:3,1:grid(1),1:grid(2),1:grid3)
!--------------------------------------------------------------------------------------------------
@ -541,13 +541,13 @@ subroutine formResidual(in, FandF_tau, &
call utilities_constitutiveResponse(residual_F, & ! "residuum" gets field of first PK stress (to save memory)
P_av,C_volAvg,C_minMaxAvg, &
F - residual_F_tau/polarBeta,params%timeinc,params%rotation_BC)
call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,PETSC_COMM_WORLD,ierr)
!--------------------------------------------------------------------------------------------------
! stress BC handling
F_aim = F_aim - math_mul3333xx33(S, ((P_av - params%stress_BC))) ! S = 0.0 for no bc
err_BC = maxval(abs((1.0_pReal-params%stress_mask) * math_mul3333xx33(C_scale,F_aim &
-params%rotation_BC%rotTensor2(F_av)) + &
-params%rotation_BC%rotate(F_av)) + &
params%stress_mask * (P_av-params%stress_BC))) ! mask = 0.0 for no bc
! calculate divergence
tensorField_real = 0.0_pReal
@ -566,7 +566,7 @@ subroutine formResidual(in, FandF_tau, &
math_mul3333xx33(C_scale,F_tau(1:3,1:3,i,j,k) - F(1:3,1:3,i,j,k) - math_I3))) &
+ residual_F_tau(1:3,1:3,i,j,k)
enddo; enddo; enddo
!--------------------------------------------------------------------------------------------------
! calculating curl
tensorField_real = 0.0_pReal

View File

@ -18,12 +18,12 @@ module spectral_utilities
use config
use discretization
use homogenization
implicit none
private
include 'fftw3-mpi.f03'
!--------------------------------------------------------------------------------------------------
! field labels information
enum, bind(c)
@ -109,8 +109,8 @@ module spectral_utilities
real(pReal) :: timeinc
real(pReal) :: timeincOld
end type tSolutionParams
type, private :: tNumerics
real(pReal) :: &
FFTW_timelimit !< timelimit for FFTW plan creation, see www.fftw.org
integer :: &
@ -122,7 +122,7 @@ module spectral_utilities
FFTW_plan_mode, & !< FFTW plan mode, see www.fftw.org
PETSc_options
end type tNumerics
type(tNumerics) :: num ! numerics parameters. Better name?
enum, bind(c)
@ -189,18 +189,18 @@ subroutine utilities_init
scalarSize = 1_C_INTPTR_T, &
vecSize = 3_C_INTPTR_T, &
tensorSize = 9_C_INTPTR_T
write(6,'(/,a)') ' <<<+- spectral_utilities init -+>>>'
write(6,'(/,a)') ' Diehl, Diploma Thesis TU München, 2010'
write(6,'(a)') ' https://doi.org/10.13140/2.1.3234.3840'
write(6,'(/,a)') ' Eisenlohr et al., International Journal of Plasticity 46:37–53, 2013'
write(6,'(a)') ' https://doi.org/10.1016/j.ijplas.2012.09.012'
write(6,'(/,a)') ' Shanthraj et al., International Journal of Plasticity 66:31–45, 2015'
write(6,'(a)') ' https://doi.org/10.1016/j.ijplas.2014.02.006'
write(6,'(/,a)') ' Shanthraj et al., Handbook of Mechanics of Materials, 2019'
write(6,'(a)') ' https://doi.org/10.1007/978-981-10-6855-3_80'
@ -209,34 +209,34 @@ subroutine utilities_init
debugGeneral = iand(debug_level(debug_SPECTRAL),debug_LEVELBASIC) /= 0
debugRotation = iand(debug_level(debug_SPECTRAL),debug_SPECTRALROTATION) /= 0
debugPETSc = iand(debug_level(debug_SPECTRAL),debug_SPECTRALPETSC) /= 0
if(debugPETSc) write(6,'(3(/,a),/)') &
' Initializing PETSc with debug options: ', &
trim(PETScDebug), &
' add more using the PETSc_Options keyword in numerics.config '; flush(6)
call PETScOptionsClear(PETSC_NULL_OPTIONS,ierr)
CHKERRQ(ierr)
if(debugPETSc) call PETScOptionsInsertString(PETSC_NULL_OPTIONS,trim(PETSCDEBUG),ierr)
CHKERRQ(ierr)
call PETScOptionsInsertString(PETSC_NULL_OPTIONS,trim(petsc_options),ierr)
CHKERRQ(ierr)
grid1Red = grid(1)/2 + 1
wgt = 1.0/real(product(grid),pReal)
write(6,'(/,a,3(i12 ))') ' grid a b c: ', grid
write(6,'(a,3(es12.5))') ' size x y z: ', geomSize
num%memory_efficient = config_numerics%getInt ('memory_efficient', defaultVal=1) > 0
num%FFTW_timelimit = config_numerics%getFloat ('fftw_timelimit', defaultVal=-1.0_pReal)
num%divergence_correction = config_numerics%getInt ('divergence_correction', defaultVal=2)
num%spectral_derivative = config_numerics%getString('spectral_derivative', defaultVal='continuous')
num%FFTW_plan_mode = config_numerics%getString('fftw_plan_mode', defaultVal='FFTW_MEASURE')
if (num%divergence_correction < 0 .or. num%divergence_correction > 2) &
call IO_error(301,ext_msg='divergence_correction')
select case (num%spectral_derivative)
case ('continuous')
spectral_derivative_ID = DERIVATIVE_CONTINUOUS_ID
@ -265,8 +265,8 @@ subroutine utilities_init
else
scaledGeomSize = geomSize
endif
select case(IO_lc(num%FFTW_plan_mode)) ! setting parameters for the plan creation of FFTW. Basically a translation from fftw3.f
case('fftw_estimate') ! ordered from slow execution (but fast plan creation) to fast execution
FFTW_planner_flag = FFTW_ESTIMATE
@ -285,7 +285,7 @@ subroutine utilities_init
! general initialization of FFTW (see manual on fftw.org for more details)
if (pReal /= C_DOUBLE .or. kind(1) /= C_INT) call IO_error(0,ext_msg='Fortran to C') ! check for correct precision in C
call fftw_set_timelimit(num%FFTW_timelimit) ! set timelimit for plan creation
if (debugGeneral) write(6,'(/,a)') ' FFTW initialized'; flush(6)
!--------------------------------------------------------------------------------------------------
@ -295,19 +295,19 @@ subroutine utilities_init
PETSC_COMM_WORLD, local_K, local_K_offset)
allocate (xi1st (3,grid1Red,grid(2),grid3),source = cmplx(0.0_pReal,0.0_pReal,pReal)) ! frequencies for first derivatives, only half the size for first dimension
allocate (xi2nd (3,grid1Red,grid(2),grid3),source = cmplx(0.0_pReal,0.0_pReal,pReal)) ! frequencies for second derivatives, only half the size for first dimension
tensorField = fftw_alloc_complex(tensorSize*alloc_local)
call c_f_pointer(tensorField, tensorField_real, [3_C_INTPTR_T,3_C_INTPTR_T, &
2_C_INTPTR_T*(gridFFTW(1)/2_C_INTPTR_T + 1_C_INTPTR_T),gridFFTW(2),local_K]) ! place a pointer for a real tensor representation
call c_f_pointer(tensorField, tensorField_fourier, [3_C_INTPTR_T,3_C_INTPTR_T, &
gridFFTW(1)/2_C_INTPTR_T + 1_C_INTPTR_T , gridFFTW(2),local_K]) ! place a pointer for a fourier tensor representation
vectorField = fftw_alloc_complex(vecSize*alloc_local)
call c_f_pointer(vectorField, vectorField_real, [3_C_INTPTR_T,&
2_C_INTPTR_T*(gridFFTW(1)/2_C_INTPTR_T + 1_C_INTPTR_T),gridFFTW(2),local_K]) ! place a pointer for a real vector representation
call c_f_pointer(vectorField, vectorField_fourier,[3_C_INTPTR_T,&
gridFFTW(1)/2_C_INTPTR_T + 1_C_INTPTR_T, gridFFTW(2),local_K]) ! place a pointer for a fourier vector representation
scalarField = fftw_alloc_complex(scalarSize*alloc_local) ! allocate data for real representation (no in place transform)
call c_f_pointer(scalarField, scalarField_real, &
[2_C_INTPTR_T*(gridFFTW(1)/2_C_INTPTR_T + 1),gridFFTW(2),local_K]) ! place a pointer for a real scalar representation
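The half-size first dimension (grid1Red = grid(1)/2 + 1) comes from the Hermitian symmetry of real-to-complex FFTs: only the non-redundant half of the spectrum is stored. FFTW's Fortran interface halves the fastest (first) dimension, which in NumPy's C ordering corresponds to the last axis:

    import numpy as np

    nz, ny, nx = 8, 8, 8
    field = np.random.rand(nz, ny, nx)
    field_hat = np.fft.rfftn(field)                  # shape (nz, ny, nx//2 + 1)
    back = np.fft.irfftn(field_hat, s=(nz, ny, nx))  # round trip recovers the field
    assert np.allclose(field, back)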
@ -371,7 +371,7 @@ subroutine utilities_init
xi1st(1:3,i,j,k-grid3Offset) = xi2nd(1:3,i,j,k-grid3Offset)
endwhere
enddo; enddo; enddo
if(num%memory_efficient) then ! allocate just single fourth order tensor
allocate (gamma_hat(3,3,3,3,1,1,1), source = cmplx(0.0_pReal,0.0_pReal,pReal))
else ! precalculation of gamma_hat field
@ -388,7 +388,7 @@ end subroutine utilities_init
!> In case of an on-the-fly calculation, only the reference stiffness is updated.
!---------------------------------------------------------------------------------------------------
subroutine utilities_updateGamma(C)
real(pReal), intent(in), dimension(3,3,3,3) :: C !< input stiffness to store as reference stiffness
complex(pReal), dimension(3,3) :: temp33_complex, xiDyad_cmplx
real(pReal), dimension(6,6) :: A, A_inv
@ -396,9 +396,9 @@ subroutine utilities_updateGamma(C)
i, j, k, &
l, m, n, o
logical :: err
C_ref = C
if(.not. num%memory_efficient) then
gamma_hat = cmplx(0.0_pReal,0.0_pReal,pReal) ! for the singular point and any non invertible A
do k = grid3Offset+1, grid3Offset+grid3; do j = 1, grid(2); do i = 1, grid1Red
@ -419,7 +419,7 @@ subroutine utilities_updateGamma(C)
endif
enddo; enddo; enddo
endif
end subroutine utilities_updateGamma
@ -501,17 +501,17 @@ end subroutine utilities_FFTvectorBackward
!> @brief doing convolution gamma_hat * field_real, ensuring that average value = fieldAim
!--------------------------------------------------------------------------------------------------
subroutine utilities_fourierGammaConvolution(fieldAim)
real(pReal), intent(in), dimension(3,3) :: fieldAim !< desired average value of the field after convolution
complex(pReal), dimension(3,3) :: temp33_complex, xiDyad_cmplx
real(pReal), dimension(6,6) :: A, A_inv
integer :: &
i, j, k, &
l, m, n, o
logical :: err
write(6,'(/,a)') ' ... doing gamma convolution ...............................................'
flush(6)
@ -531,7 +531,7 @@ subroutine utilities_fourierGammaConvolution(fieldAim)
temp33_complex = cmplx(A_inv(1:3,1:3),A_inv(1:3,4:6),pReal)
forall(l=1:3, m=1:3, n=1:3, o=1:3) &
gamma_hat(l,m,n,o,1,1,1) = temp33_complex(l,n)*conjg(-xi1st(o,i,j,k))*xi1st(m,i,j,k)
else
gamma_hat(1:3,1:3,1:3,1:3,1,1,1) = cmplx(0.0_pReal,0.0_pReal,pReal)
endif
forall(l = 1:3, m = 1:3) &
@ -546,7 +546,7 @@ subroutine utilities_fourierGammaConvolution(fieldAim)
tensorField_fourier(1:3,1:3,i,j,k) = temp33_Complex
enddo; enddo; enddo
endif memoryEfficient
if (grid3Offset == 0) tensorField_fourier(1:3,1:3,1,1,1) = cmplx(fieldAim/wgt,0.0_pReal,pReal)
end subroutine utilities_fourierGammaConvolution
@ -561,7 +561,7 @@ subroutine utilities_fourierGreenConvolution(D_ref, mobility_ref, deltaT)
real(pReal), intent(in) :: mobility_ref, deltaT
complex(pReal) :: GreenOp_hat
integer :: i, j, k
!--------------------------------------------------------------------------------------------------
! do the actual spectral method calculation
do k = 1, grid3; do j = 1, grid(2) ;do i = 1, grid1Red
@ -625,16 +625,16 @@ real(pReal) function utilities_curlRMS()
integer :: i, j, k, l, ierr
complex(pReal), dimension(3,3) :: curl_fourier
complex(pReal), dimension(3) :: rescaledGeom
write(6,'(/,a)') ' ... calculating curl ......................................................'
flush(6)
rescaledGeom = cmplx(geomSize/scaledGeomSize,0.0_pReal)
!--------------------------------------------------------------------------------------------------
! calculating max curl criterion in Fourier space
utilities_curlRMS = 0.0_pReal
do k = 1, grid3; do j = 1, grid(2);
do i = 2, grid1Red - 1
do l = 1, 3
@ -669,7 +669,7 @@ real(pReal) function utilities_curlRMS()
utilities_curlRMS = utilities_curlRMS &
+ sum(real(curl_fourier)**2.0_pReal + aimag(curl_fourier)**2.0_pReal) ! this layer (Nyquist) does not have a conjugate complex counterpart (if grid(1) /= 1)
enddo; enddo
call MPI_Allreduce(MPI_IN_PLACE,utilities_curlRMS,1,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
if(ierr /=0) call IO_error(894, ext_msg='utilities_curlRMS')
utilities_curlRMS = sqrt(utilities_curlRMS) * wgt
@@ -688,9 +688,10 @@ function utilities_maskedCompliance(rot_BC,mask_stress,C)
type(rotation), intent(in) :: rot_BC !< rotation of load frame
logical, intent(in), dimension(3,3) :: mask_stress !< mask of stress BC
integer :: j, k, m, n
logical, dimension(9) :: mask_stressVector
real(pReal), dimension(9,9) :: temp99_Real
integer :: i, j
logical, dimension(9) :: mask_stressVector
logical, dimension(9,9) :: mask
real(pReal), dimension(9,9) :: temp99_real
integer :: size_reduced = 0
real(pReal), dimension(:,:), allocatable :: &
s_reduced, & !< reduced compliance matrix (depending on number of stress BC)
@@ -698,57 +699,33 @@ function utilities_maskedCompliance(rot_BC,mask_stress,C)
sTimesC !< temp variable to check inversion
logical :: errmatinv
character(len=pStringLen):: formatString
mask_stressVector = reshape(transpose(mask_stress), [9])
size_reduced = count(mask_stressVector)
if(size_reduced > 0 )then
allocate (c_reduced(size_reduced,size_reduced), source =0.0_pReal)
allocate (s_reduced(size_reduced,size_reduced), source =0.0_pReal)
allocate (sTimesC(size_reduced,size_reduced), source =0.0_pReal)
temp99_Real = math_3333to99(rot_BC%rotTensor4(C))
if(size_reduced > 0) then
temp99_real = math_3333to99(rot_BC%rotate(C))
if(debugGeneral) then
write(6,'(/,a)') ' ... updating masked compliance ............................................'
write(6,'(/,a,/,9(9(2x,f12.7,1x)/))',advance='no') ' Stiffness C (load) / GPa =',&
transpose(temp99_Real)*1.0e-9_pReal
flush(6)
endif
k = 0 ! calculate reduced stiffness
do n = 1,9
if(mask_stressVector(n)) then
k = k + 1
j = 0
do m = 1,9
if(mask_stressVector(m)) then
j = j + 1
c_reduced(k,j) = temp99_Real(n,m)
endif; enddo; endif; enddo
do i = 1,9; do j = 1,9
mask(i,j) = mask_stressVector(i) .and. mask_stressVector(j)
enddo; enddo
c_reduced = reshape(pack(temp99_Real,mask),[size_reduced,size_reduced])
allocate(s_reduced,mold = c_reduced)
call math_invert(s_reduced, errmatinv, c_reduced) ! invert reduced stiffness
if (any(IEEE_is_NaN(s_reduced))) errmatinv = .true.
if (errmatinv) call IO_error(error_ID=400,ext_msg='utilities_maskedCompliance')
temp99_Real = 0.0_pReal ! fill up compliance with zeros
k = 0
do n = 1,9
if(mask_stressVector(n)) then
k = k + 1
j = 0
do m = 1,9
if(mask_stressVector(m)) then
j = j + 1
temp99_Real(n,m) = s_reduced(k,j)
endif; enddo; endif; enddo
!--------------------------------------------------------------------------------------------------
! check if inversion was successful
sTimesC = matmul(c_reduced,s_reduced)
do m=1, size_reduced
do n=1, size_reduced
errmatinv = errmatinv &
.or. (m==n .and. abs(sTimesC(m,n)-1.0_pReal) > 1.0e-12_pReal) & ! diagonal elements of S*C should be 1
.or. (m/=n .and. abs(sTimesC(m,n)) > 1.0e-12_pReal) ! off-diagonal elements of S*C should be 0
enddo
enddo
errmatinv = errmatinv .or. any(dNeq(sTimesC,math_identity2nd(size_reduced),1.0e-12_pReal))
if (debugGeneral .or. errmatinv) then
write(formatString, '(i2)') size_reduced
formatString = '(/,a,/,'//trim(formatString)//'('//trim(formatString)//'(2x,es9.2,1x)/))'
@@ -757,15 +734,18 @@ function utilities_maskedCompliance(rot_BC,mask_stress,C)
write(6,trim(formatString),advance='no') ' S (load) ', transpose(s_reduced)
if(errmatinv) call IO_error(error_ID=400,ext_msg='utilities_maskedCompliance')
endif
temp99_real = reshape(unpack(reshape(s_reduced,[size_reduced**2]),reshape(mask,[81]),0.0_pReal),[9,9])
else
temp99_real = 0.0_pReal
endif
utilities_maskedCompliance = math_99to3333(temp99_Real)
if(debugGeneral) then
write(6,'(/,a,/,9(9(2x,f10.5,1x)/),/)',advance='no') &
' Masked Compliance (load) * GPa =', transpose(temp99_Real)*1.0e9_pReal
flush(6)
endif
utilities_maskedCompliance = math_99to3333(temp99_Real)
end function utilities_maskedCompliance
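! Illustration (hypothetical, not part of this commit): the pack/unpack
! rewrite above replaces manual index bookkeeping with an outer-product
! mask. A self-contained sketch of the idiom, with made-up sizes and
! controlled components:
program demo_pack_unpack
  implicit none
  logical :: maskVec(9), mask(9,9)
  real(8) :: full(9,9), back(9,9)
  real(8), allocatable :: reduced(:,:)
  integer :: i, j, n

  maskVec = .false.
  maskVec([1,5,9]) = .true.                         ! pretend the normal components are controlled
  do i = 1,9; do j = 1,9
    mask(i,j) = maskVec(i) .and. maskVec(j)         ! same outer-product mask as above
  enddo; enddo
  n = count(maskVec)

  call random_number(full)
  reduced = reshape(pack(full,mask),[n,n])          ! gather the reduced n x n block
  back = reshape(unpack(reshape(reduced,[n*n]), &   ! scatter it back,
                 reshape(mask,[size(mask)]),0.0d0),[9,9]) ! zeros everywhere else
  print *, 'roundtrip ok:', all(back == merge(full,0.0d0,mask))
end program demo_pack_unpack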
@@ -774,9 +754,9 @@ end function utilities_maskedCompliance
!> @brief calculate scalar gradient in fourier field
!--------------------------------------------------------------------------------------------------
subroutine utilities_fourierScalarGradient()
integer :: i, j, k
do k = 1, grid3; do j = 1, grid(2); do i = 1,grid1Red
vectorField_fourier(1:3,i,j,k) = scalarField_fourier(i,j,k)*xi1st(1:3,i,j,k) ! ToDo: no -conjg?
enddo; enddo; enddo
@@ -788,9 +768,9 @@ end subroutine utilities_fourierScalarGradient
!> @brief calculate vector divergence in fourier field
!--------------------------------------------------------------------------------------------------
subroutine utilities_fourierVectorDivergence()
integer :: i, j, k
do k = 1, grid3; do j = 1, grid(2); do i = 1,grid1Red
scalarField_fourier(i,j,k) = sum(vectorField_fourier(1:3,i,j,k)*conjg(-xi1st(1:3,i,j,k)))
enddo; enddo; enddo
@@ -802,9 +782,9 @@ end subroutine utilities_fourierVectorDivergence
!> @brief calculate vector gradient in fourier field
!--------------------------------------------------------------------------------------------------
subroutine utilities_fourierVectorGradient()
integer :: i, j, k, m, n
do k = 1, grid3; do j = 1, grid(2); do i = 1,grid1Red
do m = 1, 3; do n = 1, 3
tensorField_fourier(m,n,i,j,k) = vectorField_fourier(m,i,j,k)*xi1st(n,i,j,k)
@@ -820,7 +800,7 @@ end subroutine utilities_fourierVectorGradient
subroutine utilities_fourierTensorDivergence()
integer :: i, j, k
do k = 1, grid3; do j = 1, grid(2); do i = 1,grid1Red
vectorField_fourier(:,i,j,k) = matmul(tensorField_fourier(:,:,i,j,k),conjg(-xi1st(:,i,j,k)))
enddo; enddo; enddo
@@ -833,28 +813,28 @@ end subroutine utilities_fourierTensorDivergence
!--------------------------------------------------------------------------------------------------
subroutine utilities_constitutiveResponse(P,P_av,C_volAvg,C_minmaxAvg,&
F,timeinc,rotation_BC)
real(pReal), intent(out), dimension(3,3,3,3) :: C_volAvg, C_minmaxAvg !< average stiffness
real(pReal), intent(out), dimension(3,3) :: P_av !< average PK stress
real(pReal), intent(out), dimension(3,3,grid(1),grid(2),grid3) :: P !< PK stress
real(pReal), intent(in), dimension(3,3,grid(1),grid(2),grid3) :: F !< deformation gradient target
real(pReal), intent(in) :: timeinc !< loading time
type(rotation), intent(in), optional :: rotation_BC !< rotation of load frame
integer :: &
i,ierr
real(pReal), dimension(3,3,3,3) :: dPdF_max, dPdF_min
real(pReal) :: dPdF_norm_max, dPdF_norm_min
real(pReal), dimension(2) :: valueAndRank !< pair of min/max norm of dPdF to synchronize min/max of dPdF
write(6,'(/,a)') ' ... evaluating constitutive response ......................................'
flush(6)
materialpoint_F = reshape(F,[3,3,1,product(grid(1:2))*grid3]) ! set materialpoint target F to estimated field
call materialpoint_stressAndItsTangent(.true.,timeinc) ! calculate P field
P = reshape(materialpoint_P, [3,3,grid(1),grid(2),grid3])
P_av = sum(sum(sum(P,dim=5),dim=4),dim=3) * wgt ! average of P
call MPI_Allreduce(MPI_IN_PLACE,P_av,9,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
@@ -862,11 +842,11 @@ subroutine utilities_constitutiveResponse(P,P_av,C_volAvg,C_minmaxAvg,&
write(6,'(/,a,/,3(3(2x,f12.4,1x)/))',advance='no') ' Piola--Kirchhoff stress (lab) / MPa =',&
transpose(P_av)*1.e-6_pReal
if(present(rotation_BC)) &
P_av = rotation_BC%rotTensor2(P_av)
P_av = rotation_BC%rotate(P_av)
write(6,'(/,a,/,3(3(2x,f12.4,1x)/))',advance='no') ' Piola--Kirchhoff stress / MPa =',&
transpose(P_av)*1.e-6_pReal
flush(6)
dPdF_max = 0.0_pReal
dPdF_norm_max = 0.0_pReal
dPdF_min = huge(1.0_pReal)
@@ -881,21 +861,21 @@ subroutine utilities_constitutiveResponse(P,P_av,C_volAvg,C_minmaxAvg,&
dPdF_norm_min = sum(materialpoint_dPdF(1:3,1:3,1:3,1:3,1,i)**2.0_pReal)
endif
end do
valueAndRank = [dPdF_norm_max,real(worldrank,pReal)]
call MPI_Allreduce(MPI_IN_PLACE,valueAndRank,1, MPI_2DOUBLE_PRECISION, MPI_MAXLOC, PETSC_COMM_WORLD, ierr)
if (ierr /= 0) call IO_error(894, ext_msg='MPI_Allreduce max')
call MPI_Bcast(dPdF_max,81,MPI_DOUBLE,int(valueAndRank(2)),PETSC_COMM_WORLD, ierr)
if (ierr /= 0) call IO_error(894, ext_msg='MPI_Bcast max')
valueAndRank = [dPdF_norm_min,real(worldrank,pReal)]
call MPI_Allreduce(MPI_IN_PLACE,valueAndRank,1, MPI_2DOUBLE_PRECISION, MPI_MINLOC, PETSC_COMM_WORLD, ierr)
if (ierr /= 0) call IO_error(894, ext_msg='MPI_Allreduce min')
call MPI_Bcast(dPdF_min,81,MPI_DOUBLE,int(valueAndRank(2)),PETSC_COMM_WORLD, ierr)
if (ierr /= 0) call IO_error(894, ext_msg='MPI_Bcast min')
C_minmaxAvg = 0.5_pReal*(dPdF_max + dPdF_min)
C_volAvg = sum(sum(materialpoint_dPdF,dim=6),dim=5)
call MPI_Allreduce(MPI_IN_PLACE,C_volAvg,81,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
C_volAvg = C_volAvg * wgt
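! Illustration (hypothetical, not part of this commit): the MAXLOC/MINLOC
! reductions above pair each rank's norm with its rank ID, so the owner of
! the extremal tangent can broadcast it afterwards. The bare pattern,
! assuming an MPI library:
program demo_maxloc_bcast
  use mpi
  implicit none
  real(8) :: valueAndRank(2), payload(4)
  integer :: rank, ierr

  call MPI_Init(ierr)
  call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr)

  payload = real(rank,8)                          ! stand-in for this rank's dPdF
  valueAndRank = [sum(payload**2), real(rank,8)]  ! (score, owner) pair

  call MPI_Allreduce(MPI_IN_PLACE, valueAndRank, 1, MPI_2DOUBLE_PRECISION, &
                     MPI_MAXLOC, MPI_COMM_WORLD, ierr)       ! who has the best score?
  call MPI_Bcast(payload, size(payload), MPI_DOUBLE_PRECISION, &
                 int(valueAndRank(2)), MPI_COMM_WORLD, ierr) ! winner ships its payload
  if (rank == 0) print *, 'winning rank:', int(valueAndRank(2))
  call MPI_Finalize(ierr)
end program demo_maxloc_bcast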
@@ -908,7 +888,7 @@ end subroutine utilities_constitutiveResponse
!> @brief calculates forward rate, either by guessing or by just adding delta/timeinc
!--------------------------------------------------------------------------------------------------
pure function utilities_calculateRate(heterogeneous,field0,field,dt,avRate)
real(pReal), intent(in), dimension(3,3) :: &
avRate !< homogeneous addon
real(pReal), intent(in) :: &
@@ -920,7 +900,7 @@ pure function utilities_calculateRate(heterogeneous,field0,field,dt,avRate)
field !< data of current step
real(pReal), dimension(3,3,grid(1),grid(2),grid3) :: &
utilities_calculateRate
if (heterogeneous) then
utilities_calculateRate = (field-field0) / dt
else
@@ -971,14 +951,14 @@ pure function utilities_getFreqDerivative(k_s)
complex(pReal), dimension(3) :: utilities_getFreqDerivative
select case (spectral_derivative_ID)
case (DERIVATIVE_CONTINUOUS_ID)
utilities_getFreqDerivative = cmplx(0.0_pReal, 2.0_pReal*PI*real(k_s,pReal)/geomSize,pReal)
case (DERIVATIVE_CENTRAL_DIFF_ID)
utilities_getFreqDerivative = cmplx(0.0_pReal, sin(2.0_pReal*PI*real(k_s,pReal)/real(grid,pReal)), pReal)/ &
cmplx(2.0_pReal*geomSize/real(grid,pReal), 0.0_pReal, pReal)
case (DERIVATIVE_FWBW_DIFF_ID)
utilities_getFreqDerivative(1) = &
cmplx(cos(2.0_pReal*PI*real(k_s(1),pReal)/real(grid(1),pReal)) - 1.0_pReal, &
sin(2.0_pReal*PI*real(k_s(1),pReal)/real(grid(1),pReal)), pReal)* &
@@ -986,7 +966,7 @@ pure function utilities_getFreqDerivative(k_s)
sin(2.0_pReal*PI*real(k_s(2),pReal)/real(grid(2),pReal)), pReal)* &
cmplx(cos(2.0_pReal*PI*real(k_s(3),pReal)/real(grid(3),pReal)) + 1.0_pReal, &
sin(2.0_pReal*PI*real(k_s(3),pReal)/real(grid(3),pReal)), pReal)/ &
cmplx(4.0_pReal*geomSize(1)/real(grid(1),pReal), 0.0_pReal, pReal)
utilities_getFreqDerivative(2) = &
cmplx(cos(2.0_pReal*PI*real(k_s(1),pReal)/real(grid(1),pReal)) + 1.0_pReal, &
sin(2.0_pReal*PI*real(k_s(1),pReal)/real(grid(1),pReal)), pReal)* &
@@ -994,7 +974,7 @@ pure function utilities_getFreqDerivative(k_s)
sin(2.0_pReal*PI*real(k_s(2),pReal)/real(grid(2),pReal)), pReal)* &
cmplx(cos(2.0_pReal*PI*real(k_s(3),pReal)/real(grid(3),pReal)) + 1.0_pReal, &
sin(2.0_pReal*PI*real(k_s(3),pReal)/real(grid(3),pReal)), pReal)/ &
cmplx(4.0_pReal*geomSize(2)/real(grid(2),pReal), 0.0_pReal, pReal)
utilities_getFreqDerivative(3) = &
cmplx(cos(2.0_pReal*PI*real(k_s(1),pReal)/real(grid(1),pReal)) + 1.0_pReal, &
sin(2.0_pReal*PI*real(k_s(1),pReal)/real(grid(1),pReal)), pReal)* &
@@ -1002,7 +982,7 @@ pure function utilities_getFreqDerivative(k_s)
sin(2.0_pReal*PI*real(k_s(2),pReal)/real(grid(2),pReal)), pReal)* &
cmplx(cos(2.0_pReal*PI*real(k_s(3),pReal)/real(grid(3),pReal)) - 1.0_pReal, &
sin(2.0_pReal*PI*real(k_s(3),pReal)/real(grid(3),pReal)), pReal)/ &
cmplx(4.0_pReal*geomSize(3)/real(grid(3),pReal), 0.0_pReal, pReal)
end select
end function utilities_getFreqDerivative
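! Illustration (hypothetical, not part of this commit): for
! DERIVATIVE_CONTINUOUS_ID the returned factor is exactly 2*pi*i*k/L.
! A dependency-free 1-D check of that convention using a naive DFT:
program demo_continuous_derivative
  implicit none
  integer, parameter :: N = 32
  real(8), parameter :: PI = acos(-1.0d0), L = 2.0d0*acos(-1.0d0)
  complex(8) :: hat(0:N-1)
  real(8) :: f(0:N-1), dfdx, x
  integer :: i, k, kshift

  f = [(sin(2.0d0*PI*real(i,8)/real(N,8)), i=0,N-1)]             ! f(x) = sin(x) on [0,L)
  do k = 0, N-1                                                  ! forward DFT
    hat(k) = sum([(f(i)*exp(cmplx(0.0d0,-2.0d0*PI*k*i/real(N,8),8)), i=0,N-1)])
    kshift = merge(k, k-N, k <= N/2)                             ! wrap negative frequencies
    hat(k) = hat(k)*cmplx(0.0d0, 2.0d0*PI*real(kshift,8)/L, 8)   ! continuous derivative factor
  enddo
  do i = 0, N-1                                                  ! inverse DFT and check
    dfdx = real(sum([(hat(k)*exp(cmplx(0.0d0,2.0d0*PI*k*i/real(N,8),8)), k=0,N-1)]),8)/real(N,8)
    x = 2.0d0*PI*real(i,8)/real(N,8)
    if (abs(dfdx - cos(x)) > 1.0d-10) stop 'derivative mismatch'
  enddo
  print *, 'spectral derivative of sin(x) matches cos(x)'
end program demo_continuous_derivative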
@@ -1014,7 +994,7 @@ end function utilities_getFreqDerivative
! convolution
!--------------------------------------------------------------------------------------------------
subroutine utilities_updateCoords(F)
real(pReal), dimension(3,3,grid(1),grid(2),grid3), intent(in) :: F
real(pReal), dimension(3, grid(1),grid(2),grid3) :: IPcoords
real(pReal), dimension(3, grid(1),grid(2),grid3+2) :: IPfluct_padded ! Fluctuations of cell center displacement (padded along z for MPI)
@@ -1040,7 +1020,7 @@ subroutine utilities_updateCoords(F)
1, 0, 1, &
1, 1, 1, &
0, 1, 1 ], [3,8])
step = geomSize/real(grid, pReal)
!--------------------------------------------------------------------------------------------------
! integration in Fourier space to get fluctuations of cell center displacements
@@ -1057,27 +1037,27 @@ subroutine utilities_updateCoords(F)
enddo; enddo; enddo
call fftw_mpi_execute_dft_c2r(planVectorBack,vectorField_fourier,vectorField_real)
!--------------------------------------------------------------------------------------------------
! average F
if (grid3Offset == 0) Favg = real(tensorField_fourier(1:3,1:3,1,1,1),pReal)*wgt
call MPI_Bcast(Favg,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
if(ierr /=0) call IO_error(894, ext_msg='update_IPcoords/MPI_Bcast')
!--------------------------------------------------------------------------------------------------
! pad cell center fluctuations along z-direction (needed when running an MPI simulation)
IPfluct_padded(1:3,1:grid(1),1:grid(2),2:grid3+1) = vectorField_real(1:3,1:grid(1),1:grid(2),1:grid3)
c = product(shape(IPfluct_padded(:,:,:,1))) !< amount of data to transfer
rank_t = modulo(worldrank+1,worldsize)
rank_b = modulo(worldrank-1,worldsize)
! send bottom layer to process below
call MPI_Isend(IPfluct_padded(:,:,:,2), c,MPI_DOUBLE,rank_b,0,PETSC_COMM_WORLD,r,ierr)
if(ierr /=0) call IO_error(894, ext_msg='update_IPcoords/MPI_Isend')
call MPI_Irecv(IPfluct_padded(:,:,:,grid3+2),c,MPI_DOUBLE,rank_t,0,PETSC_COMM_WORLD,r,ierr)
if(ierr /=0) call IO_error(894, ext_msg='update_IPcoords/MPI_Irecv')
call MPI_Wait(r,s,ierr)
! send top layer to process above
if(ierr /=0) call IO_error(894, ext_msg='update_IPcoords/MPI_Wait')
call MPI_Isend(IPfluct_padded(:,:,:,grid3+1),c,MPI_DOUBLE,rank_t,0,PETSC_COMM_WORLD,r,ierr)
@@ -1085,9 +1065,9 @@ subroutine utilities_updateCoords(F)
call MPI_Irecv(IPfluct_padded(:,:,:,1), c,MPI_DOUBLE,rank_b,0,PETSC_COMM_WORLD,r,ierr)
if(ierr /=0) call IO_error(894, ext_msg='update_IPcoords/MPI_Irecv')
call MPI_Wait(r,s,ierr)
!--------------------------------------------------------------------------------------------------
! calculate nodal displacements
nodeCoords = 0.0_pReal
do k = 0,grid3; do j = 0,grid(2); do i = 0,grid(1)
nodeCoords(1:3,i+1,j+1,k+1) = matmul(Favg,step*(real([i,j,k+grid3Offset],pReal)))
@@ -1097,17 +1077,17 @@ subroutine utilities_updateCoords(F)
+ IPfluct_padded(1:3,modulo(me(1)-1,grid(1))+1,modulo(me(2)-1,grid(2))+1,me(3)+1)*0.125_pReal
enddo averageFluct
enddo; enddo; enddo
!--------------------------------------------------------------------------------------------------
! calculate cell center displacements
do k = 1,grid3; do j = 1,grid(2); do i = 1,grid(1)
IPcoords(1:3,i,j,k) = vectorField_real(1:3,i,j,k) &
+ matmul(Favg,step*real([i,j,k+grid3Offset]-0.5_pReal,pReal))
enddo; enddo; enddo
call discretization_setNodeCoords(reshape(NodeCoords,[3,(grid(1)+1)*(grid(2)+1)*(grid3+1)]))
call discretization_setIPcoords (reshape(IPcoords, [3,grid(1)*grid(2)*grid3]))
end subroutine utilities_updateCoords
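! Illustration (hypothetical, not part of this commit): the Isend/Irecv
! pairs above are a periodic halo exchange of one z-layer between
! neighbouring ranks. The bare pattern, assuming MPI and a scalar layer:
program demo_halo_exchange
  use mpi
  implicit none
  real(8) :: layer(4), halo_above(4), halo_below(4)
  integer :: rank, nprocs, rank_t, rank_b, ierr
  integer :: r(2), s(MPI_STATUS_SIZE,2)

  call MPI_Init(ierr)
  call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr)
  call MPI_Comm_size(MPI_COMM_WORLD, nprocs, ierr)

  layer = real(rank,8)                      ! this rank's boundary data
  rank_t = modulo(rank+1, nprocs)           ! periodic neighbour above
  rank_b = modulo(rank-1, nprocs)           ! periodic neighbour below

  call MPI_Isend(layer,      size(layer), MPI_DOUBLE_PRECISION, rank_b, 0, MPI_COMM_WORLD, r(1), ierr)
  call MPI_Irecv(halo_above, size(layer), MPI_DOUBLE_PRECISION, rank_t, 0, MPI_COMM_WORLD, r(2), ierr)
  call MPI_Waitall(2, r, s, ierr)           ! bottom layer sent down, top halo received

  call MPI_Isend(layer,      size(layer), MPI_DOUBLE_PRECISION, rank_t, 1, MPI_COMM_WORLD, r(1), ierr)
  call MPI_Irecv(halo_below, size(layer), MPI_DOUBLE_PRECISION, rank_b, 1, MPI_COMM_WORLD, r(2), ierr)
  call MPI_Waitall(2, r, s, ierr)           ! top layer sent up, bottom halo received

  print *, 'rank', rank, 'halo above:', halo_above(1), 'halo below:', halo_below(1)
  call MPI_Finalize(ierr)
end program demo_halo_exchange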
@@ -1115,7 +1095,7 @@ end subroutine utilities_updateCoords
!> @brief Write out the current reference stiffness for restart.
!---------------------------------------------------------------------------------------------------
subroutine utilities_saveReferenceStiffness
integer :: &
fileUnit
@@ -1125,7 +1105,7 @@ subroutine utilities_saveReferenceStiffness
write(fileUnit) C_ref
close(fileUnit)
endif
end subroutine utilities_saveReferenceStiffness
end module spectral_utilities
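! Illustration (hypothetical, not part of this commit): restart data is
! written as a raw unformatted record. A minimal roundtrip in that style,
! with a made-up file name and tensor contents:
program demo_stiffness_restart
  implicit none
  real(8) :: C_out(3,3,3,3), C_in(3,3,3,3)
  integer :: fileUnit

  call random_number(C_out)                 ! stand-in reference stiffness
  open(newunit=fileUnit, file='C_ref.demo', form='unformatted', access='stream')
  write(fileUnit) C_out
  close(fileUnit)

  open(newunit=fileUnit, file='C_ref.demo', form='unformatted', access='stream', action='read')
  read(fileUnit) C_in
  close(fileUnit)
  print *, 'bit-exact restart:', all(C_in == C_out)
end program demo_stiffness_restart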
@@ -146,9 +146,7 @@ subroutine homogenization_init
!--------------------------------------------------------------------------------------------------
! allocate and initialize global variables
allocate(materialpoint_dPdF(3,3,3,3,discretization_nIP,discretization_nElem), source=0.0_pReal)
allocate(materialpoint_F0(3,3,discretization_nIP,discretization_nElem), source=0.0_pReal)
materialpoint_F0 = spread(spread(math_I3,3,discretization_nIP),4,discretization_nElem) ! initialize to identity
allocate(materialpoint_F(3,3,discretization_nIP,discretization_nElem), source=0.0_pReal)
materialpoint_F = materialpoint_F0 ! initialize to identity
allocate(materialpoint_subF0(3,3,discretization_nIP,discretization_nElem), source=0.0_pReal)
allocate(materialpoint_subF(3,3,discretization_nIP,discretization_nElem), source=0.0_pReal)
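! Illustration (hypothetical, not part of this commit): the nested spread
! above stamps the 3x3 identity onto every (IP,element) pair in one
! statement. With made-up extents:
program demo_spread_identity
  implicit none
  integer, parameter :: nIP = 2, nElem = 5
  real(8) :: I3(3,3), F0(3,3,nIP,nElem)
  integer :: i

  I3 = 0.0d0
  do i = 1, 3
    I3(i,i) = 1.0d0                        ! 3x3 identity
  enddo
  F0 = spread(spread(I3,3,nIP),4,nElem)    ! replicate over IPs, then elements
  print *, 'identity everywhere:', all(F0(:,:,2,4) == I3)
end program demo_spread_identity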
@@ -333,12 +331,10 @@ subroutine materialpoint_stressAndItsTangent(updateJaco,dt)
!$OMP FLUSH(terminallyIll)
if (.not. terminallyIll) then ! so first signals terminally ill...
!$OMP CRITICAL (write2out)
write(6,*) 'Integration point ', i,' at element ', e, ' terminally ill'
!$OMP END CRITICAL (write2out)
endif
!$OMP CRITICAL (setTerminallyIll)
terminallyIll = .true. ! ...and kills all others
!$OMP END CRITICAL (setTerminallyIll)
else ! cutback makes sense
materialpoint_subStep(i,e) = subStepSizeHomog * materialpoint_subStep(i,e) ! crystallite had severe trouble, so do a significant cutback
File diff suppressed because it is too large
@@ -257,7 +257,7 @@ subroutine material_init
allocate(damage (material_Nhomogenization))
allocate(temperatureRate (material_Nhomogenization))
do m = 1,size(config_microstructure)
if(minval(microstructure_phase(1:microstructure_Nconstituents(m),m)) < 1 .or. &
maxval(microstructure_phase(1:microstructure_Nconstituents(m),m)) > size(config_phase)) &
@@ -268,6 +268,7 @@ subroutine material_init
if(microstructure_Nconstituents(m) < 1) &
call IO_error(151,m)
enddo
if(homogenization_maxNgrains > size(microstructure_phase,1)) call IO_error(148)
debugOut: if (iand(myDebug,debug_levelExtensive) /= 0) then
write(6,'(/,a,/)') ' MATERIAL configuration'
@@ -290,6 +291,7 @@ subroutine material_init
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! new mappings
allocate(material_phaseAt(homogenization_maxNgrains,discretization_nElem), source=0)
allocate(material_texture(homogenization_maxNgrains,discretization_nIP,discretization_nElem),source=0) !this is only needed by plasticity nonlocal
allocate(material_orientation0(homogenization_maxNgrains,discretization_nIP,discretization_nElem))
@@ -298,15 +300,24 @@ subroutine material_init
do i = 1, discretization_nIP
myMicro = discretization_microstructureAt(e)
do c = 1, homogenization_Ngrains(discretization_homogenizationAt(e))
material_phaseAt(c,e) = microstructure_phase(c,myMicro)
material_texture(c,i,e) = microstructure_texture(c,myMicro)
material_orientation0(c,i,e) = texture_orientation(material_texture(c,i,e))
if(microstructure_phase(c,myMicro) > 0) then
material_phaseAt(c,e) = microstructure_phase(c,myMicro)
else
call IO_error(150,ext_msg='phase')
endif
if(microstructure_texture(c,myMicro) > 0) then
material_texture(c,i,e) = microstructure_texture(c,myMicro)
material_orientation0(c,i,e) = texture_orientation(material_texture(c,i,e))
else
call IO_error(150,ext_msg='texture')
endif
enddo
enddo
enddo
deallocate(microstructure_phase)
deallocate(microstructure_texture)
deallocate(texture_orientation)
allocate(material_homogenizationAt,source=discretization_homogenizationAt)
@@ -464,7 +475,7 @@ subroutine material_parseMicrostructure
real(pReal), dimension(:,:), allocatable :: &
microstructure_fraction !< vol fraction of each constituent in microstructure
integer :: &
microstructure_maxNconstituents !< max number of constituents in any microstructure
maxNconstituents !< max number of constituents in any microstructure
allocate(microstructure_Nconstituents(size(config_microstructure)), source=0)
@@ -475,10 +486,10 @@ subroutine material_parseMicrostructure
microstructure_Nconstituents(m) = config_microstructure(m)%countKeys('(constituent)')
enddo
microstructure_maxNconstituents = maxval(microstructure_Nconstituents)
allocate(microstructure_phase (microstructure_maxNconstituents,size(config_microstructure)),source=0)
allocate(microstructure_texture (microstructure_maxNconstituents,size(config_microstructure)),source=0)
allocate(microstructure_fraction(microstructure_maxNconstituents,size(config_microstructure)),source=0.0_pReal)
maxNconstituents = maxval(microstructure_Nconstituents)
allocate(microstructure_phase (maxNconstituents,size(config_microstructure)),source=0)
allocate(microstructure_texture (maxNconstituents,size(config_microstructure)),source=0)
allocate(microstructure_fraction(maxNconstituents,size(config_microstructure)),source=0.0_pReal)
allocate(strings(1)) ! Intel 16.0 Bug
do m=1, size(config_microstructure)
@@ -485,15 +485,15 @@ end subroutine results_writeScalarDataset_rotation
!--------------------------------------------------------------------------------------------------
!> @brief adds the unique mapping from spatial position and constituent ID to results
!--------------------------------------------------------------------------------------------------
subroutine results_mapping_constituent(phaseAt,memberAt,label)
subroutine results_mapping_constituent(phaseAt,memberAtLocal,label)
integer, dimension(:,:), intent(in) :: phaseAt !< phase section at (constituent,element)
integer, dimension(:,:,:), intent(in) :: memberAt !< phase member at (constituent,IP,element)
character(len=*), dimension(:), intent(in) :: label !< label of each phase section
integer, dimension(:,:,:), intent(in) :: memberAtLocal !< phase member at (constituent,IP,element)
character(len=pStringLen), dimension(:), intent(in) :: label !< label of each phase section
integer, dimension(size(memberAt,1),size(memberAt,2),size(memberAt,3)) :: &
phaseAt_perIP, &
memberAt_total
integer, dimension(size(memberAtLocal,1),size(memberAtLocal,2),size(memberAtLocal,3)) :: &
phaseAtMaterialpoint, &
memberAtGlobal
integer, dimension(size(label),0:worldsize-1) :: memberOffset !< offset in member counting per process
integer, dimension(0:worldsize-1) :: writeSize !< amount of data written per process
integer(HSIZE_T), dimension(2) :: &
@@ -543,10 +543,10 @@ subroutine results_mapping_constituent(phaseAt,memberAt,label)
call h5pcreate_f(H5P_DATASET_XFER_F, plist_id, ierr)
memberOffset = 0
do i=1, size(label)
memberOffset(i,worldrank) = count(phaseAt == i)*size(memberAt,2) ! number of points/instance of this process
memberOffset(i,worldrank) = count(phaseAt == i)*size(memberAtLocal,2) ! number of points/instance of this process
enddo
writeSize = 0
writeSize(worldrank) = size(memberAt(1,:,:)) ! total number of points by this process
writeSize(worldrank) = size(memberAtLocal(1,:,:)) ! total number of points by this process
!--------------------------------------------------------------------------------------------------
! MPI settings and communication
@@ -578,14 +578,14 @@ subroutine results_mapping_constituent(phaseAt,memberAt,label)
!---------------------------------------------------------------------------------------------------
! expand phaseAt to consider IPs (is not stored per IP)
do i = 1, size(phaseAt_perIP,2)
phaseAt_perIP(:,i,:) = phaseAt
do i = 1, size(phaseAtMaterialpoint,2)
phaseAtMaterialpoint(:,i,:) = phaseAt
enddo
!---------------------------------------------------------------------------------------------------
! renumber member from my process to all processes
do i = 1, size(label)
where(phaseAt_perIP == i) memberAt_total = memberAt + sum(memberOffset(i,0:worldrank-1)) -1 ! convert to 0-based
where(phaseAtMaterialpoint == i) memberAtGlobal = memberAtLocal + sum(memberOffset(i,0:worldrank-1)) -1 ! convert to 0-based
enddo
!--------------------------------------------------------------------------------------------------
@@ -596,10 +596,10 @@ subroutine results_mapping_constituent(phaseAt,memberAt,label)
call h5dcreate_f(loc_id, 'constituent', dtype_id, filespace_id, dset_id, ierr)
if (ierr < 0) call IO_error(1,ext_msg='results_mapping_constituent: h5dcreate_f')
call h5dwrite_f(dset_id, name_id, reshape(label(pack(phaseAt_perIP,.true.)),myShape), &
call h5dwrite_f(dset_id, name_id, reshape(label(pack(phaseAtMaterialpoint,.true.)),myShape), &
myShape, ierr, file_space_id = filespace_id, mem_space_id = memspace_id, xfer_prp = plist_id)
if (ierr < 0) call IO_error(1,ext_msg='results_mapping_constituent: h5dwrite_f/name_id')
call h5dwrite_f(dset_id, position_id, reshape(pack(memberAt_total,.true.),myShape), &
call h5dwrite_f(dset_id, position_id, reshape(pack(memberAtGlobal,.true.),myShape), &
myShape, ierr, file_space_id = filespace_id, mem_space_id = memspace_id, xfer_prp = plist_id)
if (ierr < 0) call IO_error(1,ext_msg='results_mapping_constituent: h5dwrite_f/position_id')
@@ -620,15 +620,15 @@ end subroutine results_mapping_constituent
!--------------------------------------------------------------------------------------------------
!> @brief adds the unique mapping from spatial position and homogenization ID to results
!--------------------------------------------------------------------------------------------------
subroutine results_mapping_materialpoint(homogenizationAt,memberAt,label)
subroutine results_mapping_materialpoint(homogenizationAt,memberAtLocal,label)
integer, dimension(:), intent(in) :: homogenizationAt !< homogenization section at (element)
integer, dimension(:,:), intent(in) :: memberAt !< homogenization member at (IP,element)
character(len=*), dimension(:), intent(in) :: label !< label of each homogenization section
integer, dimension(:,:), intent(in) :: memberAtLocal !< homogenization member at (IP,element)
character(len=pStringLen), dimension(:), intent(in) :: label !< label of each homogenization section
integer, dimension(size(memberAt,1),size(memberAt,2)) :: &
homogenizationAt_perIP, &
memberAt_total
integer, dimension(size(memberAtLocal,1),size(memberAtLocal,2)) :: &
homogenizationAtMaterialpoint, &
memberAtGlobal
integer, dimension(size(label),0:worldsize-1) :: memberOffset !< offset in member counting per process
integer, dimension(0:worldsize-1) :: writeSize !< amount of data written per process
integer(HSIZE_T), dimension(1) :: &
@@ -678,10 +678,10 @@ subroutine results_mapping_materialpoint(homogenizationAt,memberAt,label)
call h5pcreate_f(H5P_DATASET_XFER_F, plist_id, ierr)
memberOffset = 0
do i=1, size(label)
memberOffset(i,worldrank) = count(homogenizationAt == i)*size(memberAt,1) ! number of points/instance of this process
memberOffset(i,worldrank) = count(homogenizationAt == i)*size(memberAtLocal,1) ! number of points/instance of this process
enddo
writeSize = 0
writeSize(worldrank) = size(memberAt) ! total number of points by this process
writeSize(worldrank) = size(memberAtLocal) ! total number of points by this process
!--------------------------------------------------------------------------------------------------
! MPI settings and communication
@@ -713,14 +713,14 @@ subroutine results_mapping_materialpoint(homogenizationAt,memberAt,label)
!---------------------------------------------------------------------------------------------------
! expand homogenizationAt to consider IPs (is not stored per IP)
do i = 1, size(homogenizationAt_perIP,1)
homogenizationAt_perIP(i,:) = homogenizationAt
do i = 1, size(homogenizationAtMaterialpoint,1)
homogenizationAtMaterialpoint(i,:) = homogenizationAt
enddo
!---------------------------------------------------------------------------------------------------
! renumber member from my process to all processes
do i = 1, size(label)
where(homogenizationAt_perIP == i) memberAt_total = memberAt + sum(memberOffset(i,0:worldrank-1)) - 1 ! convert to 0-based
where(homogenizationAtMaterialpoint == i) memberAtGlobal = memberAtLocal + sum(memberOffset(i,0:worldrank-1)) - 1 ! convert to 0-based
enddo
!--------------------------------------------------------------------------------------------------
@@ -731,10 +731,10 @@ subroutine results_mapping_materialpoint(homogenizationAt,memberAt,label)
call h5dcreate_f(loc_id, 'materialpoint', dtype_id, filespace_id, dset_id, ierr)
if (ierr < 0) call IO_error(1,ext_msg='results_mapping_materialpoint: h5dcreate_f')
call h5dwrite_f(dset_id, name_id, reshape(label(pack(homogenizationAt_perIP,.true.)),myShape), &
call h5dwrite_f(dset_id, name_id, reshape(label(pack(homogenizationAtMaterialpoint,.true.)),myShape), &
myShape, ierr, file_space_id = filespace_id, mem_space_id = memspace_id, xfer_prp = plist_id)
if (ierr < 0) call IO_error(1,ext_msg='results_mapping_materialpoint: h5dwrite_f/name_id')
call h5dwrite_f(dset_id, position_id, reshape(pack(memberAt_total,.true.),myShape), &
call h5dwrite_f(dset_id, position_id, reshape(pack(memberAtGlobal,.true.),myShape), &
myShape, ierr, file_space_id = filespace_id, mem_space_id = memspace_id, xfer_prp = plist_id)
if (ierr < 0) call IO_error(1,ext_msg='results_mapping_materialpoint: h5dwrite_f/position_id')
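! Illustration (hypothetical, not part of this commit): memberOffset turns
! per-process member numbering into a global, 0-based numbering by adding
! the counts contributed by all lower ranks. The arithmetic, serially:
program demo_global_renumber
  implicit none
  integer :: memberAtLocal(4), memberOffset(0:1), memberAtGlobal(4)
  integer :: worldrank

  memberOffset  = [4, 4]                   ! two fake ranks, 4 members each
  memberAtLocal = [1, 2, 3, 4]             ! 1-based local numbering
  do worldrank = 0, 1
    memberAtGlobal = memberAtLocal + sum(memberOffset(0:worldrank-1)) - 1  ! convert to 0-based
    print '(a,i2,a,4i3)', ' rank', worldrank, ' writes members', memberAtGlobal
  enddo
end program demo_global_renumber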