added spectral thermal and damage solvers

parent 240c865afc
commit 74982294a0
@@ -52,38 +52,40 @@ program DAMASK_spectral_Driver
 use numerics, only: &
 worldrank, &
 worldsize, &
+stagItMax, &
 maxCutBack, &
 spectral_solver, &
 continueCalculation
 use homogenization, only: &
 materialpoint_sizeResults, &
 materialpoint_results
+use material, only: &
+thermal_type, &
+damage_type, &
+THERMAL_conduction_ID, &
+DAMAGE_nonlocal_ID
 use DAMASK_spectral_Utilities, only: &
-tBoundaryCondition, &
+utilities_init, &
+utilities_destroy, &
 tSolutionState, &
-cutBack
+tLoadCase, &
+cutBack, &
+nActiveFields, &
+FIELD_UNDEFINED_ID, &
+FIELD_MECH_ID, &
+FIELD_THERMAL_ID, &
+FIELD_DAMAGE_ID
 use DAMASK_spectral_SolverBasicPETSC
 use DAMASK_spectral_SolverAL
 use DAMASK_spectral_SolverPolarisation
+use spectral_damage
+use spectral_thermal

 implicit none

 #include <petsc-finclude/petscsys.h>

-type tLoadCase
-real(pReal), dimension (3,3) :: rotation = math_I3 !< rotation of BC
-type(tBoundaryCondition) :: P, & !< stress BC
-deformation !< deformation BC (Fdot or L)
-real(pReal) :: time = 0.0_pReal, & !< length of increment
-temperature = 300.0_pReal, & !< isothermal starting conditions
-density = 0.0_pReal !< density
-integer(pInt) :: incs = 0_pInt, & !< number of increments
-outputfrequency = 1_pInt, & !< frequency of result writes
-restartfrequency = 0_pInt, & !< frequency of restart writes
-logscale = 0_pInt !< linear/logarithmic time inc flag
-logical :: followFormerTrajectory = .true. !< follow trajectory of former loadcase
-end type tLoadCase

 !--------------------------------------------------------------------------------------------------
 ! variables related to information from load case and geom file
 real(pReal), dimension(9) :: temp_valueVector = 0.0_pReal !< temporarily from loadcase file when reading in tensors (initialize to 0.0)

@@ -117,7 +119,7 @@ program DAMASK_spectral_Driver
 logical :: &
 guess !< guess along former trajectory
 integer(pInt) :: &
-i, j, k, l, &
+i, j, k, l, field, &
 errorID, &
 cutBackLevel = 0_pInt, & !< cut back level \f$ t = \frac{t_{inc}}{2^l} \f$
 stepFraction = 0_pInt !< fraction of current time interval

@@ -133,9 +135,11 @@ program DAMASK_spectral_Driver
 character(len=6) :: loadcase_string
 character(len=1024) :: incInfo !< string parsed to solution with information about current load case
 type(tLoadCase), allocatable, dimension(:) :: loadCases !< array of all load cases
-type(tSolutionState) solres
+type(tSolutionState), allocatable, dimension(:) :: solres
 integer(kind=MPI_OFFSET_KIND) :: my_offset
 integer, dimension(:), allocatable :: outputSize
+integer(pInt) :: stagIter
+logical :: stagIterate
 PetscErrorCode :: ierr
 external :: quit
 !--------------------------------------------------------------------------------------------------

@@ -148,6 +152,13 @@ program DAMASK_spectral_Driver
 #include "compilation_info.f90"
 endif mainProcess

+!--------------------------------------------------------------------------------------------------
+! initialize field solver information
+nActiveFields = 1
+if (any(thermal_type == THERMAL_conduction_ID )) nActiveFields = nActiveFields + 1
+if (any(damage_type == DAMAGE_nonlocal_ID )) nActiveFields = nActiveFields + 1
+allocate(solres(nActiveFields))
+
 !--------------------------------------------------------------------------------------------------
 ! reading basic information from load case file and allocate data structure containing load cases
 call IO_open_file(FILEUNIT,trim(loadCaseFile))
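The new block above decides, once per run, how many coupled fields the driver will handle and sizes the per-field solution-state array accordingly. A minimal, self-contained sketch of that counting-and-allocation pattern (mock type and plain logical flags stand in for the DAMASK material arrays and tSolutionState):

    program count_fields_sketch
      implicit none
      type tState                                              ! stand-in for tSolutionState
        logical :: converged = .false., stagConverged = .false.
      end type tState
      logical :: thermalActive = .true.                        ! would come from the material configuration
      logical :: damageActive  = .false.
      integer :: nActiveFields
      type(tState), allocatable :: solres(:)

      nActiveFields = 1                                        ! mechanics is always solved
      if (thermalActive) nActiveFields = nActiveFields + 1
      if (damageActive)  nActiveFields = nActiveFields + 1
      allocate(solres(nActiveFields))                          ! one solution state per active field
      print '(a,i0)', 'active fields: ', nActiveFields
    end program count_fields_sketch

With only mechanics active the array has size one, and the staggered loop further down effectively reduces to the previous single-field behaviour.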
@@ -174,6 +185,20 @@ program DAMASK_spectral_Driver
 allocate (loadCases(N_n)) ! array of load cases
 loadCases%P%myType='p'

+do i = 1, size(loadCases)
+allocate(loadCases(i)%ID(nActiveFields))
+field = 1
+loadCases(i)%ID(field) = FIELD_MECH_ID ! mechanical active by default
+if (any(thermal_type == THERMAL_conduction_ID)) then ! thermal field active
+field = field + 1
+loadCases(i)%ID(field) = FIELD_THERMAL_ID
+endif
+if (any(damage_type == DAMAGE_nonlocal_ID)) then ! damage field active
+field = field + 1
+loadCases(i)%ID(field) = FIELD_DAMAGE_ID
+endif
+enddo
+
 !--------------------------------------------------------------------------------------------------
 ! reading the load case and assign values to the allocated data structure
 rewind(FILEUNIT)
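Each load case now carries its own list of field identifiers, filled by the loop above. A small standalone sketch of querying such a list (plain integer parameters stand in for the DAMASK field enumerators; the query itself is illustrative, not part of the commit):

    program loadcase_fields_sketch
      implicit none
      integer, parameter :: FIELD_MECH = 1, FIELD_THERMAL = 2, FIELD_DAMAGE = 3
      integer, allocatable :: ID(:)

      ID = [FIELD_MECH, FIELD_THERMAL]                         ! as assembled by the loop above
      print '(a,l1)', 'thermal field active: ', any(ID == FIELD_THERMAL)
      print '(a,l1)', 'damage  field active: ', any(ID == FIELD_DAMAGE)
    end program loadcase_fields_sketch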
@@ -222,8 +247,6 @@ program DAMASK_spectral_Driver
 loadCases(currentLoadCase)%time = IO_floatValue(line,positions,i+1_pInt)
 case('temp','temperature') ! starting temperature
 loadCases(currentLoadCase)%temperature = IO_floatValue(line,positions,i+1_pInt)
-case('den','density') ! starting density
-loadCases(currentLoadCase)%density = IO_floatValue(line,positions,i+1_pInt)
 case('n','incs','increments','steps') ! number of increments
 loadCases(currentLoadCase)%incs = IO_intValue(line,positions,i+1_pInt)
 case('logincs','logincrements','logsteps') ! number of increments (switch to log time scaling)

@@ -307,7 +330,6 @@ program DAMASK_spectral_Driver
 write(6,'(2x,a,/,3(3(3x,f12.7,1x)/))',advance='no') 'rotation of loadframe:',&
 math_transpose33(loadCases(currentLoadCase)%rotation)
 write(6,'(2x,a,f12.6)') 'temperature:', loadCases(currentLoadCase)%temperature
-write(6,'(2x,a,f12.6)') 'density: ', loadCases(currentLoadCase)%density
 if (loadCases(currentLoadCase)%time < 0.0_pReal) errorID = 834_pInt ! negative time increment
 write(6,'(2x,a,f12.6)') 'time: ', loadCases(currentLoadCase)%time
 if (loadCases(currentLoadCase)%incs < 1_pInt) errorID = 835_pInt ! non-positive incs count
@@ -323,20 +345,36 @@ program DAMASK_spectral_Driver

 !--------------------------------------------------------------------------------------------------
 ! doing initialization depending on selected solver
-select case (spectral_solver)
-case (DAMASK_spectral_SolverBasicPETSc_label)
-call basicPETSc_init(loadCases(1)%temperature)
-case (DAMASK_spectral_SolverAL_label)
-if(iand(debug_level(debug_spectral),debug_levelBasic)/= 0 .and. worldrank == 0_pInt) &
-call IO_warning(42_pInt, ext_msg='debug Divergence')
-call AL_init(loadCases(1)%temperature)
-case (DAMASK_spectral_SolverPolarisation_label)
+call Utilities_init()
+do field = 1, nActiveFields
+select case (loadCases(1)%ID(field))
+case(FIELD_MECH_ID)
+select case (spectral_solver)
+case (DAMASK_spectral_SolverBasicPETSc_label)
+call basicPETSc_init(loadCases(1)%temperature)
+case (DAMASK_spectral_SolverAL_label)
 if(iand(debug_level(debug_spectral),debug_levelBasic)/= 0 .and. worldrank == 0_pInt) &
 call IO_warning(42_pInt, ext_msg='debug Divergence')
-call Polarisation_init(loadCases(1)%temperature)
-case default
-call IO_error(error_ID = 891, ext_msg = trim(spectral_solver))
-end select
+call AL_init(loadCases(1)%temperature)
+case (DAMASK_spectral_SolverPolarisation_label)
+if(iand(debug_level(debug_spectral),debug_levelBasic)/= 0 .and. worldrank == 0_pInt) &
+call IO_warning(42_pInt, ext_msg='debug Divergence')
+call Polarisation_init(loadCases(1)%temperature)
+
+case default
+call IO_error(error_ID = 891, ext_msg = trim(spectral_solver))
+
+end select
+
+case(FIELD_THERMAL_ID)
+call spectral_thermal_init(loadCases(1)%temperature)
+
+case(FIELD_DAMAGE_ID)
+call spectral_damage_init()
+end select
+enddo

 !--------------------------------------------------------------------------------------------------
 ! write header of output file
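Initialization is now split into one call that sets up the shared spectral machinery and a per-field dispatch that starts the matching solver. The skeleton below mirrors that ordering with mock routines (a sketch of the control flow only, not the DAMASK modules):

    program init_dispatch_sketch
      implicit none
      integer, parameter :: FIELD_MECH = 1, FIELD_THERMAL = 2, FIELD_DAMAGE = 3
      integer :: field
      integer :: activeFieldIDs(3) = [FIELD_MECH, FIELD_THERMAL, FIELD_DAMAGE]

      call utilities_init_mock()                               ! shared FFT/grid setup, exactly once
      do field = 1, size(activeFieldIDs)                       ! then every active field gets its solver
        select case (activeFieldIDs(field))
          case (FIELD_MECH);    print *, 'init mechanical solver (Basic/AL/Polarisation)'
          case (FIELD_THERMAL); print *, 'init spectral thermal solver'
          case (FIELD_DAMAGE);  print *, 'init spectral damage solver'
        end select
      enddo
    contains
      subroutine utilities_init_mock()
        print *, 'init shared spectral utilities'
      end subroutine utilities_init_mock
    end program init_dispatch_sketch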
@@ -442,30 +480,6 @@ program DAMASK_spectral_Driver
 time = time + timeinc ! forward time
 stepFraction = stepFraction + 1_pInt
 remainingLoadCaseTime = time0 - time + loadCases(currentLoadCase)%time + timeInc
-!--------------------------------------------------------------------------------------------------
-! forward solution
-select case(spectral_solver)
-case (DAMASK_spectral_SolverBasicPETSC_label)
-call BasicPETSC_forward (&
-guess,timeinc,timeIncOld,remainingLoadCaseTime, &
-P_BC = loadCases(currentLoadCase)%P, &
-F_BC = loadCases(currentLoadCase)%deformation, &
-rotation_BC = loadCases(currentLoadCase)%rotation)
-
-case (DAMASK_spectral_SolverAL_label)
-call AL_forward (&
-guess,timeinc,timeIncOld,remainingLoadCaseTime, &
-P_BC = loadCases(currentLoadCase)%P, &
-F_BC = loadCases(currentLoadCase)%deformation, &
-rotation_BC = loadCases(currentLoadCase)%rotation)
-
-case (DAMASK_spectral_SolverPolarisation_label)
-call Polarisation_forward (&
-guess,timeinc,timeIncOld,remainingLoadCaseTime, &
-P_BC = loadCases(currentLoadCase)%P, &
-F_BC = loadCases(currentLoadCase)%deformation, &
-rotation_BC = loadCases(currentLoadCase)%rotation)
-end select
-
 !--------------------------------------------------------------------------------------------------
 ! report begin of new increment
@@ -487,47 +501,105 @@ program DAMASK_spectral_Driver
 endif

 !--------------------------------------------------------------------------------------------------
-! calculate solution
-select case(spectral_solver)
-case (DAMASK_spectral_SolverBasicPETSC_label)
-solres = BasicPETSC_solution (&
-incInfo,guess,timeinc,timeIncOld,remainingLoadCaseTime, &
-P_BC = loadCases(currentLoadCase)%P, &
-F_BC = loadCases(currentLoadCase)%deformation, &
-temperature_bc = loadCases(currentLoadCase)%temperature, &
-rotation_BC = loadCases(currentLoadCase)%rotation, &
-density = loadCases(currentLoadCase)%density)
-case (DAMASK_spectral_SolverAL_label)
-solres = AL_solution (&
-incInfo,guess,timeinc,timeIncOld,remainingLoadCaseTime, &
-P_BC = loadCases(currentLoadCase)%P, &
+! forward fields
+do field = 1, nActiveFields
+select case(loadCases(currentLoadCase)%ID(field))
+case(FIELD_MECH_ID)
+select case (spectral_solver)
+case (DAMASK_spectral_SolverBasicPETSc_label)
+call BasicPETSc_forward (&
+guess,timeinc,timeIncOld,remainingLoadCaseTime, &
+F_BC = loadCases(currentLoadCase)%deformation, &
+P_BC = loadCases(currentLoadCase)%P, &
+rotation_BC = loadCases(currentLoadCase)%rotation)
+case (DAMASK_spectral_SolverAL_label)
+call AL_forward (&
+guess,timeinc,timeIncOld,remainingLoadCaseTime, &
 F_BC = loadCases(currentLoadCase)%deformation, &
-temperature_bc = loadCases(currentLoadCase)%temperature, &
-rotation_BC = loadCases(currentLoadCase)%rotation, &
-density = loadCases(currentLoadCase)%density)
-case (DAMASK_spectral_SolverPolarisation_label)
-solres = Polarisation_solution (&
-incInfo,guess,timeinc,timeIncOld,remainingLoadCaseTime, &
+P_BC = loadCases(currentLoadCase)%P, &
+rotation_BC = loadCases(currentLoadCase)%rotation)
+case (DAMASK_spectral_SolverPolarisation_label)
+call Polarisation_forward (&
+guess,timeinc,timeIncOld,remainingLoadCaseTime, &
+F_BC = loadCases(currentLoadCase)%deformation, &
 P_BC = loadCases(currentLoadCase)%P, &
-F_BC = loadCases(currentLoadCase)%deformation, &
-temperature_bc = loadCases(currentLoadCase)%temperature, &
-rotation_BC = loadCases(currentLoadCase)%rotation, &
-density = loadCases(currentLoadCase)%density)
-end select
+rotation_BC = loadCases(currentLoadCase)%rotation)
+end select
+
+case(FIELD_THERMAL_ID)
+call spectral_thermal_forward (&
+guess,timeinc,timeIncOld,remainingLoadCaseTime)
+
+case(FIELD_DAMAGE_ID)
+call spectral_damage_forward (&
+guess,timeinc,timeIncOld,remainingLoadCaseTime)
+end select
+enddo
+
+!--------------------------------------------------------------------------------------------------
+! solve fields
+stagIter = 0_pInt
+stagIterate = .true.
+do while (stagIterate)
+do field = 1, nActiveFields
+select case(loadCases(currentLoadCase)%ID(field))
+case(FIELD_MECH_ID)
+select case (spectral_solver)
+case (DAMASK_spectral_SolverBasicPETSc_label)
+solres(field) = BasicPETSC_solution (&
+incInfo,guess,timeinc,timeIncOld,remainingLoadCaseTime, &
+P_BC = loadCases(currentLoadCase)%P, &
+F_BC = loadCases(currentLoadCase)%deformation, &
+temperature_bc = loadCases(currentLoadCase)%temperature, &
+rotation_BC = loadCases(currentLoadCase)%rotation)
+
+case (DAMASK_spectral_SolverAL_label)
+solres(field) = AL_solution (&
+incInfo,guess,timeinc,timeIncOld,remainingLoadCaseTime, &
+P_BC = loadCases(currentLoadCase)%P, &
+F_BC = loadCases(currentLoadCase)%deformation, &
+temperature_bc = loadCases(currentLoadCase)%temperature, &
+rotation_BC = loadCases(currentLoadCase)%rotation)
+
+case (DAMASK_spectral_SolverPolarisation_label)
+solres(field) = Polarisation_solution (&
+incInfo,guess,timeinc,timeIncOld,remainingLoadCaseTime, &
+P_BC = loadCases(currentLoadCase)%P, &
+F_BC = loadCases(currentLoadCase)%deformation, &
+temperature_bc = loadCases(currentLoadCase)%temperature, &
+rotation_BC = loadCases(currentLoadCase)%rotation)
+end select
+
+case(FIELD_THERMAL_ID)
+solres(field) = spectral_thermal_solution (&
+guess,timeinc,timeIncOld,remainingLoadCaseTime)
+
+case(FIELD_DAMAGE_ID)
+solres(field) = spectral_damage_solution (&
+guess,timeinc,timeIncOld,remainingLoadCaseTime)
+end select
+if(.not. solres(field)%converged) exit ! no solution found
+enddo
+stagIter = stagIter + 1_pInt
+stagIterate = stagIter < stagItMax .and. &
+all(solres(:)%converged) .and. &
+.not. all(solres(:)%stagConverged)
+enddo

 !--------------------------------------------------------------------------------------------------
 ! check solution
 cutBack = .False.
-if(solres%termIll .or. .not. solres%converged) then ! no solution found
+if(solres(1)%termIll .or. .not. all(solres(:)%converged .and. solres(:)%stagConverged)) then ! no solution found
 if (cutBackLevel < maxCutBack) then ! do cut back
 if (worldrank == 0) write(6,'(/,a)') ' cut back detected'
 cutBack = .True.
 stepFraction = (stepFraction - 1_pInt) * subStepFactor ! adjust to new denominator
 cutBackLevel = cutBackLevel + 1_pInt
 time = time - timeinc ! rewind time
-timeIncOld = timeinc
 timeinc = timeinc/2.0_pReal
-elseif (solres%termIll) then ! material point model cannot find a solution, exit in any casy
+elseif (solres(1)%termIll) then ! material point model cannot find a solution, exit in any casy
 call IO_warning(850_pInt)
 call quit(-1_pInt*(lastRestartWritten+1_pInt)) ! quit and provide information about last restart inc written (e.g. for regridding)
 elseif (continueCalculation == 1_pInt) then
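The heart of the change is the staggered ("stag") iteration: all active fields are solved in turn and the sweep is repeated until every field reports both converged and staggered-converged, or stagItMax sweeps are reached. A reduced, runnable sketch of that loop with mock solution states:

    program staggered_loop_sketch
      implicit none
      type tState
        logical :: converged = .false., stagConverged = .false., termIll = .false.
      end type tState
      integer, parameter :: stagItMax = 10
      integer :: stagIter, field
      type(tState) :: solres(2)
      logical :: stagIterate

      stagIter = 0
      stagIterate = .true.
      do while (stagIterate)
        do field = 1, size(solres)
          solres(field) = solve_mock(field, stagIter)          ! one pass over this field
          if (.not. solres(field)%converged) exit              ! give up on this sweep early
        enddo
        stagIter = stagIter + 1
        stagIterate = stagIter < stagItMax .and. &
                      all(solres(:)%converged) .and. &
                      .not. all(solres(:)%stagConverged)
      enddo
      print '(a,i0,a,l1)', 'staggered sweeps: ', stagIter, ', converged: ', &
            all(solres(:)%converged .and. solres(:)%stagConverged)
    contains
      type(tState) function solve_mock(field, sweep)
        integer, intent(in) :: field, sweep
        solve_mock%converged     = .true.                      ! pretend each field solve succeeds
        solve_mock%stagConverged = sweep >= 1                  ! and settles after one extra sweep
      end function solve_mock
    end program staggered_loop_sketch

The cut-back test above follows the same pattern: a missing converged or stagConverged flag on any field sends the driver into the cut-back branch.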
@@ -548,7 +620,7 @@ program DAMASK_spectral_Driver
 endif
 enddo subIncLooping
 cutBackLevel = max(0_pInt, cutBackLevel - 1_pInt) ! try half number of subincs next inc
-if(solres%converged) then ! report converged inc
+if(all(solres(:)%converged)) then ! report converged inc
 convergedCounter = convergedCounter + 1_pInt
 if (worldrank == 0) &
 write(6,'(/,a,'//IO_intOut(totalIncsCounter)//',a)') &
@@ -593,14 +665,24 @@ program DAMASK_spectral_Driver
 call MPI_file_close(resUnit,ierr)
 close(statUnit)

-select case (spectral_solver)
-case (DAMASK_spectral_SolverBasicPETSC_label)
-call BasicPETSC_destroy()
-case (DAMASK_spectral_SolverAL_label)
-call AL_destroy()
-case (DAMASK_spectral_SolverPolarisation_label)
-call Polarisation_destroy()
-end select
+do field = 1, nActiveFields
+select case(loadCases(1)%ID(field))
+case(FIELD_MECH_ID)
+select case (spectral_solver)
+case (DAMASK_spectral_SolverBasicPETSc_label)
+call BasicPETSC_destroy()
+case (DAMASK_spectral_SolverAL_label)
+call AL_destroy()
+case (DAMASK_spectral_SolverPolarisation_label)
+call Polarisation_destroy()
+end select
+case(FIELD_THERMAL_ID)
+call spectral_thermal_destroy()
+case(FIELD_DAMAGE_ID)
+call spectral_damage_destroy()
+end select
+enddo
+call utilities_destroy()

 call PetscFinalize(ierr); CHKERRQ(ierr)
@@ -13,7 +13,8 @@ module DAMASK_spectral_solverAL
 use math, only: &
 math_I3
 use DAMASK_spectral_utilities, only: &
-tSolutionState
+tSolutionState, &
+tSolutionParams

 implicit none
 private

@@ -24,14 +25,6 @@ module DAMASK_spectral_solverAL

 !--------------------------------------------------------------------------------------------------
 ! derived types
-type tSolutionParams !< @todo use here the type definition for a full loadcase including mask
-real(pReal), dimension(3,3) :: P_BC, rotation_BC
-real(pReal) :: timeinc
-real(pReal) :: timeincOld
-real(pReal) :: temperature
-real(pReal) :: density
-end type tSolutionParams
-
 type(tSolutionParams), private :: params
 real(pReal), private, dimension(3,3) :: mask_stress = 0.0_pReal
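The per-solver copies of tSolutionParams are gone; the type is now imported from DAMASK_spectral_utilities alongside tSolutionState. The central definition is not part of this hunk, so the mock below only infers its shape from the removed local copies and from the assignments the solvers still make (params%timeinc, params%timeincOld, params%temperature); treat the field list as an assumption:

    module solution_params_sketch                              ! mock, not the DAMASK module
      implicit none
      integer, parameter :: pReal = selected_real_kind(15)
      type tSolutionParams
        real(pReal), dimension(3,3) :: P_BC, rotation_BC
        real(pReal) :: timeinc
        real(pReal) :: timeincOld
        real(pReal) :: temperature                             ! density no longer carried along
      end type tSolutionParams
    end module solution_params_sketch

    program use_params_sketch
      use solution_params_sketch
      implicit none
      type(tSolutionParams) :: params
      params%timeinc     = 1.0e-3_pReal
      params%timeincOld  = 1.0e-3_pReal
      params%temperature = 300.0_pReal
      print '(a,es10.3)', 'timeinc = ', params%timeinc
    end program use_params_sketch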
@@ -124,12 +117,9 @@ subroutine AL_init(temperature)
 use DAMASK_interface, only: &
 getSolverJobName
 use DAMASK_spectral_Utilities, only: &
-Utilities_init, &
 Utilities_constitutiveResponse, &
 Utilities_updateGamma, &
-Utilities_updateIPcoords, &
-grid1Red, &
-wgt
+Utilities_updateIPcoords
 use mesh, only: &
 gridLocal, &
 gridGlobal

@@ -150,7 +140,6 @@ subroutine AL_init(temperature)
 integer(pInt) :: proc
 character(len=1024) :: rankStr

-call Utilities_init()
 if (worldrank == 0_pInt) then
 write(6,'(/,a)') ' <<<+- DAMASK_spectral_solverAL init -+>>>'
 write(6,'(a)') ' $Id$'
@@ -169,6 +158,7 @@ subroutine AL_init(temperature)
 !--------------------------------------------------------------------------------------------------
 ! PETSc Init
 call SNESCreate(PETSC_COMM_WORLD,snes,ierr); CHKERRQ(ierr)
+call SNESSetOptionsPrefix(snes,'mech_',ierr);CHKERRQ(ierr)
 allocate(localK(worldsize), source = 0); localK(worldrank+1) = gridLocal(3)
 do proc = 1, worldsize
 call MPI_Bcast(localK(proc),1,MPI_INTEGER,proc-1,PETSC_COMM_WORLD,ierr)

@@ -182,6 +172,7 @@ subroutine AL_init(temperature)
 gridLocal (1),gridLocal (2),localK, & ! local grid
 da,ierr) ! handle, error
 CHKERRQ(ierr)
+call SNESSetDM(snes,da,ierr); CHKERRQ(ierr)
 call DMCreateGlobalVector(da,solution_vec,ierr); CHKERRQ(ierr)
 call DMDASNESSetFunctionLocal(da,INSERT_VALUES,AL_formResidual,dummy,ierr)
 CHKERRQ(ierr)
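Two PETSc calls are new in the mechanical init: the SNES gets the options prefix 'mech_', and it is attached to the DMDA before the global vector and the residual callback are registered. The fragment below simply collects those calls in the order they now appear, with comments; it is not a standalone routine (snes, da, solution_vec, dummy and AL_formResidual are the module entities used above). With the prefix set, runtime options for this solver are read under -mech_... (for example -mech_snes_type), so they do not collide with options of other field solvers.

    call SNESCreate(PETSC_COMM_WORLD,snes,ierr); CHKERRQ(ierr)          ! nonlinear solver context
    call SNESSetOptionsPrefix(snes,'mech_',ierr);CHKERRQ(ierr)          ! runtime options read as -mech_...
    ! ... DMDA creation as above ...
    call SNESSetDM(snes,da,ierr); CHKERRQ(ierr)                         ! attach the grid before vectors/callbacks
    call DMCreateGlobalVector(da,solution_vec,ierr); CHKERRQ(ierr)      ! solution vector lives on the DMDA
    call DMDASNESSetFunctionLocal(da,INSERT_VALUES,AL_formResidual,dummy,ierr)
    CHKERRQ(ierr)                                                       ! residual callback registered on the DM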
@@ -266,7 +257,7 @@ end subroutine AL_init
 !--------------------------------------------------------------------------------------------------
 type(tSolutionState) function &
 AL_solution(incInfoIn,guess,timeinc,timeinc_old,loadCaseTime,P_BC,F_BC,temperature_bc, &
-rotation_BC,density)
+rotation_BC)
 use numerics, only: &
 update_gamma
 use math, only: &

@@ -287,8 +278,7 @@ type(tSolutionState) function &
 timeinc, & !< increment in time for current solution
 timeinc_old, & !< increment in time of last increment
 loadCaseTime, & !< remaining time of current load case
-temperature_bc, &
-density
+temperature_bc
 logical, intent(in) :: &
 guess
 type(tBoundaryCondition), intent(in) :: &

@@ -324,7 +314,6 @@ type(tSolutionState) function &
 params%timeinc = timeinc
 params%timeincOld = timeinc_old
 params%temperature = temperature_bc
-params%density = density

 !--------------------------------------------------------------------------------------------------
 ! solve BVP
@@ -363,16 +352,14 @@ subroutine AL_formResidual(in,x_scal,f_scal,dummy,ierr)
 math_transpose33, &
 math_mul3333xx33, &
 math_invSym3333, &
-math_mul33x33, &
-PI
+math_mul33x33
 use DAMASK_spectral_Utilities, only: &
 wgt, &
-field_realMPI, &
-field_fourierMPI, &
-Utilities_FFTforward, &
-Utilities_fourierConvolution, &
+tensorField_realMPI, &
+utilities_FFTtensorForward, &
+utilities_fourierGammaConvolution, &
 Utilities_inverseLaplace, &
-Utilities_FFTbackward, &
+utilities_FFTtensorBackward, &
 Utilities_constitutiveResponse, &
 Utilities_divergenceRMS, &
 Utilities_curlRMS

@@ -444,9 +431,9 @@ subroutine AL_formResidual(in,x_scal,f_scal,dummy,ierr)

 !--------------------------------------------------------------------------------------------------
 !
-field_realMPI = 0.0_pReal
+tensorField_realMPI = 0.0_pReal
 do k = 1_pInt, gridLocal(3); do j = 1_pInt, gridLocal(2); do i = 1_pInt, gridLocal(1)
-field_realMPI(1:3,1:3,i,j,k) = &
+tensorField_realMPI(1:3,1:3,i,j,k) = &
 polarBeta*math_mul3333xx33(C_scale,F(1:3,1:3,i,j,k) - math_I3) -&
 polarAlpha*math_mul33x33(F(1:3,1:3,i,j,k), &
 math_mul3333xx33(C_scale,F_lambda(1:3,1:3,i,j,k) - math_I3))

@@ -455,13 +442,13 @@ subroutine AL_formResidual(in,x_scal,f_scal,dummy,ierr)

 !--------------------------------------------------------------------------------------------------
 ! doing convolution in Fourier space
-call Utilities_FFTforward()
-call Utilities_fourierConvolution(math_rotate_backward33(polarBeta*F_aim,params%rotation_BC))
-call Utilities_FFTbackward()
+call utilities_FFTtensorForward()
+call utilities_fourierGammaConvolution(math_rotate_backward33(polarBeta*F_aim,params%rotation_BC))
+call utilities_FFTtensorBackward()

 !--------------------------------------------------------------------------------------------------
 ! constructing residual
-residual_F_lambda = polarBeta*F - field_realMPI(1:3,1:3,1:gridLocal(1),1:gridLocal(2),1:gridLocal(3))
+residual_F_lambda = polarBeta*F - tensorField_realMPI(1:3,1:3,1:gridLocal(1),1:gridLocal(2),1:gridLocal(3))

 !--------------------------------------------------------------------------------------------------
 ! evaluate constitutive response

@@ -473,11 +460,11 @@ subroutine AL_formResidual(in,x_scal,f_scal,dummy,ierr)

 !--------------------------------------------------------------------------------------------------
 ! calculate divergence
-field_realMPI = 0.0_pReal
-field_realMPI(1:3,1:3,1:gridLocal(1),1:gridLocal(2),1:gridLocal(3)) = residual_F
-call Utilities_FFTforward()
+tensorField_realMPI = 0.0_pReal
+tensorField_realMPI(1:3,1:3,1:gridLocal(1),1:gridLocal(2),1:gridLocal(3)) = residual_F
+call utilities_FFTtensorForward()
 err_div = Utilities_divergenceRMS()
-call Utilities_FFTbackward()
+call utilities_FFTtensorBackward()

 !--------------------------------------------------------------------------------------------------
 ! constructing residual

@@ -493,11 +480,11 @@ subroutine AL_formResidual(in,x_scal,f_scal,dummy,ierr)

 !--------------------------------------------------------------------------------------------------
 ! calculating curl
-field_realMPI = 0.0_pReal
-field_realMPI(1:3,1:3,1:gridLocal(1),1:gridLocal(2),1:gridLocal(3)) = F
-call Utilities_FFTforward()
+tensorField_realMPI = 0.0_pReal
+tensorField_realMPI(1:3,1:3,1:gridLocal(1),1:gridLocal(2),1:gridLocal(3)) = F
+call utilities_FFTtensorForward()
 err_curl = Utilities_curlRMS()
-call Utilities_FFTbackward()
+call utilities_FFTtensorBackward()

 end subroutine AL_formResidual
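Every FFT round-trip in the residual now uses the renamed tensor-field helpers of DAMASK_spectral_utilities. As one example, the divergence check above restated with comments (same statements as in the hunk; residual_F, gridLocal and err_div are the surrounding routine's variables):

    tensorField_realMPI = 0.0_pReal                                     ! clear the padded real-space tensor buffer
    tensorField_realMPI(1:3,1:3,1:gridLocal(1),1:gridLocal(2),1:gridLocal(3)) = residual_F
    call utilities_FFTtensorForward()                                   ! tensor field to Fourier space
    err_div = Utilities_divergenceRMS()                                 ! RMS of the divergence from the Fourier coefficients
    call utilities_FFTtensorBackward()                                  ! and back to real space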
@@ -727,7 +714,6 @@ subroutine AL_destroy()
 call VecDestroy(solution_vec,ierr); CHKERRQ(ierr)
 call SNESDestroy(snes,ierr); CHKERRQ(ierr)
 call DMDestroy(da,ierr); CHKERRQ(ierr)
-call Utilities_destroy()

 end subroutine AL_destroy
@@ -13,7 +13,8 @@ module DAMASK_spectral_SolverBasicPETSc
 use math, only: &
 math_I3
 use DAMASK_spectral_Utilities, only: &
-tSolutionState
+tSolutionState, &
+tSolutionParams

 implicit none
 private

@@ -24,14 +25,6 @@ module DAMASK_spectral_SolverBasicPETSc

 !--------------------------------------------------------------------------------------------------
 ! derived types
-type tSolutionParams
-real(pReal), dimension(3,3) :: P_BC, rotation_BC
-real(pReal) :: timeinc
-real(pReal) :: timeincOld
-real(pReal) :: temperature
-real(pReal) :: density
-end type tSolutionParams
-
 type(tSolutionParams), private :: params

 !--------------------------------------------------------------------------------------------------

@@ -58,7 +51,7 @@ module DAMASK_spectral_SolverBasicPETSc
 C_volAvgLastInc = 0.0_pReal, & !< previous volume average stiffness
 C_minMaxAvg = 0.0_pReal, & !< current (min+max)/2 stiffness
 S = 0.0_pReal !< current compliance (filled up with zeros)
-real(pReal), private :: err_stress, err_div, err_divPrev, err_divDummy
+real(pReal), private :: err_stress, err_div
 logical, private :: ForwardData
 integer(pInt), private :: &
 totalIter = 0_pInt !< total iteration in current increment
@@ -112,16 +105,13 @@ subroutine basicPETSc_init(temperature)
 use DAMASK_interface, only: &
 getSolverJobName
 use DAMASK_spectral_Utilities, only: &
-Utilities_init, &
 Utilities_constitutiveResponse, &
 Utilities_updateGamma, &
 utilities_updateIPcoords, &
-grid1Red, &
 wgt
 use mesh, only: &
 gridLocal, &
-gridGlobal, &
-mesh_ipCoordinates
+gridGlobal
 use math, only: &
 math_invSym3333

@@ -138,7 +128,6 @@ subroutine basicPETSc_init(temperature)
 integer(pInt) :: proc
 character(len=1024) :: rankStr

-call Utilities_init()
 mainProcess: if (worldrank == 0_pInt) then
 write(6,'(/,a)') ' <<<+- DAMASK_spectral_solverBasicPETSc init -+>>>'
 write(6,'(a)') ' $Id$'

@@ -155,6 +144,7 @@ subroutine basicPETSc_init(temperature)
 !--------------------------------------------------------------------------------------------------
 ! initialize solver specific parts of PETSc
 call SNESCreate(PETSC_COMM_WORLD,snes,ierr); CHKERRQ(ierr)
+call SNESSetOptionsPrefix(snes,'mech_',ierr);CHKERRQ(ierr)
 allocate(localK(worldsize), source = 0); localK(worldrank+1) = gridLocal(3)
 do proc = 1, worldsize
 call MPI_Bcast(localK(proc),1,MPI_INTEGER,proc-1,PETSC_COMM_WORLD,ierr)

@@ -168,6 +158,7 @@ subroutine basicPETSc_init(temperature)
 gridLocal (1),gridLocal (2),localK, & ! local grid
 da,ierr) ! handle, error
 CHKERRQ(ierr)
+call SNESSetDM(snes,da,ierr); CHKERRQ(ierr)
 call DMCreateGlobalVector(da,solution_vec,ierr); CHKERRQ(ierr) ! global solution vector (grid x 9, i.e. every def grad tensor)
 call DMDASNESSetFunctionLocal(da,INSERT_VALUES,BasicPETSC_formResidual,dummy,ierr) ! residual vector of same shape as solution vector
 CHKERRQ(ierr)
@@ -205,12 +196,13 @@ subroutine basicPETSc_init(temperature)
 call Utilities_updateIPcoords(F)
 call Utilities_constitutiveResponse(F_lastInc, F, &
 temperature, &
-1.0_pReal, &
+0.0_pReal, &
 P, &
 C_volAvg,C_minMaxAvg, & ! global average of stiffness and (min+max)/2
 temp33_Real, &
 .false., &
 math_I3)

 call DMDAVecRestoreArrayF90(da,solution_vec,F,ierr); CHKERRQ(ierr) ! write data back to PETSc

 if (restartInc > 1_pInt) then ! using old values from files
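The only functional change in this hunk is the literal in the fourth argument slot of the initial Utilities_constitutiveResponse call, which by the surrounding argument list appears to be the time increment: the reference response at start-up is now evaluated with a zero increment instead of a unit one. Restated with a comment (fragment; variables as in basicPETSc_init above):

    call Utilities_constitutiveResponse(F_lastInc, F, &
                                        temperature, &
                                        0.0_pReal, &            ! time increment: initial reference response at zero increment
                                        P, &
                                        C_volAvg,C_minMaxAvg, &
                                        temp33_Real, &
                                        .false., &
                                        math_I3)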
@@ -237,11 +229,10 @@ end subroutine basicPETSc_init
 !> @brief solution for the Basic PETSC scheme with internal iterations
 !--------------------------------------------------------------------------------------------------
 type(tSolutionState) function basicPETSc_solution( &
-incInfoIn,guess,timeinc,timeinc_old,loadCaseTime,P_BC,F_BC,temperature_bc,rotation_BC,density)
+incInfoIn,guess,timeinc,timeinc_old,loadCaseTime,P_BC,F_BC,temperature_bc,rotation_BC)
 use numerics, only: &
 update_gamma, &
-itmax, &
-worldrank
+itmax
 use DAMASK_spectral_Utilities, only: &
 tBoundaryCondition, &
 Utilities_maskedCompliance, &

@@ -258,8 +249,7 @@ type(tSolutionState) function basicPETSc_solution( &
 timeinc, & !< increment in time for current solution
 timeinc_old, & !< increment in time of last increment
 loadCaseTime, & !< remaining time of current load case
-temperature_bc, &
-density
+temperature_bc
 type(tBoundaryCondition), intent(in) :: &
 P_BC, &
 F_BC

@@ -290,7 +280,6 @@ type(tSolutionState) function basicPETSc_solution( &
 params%timeinc = timeinc
 params%timeincOld = timeinc_old
 params%temperature = temperature_BC
-params%density = density

 call SNESSolve(snes,PETSC_NULL_OBJECT,solution_vec,ierr); CHKERRQ(ierr)
 call SNESGetConvergedReason(snes,reason,ierr); CHKERRQ(ierr)
@@ -329,11 +318,11 @@ subroutine BasicPETSC_formResidual(in,x_scal,f_scal,dummy,ierr)
 debug_spectralRotation
 use DAMASK_spectral_Utilities, only: &
 wgt, &
-field_realMPI, &
-field_fourierMPI, &
-Utilities_FFTforward, &
-Utilities_FFTbackward, &
-Utilities_fourierConvolution, &
+tensorField_realMPI, &
+tensorField_fourierMPI, &
+utilities_FFTtensorForward, &
+utilities_FFTtensorBackward, &
+utilities_fourierGammaConvolution, &
 Utilities_inverseLaplace, &
 Utilities_constitutiveResponse, &
 Utilities_divergenceRMS

@@ -392,16 +381,16 @@ subroutine BasicPETSC_formResidual(in,x_scal,f_scal,dummy,ierr)

 !--------------------------------------------------------------------------------------------------
 ! updated deformation gradient using fix point algorithm of basic scheme
-field_realMPI = 0.0_pReal
-field_realMPI(1:3,1:3,1:gridLocal(1),1:gridLocal(2),1:gridLocal(3)) = f_scal
-call Utilities_FFTforward()
+tensorField_realMPI = 0.0_pReal
+tensorField_realMPI(1:3,1:3,1:gridLocal(1),1:gridLocal(2),1:gridLocal(3)) = f_scal
+call utilities_FFTtensorForward()
 err_div = Utilities_divergenceRMS()
-call Utilities_fourierConvolution(math_rotate_backward33(F_aim_lastIter-F_aim,params%rotation_BC))
-call Utilities_FFTbackward()
+call utilities_fourierGammaConvolution(math_rotate_backward33(F_aim_lastIter-F_aim,params%rotation_BC))
+call utilities_FFTtensorBackward()

 !--------------------------------------------------------------------------------------------------
 ! constructing residual
-f_scal = field_realMPI(1:3,1:3,1:gridLocal(1),1:gridLocal(2),1:gridLocal(3))
+f_scal = tensorField_realMPI(1:3,1:3,1:gridLocal(1),1:gridLocal(2),1:gridLocal(3))

 end subroutine BasicPETSc_formResidual

@@ -578,7 +567,6 @@ subroutine BasicPETSc_destroy()
 call VecDestroy(solution_vec,ierr); CHKERRQ(ierr)
 call SNESDestroy(snes,ierr); CHKERRQ(ierr)
 call DMDestroy(da,ierr); CHKERRQ(ierr)
-call Utilities_destroy()

 end subroutine BasicPETSc_destroy
@@ -13,7 +13,8 @@ module DAMASK_spectral_solverPolarisation
 use math, only: &
 math_I3
 use DAMASK_spectral_utilities, only: &
-tSolutionState
+tSolutionState, &
+tSolutionParams

 implicit none
 private

@@ -24,14 +25,6 @@ module DAMASK_spectral_solverPolarisation

 !--------------------------------------------------------------------------------------------------
 ! derived types
-type tSolutionParams !< @todo use here the type definition for a full loadcase including mask
-real(pReal), dimension(3,3) :: P_BC, rotation_BC
-real(pReal) :: timeinc
-real(pReal) :: timeincOld
-real(pReal) :: temperature
-real(pReal) :: density
-end type tSolutionParams
-
 type(tSolutionParams), private :: params
 real(pReal), private, dimension(3,3) :: mask_stress = 0.0_pReal

@@ -124,12 +117,9 @@ subroutine Polarisation_init(temperature)
 use DAMASK_interface, only: &
 getSolverJobName
 use DAMASK_spectral_Utilities, only: &
-Utilities_init, &
 Utilities_constitutiveResponse, &
 Utilities_updateGamma, &
-Utilities_updateIPcoords, &
-grid1Red, &
-wgt
+Utilities_updateIPcoords
 use mesh, only: &
 gridLocal, &
 gridGlobal
@@ -150,7 +140,6 @@ subroutine Polarisation_init(temperature)
 integer(pInt) :: proc
 character(len=1024) :: rankStr

-call Utilities_init()
 if (worldrank == 0_pInt) then
 write(6,'(/,a)') ' <<<+- DAMASK_spectral_solverPolarisation init -+>>>'
 write(6,'(a)') ' $Id$'

@@ -169,6 +158,7 @@ subroutine Polarisation_init(temperature)
 !--------------------------------------------------------------------------------------------------
 ! PETSc Init
 call SNESCreate(PETSC_COMM_WORLD,snes,ierr); CHKERRQ(ierr)
+call SNESSetOptionsPrefix(snes,'mech_',ierr);CHKERRQ(ierr)
 allocate(localK(worldsize), source = 0); localK(worldrank+1) = gridLocal(3)
 do proc = 1, worldsize
 call MPI_Bcast(localK(proc),1,MPI_INTEGER,proc-1,PETSC_COMM_WORLD,ierr)

@@ -182,10 +172,10 @@ subroutine Polarisation_init(temperature)
 gridLocal (1),gridLocal (2),localK, & ! local grid
 da,ierr) ! handle, error
 CHKERRQ(ierr)
+call SNESSetDM(snes,da,ierr); CHKERRQ(ierr)
 call DMCreateGlobalVector(da,solution_vec,ierr); CHKERRQ(ierr)
 call DMDASNESSetFunctionLocal(da,INSERT_VALUES,Polarisation_formResidual,dummy,ierr)
 CHKERRQ(ierr)
-call SNESSetDM(snes,da,ierr); CHKERRQ(ierr)
 call SNESSetConvergenceTest(snes,Polarisation_converged,dummy,PETSC_NULL_FUNCTION,ierr)
 CHKERRQ(ierr)
 call SNESSetFromOptions(snes,ierr); CHKERRQ(ierr)
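Besides the new options prefix, the Polarisation init changes the PETSc setup order: SNESSetDM now comes right after the DMDA exists, before the global vector is created and the local residual function is registered, instead of after them. A comment-annotated restatement of the new order (fragment; same calls as above):

    call SNESSetDM(snes,da,ierr); CHKERRQ(ierr)                          ! hand the grid to the SNES first
    call DMCreateGlobalVector(da,solution_vec,ierr); CHKERRQ(ierr)       ! then create the solution vector on it
    call DMDASNESSetFunctionLocal(da,INSERT_VALUES,Polarisation_formResidual,dummy,ierr)
    CHKERRQ(ierr)                                                        ! register the residual callback
    call SNESSetConvergenceTest(snes,Polarisation_converged,dummy,PETSC_NULL_FUNCTION,ierr)
    CHKERRQ(ierr)
    call SNESSetFromOptions(snes,ierr); CHKERRQ(ierr)                    ! now honours the mech_ prefixed options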
@@ -265,7 +255,7 @@ end subroutine Polarisation_init
 !--------------------------------------------------------------------------------------------------
 type(tSolutionState) function &
 Polarisation_solution(incInfoIn,guess,timeinc,timeinc_old,loadCaseTime,P_BC,F_BC,temperature_bc, &
-rotation_BC,density)
+rotation_BC)
 use numerics, only: &
 update_gamma
 use math, only: &

@@ -277,8 +267,6 @@ type(tSolutionState) function &
 use FEsolving, only: &
 restartWrite, &
 terminallyIll
-use numerics, only: &
-worldrank

 implicit none

@@ -288,8 +276,7 @@ type(tSolutionState) function &
 timeinc, & !< increment in time for current solution
 timeinc_old, & !< increment in time of last increment
 loadCaseTime, & !< remaining time of current load case
-temperature_bc, &
-density
+temperature_bc
 logical, intent(in) :: &
 guess
 type(tBoundaryCondition), intent(in) :: &

@@ -324,7 +311,6 @@ type(tSolutionState) function &
 params%timeinc = timeinc
 params%timeincOld = timeinc_old
 params%temperature = temperature_bc
-params%density = density

 !--------------------------------------------------------------------------------------------------
 ! solve BVP
@ -363,16 +349,14 @@ subroutine Polarisation_formResidual(in,x_scal,f_scal,dummy,ierr)
|
||||||
math_transpose33, &
|
math_transpose33, &
|
||||||
math_mul3333xx33, &
|
math_mul3333xx33, &
|
||||||
math_invSym3333, &
|
math_invSym3333, &
|
||||||
math_mul33x33, &
|
math_mul33x33
|
||||||
PI
|
|
||||||
use DAMASK_spectral_Utilities, only: &
|
use DAMASK_spectral_Utilities, only: &
|
||||||
wgt, &
|
wgt, &
|
||||||
field_realMPI, &
|
tensorField_realMPI, &
|
||||||
field_fourierMPI, &
|
utilities_FFTtensorForward, &
|
||||||
Utilities_FFTforward, &
|
utilities_fourierGammaConvolution, &
|
||||||
Utilities_fourierConvolution, &
|
|
||||||
Utilities_inverseLaplace, &
|
Utilities_inverseLaplace, &
|
||||||
Utilities_FFTbackward, &
|
utilities_FFTtensorBackward, &
|
||||||
Utilities_constitutiveResponse, &
|
Utilities_constitutiveResponse, &
|
||||||
Utilities_divergenceRMS, &
|
Utilities_divergenceRMS, &
|
||||||
Utilities_curlRMS
|
Utilities_curlRMS
|
||||||
|
@ -444,9 +428,9 @@ subroutine Polarisation_formResidual(in,x_scal,f_scal,dummy,ierr)
|
||||||
|
|
||||||
!--------------------------------------------------------------------------------------------------
|
!--------------------------------------------------------------------------------------------------
|
||||||
!
|
!
|
||||||
field_realMPI = 0.0_pReal
|
tensorField_realMPI = 0.0_pReal
|
||||||
do k = 1_pInt, gridLocal(3); do j = 1_pInt, gridLocal(2); do i = 1_pInt, gridLocal(1)
|
do k = 1_pInt, gridLocal(3); do j = 1_pInt, gridLocal(2); do i = 1_pInt, gridLocal(1)
|
||||||
field_realMPI(1:3,1:3,i,j,k) = &
|
tensorField_realMPI(1:3,1:3,i,j,k) = &
|
||||||
polarBeta*math_mul3333xx33(C_scale,F(1:3,1:3,i,j,k) - math_I3) -&
|
polarBeta*math_mul3333xx33(C_scale,F(1:3,1:3,i,j,k) - math_I3) -&
|
||||||
polarAlpha*math_mul33x33(F(1:3,1:3,i,j,k), &
|
polarAlpha*math_mul33x33(F(1:3,1:3,i,j,k), &
|
||||||
math_mul3333xx33(C_scale,F_tau(1:3,1:3,i,j,k) - F(1:3,1:3,i,j,k) - math_I3))
|
math_mul3333xx33(C_scale,F_tau(1:3,1:3,i,j,k) - F(1:3,1:3,i,j,k) - math_I3))
|
||||||
|
@@ -454,13 +438,13 @@ subroutine Polarisation_formResidual(in,x_scal,f_scal,dummy,ierr)
 
 !--------------------------------------------------------------------------------------------------
 ! doing convolution in Fourier space
-call Utilities_FFTforward()
+call utilities_FFTtensorForward()
-call Utilities_fourierConvolution(math_rotate_backward33(polarBeta*F_aim,params%rotation_BC))
+call utilities_fourierGammaConvolution(math_rotate_backward33(polarBeta*F_aim,params%rotation_BC))
-call Utilities_FFTbackward()
+call utilities_FFTtensorBackward()
 
 !--------------------------------------------------------------------------------------------------
 ! constructing residual
-residual_F_tau = polarBeta*F - field_realMPI(1:3,1:3,1:gridLocal(1),1:gridLocal(2),1:gridLocal(3))
+residual_F_tau = polarBeta*F - tensorField_realMPI(1:3,1:3,1:gridLocal(1),1:gridLocal(2),1:gridLocal(3))
 
 !--------------------------------------------------------------------------------------------------
 ! evaluate constitutive response
@@ -472,11 +456,11 @@ subroutine Polarisation_formResidual(in,x_scal,f_scal,dummy,ierr)
 
 !--------------------------------------------------------------------------------------------------
 ! calculate divergence
-field_realMPI = 0.0_pReal
+tensorField_realMPI = 0.0_pReal
-field_realMPI(1:3,1:3,1:gridLocal(1),1:gridLocal(2),1:gridLocal(3)) = residual_F
+tensorField_realMPI(1:3,1:3,1:gridLocal(1),1:gridLocal(2),1:gridLocal(3)) = residual_F
-call Utilities_FFTforward()
+call utilities_FFTtensorForward()
 err_div = Utilities_divergenceRMS()
-call Utilities_FFTbackward()
+call utilities_FFTtensorBackward()
 
 !--------------------------------------------------------------------------------------------------
 ! constructing residual
@@ -492,11 +476,11 @@ subroutine Polarisation_formResidual(in,x_scal,f_scal,dummy,ierr)
 
 !--------------------------------------------------------------------------------------------------
 ! calculating curl
-field_realMPI = 0.0_pReal
+tensorField_realMPI = 0.0_pReal
-field_realMPI(1:3,1:3,1:gridLocal(1),1:gridLocal(2),1:gridLocal(3)) = F
+tensorField_realMPI(1:3,1:3,1:gridLocal(1),1:gridLocal(2),1:gridLocal(3)) = F
-call Utilities_FFTforward()
+call utilities_FFTtensorForward()
 err_curl = Utilities_curlRMS()
-call Utilities_FFTbackward()
+call utilities_FFTtensorBackward()
 
 end subroutine Polarisation_formResidual
 
@@ -727,7 +711,6 @@ subroutine Polarisation_destroy()
 call VecDestroy(solution_vec,ierr); CHKERRQ(ierr)
 call SNESDestroy(snes,ierr); CHKERRQ(ierr)
 call DMDestroy(da,ierr); CHKERRQ(ierr)
-call Utilities_destroy()
 
 end subroutine Polarisation_destroy
 
@@ -11,6 +11,8 @@ module DAMASK_spectral_utilities
 use prec, only: &
 pReal, &
 pInt
+use math, only: &
+math_I3
 
 implicit none
 private
@@ -21,6 +23,18 @@ module DAMASK_spectral_utilities
 
 logical, public :: cutBack =.false. !< cut back of BVP solver in case convergence is not achieved or a material point is terminally ill
 integer(pInt), public, parameter :: maxPhaseFields = 2_pInt
+integer(pInt), public :: nActiveFields = 0_pInt
+
+!--------------------------------------------------------------------------------------------------
+! field labels information
+enum, bind(c)
+enumerator :: FIELD_UNDEFINED_ID, &
+FIELD_MECH_ID, &
+FIELD_THERMAL_ID, &
+FIELD_DAMAGE_ID, &
+FIELD_VACANCYDIFFUSION_ID
+end enum
+
 !--------------------------------------------------------------------------------------------------
 ! grid related information information
 real(pReal), public :: wgt !< weighting factor 1/Nelems
@@ -28,37 +42,29 @@ module DAMASK_spectral_utilities
 !--------------------------------------------------------------------------------------------------
 ! variables storing information for spectral method and FFTW
 integer(pInt), public :: grid1Red !< grid(1)/2
-real (C_DOUBLE), public, dimension(:,:,:,:,:), pointer :: field_realMPI !< real representation (some stress or deformation) of field_fourier
+real (C_DOUBLE), public, dimension(:,:,:,:,:), pointer :: tensorField_realMPI !< real representation (some stress or deformation) of field_fourier
-complex(C_DOUBLE_COMPLEX),public, dimension(:,:,:,:,:), pointer :: field_fourierMPI !< field on which the Fourier transform operates
+complex(C_DOUBLE_COMPLEX),public, dimension(:,:,:,:,:), pointer :: tensorField_fourierMPI !< field on which the Fourier transform operates
+real(C_DOUBLE), public, dimension(:,:,:,:), pointer :: vectorField_realMPI !< vector field real representation for fftw
+complex(C_DOUBLE_COMPLEX),public, dimension(:,:,:,:), pointer :: vectorField_fourierMPI !< vector field fourier representation for fftw
+real(C_DOUBLE), public, dimension(:,:,:), pointer :: scalarField_realMPI !< scalar field real representation for fftw
+complex(C_DOUBLE_COMPLEX),public, dimension(:,:,:), pointer :: scalarField_fourierMPI !< scalar field fourier representation for fftw
 real(pReal), private, dimension(:,:,:,:,:,:,:), allocatable :: gamma_hat !< gamma operator (field) for spectral method
 real(pReal), private, dimension(:,:,:,:), allocatable :: xi !< wave vector field for divergence and for gamma operator
-real(pReal), private, dimension(3,3,3,3) :: C_ref !< reference stiffness
+real(pReal), private, dimension(3,3,3,3) :: C_ref !< mechanic reference stiffness
-real(pReal), private, dimension(3) :: scaledGeomSize !< scaled geometry size for calculation of divergence (Basic, Basic PETSc)
+real(pReal), protected, public, dimension(3) :: scaledGeomSize !< scaled geometry size for calculation of divergence (Basic, Basic PETSc)
 
-!--------------------------------------------------------------------------------------------------
-! debug fftw
-real(C_DOUBLE), private, dimension(:,:,:), pointer :: scalarField_realMPI !< scalar field real representation for debugging fftw
-complex(C_DOUBLE_COMPLEX),private, dimension(:,:,:), pointer :: scalarField_fourierMPI !< scalar field fourier representation for debugging fftw
-
-!--------------------------------------------------------------------------------------------------
-! geometry reconstruction
-real(C_DOUBLE), private, dimension(:,:,:,:), pointer :: coords_realMPI !< vector field real representation for geometry reconstruction
-complex(C_DOUBLE_COMPLEX),private, dimension(:,:,:,:), pointer :: coords_fourierMPI !< vector field fourier representation for geometry reconstruction
-
-!--------------------------------------------------------------------------------------------------
-! debug divergence
-real(C_DOUBLE), private, dimension(:,:,:,:), pointer :: divRealMPI !< vector field real representation for debugging divergence calculation
-complex(C_DOUBLE_COMPLEX),private, dimension(:,:,:,:), pointer :: divFourierMPI !< vector field fourier representation for debugging divergence calculation
-
 !--------------------------------------------------------------------------------------------------
 ! plans for FFTW
 type(C_PTR), private :: &
-planForthMPI, & !< FFTW MPI plan P(x) to P(k)
+planTensorForthMPI, & !< FFTW MPI plan P(x) to P(k)
-planBackMPI, & !< FFTW MPI plan F(k) to F(x)
+planTensorBackMPI, & !< FFTW MPI plan F(k) to F(x)
+planVectorForthMPI, & !< FFTW MPI plan P(x) to P(k)
+planVectorBackMPI, & !< FFTW MPI plan F(k) to F(x)
+planScalarForthMPI, & !< FFTW MPI plan P(x) to P(k)
+planScalarBackMPI, & !< FFTW MPI plan F(k) to F(x)
 planDebugForthMPI, & !< FFTW MPI plan for scalar field
 planDebugBackMPI, & !< FFTW MPI plan for scalar field inverse
-planDivMPI, & !< FFTW MPI plan for FFTW in case of debugging divergence calculation
+planDivMPI !< FFTW MPI plan for FFTW in case of debugging divergence calculation
-planCoordsMPI !< FFTW MPI plan for geometry reconstruction
 
 !--------------------------------------------------------------------------------------------------
 ! variables controlling debugging
@@ -74,6 +80,7 @@ module DAMASK_spectral_utilities
 type, public :: tSolutionState !< return type of solution from spectral solver variants
 logical :: converged = .true.
 logical :: regrid = .false.
+logical :: stagConverged = .true.
 logical :: termIll = .false.
 integer(pInt) :: iterationsNeeded = 0_pInt
 end type tSolutionState
@@ -85,6 +92,30 @@ module DAMASK_spectral_utilities
 character(len=64) :: myType = 'None'
 end type tBoundaryCondition
 
+type, public :: tLoadCase
+real(pReal), dimension (3,3) :: rotation = math_I3 !< rotation of BC
+type(tBoundaryCondition) :: P, & !< stress BC
+deformation !< deformation BC (Fdot or L)
+real(pReal) :: time = 0.0_pReal, & !< length of increment
+temperature = 300.0_pReal !< isothermal starting conditions
+integer(pInt) :: incs = 0_pInt, & !< number of increments
+outputfrequency = 1_pInt, & !< frequency of result writes
+restartfrequency = 0_pInt, & !< frequency of restart writes
+logscale = 0_pInt !< linear/logarithmic time inc flag
+logical :: followFormerTrajectory = .true. !< follow trajectory of former loadcase
+integer(kind(FIELD_UNDEFINED_ID)), allocatable :: ID(:)
+end type tLoadCase
+
+type, public :: tSolutionParams !< @todo use here the type definition for a full loadcase including mask
+real(pReal), dimension(3,3) :: P_BC, rotation_BC
+real(pReal) :: timeinc
+real(pReal) :: timeincOld
+real(pReal) :: temperature
+real(pReal) :: density
+end type tSolutionParams
+
+type(tSolutionParams), private :: params
+
 type, public :: phaseFieldDataBin !< set of parameters defining a phase field
 real(pReal) :: diffusion = 0.0_pReal, & !< thermal conductivity
 mobility = 0.0_pReal, & !< thermal mobility
@@ -96,18 +127,29 @@ module DAMASK_spectral_utilities
 public :: &
 utilities_init, &
 utilities_updateGamma, &
-utilities_FFTforward, &
+utilities_FFTtensorForward, &
-utilities_FFTbackward, &
+utilities_FFTtensorBackward, &
-utilities_fourierConvolution, &
+utilities_FFTvectorForward, &
+utilities_FFTvectorBackward, &
+utilities_FFTscalarForward, &
+utilities_FFTscalarBackward, &
+utilities_fourierGammaConvolution, &
+utilities_fourierGreenConvolution, &
 utilities_inverseLaplace, &
 utilities_divergenceRMS, &
 utilities_curlRMS, &
+utilities_fourierScalarGradient, &
+utilities_fourierVectorDivergence, &
 utilities_maskedCompliance, &
 utilities_constitutiveResponse, &
 utilities_calculateRate, &
 utilities_forwardField, &
 utilities_destroy, &
-utilities_updateIPcoords
+utilities_updateIPcoords, &
+FIELD_UNDEFINED_ID, &
+FIELD_MECH_ID, &
+FIELD_THERMAL_ID, &
+FIELD_DAMAGE_ID
 private :: &
 utilities_getFilter
 
@@ -123,15 +165,12 @@ contains
 !--------------------------------------------------------------------------------------------------
 subroutine utilities_init()
 use, intrinsic :: iso_fortran_env ! to get compiler_version and compiler_options (at least for gfortran >4.6 at the moment)
-use DAMASK_interface, only: &
-geometryFile
 use IO, only: &
 IO_error, &
 IO_warning, &
 IO_timeStamp, &
 IO_open_file
 use numerics, only: &
-DAMASK_NumThreadsInt, &
 fftw_planner_flag, &
 fftw_timelimit, &
 memory_efficient, &
@@ -155,9 +194,7 @@ subroutine utilities_init()
 gridGlobal, &
 gridLocal, &
 gridOffset, &
-geomSizeGlobal, &
+geomSizeGlobal
-geomSizeLocal, &
-geomSizeOffset
 
 implicit none
 #ifdef PETSc
@@ -168,13 +205,11 @@ subroutine utilities_init()
 PetscErrorCode :: ierr
 #endif
 integer(pInt) :: i, j, k
-integer(pInt), parameter :: fileUnit = 228_pInt
 integer(pInt), dimension(3) :: k_s
 type(C_PTR) :: &
 tensorFieldMPI, & !< field cotaining data for FFTW in real and fourier space (in place)
-scalarFieldMPI, & !< field cotaining data for FFTW in real space when debugging FFTW (no in place)
+vectorFieldMPI, & !< field cotaining data for FFTW in real space when debugging FFTW (no in place)
-div, & !< field cotaining data for FFTW in real and fourier space when debugging divergence (in place)
+scalarFieldMPI !< field cotaining data for FFTW in real space when debugging FFTW (no in place)
-coordsMPI
 integer(C_INTPTR_T) :: gridFFTW(3), alloc_local, local_K, local_K_offset
 integer(C_INTPTR_T), parameter :: &
 scalarSize = 1_C_INTPTR_T, &
@@ -236,65 +271,76 @@ subroutine utilities_init()
 gridFFTW = int(gridGlobal,C_INTPTR_T)
 alloc_local = fftw_mpi_local_size_3d(gridFFTW(3), gridFFTW(2), gridFFTW(1)/2 +1, &
 MPI_COMM_WORLD, local_K, local_K_offset)
-tensorFieldMPI = fftw_alloc_complex(9*alloc_local)
-call c_f_pointer(tensorFieldMPI, field_realMPI, [3_C_INTPTR_T,3_C_INTPTR_T, &
-2_C_INTPTR_T*(gridFFTW(1)/2_C_INTPTR_T + 1_C_INTPTR_T),gridFFTW(2),local_K])
-call c_f_pointer(tensorFieldMPI, field_fourierMPI, [3_C_INTPTR_T,3_C_INTPTR_T, &
-gridFFTW(1)/2_C_INTPTR_T + 1_C_INTPTR_T , gridFFTW(2),local_K])
+tensorFieldMPI = fftw_alloc_complex(tensorSize*alloc_local)
+call c_f_pointer(tensorFieldMPI, tensorField_realMPI, [3_C_INTPTR_T,3_C_INTPTR_T, &
+2_C_INTPTR_T*(gridFFTW(1)/2_C_INTPTR_T + 1_C_INTPTR_T),gridFFTW(2),local_K]) ! place a pointer for a real tensor representation
+call c_f_pointer(tensorFieldMPI, tensorField_fourierMPI, [3_C_INTPTR_T,3_C_INTPTR_T, &
+gridFFTW(1)/2_C_INTPTR_T + 1_C_INTPTR_T , gridFFTW(2),local_K]) ! place a pointer for a fourier tensor representation
+
+vectorFieldMPI = fftw_alloc_complex(vecSize*alloc_local)
+call c_f_pointer(vectorFieldMPI, vectorField_realMPI, [3_C_INTPTR_T,&
+2_C_INTPTR_T*(gridFFTW(1)/2_C_INTPTR_T + 1_C_INTPTR_T),gridFFTW(2),local_K]) ! place a pointer for a real vector representation
+call c_f_pointer(vectorFieldMPI, vectorField_fourierMPI,[3_C_INTPTR_T,&
+gridFFTW(1)/2_C_INTPTR_T + 1_C_INTPTR_T, gridFFTW(2),local_K]) ! place a pointer for a fourier vector representation
+
+scalarFieldMPI = fftw_alloc_complex(scalarSize*alloc_local) ! allocate data for real representation (no in place transform)
+call c_f_pointer(scalarFieldMPI, scalarField_realMPI, &
+[2_C_INTPTR_T*(gridFFTW(1)/2_C_INTPTR_T + 1),gridFFTW(2),local_K]) ! place a pointer for a real scalar representation
+call c_f_pointer(scalarFieldMPI, scalarField_fourierMPI, &
+[ gridFFTW(1)/2_C_INTPTR_T + 1 ,gridFFTW(2),local_K]) ! place a pointer for a fourier scarlar representation
 allocate (xi(3,grid1Red,gridLocal(2),gridLocal(3)),source = 0.0_pReal) ! frequencies, only half the size for first dimension
 
-coordsMPI = fftw_alloc_complex(3*alloc_local)
-call c_f_pointer(coordsMPI, coords_realMPI, [3_C_INTPTR_T,&
-2_C_INTPTR_T*(gridFFTW(1)/2_C_INTPTR_T + 1_C_INTPTR_T),gridFFTW(2),local_K]) ! place a pointer for a real representation on coords_fftw
-call c_f_pointer(coordsMPI, coords_fourierMPI,[3_C_INTPTR_T,&
-gridFFTW(1)/2_C_INTPTR_T + 1_C_INTPTR_T, gridFFTW(2),local_K]) ! place a pointer for a real representation on coords_fftw
+!--------------------------------------------------------------------------------------------------
+! tensor MPI fftw plans
+planTensorForthMPI = fftw_mpi_plan_many_dft_r2c(3, [gridFFTW(3),gridFFTW(2),gridFFTW(1)], & ! dimension, logical length in each dimension in reversed order
+tensorSize, FFTW_MPI_DEFAULT_BLOCK, FFTW_MPI_DEFAULT_BLOCK, &! no. of transforms, default iblock and oblock
+tensorField_realMPI, tensorField_fourierMPI, &! input data, output data
+MPI_COMM_WORLD, fftw_planner_flag) ! use all processors, planer precision
+planTensorBackMPI = fftw_mpi_plan_many_dft_c2r(3, [gridFFTW(3),gridFFTW(2),gridFFTW(1)], & ! dimension, logical length in each dimension in reversed order
+tensorSize, FFTW_MPI_DEFAULT_BLOCK, FFTW_MPI_DEFAULT_BLOCK, &! no. of transforms, default iblock and oblock
+tensorField_fourierMPI,tensorField_realMPI, &! input data, output data
+MPI_COMM_WORLD, fftw_planner_flag) ! all processors, planer precision
 
 !--------------------------------------------------------------------------------------------------
-! MPI fftw plans
+! vector MPI fftw plans
-planForthMPI = fftw_mpi_plan_many_dft_r2c(3, [gridFFTW(3),gridFFTW(2),gridFFTW(1)], tensorSize, & ! dimension, logical length in each dimension in reversed order, no. of transforms
+planVectorForthMPI = fftw_mpi_plan_many_dft_r2c(3, [gridFFTW(3),gridFFTW(2),gridFFTW(1)], & ! dimension, logical length in each dimension in reversed order
-FFTW_MPI_DEFAULT_BLOCK, FFTW_MPI_DEFAULT_BLOCK, & ! default iblock and oblock
+vecSize, FFTW_MPI_DEFAULT_BLOCK, FFTW_MPI_DEFAULT_BLOCK, &! no. of transforms, default iblock and oblock
-field_realMPI, field_fourierMPI, & ! input data, output data
+vectorField_realMPI, vectorField_fourierMPI, &! input data, output data
 MPI_COMM_WORLD, fftw_planner_flag) ! use all processors, planer precision
-planBackMPI = fftw_mpi_plan_many_dft_c2r(3, [gridFFTW(3),gridFFTW(2),gridFFTW(1)], tensorSize, & ! dimension, logical length in each dimension in reversed order, no. of transforms
+planVectorBackMPI = fftw_mpi_plan_many_dft_c2r(3, [gridFFTW(3),gridFFTW(2),gridFFTW(1)], & ! dimension, logical length in each dimension in reversed order
-FFTW_MPI_DEFAULT_BLOCK, FFTW_MPI_DEFAULT_BLOCK, & ! default iblock and oblock
+vecSize, FFTW_MPI_DEFAULT_BLOCK, FFTW_MPI_DEFAULT_BLOCK, & ! no. of transforms, default iblock and oblock
-field_fourierMPI,field_realMPI, & ! input data, output data
+vectorField_fourierMPI,vectorField_realMPI, & ! input data, output data
 MPI_COMM_WORLD, fftw_planner_flag) ! all processors, planer precision
 
 !--------------------------------------------------------------------------------------------------
-! Coordinates MPI fftw plans
+! scalar MPI fftw plans
-planCoordsMPI = fftw_mpi_plan_many_dft_c2r(3, [gridFFTW(3),gridFFTW(2),gridFFTW(1)], vecSize, & ! dimension, logical length in each dimension in reversed order, no. of transforms
+planScalarForthMPI = fftw_mpi_plan_many_dft_r2c(3, [gridFFTW(3),gridFFTW(2),gridFFTW(1)], & ! dimension, logical length in each dimension in reversed order
-FFTW_MPI_DEFAULT_BLOCK, FFTW_MPI_DEFAULT_BLOCK, & ! default iblock and oblock
+scalarSize, FFTW_MPI_DEFAULT_BLOCK, FFTW_MPI_DEFAULT_BLOCK, & ! no. of transforms, default iblock and oblock
-coords_fourierMPI,coords_realMPI, & ! input data, output data
+scalarField_realMPI, scalarField_fourierMPI, & ! input data, output data
 MPI_COMM_WORLD, fftw_planner_flag) ! use all processors, planer precision
+planScalarBackMPI = fftw_mpi_plan_many_dft_c2r(3, [gridFFTW(3),gridFFTW(2),gridFFTW(1)], & ! dimension, logical length in each dimension in reversed order, no. of transforms
+scalarSize, FFTW_MPI_DEFAULT_BLOCK, FFTW_MPI_DEFAULT_BLOCK, & ! no. of transforms, default iblock and oblock
+scalarField_fourierMPI,scalarField_realMPI, & ! input data, output data
+MPI_COMM_WORLD, fftw_planner_flag) ! use all processors, planer precision
 !--------------------------------------------------------------------------------------------------
 ! depending on debug options, allocate more memory and create additional plans
 if (debugDivergence) then
-div = fftw_alloc_complex(3*alloc_local)
-call c_f_pointer(div,divRealMPI, [3_C_INTPTR_T,&
-2_C_INTPTR_T*(gridFFTW(1)/2_C_INTPTR_T + 1_C_INTPTR_T),gridFFTW(2),local_K])
-call c_f_pointer(div,divFourierMPI,[3_C_INTPTR_T,&
-gridFFTW(1)/2_C_INTPTR_T + 1_C_INTPTR_T, gridFFTW(2),local_K])
 planDivMPI = fftw_mpi_plan_many_dft_c2r(3, [gridFFTW(3),gridFFTW(2) ,gridFFTW(1)],vecSize, &
 FFTW_MPI_DEFAULT_BLOCK, FFTW_MPI_DEFAULT_BLOCK, &
-divFourierMPI, divRealMPI, &
+vectorField_fourierMPI, vectorField_realMPI, &
 MPI_COMM_WORLD, fftw_planner_flag)
 endif
 
 if (debugFFTW) then
-scalarFieldMPI = fftw_alloc_complex(alloc_local) ! allocate data for real representation (no in place transform)
-call c_f_pointer(scalarFieldMPI, scalarField_realMPI, &
-[2*(gridFFTW(1)/2 + 1),gridFFTW(2),local_K]) ! place a pointer for a real representation
-call c_f_pointer(scalarFieldMPI, scalarField_fourierMPI, &
-[ gridFFTW(1)/2 + 1 ,gridFFTW(2),local_K]) ! place a pointer for a fourier representation
 planDebugForthMPI = fftw_mpi_plan_many_dft_r2c(3, [gridFFTW(3),gridFFTW(2),gridFFTW(1)], &
 scalarSize, FFTW_MPI_DEFAULT_BLOCK, FFTW_MPI_DEFAULT_BLOCK, &
 scalarField_realMPI, scalarField_fourierMPI, &
 MPI_COMM_WORLD, fftw_planner_flag)
 planDebugBackMPI = fftw_mpi_plan_many_dft_c2r(3, [gridFFTW(3),gridFFTW(2),gridFFTW(1)], &
 scalarSize, FFTW_MPI_DEFAULT_BLOCK, FFTW_MPI_DEFAULT_BLOCK, &
 scalarField_fourierMPI,scalarField_realMPI, &
 MPI_COMM_WORLD, fftw_planner_flag)
 endif
 !--------------------------------------------------------------------------------------------------
 ! general initialization of FFTW (see manual on fftw.org for more details)
|
||||||
gridLocal
|
gridLocal
|
||||||
use math, only: &
|
use math, only: &
|
||||||
math_inv33
|
math_inv33
|
||||||
use mesh, only: &
|
|
||||||
gridLocal
|
|
||||||
|
|
||||||
implicit none
|
implicit none
|
||||||
real(pReal), intent(in), dimension(3,3,3,3) :: C !< input stiffness to store as reference stiffness
|
real(pReal), intent(in), dimension(3,3,3,3) :: C !< input stiffness to store as reference stiffness
|
||||||
|
@@ -383,14 +427,13 @@ subroutine utilities_updateGamma(C,saveReference)
 
 end subroutine utilities_updateGamma
 
-
 !--------------------------------------------------------------------------------------------------
 !> @brief forward FFT of data in field_real to field_fourier with highest freqs. removed
 !> @details Does an unweighted FFT transform from real to complex.
 !> In case of debugging the FFT, also one component of the tensor (specified by row and column)
 !> is independetly transformed complex to complex and compared to the whole tensor transform
 !--------------------------------------------------------------------------------------------------
-subroutine utilities_FFTforward()
+subroutine utilities_FFTtensorForward()
 use math
 use numerics, only: &
 worldrank
@@ -398,10 +441,6 @@ subroutine utilities_FFTforward()
 gridLocal
 
 implicit none
-external :: &
-MPI_Bcast, &
-MPI_reduce
-
 integer(pInt) :: row, column ! if debug FFTW, compare 3D array field of row and column
 real(pReal), dimension(2) :: myRand, maxScalarField ! random numbers
 integer(pInt) :: i, j, k
@@ -419,12 +458,12 @@ subroutine utilities_FFTforward()
 call MPI_Bcast(column,1,MPI_INTEGER,0,PETSC_COMM_WORLD,ierr)
 scalarField_realMPI = 0.0_pReal
 scalarField_realMPI(1:gridLocal(1),1:gridLocal(2),1:gridLocal(3)) = &
-field_realMPI(row,column,1:gridLocal(1),1:gridLocal(2),1:gridLocal(3)) ! store the selected component
+tensorField_realMPI(row,column,1:gridLocal(1),1:gridLocal(2),1:gridLocal(3)) ! store the selected component
 endif
 
 !--------------------------------------------------------------------------------------------------
 ! doing the FFT
-call fftw_mpi_execute_dft_r2c(planForthMPI,field_realMPI,field_fourierMPI)
+call fftw_mpi_execute_dft_r2c(planTensorForthMPI,tensorField_realMPI,tensorField_fourierMPI)
 
 !--------------------------------------------------------------------------------------------------
 ! comparing 1 and 3x3 FT results
@@ -433,10 +472,10 @@ subroutine utilities_FFTforward()
 where(abs(scalarField_fourierMPI(1:grid1Red,1:gridLocal(2),1:gridLocal(3))) > tiny(1.0_pReal)) ! avoid division by zero
 scalarField_fourierMPI(1:grid1Red,1:gridLocal(2),1:gridLocal(3)) = &
 (scalarField_fourierMPI(1:grid1Red,1:gridLocal(2),1:gridLocal(3))-&
-field_fourierMPI(row,column,1:grid1Red,1:gridLocal(2),1:gridLocal(3)))/&
+tensorField_fourierMPI(row,column,1:grid1Red,1:gridLocal(2),1:gridLocal(3)))/&
 scalarField_fourierMPI(1:grid1Red,1:gridLocal(2),1:gridLocal(3))
 else where
-scalarField_fourierMPI = cmplx(0.0,0.0,pReal)
+scalarField_realMPI = cmplx(0.0,0.0,pReal)
 end where
 maxScalarField(1) = maxval(real (scalarField_fourierMPI(1:grid1Red,1:gridLocal(2), &
 1:gridLocal(3))))
@@ -454,11 +493,11 @@ subroutine utilities_FFTforward()
 !--------------------------------------------------------------------------------------------------
 ! applying filter
 do k = 1_pInt, gridLocal(3); do j = 1_pInt, gridLocal(2); do i = 1_pInt,grid1Red
-field_fourierMPI(1:3,1:3,i,j,k) = cmplx(utilities_getFilter(xi(1:3,i,j,k)),0.0,pReal)* &
+tensorField_fourierMPI(1:3,1:3,i,j,k) = utilities_getFilter(xi(1:3,i,j,k))* &
-field_fourierMPI(1:3,1:3,i,j,k)
+tensorField_fourierMPI(1:3,1:3,i,j,k)
 enddo; enddo; enddo
 
-end subroutine utilities_FFTforward
+end subroutine utilities_FFTtensorForward
 
 
 !--------------------------------------------------------------------------------------------------
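Editor's note, summarizing the hunk above rather than adding to it: the filter loop applies one scalar weight per Fourier mode to all nine tensor components, which is what the doxygen header means by "highest freqs. removed":

\hat P_{mn}(\mathbf k) \;\leftarrow\; w(\mathbf k)\,\hat P_{mn}(\mathbf k), \qquad w(\mathbf k)=\mathtt{utilities\_getFilter}(\boldsymbol\xi_{ijk}),

where the weight returned by utilities_getFilter (not shown in this diff) damps the highest frequencies for the chosen filter type.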
@@ -468,7 +507,7 @@ end subroutine utilities_FFTforward
 !> is independetly transformed complex to complex and compared to the whole tensor transform
 !> results is weighted by number of points stored in wgt
 !--------------------------------------------------------------------------------------------------
-subroutine utilities_FFTbackward()
+subroutine utilities_FFTtensorBackward()
 use math
 use numerics, only: &
 worldrank
@@ -476,10 +515,6 @@ subroutine utilities_FFTbackward()
 gridLocal
 
 implicit none
-external :: &
-MPI_Bcast, &
-MPI_reduce
-
 integer(pInt) :: row, column !< if debug FFTW, compare 3D array field of row and column
 real(pReal), dimension(2) :: myRand
 real(pReal) :: maxScalarField
@@ -496,12 +531,12 @@ subroutine utilities_FFTbackward()
 call MPI_Bcast(row ,1,MPI_INTEGER,0,PETSC_COMM_WORLD,ierr)
 call MPI_Bcast(column,1,MPI_INTEGER,0,PETSC_COMM_WORLD,ierr)
 scalarField_fourierMPI(1:grid1Red,1:gridLocal(2),1:gridLocal(3)) = &
-field_fourierMPI(row,column,1:grid1Red,1:gridLocal(2),1:gridLocal(3))
+tensorField_fourierMPI(row,column,1:grid1Red,1:gridLocal(2),1:gridLocal(3))
 endif
 
 !--------------------------------------------------------------------------------------------------
 ! doing the iFFT
-call fftw_mpi_execute_dft_c2r(planBackMPI,field_fourierMPI,field_realMPI) ! back transform of fluct deformation gradient
+call fftw_mpi_execute_dft_c2r(planTensorBackMPI,tensorField_fourierMPI,tensorField_realMPI) ! back transform of fluct deformation gradient
 
 !--------------------------------------------------------------------------------------------------
 ! comparing 1 and 3x3 inverse FT results
@@ -510,10 +545,10 @@ subroutine utilities_FFTbackward()
 where(abs(real(scalarField_realMPI,pReal)) > tiny(1.0_pReal)) ! avoid division by zero
 scalarField_realMPI(1:gridLocal(1),1:gridLocal(2),1:gridLocal(3)) = &
 (scalarField_realMPI(1:gridLocal(1),1:gridLocal(2),1:gridLocal(3)) &
-- field_realMPI (row,column,1:gridLocal(1),1:gridLocal(2),1:gridLocal(3)))/ &
+- tensorField_realMPI (row,column,1:gridLocal(1),1:gridLocal(2),1:gridLocal(3)))/ &
 scalarField_realMPI(1:gridLocal(1),1:gridLocal(2),1:gridLocal(3))
 else where
-scalarField_realMPI = 0.0_pReal
+scalarField_realMPI = cmplx(0.0,0.0,pReal)
 end where
 maxScalarField = maxval(real (scalarField_realMPI(1:gridLocal(1),1:gridLocal(2),1:gridLocal(3))))
 call MPI_reduce(MPI_IN_PLACE,maxScalarField,1,MPI_DOUBLE,MPI_MAX,0,PETSC_COMM_WORLD,ierr)
@@ -524,10 +559,91 @@ subroutine utilities_FFTbackward()
 endif
 endif
 
-field_realMPI = field_realMPI * wgt ! normalize the result by number of elements
+tensorField_realMPI = tensorField_realMPI * wgt ! normalize the result by number of elements
 
-end subroutine utilities_FFTbackward
+end subroutine utilities_FFTtensorBackward
 
+!--------------------------------------------------------------------------------------------------
+!> @brief forward FFT of data in field_real to field_fourier with highest freqs. removed
+!> @details Does an unweighted FFT transform from real to complex.
+!> In case of debugging the FFT, also one component of the scalar
+!> is independetly transformed complex to complex and compared to the whole scalar transform
+!--------------------------------------------------------------------------------------------------
+subroutine utilities_FFTscalarForward()
+use math
+use mesh, only: &
+gridLocal
+
+integer(pInt) :: i, j, k
+
+! doing the scalar FFT
+call fftw_mpi_execute_dft_r2c(planScalarForthMPI,scalarField_realMPI,scalarField_fourierMPI)
+
+! applying filter
+do k = 1_pInt, gridLocal(3); do j = 1_pInt, gridLocal(2); do i = 1_pInt,grid1Red
+scalarField_fourierMPI(i,j,k) = utilities_getFilter(xi(1:3,i,j,k))* &
+scalarField_fourierMPI(i,j,k)
+enddo; enddo; enddo
+
+end subroutine utilities_FFTscalarForward
+
+!--------------------------------------------------------------------------------------------------
+!> @brief backward FFT of data in field_fourier to field_real
+!> @details Does an inverse FFT transform from complex to real
+!> In case of debugging the FFT, also one component of the scalar
+!> is independetly transformed complex to complex and compared to the whole scalar transform
+!> results is weighted by number of points stored in wgt
+!--------------------------------------------------------------------------------------------------
+subroutine utilities_FFTscalarBackward()
+use math
+
+! doing the scalar iFFT
+call fftw_mpi_execute_dft_c2r(planScalarBackMPI,scalarField_fourierMPI,scalarField_realMPI)
+
+scalarField_realMPI = scalarField_realMPI * wgt ! normalize the result by number of elements
+
+end subroutine utilities_FFTscalarBackward
+
+!--------------------------------------------------------------------------------------------------
+!> @brief forward FFT of data in field_real to field_fourier with highest freqs. removed
+!> @details Does an unweighted FFT transform from real to complex.
+!> In case of debugging the FFT, also one component of the vector
+!> is independetly transformed complex to complex and compared to the whole vector transform
+!--------------------------------------------------------------------------------------------------
+subroutine utilities_FFTvectorForward()
+use math
+use mesh, only: &
+gridLocal
+
+integer(pInt) :: i, j, k
+
+! doing the vecotr FFT
+call fftw_mpi_execute_dft_r2c(planVectorForthMPI,vectorField_realMPI,vectorField_fourierMPI)
+
+! applying filter
+do k = 1_pInt, gridLocal(3); do j = 1_pInt, gridLocal(2); do i = 1_pInt,grid1Red
+vectorField_fourierMPI(1:3,i,j,k) = utilities_getFilter(xi(1:3,i,j,k))* &
+vectorField_fourierMPI(1:3,i,j,k)
+enddo; enddo; enddo
+
+end subroutine utilities_FFTvectorForward
+
+!--------------------------------------------------------------------------------------------------
+!> @brief backward FFT of data in field_fourier to field_real
+!> @details Does an inverse FFT transform from complex to real
+!> In case of debugging the FFT, also one component of the vector
+!> is independetly transformed complex to complex and compared to the whole vector transform
+!> results is weighted by number of points stored in wgt
+!--------------------------------------------------------------------------------------------------
+subroutine utilities_FFTvectorBackward()
+use math
+
+! doing the vector iFFT
+call fftw_mpi_execute_dft_c2r(planVectorBackMPI,vectorField_fourierMPI,vectorField_realMPI)
+
+vectorField_realMPI = vectorField_realMPI * wgt ! normalize the result by number of elements
+
+end subroutine utilities_FFTvectorBackward
 
 !--------------------------------------------------------------------------------------------------
 !> @brief doing convolution with inverse laplace kernel
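Editor's note (not part of the commit): the backward wrappers above rescale by wgt because FFTW transforms are unnormalized, so a forward r2c followed by a backward c2r multiplies the data by the number of grid points; wgt is declared in this module as the weighting factor 1/Nelems, i.e.

\mathtt{field\_realMPI} \;\leftarrow\; \frac{1}{N_1 N_2 N_3}\,\mathcal F^{-1}\!\big[\mathtt{field\_fourierMPI}\big], \qquad \mathtt{wgt}=\frac{1}{N_1 N_2 N_3}.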
@@ -554,13 +670,14 @@ subroutine utilities_inverseLaplace()
 
 do k = 1_pInt, gridLocal(3); do j = 1_pInt, gridLocal(2); do i = 1_pInt, grid1Red
 k_s = xi(1:3,i,j,k)*scaledGeomSize
-if (any(k_s /= 0_pInt)) field_fourierMPI(1:3,1:3,i,j,k-gridOffset) = &
+if (any(k_s /= 0_pInt)) tensorField_fourierMPI(1:3,1:3,i,j,k-gridOffset) = &
-field_fourierMPI(1:3,1:3,i,j,k-gridOffset)/ &
+tensorField_fourierMPI(1:3,1:3,i,j,k-gridOffset)/ &
 cmplx(-sum((2.0_pReal*PI*k_s/geomSizeGlobal)* &
 (2.0_pReal*PI*k_s/geomSizeGlobal)),0.0_pReal,pReal)
 enddo; enddo; enddo
 
 if (gridOffset == 0_pInt) &
-field_fourierMPI(1:3,1:3,1,1,1) = cmplx(0.0_pReal,0.0_pReal,pReal)
+tensorField_fourierMPI(1:3,1:3,1,1,1) = cmplx(0.0_pReal,0.0_pReal,pReal)
 
 end subroutine utilities_inverseLaplace
 
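Editor's note (not part of the commit): written out, the loop above divides every non-zero Fourier mode by the symbol of the Laplacian and zeroes the mean value, with k_s = xi*scaledGeomSize and L = geomSizeGlobal:

\hat F(\mathbf k) \;\leftarrow\; \frac{\hat F(\mathbf k)}{-\left\lVert 2\pi\,\mathbf k_s/\mathbf L\right\rVert^{2}} \quad (\mathbf k_s\neq\mathbf 0), \qquad \hat F(\mathbf 0)=\mathbf 0.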
@@ -568,7 +685,7 @@ end subroutine utilities_inverseLaplace
 !--------------------------------------------------------------------------------------------------
 !> @brief doing convolution gamma_hat * field_real, ensuring that average value = fieldAim
 !--------------------------------------------------------------------------------------------------
-subroutine utilities_fourierConvolution(fieldAim)
+subroutine utilities_fourierGammaConvolution(fieldAim)
 use numerics, only: &
 memory_efficient
 use math, only: &
@@ -583,12 +700,13 @@ subroutine utilities_fourierConvolution(fieldAim)
 real(pReal), intent(in), dimension(3,3) :: fieldAim !< desired average value of the field after convolution
 real(pReal), dimension(3,3) :: xiDyad, temp33_Real
 complex(pReal), dimension(3,3) :: temp33_complex
 
 integer(pInt) :: &
 i, j, k, &
 l, m, n, o
 
 if (worldrank == 0_pInt) then
-write(6,'(/,a)') ' ... doing convolution .....................................................'
+write(6,'(/,a)') ' ... doing gamma convolution ................................................'
 flush(6)
 endif
 
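Editor's note (not part of the commit): the loops in the next hunk apply the Gamma operator mode by mode and pin the zero-frequency term to the prescribed average, i.e.

\hat F_{lm}(\mathbf k) \;\leftarrow\; \sum_{n,o}\hat\Gamma_{lmno}(\mathbf k)\,\hat P_{no}(\mathbf k) \quad (\mathbf k\neq\mathbf 0), \qquad \hat F(\mathbf 0)=\mathtt{fieldAim}/\mathtt{wgt},

where the division by wgt compensates the 1/Nelems weighting that the backward transform applies on the way to real space.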
@@ -605,22 +723,59 @@ subroutine utilities_fourierConvolution(fieldAim)
 forall(l=1_pInt:3_pInt, m=1_pInt:3_pInt, n=1_pInt:3_pInt, o=1_pInt:3_pInt)&
 gamma_hat(l,m,n,o, 1,1,1) = temp33_Real(l,n)*xiDyad(m,o)
 forall(l = 1_pInt:3_pInt, m = 1_pInt:3_pInt) &
-temp33_Complex(l,m) = sum(gamma_hat(l,m,1:3,1:3, 1,1,1) * field_fourierMPI(1:3,1:3,i,j,k))
-field_fourierMPI(1:3,1:3,i,j,k) = temp33_Complex
+temp33_Complex(l,m) = sum(gamma_hat(l,m,1:3,1:3, 1,1,1) * &
+tensorField_fourierMPI(1:3,1:3,i,j,k))
+tensorField_fourierMPI(1:3,1:3,i,j,k) = temp33_Complex
 endif
 enddo; enddo; enddo
 else ! use precalculated gamma-operator
 do k = 1_pInt, gridLocal(3); do j = 1_pInt, gridLocal(2); do i = 1_pInt,grid1Red
 forall(l = 1_pInt:3_pInt, m = 1_pInt:3_pInt) &
-temp33_Complex(l,m) = sum(gamma_hat(l,m,1:3,1:3,i,j,k) * field_fourierMPI(1:3,1:3,i,j,k))
-field_fourierMPI(1:3,1:3,i,j,k) = temp33_Complex
+temp33_Complex(l,m) = sum(gamma_hat(l,m,1:3,1:3,i,j,k) * &
+tensorField_fourierMPI(1:3,1:3,i,j,k))
+tensorField_fourierMPI(1:3,1:3,i,j,k) = temp33_Complex
 enddo; enddo; enddo
 endif
 
 if (gridOffset == 0_pInt) &
-field_fourierMPI(1:3,1:3,1,1,1) = cmplx(fieldAim/wgt,0.0_pReal,pReal) ! singular point at xi=(0.0,0.0,0.0) i.e. i=j=k=1
+tensorField_fourierMPI(1:3,1:3,1,1,1) = cmplx(fieldAim/wgt,0.0_pReal,pReal) ! singular point at xi=(0.0,0.0,0.0) i.e. i=j=k=1
 
-end subroutine utilities_fourierConvolution
+end subroutine utilities_fourierGammaConvolution
+
+!--------------------------------------------------------------------------------------------------
+!> @brief doing convolution DamageGreenOp_hat * field_real
+!--------------------------------------------------------------------------------------------------
+subroutine utilities_fourierGreenConvolution(D_ref, mobility_ref, deltaT)
+
+use numerics, only: &
+memory_efficient
+use math, only: &
+math_mul33x3, &
+PI
+use numerics, only: &
+worldrank
+use mesh, only: &
+gridLocal, &
+geomSizeGlobal
+
+implicit none
+real(pReal), dimension(3,3), intent(in) :: D_ref !< desired average value of the field after convolution
+real(pReal), intent(in) :: mobility_ref, deltaT !< desired average value of the field after convolution
+real(pReal), dimension(3) :: k_s
+real(pReal) :: GreenOp_hat
+integer(pInt) :: i, j, k
+
+!--------------------------------------------------------------------------------------------------
+! do the actual spectral method calculation
+do k = 1_pInt, gridLocal(3); do j = 1_pInt, gridLocal(2) ;do i = 1_pInt, grid1Red
+k_s = xi(1:3,i,j,k)*scaledGeomSize
+GreenOp_hat = 1.0_pReal/ &
+(mobility_ref + deltaT*sum((2.0_pReal*PI*k_s/geomSizeGlobal)* &
+math_mul33x3(D_ref,(2.0_pReal*PI*k_s/geomSizeGlobal))))!< GreenOp_hat = iK^{T} * D_ref * iK, K is frequency
+scalarField_fourierMPI(i,j,k) = scalarField_fourierMPI(i,j,k)*GreenOp_hat
+enddo; enddo; enddo
+
+end subroutine utilities_fourierGreenConvolution
 
 !--------------------------------------------------------------------------------------------------
 !> @brief calculate root mean square of divergence of field_fourier
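Editor's note (not part of the commit): the added utilities_fourierGreenConvolution applies the kernel the inline comment describes (GreenOp_hat built from iK^T * D_ref * iK plus the mobility term), i.e. a backward-Euler Green operator for a diffusion-type equation,

\hat\phi(\mathbf k) \;\leftarrow\; \frac{\hat\phi(\mathbf k)}{M_{\mathrm{ref}} + \Delta t\,\mathbf K\cdot\big(\mathbf D_{\mathrm{ref}}\,\mathbf K\big)}, \qquad \mathbf K = 2\pi\,\mathbf k_s/\mathbf L \ \text{(component-wise)},

with M_ref = mobility_ref, D_ref the reference diffusion tensor, Δt = deltaT and L = geomSizeGlobal.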
@@ -634,10 +789,6 @@ real(pReal) function utilities_divergenceRMS()
 gridGlobal
 
 implicit none
-external :: &
-MPI_reduce, &
-MPI_Allreduce
-
 integer(pInt) :: i, j, k
 real(pReal) :: &
 err_real_div_RMS, & !< RMS of divergence in real space
@@ -658,19 +809,19 @@ real(pReal) function utilities_divergenceRMS()
 do k = 1_pInt, gridLocal(3); do j = 1_pInt, gridLocal(2)
 do i = 2_pInt, grid1Red -1_pInt ! Has somewhere a conj. complex counterpart. Therefore count it twice.
 utilities_divergenceRMS = utilities_divergenceRMS &
-+ 2.0_pReal*(sum (real(math_mul33x3_complex(field_fourierMPI(1:3,1:3,i,j,k),& ! (sqrt(real(a)**2 + aimag(a)**2))**2 = real(a)**2 + aimag(a)**2. do not take square root and square again
++ 2.0_pReal*(sum (real(math_mul33x3_complex(tensorField_fourierMPI(1:3,1:3,i,j,k),& ! (sqrt(real(a)**2 + aimag(a)**2))**2 = real(a)**2 + aimag(a)**2. do not take square root and square again
 xi(1:3,i,j,k))*TWOPIIMG)**2.0_pReal)& ! --> sum squared L_2 norm of vector
-+sum(aimag(math_mul33x3_complex(field_fourierMPI(1:3,1:3,i,j,k),&
++sum(aimag(math_mul33x3_complex(tensorField_fourierMPI(1:3,1:3,i,j,k),&
 xi(1:3,i,j,k))*TWOPIIMG)**2.0_pReal))
 enddo
 utilities_divergenceRMS = utilities_divergenceRMS & ! these two layers (DC and Nyquist) do not have a conjugate complex counterpart (if grid(1) /= 1)
-+ sum( real(math_mul33x3_complex(field_fourierMPI(1:3,1:3,1 ,j,k), &
++ sum( real(math_mul33x3_complex(tensorField_fourierMPI(1:3,1:3,1 ,j,k), &
 xi(1:3,1 ,j,k))*TWOPIIMG)**2.0_pReal) &
-+ sum(aimag(math_mul33x3_complex(field_fourierMPI(1:3,1:3,1 ,j,k), &
++ sum(aimag(math_mul33x3_complex(tensorField_fourierMPI(1:3,1:3,1 ,j,k), &
 xi(1:3,1 ,j,k))*TWOPIIMG)**2.0_pReal) &
-+ sum( real(math_mul33x3_complex(field_fourierMPI(1:3,1:3,grid1Red,j,k), &
++ sum( real(math_mul33x3_complex(tensorField_fourierMPI(1:3,1:3,grid1Red,j,k), &
 xi(1:3,grid1Red,j,k))*TWOPIIMG)**2.0_pReal) &
-+ sum(aimag(math_mul33x3_complex(field_fourierMPI(1:3,1:3,grid1Red,j,k), &
++ sum(aimag(math_mul33x3_complex(tensorField_fourierMPI(1:3,1:3,grid1Red,j,k), &
 xi(1:3,grid1Red,j,k))*TWOPIIMG)**2.0_pReal)
 enddo; enddo
 if(gridGlobal(1) == 1_pInt) utilities_divergenceRMS = utilities_divergenceRMS * 0.5_pReal ! counted twice in case of grid(1) == 1
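Editor's note (not part of the commit): the accumulation above evaluates the divergence spectrally — mode by mode, div P corresponds to 2πi P̂(k)·k — and sums its squared norm directly in Fourier space (Parseval). Interior columns of the half-sized r2c storage are counted twice because their conjugate-complex counterparts are not stored, the DC (i = 1) and Nyquist (i = grid1Red) layers are counted once, and the total is halved again when grid(1) == 1:

\mathrm{err}_{\mathrm{div}}^{2} \;\approx\; \sum_{\mathbf k} c_{\mathbf k}\,\big\lVert 2\pi\mathrm i\,\hat P(\mathbf k)\,\boldsymbol\xi(\mathbf k)\big\rVert^{2}, \qquad c_{\mathbf k}\in\{1,2\}.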
@@ -682,19 +833,19 @@ real(pReal) function utilities_divergenceRMS()
if (debugDivergence) then ! calculate divergence again
err_div_max = 0.0_pReal
do k = 1_pInt, gridLocal(3); do j = 1_pInt, gridLocal(2); do i = 1_pInt, grid1Red
- temp3_Complex = math_mul33x3_complex(field_fourierMPI(1:3,1:3,i,j,k)*wgt,& ! weighting P_fourier
+ temp3_Complex = math_mul33x3_complex(tensorField_fourierMPI(1:3,1:3,i,j,k)*wgt,& ! weighting P_fourier
xi(1:3,i,j,k))*TWOPIIMG
err_div_max = max(err_div_max,sum(abs(temp3_Complex)**2.0_pReal))
- divFourierMPI(1:3,i,j,k) = temp3_Complex ! need divergence NOT squared
+ vectorField_fourierMPI(1:3,i,j,k) = temp3_Complex ! need divergence NOT squared
enddo; enddo; enddo

- call fftw_mpi_execute_dft_c2r(planDivMPI,divFourierMPI,divRealMPI) ! already weighted
+ call fftw_mpi_execute_dft_c2r(planDivMPI,vectorField_fourierMPI,vectorField_realMPI) ! already weighted

- err_real_div_RMS = sum(divRealMPI**2.0_pReal)
+ err_real_div_RMS = sum(vectorField_realMPI**2.0_pReal)
call MPI_reduce(MPI_IN_PLACE,err_real_div_RMS,1,MPI_DOUBLE,MPI_SUM,0,PETSC_COMM_WORLD,ierr)
err_real_div_RMS = sqrt(wgt*err_real_div_RMS) ! RMS in real space

- err_real_div_max = maxval(sum(divRealMPI**2.0_pReal,dim=4)) ! max in real space
+ err_real_div_max = maxval(sum(vectorField_realMPI**2.0_pReal,dim=4)) ! max in real space
call MPI_reduce(MPI_IN_PLACE,err_real_div_max,1,MPI_DOUBLE,MPI_MAX,0,PETSC_COMM_WORLD,ierr)
err_real_div_max = sqrt(err_real_div_max)
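! --- editorial sketch, not part of the commit ---------------------------------------------------
! Per Fourier coefficient, utilities_divergenceRMS accumulates the divergence of the stress
! field, div(P)^(xi) = 2*pi*i * (P^ . xi). A hypothetical, self-contained illustration of that
! per-coefficient operation (pR, P_hat and xi_vec are placeholder names, not DAMASK symbols):
pure function sketch_fourierDivergence(P_hat,xi_vec) result(div_hat)
 integer,     parameter :: pR = selected_real_kind(15)
 real(pR),    parameter :: PI = 3.141592653589793_pR
 complex(pR), dimension(3,3), intent(in) :: P_hat    ! one Fourier coefficient of the tensor field
 real(pR),    dimension(3),   intent(in) :: xi_vec   ! corresponding frequency vector
 complex(pR), dimension(3)               :: div_hat  ! divergence at that frequency
 div_hat = matmul(P_hat,cmplx(xi_vec,0.0_pR,pR))*cmplx(0.0_pR,2.0_pR*PI,pR)
end function sketch_fourierDivergence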
@@ -725,9 +876,6 @@ real(pReal) function utilities_curlRMS()
gridGlobal

implicit none
- external :: &
- MPI_Allreduce

integer(pInt) :: i, j, k, l
complex(pReal), dimension(3,3) :: curl_fourier
PetscErrorCode :: ierr
@@ -744,33 +892,33 @@ real(pReal) function utilities_curlRMS()
do k = 1_pInt, gridLocal(3); do j = 1_pInt, gridLocal(2);
do i = 2_pInt, grid1Red - 1_pInt
do l = 1_pInt, 3_pInt
- curl_fourier(l,1) = (+field_fourierMPI(l,3,i,j,k)*xi(2,i,j,k)&
+ curl_fourier(l,1) = (+tensorField_fourierMPI(l,3,i,j,k)*xi(2,i,j,k)&
- -field_fourierMPI(l,2,i,j,k)*xi(3,i,j,k))*TWOPIIMG
+ -tensorField_fourierMPI(l,2,i,j,k)*xi(3,i,j,k))*TWOPIIMG
- curl_fourier(l,2) = (+field_fourierMPI(l,1,i,j,k)*xi(3,i,j,k)&
+ curl_fourier(l,2) = (+tensorField_fourierMPI(l,1,i,j,k)*xi(3,i,j,k)&
- -field_fourierMPI(l,3,i,j,k)*xi(1,i,j,k))*TWOPIIMG
+ -tensorField_fourierMPI(l,3,i,j,k)*xi(1,i,j,k))*TWOPIIMG
- curl_fourier(l,3) = (+field_fourierMPI(l,2,i,j,k)*xi(1,i,j,k)&
+ curl_fourier(l,3) = (+tensorField_fourierMPI(l,2,i,j,k)*xi(1,i,j,k)&
- -field_fourierMPI(l,1,i,j,k)*xi(2,i,j,k))*TWOPIIMG
+ -tensorField_fourierMPI(l,1,i,j,k)*xi(2,i,j,k))*TWOPIIMG
enddo
utilities_curlRMS = utilities_curlRMS + &
2.0_pReal*sum(real(curl_fourier)**2.0_pReal + aimag(curl_fourier)**2.0_pReal)
enddo
do l = 1_pInt, 3_pInt
- curl_fourier = (+field_fourierMPI(l,3,1,j,k)*xi(2,1,j,k)&
+ curl_fourier = (+tensorField_fourierMPI(l,3,1,j,k)*xi(2,1,j,k)&
- -field_fourierMPI(l,2,1,j,k)*xi(3,1,j,k))*TWOPIIMG
+ -tensorField_fourierMPI(l,2,1,j,k)*xi(3,1,j,k))*TWOPIIMG
- curl_fourier = (+field_fourierMPI(l,1,1,j,k)*xi(3,1,j,k)&
+ curl_fourier = (+tensorField_fourierMPI(l,1,1,j,k)*xi(3,1,j,k)&
- -field_fourierMPI(l,3,1,j,k)*xi(1,1,j,k))*TWOPIIMG
+ -tensorField_fourierMPI(l,3,1,j,k)*xi(1,1,j,k))*TWOPIIMG
- curl_fourier = (+field_fourierMPI(l,2,1,j,k)*xi(1,1,j,k)&
+ curl_fourier = (+tensorField_fourierMPI(l,2,1,j,k)*xi(1,1,j,k)&
- -field_fourierMPI(l,1,1,j,k)*xi(2,1,j,k))*TWOPIIMG
+ -tensorField_fourierMPI(l,1,1,j,k)*xi(2,1,j,k))*TWOPIIMG
enddo
utilities_curlRMS = utilities_curlRMS + &
2.0_pReal*sum(real(curl_fourier)**2.0_pReal + aimag(curl_fourier)**2.0_pReal)
do l = 1_pInt, 3_pInt
- curl_fourier = (+field_fourierMPI(l,3,grid1Red,j,k)*xi(2,grid1Red,j,k)&
+ curl_fourier = (+tensorField_fourierMPI(l,3,grid1Red,j,k)*xi(2,grid1Red,j,k)&
- -field_fourierMPI(l,2,grid1Red,j,k)*xi(3,grid1Red,j,k))*TWOPIIMG
+ -tensorField_fourierMPI(l,2,grid1Red,j,k)*xi(3,grid1Red,j,k))*TWOPIIMG
- curl_fourier = (+field_fourierMPI(l,1,grid1Red,j,k)*xi(3,grid1Red,j,k)&
+ curl_fourier = (+tensorField_fourierMPI(l,1,grid1Red,j,k)*xi(3,grid1Red,j,k)&
- -field_fourierMPI(l,3,grid1Red,j,k)*xi(1,grid1Red,j,k))*TWOPIIMG
+ -tensorField_fourierMPI(l,3,grid1Red,j,k)*xi(1,grid1Red,j,k))*TWOPIIMG
- curl_fourier = (+field_fourierMPI(l,2,grid1Red,j,k)*xi(1,grid1Red,j,k)&
+ curl_fourier = (+tensorField_fourierMPI(l,2,grid1Red,j,k)*xi(1,grid1Red,j,k)&
- -field_fourierMPI(l,1,grid1Red,j,k)*xi(2,grid1Red,j,k))*TWOPIIMG
+ -tensorField_fourierMPI(l,1,grid1Red,j,k)*xi(2,grid1Red,j,k))*TWOPIIMG
enddo
utilities_curlRMS = utilities_curlRMS + &
2.0_pReal*sum(real(curl_fourier)**2.0_pReal + aimag(curl_fourier)**2.0_pReal)
@@ -884,6 +1032,48 @@ function utilities_maskedCompliance(rot_BC,mask_stress,C)
end function utilities_maskedCompliance


+ !--------------------------------------------------------------------------------------------------
+ !> @brief calculate scalar gradient in fourier field
+ !--------------------------------------------------------------------------------------------------
+ subroutine utilities_fourierScalarGradient()
+  use math, only: &
+   PI
+  use mesh, only: &
+   gridLocal, &
+   geomSizeGlobal
+  integer(pInt) :: i, j, k
+
+  vectorField_fourierMPI = cmplx(0.0_pReal,0.0_pReal,pReal)
+  do k = 1_pInt, gridLocal(3); do j = 1_pInt, gridLocal(2); do i = 1_pInt,grid1Red
+   vectorField_fourierMPI(1:3,i,j,k) = scalarField_fourierMPI(i,j,k)* &
+                                       cmplx(0.0_pReal,2.0_pReal*PI*xi(1:3,i,j,k)* &
+                                       scaledGeomSize/geomSizeGlobal,pReal)
+  enddo; enddo; enddo
+ end subroutine utilities_fourierScalarGradient
+
+
+ !--------------------------------------------------------------------------------------------------
+ !> @brief calculate vector divergence in fourier field
+ !--------------------------------------------------------------------------------------------------
+ subroutine utilities_fourierVectorDivergence()
+  use math, only: &
+   PI
+  use mesh, only: &
+   gridLocal, &
+   geomSizeGlobal
+  integer(pInt) :: i, j, k, m
+
+  scalarField_fourierMPI = cmplx(0.0_pReal,0.0_pReal,pReal)
+  do k = 1_pInt, gridLocal(3); do j = 1_pInt, gridLocal(2); do i = 1_pInt,grid1Red
+   do m = 1_pInt, 3_pInt
+    scalarField_fourierMPI(i,j,k) = &
+     scalarField_fourierMPI(i,j,k) + &
+     vectorField_fourierMPI(m,i,j,k)* &
+     cmplx(0.0_pReal,2.0_pReal*PI*xi(m,i,j,k)*scaledGeomSize(m)/geomSizeGlobal(m),pReal)
+   enddo
+  enddo; enddo; enddo
+ end subroutine utilities_fourierVectorDivergence

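! --- editorial sketch, not part of the commit ---------------------------------------------------
! utilities_fourierScalarGradient multiplies each scalar Fourier coefficient by 2*pi*i*xi
! (additionally rescaled by scaledGeomSize/geomSizeGlobal in the code above); the vector
! divergence is the corresponding contraction. A hypothetical, self-contained version of the
! per-coefficient gradient, with placeholder names and the geometry scaling omitted:
pure function sketch_fourierScalarGradient(phi_hat,xi_vec) result(grad_hat)
 integer,     parameter :: pR = selected_real_kind(15)
 real(pR),    parameter :: PI = 3.141592653589793_pR
 complex(pR),               intent(in) :: phi_hat   ! one Fourier coefficient of the scalar field
 real(pR),    dimension(3), intent(in) :: xi_vec    ! corresponding frequency vector
 complex(pR), dimension(3)             :: grad_hat  ! gradient at that frequency
 grad_hat = phi_hat*cmplx(0.0_pR,2.0_pR*PI*xi_vec,pR)
end function sketch_fourierScalarGradient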
!--------------------------------------------------------------------------------------------------
!> @brief calculates constitutive response
!--------------------------------------------------------------------------------------------------
@@ -912,14 +1102,8 @@ subroutine utilities_constitutiveResponse(F_lastInc,F,temperature,timeinc,&
materialpoint_F, &
materialpoint_P, &
materialpoint_dPdF
- ! use thermal_isothermal, only: &
- ! thermal_isothermal_temperature

implicit none
- external :: &
- MPI_reduce, &
- MPI_Allreduce

real(pReal), intent(in) :: temperature !< temperature (no field)
real(pReal), intent(in), dimension(3,3,gridLocal(1),gridLocal(2),gridLocal(3)) :: &
F_lastInc, & !< target deformation gradient
@@ -955,9 +1139,8 @@ subroutine utilities_constitutiveResponse(F_lastInc,F,temperature,timeinc,&

call CPFEM_general(CPFEM_COLLECT,F_lastInc(1:3,1:3,1,1,1),F(1:3,1:3,1,1,1), &
temperature,timeinc,1_pInt,1_pInt)
- ! thermal_isothermal_temperature(:) = temperature
- materialpoint_F = reshape(F,[3,3,1,product(gridLocal)])

+ materialpoint_F = reshape(F,[3,3,1,product(gridLocal)])
call debug_reset()

!--------------------------------------------------------------------------------------------------
@@ -1062,9 +1245,6 @@ function utilities_forwardField(timeinc,field_lastInc,rate,aim)
gridLocal

implicit none
- external :: &
- MPI_Allreduce

real(pReal), intent(in) :: &
timeinc !< timeinc of current step
real(pReal), intent(in), dimension(3,3,gridLocal(1),gridLocal(2),gridLocal(3)) :: &
@@ -1083,7 +1263,7 @@ function utilities_forwardField(timeinc,field_lastInc,rate,aim)
call MPI_Allreduce(MPI_IN_PLACE,fieldDiff,9,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
fieldDiff = fieldDiff - aim
utilities_forwardField = utilities_forwardField - &
spread(spread(spread(fieldDiff,3,gridLocal(1)),4,gridLocal(2)),5,gridLocal(3))
endif

end function utilities_forwardField
@@ -1122,11 +1302,13 @@ real(pReal) function utilities_getFilter(k)
utilities_getFilter = 0.0_pReal
if (gridGlobal(2) /= 1_pInt .and. k(2) == real(gridGlobal(2)/2_pInt, pReal)/scaledGeomSize(2)) &
utilities_getFilter = 0.0_pReal ! do not delete the whole slice in case of 2D calculation
- if (gridGlobal(2) /= 1_pInt .and. k(2) == real(gridGlobal(2)/2_pInt + mod(gridGlobal(2),2_pInt), pReal)/scaledGeomSize(2)) &
+ if (gridGlobal(2) /= 1_pInt .and. &
+ k(2) == real(gridGlobal(2)/2_pInt + mod(gridGlobal(2),2_pInt), pReal)/scaledGeomSize(2)) &
utilities_getFilter = 0.0_pReal ! do not delete the whole slice in case of 2D calculation
if (gridGlobal(3) /= 1_pInt .and. k(3) == real(gridGlobal(3)/2_pInt, pReal)/scaledGeomSize(3)) &
utilities_getFilter = 0.0_pReal ! do not delete the whole slice in case of 2D calculation
- if (gridGlobal(3) /= 1_pInt .and. k(3) == real(gridGlobal(3)/2_pInt + mod(gridGlobal(3),2_pInt), pReal)/scaledGeomSize(3)) &
+ if (gridGlobal(3) /= 1_pInt .and. &
+ k(3) == real(gridGlobal(3)/2_pInt + mod(gridGlobal(3),2_pInt), pReal)/scaledGeomSize(3)) &
utilities_getFilter = 0.0_pReal ! do not delete the whole slice in case of 2D calculation

end function utilities_getFilter
@@ -1143,9 +1325,12 @@ subroutine utilities_destroy()
if (debugDivergence) call fftw_destroy_plan(planDivMPI)
if (debugFFTW) call fftw_destroy_plan(planDebugForthMPI)
if (debugFFTW) call fftw_destroy_plan(planDebugBackMPI)
- call fftw_destroy_plan(planForthMPI)
+ call fftw_destroy_plan(planTensorForthMPI)
- call fftw_destroy_plan(planBackMPI)
+ call fftw_destroy_plan(planTensorBackMPI)
- call fftw_destroy_plan(planCoordsMPI)
+ call fftw_destroy_plan(planVectorForthMPI)
+ call fftw_destroy_plan(planVectorBackMPI)
+ call fftw_destroy_plan(planScalarForthMPI)
+ call fftw_destroy_plan(planScalarBackMPI)

end subroutine utilities_destroy

@@ -1164,8 +1349,6 @@ subroutine utilities_updateIPcoords(F)
geomSizeGlobal, &
mesh_ipCoordinates
implicit none
- external :: &
- MPI_Bcast

real(pReal), dimension(3,3,gridLocal(1),gridLocal(2),gridLocal(3)), intent(in) :: F
integer(pInt) :: i, j, k, m
@@ -1173,40 +1356,41 @@ subroutine utilities_updateIPcoords(F)
real(pReal), dimension(3,3) :: Favg
PetscErrorCode :: ierr

- field_realMPI = 0.0_pReal
+ tensorField_realMPI = 0.0_pReal
- field_realMPI(1:3,1:3,1:gridLocal(1),1:gridLocal(2),1:gridLocal(3)) = F
+ tensorField_realMPI(1:3,1:3,1:gridLocal(1),1:gridLocal(2),1:gridLocal(3)) = F
- call utilities_FFTforward()
+ call utilities_FFTtensorForward()

integrator = geomSizeGlobal * 0.5_pReal / PI
step = geomSizeGlobal/real(gridGlobal, pReal)

!--------------------------------------------------------------------------------------------------
! average F
- if (gridOffset == 0_pInt) Favg = real(field_fourierMPI(1:3,1:3,1,1,1),pReal)*wgt
+ if (gridOffset == 0_pInt) Favg = real(tensorField_fourierMPI(1:3,1:3,1,1,1),pReal)*wgt
call MPI_Bcast(Favg,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)

!--------------------------------------------------------------------------------------------------
! integration in Fourier space
- coords_fourierMPI = cmplx(0.0_pReal, 0.0_pReal, pReal)
+ vectorField_fourierMPI = cmplx(0.0_pReal, 0.0_pReal, pReal)
do k = 1_pInt, gridLocal(3); do j = 1_pInt, gridLocal(2); do i = 1_pInt,grid1Red
do m = 1_pInt,3_pInt
- coords_fourierMPI(m,i,j,k) = sum(field_fourierMPI(m,1:3,i,j,k)*&
+ vectorField_fourierMPI(m,i,j,k) = sum(tensorField_fourierMPI(m,1:3,i,j,k)*&
cmplx(0.0_pReal,xi(1:3,i,j,k)*scaledGeomSize*integrator,pReal))
enddo
- if (any(xi(1:3,i,j,k) /= 0.0_pReal)) coords_fourierMPI(1:3,i,j,k) = &
+ if (any(xi(1:3,i,j,k) /= 0.0_pReal)) &
- coords_fourierMPI(1:3,i,j,k)/cmplx(-sum(xi(1:3,i,j,k)*scaledGeomSize*xi(1:3,i,j,k)* &
+ vectorField_fourierMPI(1:3,i,j,k) = &
+ vectorField_fourierMPI(1:3,i,j,k)/cmplx(-sum(xi(1:3,i,j,k)*scaledGeomSize*xi(1:3,i,j,k)* &
scaledGeomSize),0.0_pReal,pReal)
enddo; enddo; enddo
- call fftw_mpi_execute_dft_c2r(planCoordsMPI,coords_fourierMPI,coords_realMPI)
+ call fftw_mpi_execute_dft_c2r(planVectorBackMPI,vectorField_fourierMPI,vectorField_realMPI)

!--------------------------------------------------------------------------------------------------
! add average to fluctuation and put (0,0,0) on (0,0,0)
- if (gridOffset == 0_pInt) offset_coords = coords_realMPI(1:3,1,1,1)
+ if (gridOffset == 0_pInt) offset_coords = vectorField_realMPI(1:3,1,1,1)
call MPI_Bcast(offset_coords,3,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
offset_coords = math_mul33x3(Favg,step/2.0_pReal) - offset_coords
m = 1_pInt
do k = 1_pInt,gridLocal(3); do j = 1_pInt,gridLocal(2); do i = 1_pInt,gridLocal(1)
- mesh_ipCoordinates(1:3,1,m) = coords_realMPI(1:3,i,j,k) &
+ mesh_ipCoordinates(1:3,1,m) = vectorField_realMPI(1:3,i,j,k) &
+ offset_coords &
+ math_mul33x3(Favg,step*real([i,j,k+gridOffset]-1_pInt,pReal))
m = m+1_pInt
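! --- editorial note, not part of the commit ------------------------------------------------------
! Worked equation for the block above: the IP coordinates are obtained by integrating the
! deformation gradient in Fourier space, i.e. for xi /= 0
!   x^(xi) = [ F^(xi) . (i*xi) ] / ( -xi.xi )
! up to the geometry scaling factors (integrator, scaledGeomSize) applied in the code; the
! average F and the (0,0,0) offset are then added back in real space.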
@@ -348,7 +348,8 @@ DAMASK_spectral.exe: MESHNAME := mesh.f90
DAMASK_spectral.exe: INTERFACENAME := DAMASK_spectral_interface.f90


- SPECTRAL_SOLVER_FILES = DAMASK_spectral_solverAL.o DAMASK_spectral_solverBasicPETSc.o DAMASK_spectral_solverPolarisation.o
+ SPECTRAL_SOLVER_FILES = DAMASK_spectral_solverAL.o DAMASK_spectral_solverBasicPETSc.o DAMASK_spectral_solverPolarisation.o \
+ spectral_thermal.o spectral_damage.o

SPECTRAL_FILES = prec.o DAMASK_interface.o IO.o libs.o numerics.o debug.o math.o \
FEsolving.o mesh.o material.o lattice.o \
@@ -379,6 +380,12 @@ DAMASK_spectral_solverPolarisation.o: DAMASK_spectral_solverPolarisation.f90
DAMASK_spectral_solverBasicPETSc.o: DAMASK_spectral_solverBasicPETSc.f90 \
DAMASK_spectral_utilities.o

+ spectral_thermal.o: spectral_thermal.f90 \
+ DAMASK_spectral_utilities.o
+
+ spectral_damage.o: spectral_damage.f90 \
+ DAMASK_spectral_utilities.o
+
DAMASK_spectral_utilities.o: DAMASK_spectral_utilities.f90 \
CPFEM.o
@@ -115,8 +115,9 @@ module numerics
spectral_solver = 'basicpetsc' , & !< spectral solution method
spectral_filter = 'none' !< spectral filtering method
character(len=1024), protected, public :: &
- petsc_options = '-snes_type ngmres &
+ petsc_options = '-mech_snes_type ngmres &
- &-snes_ngmres_anderson '
+ &-damage_snes_type ngmres &
+ &-thermal_snes_type ngmres '
integer(pInt), protected, public :: &
fftw_planner_flag = 32_pInt, & !< conversion of fftw_plan_mode to integer, basically what is usually done in the include file of fftw
continueCalculation = 0_pInt, & !< 0: exit if BVP solver does not converge, 1: continue calculation if BVP solver does not converge
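! --- editorial note, not part of the commit ------------------------------------------------------
! The petsc_options default changes because each field solver now owns an SNES with its own
! option prefix, so PETSc options are addressed per field ('-mech_', '-damage_', '-thermal_').
! The pattern, as used by the new modules below (sketch only; error handling omitted):
!   call SNESCreate(PETSC_COMM_WORLD,damage_snes,ierr)
!   call SNESSetOptionsPrefix(damage_snes,'damage_',ierr)   ! '-damage_snes_type ngmres' now reaches this SNES
!   call SNESSetFromOptions(damage_snes,ierr)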
@@ -0,0 +1,409 @@
!--------------------------------------------------------------------------------------------------
! $Id: spectral_damage.f90 4082 2015-04-11 20:28:07Z MPIE\m.diehl $
!--------------------------------------------------------------------------------------------------
!> @author Pratheek Shanthraj, Max-Planck-Institut für Eisenforschung GmbH
!> @author Shaokang Zhang, Max-Planck-Institut für Eisenforschung GmbH
!> @brief Spectral solver for nonlocal damage
!--------------------------------------------------------------------------------------------------
module spectral_damage
 use prec, only: &
   pInt, &
   pReal
 use math, only: &
   math_I3
 use DAMASK_spectral_Utilities, only: &
   tSolutionState, &
   tSolutionParams
 use numerics, only: &
   worldrank, &
   worldsize

 implicit none
 private
#include <petsc-finclude/petsc.h90>

 character (len=*), parameter, public :: &
   spectral_damage_label = 'spectraldamage'

!--------------------------------------------------------------------------------------------------
! derived types
 type(tSolutionParams), private :: params

!--------------------------------------------------------------------------------------------------
! PETSc data
 SNES, private :: damage_snes
 Vec, private :: solution
 PetscInt, private :: xstart, xend, ystart, yend, zstart, zend
 real(pReal), private, dimension(:,:,:), allocatable :: &
   damage_current, & !< field of current damage
   damage_lastInc, & !< field of previous damage
   damage_stagInc !< field of staggered damage

!--------------------------------------------------------------------------------------------------
! reference diffusion tensor, mobility etc.
 integer(pInt), private :: totalIter = 0_pInt !< total iteration in current increment
 real(pReal), dimension(3,3), private :: D_ref
 real(pReal), private :: mobility_ref
 character(len=1024), private :: incInfo

 public :: &
   spectral_damage_init, &
   spectral_damage_solution, &
   spectral_damage_forward, &
   spectral_damage_destroy
 external :: &
   VecDestroy, &
   DMDestroy, &
   DMDACreate3D, &
   DMCreateGlobalVector, &
   DMDASNESSetFunctionLocal, &
   PETScFinalize, &
   SNESDestroy, &
   SNESGetNumberFunctionEvals, &
   SNESGetIterationNumber, &
   SNESSolve, &
   SNESSetDM, &
   SNESGetConvergedReason, &
   SNESSetConvergenceTest, &
   SNESSetFromOptions, &
   SNESCreate, &
   MPI_Abort, &
   MPI_Bcast, &
   MPI_Allreduce

contains

!--------------------------------------------------------------------------------------------------
!> @brief allocates all neccessary fields and fills them with data, potentially from restart info
!--------------------------------------------------------------------------------------------------
subroutine spectral_damage_init()
 use, intrinsic :: iso_fortran_env ! to get compiler_version and compiler_options (at least for gfortran >4.6 at the moment)
 use IO, only: &
   IO_intOut, &
   IO_read_realFile, &
   IO_timeStamp
 use DAMASK_spectral_Utilities, only: &
   wgt
 use mesh, only: &
   gridLocal, &
   gridGlobal
 use damage_nonlocal, only: &
   damage_nonlocal_getDiffusion33, &
   damage_nonlocal_getMobility

 implicit none
 DM :: damage_grid
 Vec :: uBound, lBound
 PetscErrorCode :: ierr
 PetscObject :: dummy
 integer(pInt), dimension(:), allocatable :: localK
 integer(pInt) :: proc
 integer(pInt) :: i, j, k, cell
 character(len=100) :: snes_type

 mainProcess: if (worldrank == 0_pInt) then
   write(6,'(/,a)') ' <<<+- spectral_damage init -+>>>'
   write(6,'(a)') ' $Id: spectral_damage.f90 4082 2015-04-11 20:28:07Z MPIE\m.diehl $'
   write(6,'(a15,a)') ' Current time: ',IO_timeStamp()
#include "compilation_info.f90"
 endif mainProcess

!--------------------------------------------------------------------------------------------------
! initialize solver specific parts of PETSc
 call SNESCreate(PETSC_COMM_WORLD,damage_snes,ierr); CHKERRQ(ierr)
 call SNESSetOptionsPrefix(damage_snes,'damage_',ierr);CHKERRQ(ierr)
 allocate(localK(worldsize), source = 0); localK(worldrank+1) = gridLocal(3)
 do proc = 1, worldsize
   call MPI_Bcast(localK(proc),1,MPI_INTEGER,proc-1,PETSC_COMM_WORLD,ierr)
 enddo
 call DMDACreate3d(PETSC_COMM_WORLD, &
        DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, & !< cut off stencil at boundary
        DMDA_STENCIL_BOX, & !< Moore (26) neighborhood around central point
        gridGlobal(1),gridGlobal(2),gridGlobal(3), & !< global grid
        1, 1, worldsize, &
        1, 0, & !< #dof (damage phase field), ghost boundary width (domain overlap)
        gridLocal (1),gridLocal (2),localK, & !< local grid
        damage_grid,ierr) !< handle, error
 CHKERRQ(ierr)
 call SNESSetDM(damage_snes,damage_grid,ierr); CHKERRQ(ierr) !< connect snes to da
 call DMCreateGlobalVector(damage_grid,solution,ierr); CHKERRQ(ierr) !< global solution vector (grid x 1, i.e. every def grad tensor)
 call DMDASNESSetFunctionLocal(damage_grid,INSERT_VALUES,spectral_damage_formResidual,dummy,ierr) !< residual vector of same shape as solution vector
 CHKERRQ(ierr)
 call SNESSetFromOptions(damage_snes,ierr); CHKERRQ(ierr) !< pull it all together with additional cli arguments
 call SNESGetType(damage_snes,snes_type,ierr); CHKERRQ(ierr)
 if (trim(snes_type) == 'vinewtonrsls' .or. &
     trim(snes_type) == 'vinewtonssls') then
   call DMGetGlobalVector(damage_grid,lBound,ierr); CHKERRQ(ierr)
   call DMGetGlobalVector(damage_grid,uBound,ierr); CHKERRQ(ierr)
   call VecSet(lBound,0.0,ierr); CHKERRQ(ierr)
   call VecSet(uBound,1.0,ierr); CHKERRQ(ierr)
   call SNESVISetVariableBounds(damage_snes,lBound,uBound,ierr) !< variable bounds for variational inequalities like contact mechanics, damage etc.
   call DMRestoreGlobalVector(damage_grid,lBound,ierr); CHKERRQ(ierr)
   call DMRestoreGlobalVector(damage_grid,uBound,ierr); CHKERRQ(ierr)
 endif

!--------------------------------------------------------------------------------------------------
! init fields
 call DMDAGetCorners(damage_grid,xstart,ystart,zstart,xend,yend,zend,ierr)
 CHKERRQ(ierr)
 xend = xstart + xend - 1
 yend = ystart + yend - 1
 zend = zstart + zend - 1
 call VecSet(solution,1.0,ierr); CHKERRQ(ierr)
 allocate(damage_current(gridLocal(1),gridLocal(2),gridLocal(3)), source=1.0_pReal)
 allocate(damage_lastInc(gridLocal(1),gridLocal(2),gridLocal(3)), source=1.0_pReal)
 allocate(damage_stagInc(gridLocal(1),gridLocal(2),gridLocal(3)), source=1.0_pReal)

!--------------------------------------------------------------------------------------------------
! damage reference diffusion update
 cell = 0_pInt
 D_ref = 0.0_pReal
 mobility_ref = 0.0_pReal
 do k = 1_pInt, gridLocal(3); do j = 1_pInt, gridLocal(2); do i = 1_pInt,gridLocal(1)
   cell = cell + 1_pInt
   D_ref = D_ref + damage_nonlocal_getDiffusion33(1,cell)
   mobility_ref = mobility_ref + damage_nonlocal_getMobility(1,cell)
 enddo; enddo; enddo
 D_ref = D_ref*wgt
 call MPI_Allreduce(MPI_IN_PLACE,D_ref,9,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
 mobility_ref = mobility_ref*wgt
 call MPI_Allreduce(MPI_IN_PLACE,mobility_ref,1,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)

end subroutine spectral_damage_init

!--------------------------------------------------------------------------------------------------
!> @brief solution for the spectral damage scheme with internal iterations
!--------------------------------------------------------------------------------------------------
type(tSolutionState) function spectral_damage_solution(guess,timeinc,timeinc_old,loadCaseTime)
 use numerics, only: &
   itmax, &
   err_damage_tolAbs, &
   err_damage_tolRel
 use DAMASK_spectral_Utilities, only: &
   tBoundaryCondition, &
   Utilities_maskedCompliance, &
   Utilities_updateGamma
 use mesh, only: &
   gridLocal
 use damage_nonlocal, only: &
   damage_nonlocal_putNonLocalDamage

 implicit none

!--------------------------------------------------------------------------------------------------
! input data for solution
 real(pReal), intent(in) :: &
   timeinc, & !< increment in time for current solution
   timeinc_old, & !< increment in time of last increment
   loadCaseTime !< remaining time of current load case
 logical, intent(in) :: guess
 integer(pInt) :: i, j, k, cell
 PetscInt ::position
 PetscReal :: minDamage, maxDamage, stagNorm, solnNorm

!--------------------------------------------------------------------------------------------------
! PETSc Data
 PetscErrorCode :: ierr
 SNESConvergedReason :: reason

 spectral_damage_solution%converged =.false.

!--------------------------------------------------------------------------------------------------
! set module wide availabe data
 params%timeinc = timeinc
 params%timeincOld = timeinc_old

 call SNESSolve(damage_snes,PETSC_NULL_OBJECT,solution,ierr); CHKERRQ(ierr)
 call SNESGetConvergedReason(damage_snes,reason,ierr); CHKERRQ(ierr)

 if (reason < 1) then
   spectral_damage_solution%converged = .false.
   spectral_damage_solution%iterationsNeeded = itmax
 else
   spectral_damage_solution%converged = .true.
   spectral_damage_solution%iterationsNeeded = totalIter
 endif
 stagNorm = maxval(abs(damage_current - damage_stagInc))
 solnNorm = maxval(abs(damage_current))
 call MPI_Allreduce(MPI_IN_PLACE,stagNorm,1,MPI_DOUBLE,MPI_MAX,PETSC_COMM_WORLD,ierr)
 call MPI_Allreduce(MPI_IN_PLACE,solnNorm,1,MPI_DOUBLE,MPI_MAX,PETSC_COMM_WORLD,ierr)
 damage_stagInc = damage_current
 spectral_damage_solution%stagConverged = stagNorm < err_damage_tolAbs &
                                     .or. stagNorm < err_damage_tolRel*solnNorm

!--------------------------------------------------------------------------------------------------
! updating damage state
 cell = 0_pInt !< material point = 0
 do k = 1_pInt, gridLocal(3); do j = 1_pInt, gridLocal(2); do i = 1_pInt,gridLocal(1)
   cell = cell + 1_pInt !< material point increase
   call damage_nonlocal_putNonLocalDamage(damage_current(i,j,k),1,cell)
 enddo; enddo; enddo

 call VecMin(solution,position,minDamage,ierr); CHKERRQ(ierr)
 call VecMax(solution,position,maxDamage,ierr); CHKERRQ(ierr)
 if (worldrank == 0) then
   if (spectral_damage_solution%converged) &
     write(6,'(/,a)') ' ... nonlocal damage converged .....................................'
   write(6,'(/,a,f8.6,2x,f8.6,2x,f8.6,/)',advance='no') ' Minimum|Maximum|Delta Damage = ',&
     minDamage, maxDamage, stagNorm
   write(6,'(/,a)') ' ==========================================================================='
   flush(6)
 endif

end function spectral_damage_solution


!--------------------------------------------------------------------------------------------------
!> @brief forms the spectral damage residual vector
!--------------------------------------------------------------------------------------------------
subroutine spectral_damage_formResidual(in,x_scal,f_scal,dummy,ierr)
 use mesh, only: &
   gridLocal
 use math, only: &
   math_mul33x3
 use DAMASK_spectral_Utilities, only: &
   scalarField_realMPI, &
   vectorField_realMPI, &
   utilities_FFTvectorForward, &
   utilities_FFTvectorBackward, &
   utilities_FFTscalarForward, &
   utilities_FFTscalarBackward, &
   utilities_fourierGreenConvolution, &
   utilities_fourierScalarGradient, &
   utilities_fourierVectorDivergence
 use damage_nonlocal, only: &
   damage_nonlocal_getSourceAndItsTangent,&
   damage_nonlocal_getDiffusion33, &
   damage_nonlocal_getMobility

 implicit none
 DMDALocalInfo, dimension(DMDA_LOCAL_INFO_SIZE) :: &
   in
 PetscScalar, dimension( &
   XG_RANGE,YG_RANGE,ZG_RANGE) :: &
   x_scal
 PetscScalar, dimension( &
   X_RANGE,Y_RANGE,Z_RANGE) :: &
   f_scal
 PetscObject :: dummy
 PetscErrorCode :: ierr
 integer(pInt) :: i, j, k, cell
 real(pReal) :: phiDot, dPhiDot_dPhi, mobility

 damage_current = x_scal
!--------------------------------------------------------------------------------------------------
! evaluate polarization field
 scalarField_realMPI = 0.0_pReal
 scalarField_realMPI(1:gridLocal(1),1:gridLocal(2),1:gridLocal(3)) = damage_current
 call utilities_FFTscalarForward()
 call utilities_fourierScalarGradient() !< calculate gradient of damage field
 call utilities_FFTvectorBackward()
 cell = 0_pInt
 do k = 1_pInt, gridLocal(3); do j = 1_pInt, gridLocal(2); do i = 1_pInt,gridLocal(1)
   cell = cell + 1_pInt
   vectorField_realMPI(1:3,i,j,k) = math_mul33x3(damage_nonlocal_getDiffusion33(1,cell) - D_ref, &
                                                 vectorField_realMPI(1:3,i,j,k))
 enddo; enddo; enddo
 call utilities_FFTvectorForward()
 call utilities_fourierVectorDivergence() !< calculate damage divergence in fourier field
 call utilities_FFTscalarBackward()
 cell = 0_pInt
 do k = 1_pInt, gridLocal(3); do j = 1_pInt, gridLocal(2); do i = 1_pInt,gridLocal(1)
   cell = cell + 1_pInt
   call damage_nonlocal_getSourceAndItsTangent(phiDot, dPhiDot_dPhi, damage_current(i,j,k), 1, cell)
   mobility = damage_nonlocal_getMobility(1,cell)
   scalarField_realMPI(i,j,k) = params%timeinc*scalarField_realMPI(i,j,k) + &
                                params%timeinc*phiDot + &
                                mobility*damage_lastInc(i,j,k) - &
                                mobility*damage_current(i,j,k) + &
                                mobility_ref*damage_current(i,j,k)
 enddo; enddo; enddo

!--------------------------------------------------------------------------------------------------
! convolution of damage field with green operator
 call utilities_FFTscalarForward()
 call utilities_fourierGreenConvolution(D_ref, mobility_ref, params%timeinc)
 call utilities_FFTscalarBackward()
 where(scalarField_realMPI > 1.0_pReal) scalarField_realMPI = 1.0_pReal
 where(scalarField_realMPI < 0.0_pReal) scalarField_realMPI = 0.0_pReal

!--------------------------------------------------------------------------------------------------
! constructing residual
 f_scal = scalarField_realMPI(1:gridLocal(1),1:gridLocal(2),1:gridLocal(3)) - &
          damage_current

end subroutine spectral_damage_formResidual

!--------------------------------------------------------------------------------------------------
!> @brief spectral damage forwarding routine
!--------------------------------------------------------------------------------------------------
subroutine spectral_damage_forward(guess,timeinc,timeinc_old,loadCaseTime)
 use mesh, only: &
   gridLocal
 use DAMASK_spectral_Utilities, only: &
   cutBack, &
   wgt
 use damage_nonlocal, only: &
   damage_nonlocal_putNonLocalDamage, &
   damage_nonlocal_getDiffusion33, &
   damage_nonlocal_getMobility

 implicit none
 real(pReal), intent(in) :: &
   timeinc_old, &
   timeinc, &
   loadCaseTime !< remaining time of current load case
 logical, intent(in) :: guess
 PetscErrorCode :: ierr
 integer(pInt) :: i, j, k, cell
 DM :: dm_local
 PetscScalar, dimension(:,:,:), pointer :: x_scal

 if (cutBack) then
   damage_current = damage_lastInc
   damage_stagInc = damage_lastInc
!--------------------------------------------------------------------------------------------------
! reverting damage field state
   cell = 0_pInt
   call SNESGetDM(damage_snes,dm_local,ierr); CHKERRQ(ierr)
   call DMDAVecGetArrayF90(dm_local,solution,x_scal,ierr); CHKERRQ(ierr) !< get the data out of PETSc to work with
   x_scal(xstart:xend,ystart:yend,zstart:zend) = damage_current
   call DMDAVecRestoreArrayF90(dm_local,solution,x_scal,ierr); CHKERRQ(ierr)
   do k = 1_pInt, gridLocal(3); do j = 1_pInt, gridLocal(2); do i = 1_pInt,gridLocal(1)
     cell = cell + 1_pInt
     call damage_nonlocal_putNonLocalDamage(damage_current(i,j,k),1,cell)
   enddo; enddo; enddo
 else
!--------------------------------------------------------------------------------------------------
! update rate and forward last inc
   damage_lastInc = damage_current
   cell = 0_pInt
   D_ref = 0.0_pReal
   mobility_ref = 0.0_pReal
   do k = 1_pInt, gridLocal(3); do j = 1_pInt, gridLocal(2); do i = 1_pInt,gridLocal(1)
     cell = cell + 1_pInt
     D_ref = D_ref + damage_nonlocal_getDiffusion33(1,cell)
     mobility_ref = mobility_ref + damage_nonlocal_getMobility(1,cell)
   enddo; enddo; enddo
   D_ref = D_ref*wgt
   call MPI_Allreduce(MPI_IN_PLACE,D_ref,9,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
   mobility_ref = mobility_ref*wgt
   call MPI_Allreduce(MPI_IN_PLACE,mobility_ref,1,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
 endif

end subroutine spectral_damage_forward

!--------------------------------------------------------------------------------------------------
!> @brief destroy routine
!--------------------------------------------------------------------------------------------------
subroutine spectral_damage_destroy()

 implicit none
 PetscErrorCode :: ierr

 call VecDestroy(solution,ierr); CHKERRQ(ierr)
 call SNESDestroy(damage_snes,ierr); CHKERRQ(ierr)

end subroutine spectral_damage_destroy

end module spectral_damage
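! --- editorial sketch, not part of the commit ---------------------------------------------------
! How a load-case driver is expected to exercise the public interface above (hypothetical
! wiring; the actual calls live in DAMASK_spectral_Driver, and stagItMax comes from numerics):
!   call spectral_damage_init()
!   do while (stagIter < stagItMax .and. .not. damageSolution%stagConverged)
!     damageSolution = spectral_damage_solution(guess,timeinc,timeinc_old,loadCaseTime)
!   enddo
!   call spectral_damage_forward(guess,timeinc,timeinc_old,loadCaseTime)
!   call spectral_damage_destroy()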
@ -0,0 +1,403 @@
|
||||||
|
!--------------------------------------------------------------------------------------------------
|
||||||
|
! $Id: spectral_thermal.f90 4082 2015-04-11 20:28:07Z MPIE\m.diehl $
|
||||||
|
!--------------------------------------------------------------------------------------------------
|
||||||
|
!> @author Pratheek Shanthraj, Max-Planck-Institut für Eisenforschung GmbH
|
||||||
|
!> @author Shaokang Zhang, Max-Planck-Institut für Eisenforschung GmbH
|
||||||
|
!> @brief Spectral solver for thermal conduction
|
||||||
|
!--------------------------------------------------------------------------------------------------
|
||||||
|
module spectral_thermal
|
||||||
|
use prec, only: &
|
||||||
|
pInt, &
|
||||||
|
pReal
|
||||||
|
use math, only: &
|
||||||
|
math_I3
|
||||||
|
use DAMASK_spectral_Utilities, only: &
|
||||||
|
tSolutionState, &
|
||||||
|
tSolutionParams
|
||||||
|
use numerics, only: &
|
||||||
|
worldrank, &
|
||||||
|
worldsize
|
||||||
|
|
||||||
|
implicit none
|
||||||
|
private
|
||||||
|
#include <petsc-finclude/petsc.h90>
|
||||||
|
|
||||||
|
character (len=*), parameter, public :: &
|
||||||
|
spectral_thermal_label = 'spectralthermal'
|
||||||
|
|
||||||
|
!--------------------------------------------------------------------------------------------------
|
||||||
|
! derived types
|
||||||
|
type(tSolutionParams), private :: params
|
||||||
|
|
||||||
|
!--------------------------------------------------------------------------------------------------
|
||||||
|
! PETSc data
|
||||||
|
SNES, private :: thermal_snes
|
||||||
|
Vec, private :: solution
|
||||||
|
PetscInt, private :: xstart, xend, ystart, yend, zstart, zend
|
||||||
|
real(pReal), private, dimension(:,:,:), allocatable :: &
|
||||||
|
temperature_current, & !< field of current temperature
|
||||||
|
temperature_lastInc, & !< field of previous temperature
|
||||||
|
temperature_stagInc !< field of staggered temperature
|
||||||
|
|
||||||
|
!--------------------------------------------------------------------------------------------------
|
||||||
|
! reference diffusion tensor, mobility etc.
|
||||||
|
integer(pInt), private :: totalIter = 0_pInt !< total iteration in current increment
|
||||||
|
real(pReal), dimension(3,3), private :: D_ref
|
||||||
|
real(pReal), private :: mobility_ref
|
||||||
|
character(len=1024), private :: incInfo
|
||||||
|
|
||||||
|
public :: &
|
||||||
|
spectral_thermal_init, &
|
||||||
|
spectral_thermal_solution, &
|
||||||
|
spectral_thermal_forward, &
|
||||||
|
spectral_thermal_destroy
|
||||||
|
external :: &
|
||||||
|
VecDestroy, &
|
||||||
|
DMDestroy, &
|
||||||
|
DMDACreate3D, &
|
||||||
|
DMCreateGlobalVector, &
|
||||||
|
DMDASNESSetFunctionLocal, &
|
||||||
|
PETScFinalize, &
|
||||||
|
SNESDestroy, &
|
||||||
|
SNESGetNumberFunctionEvals, &
|
||||||
|
SNESGetIterationNumber, &
|
||||||
|
SNESSolve, &
|
||||||
|
SNESSetDM, &
|
||||||
|
SNESGetConvergedReason, &
|
||||||
|
SNESSetConvergenceTest, &
|
||||||
|
SNESSetFromOptions, &
|
||||||
|
SNESCreate, &
|
||||||
|
MPI_Abort, &
|
||||||
|
MPI_Bcast, &
|
||||||
|
MPI_Allreduce
|
||||||
|
|
||||||
|
contains
|
||||||
|
|
||||||
|
!--------------------------------------------------------------------------------------------------
|
||||||
|
!> @brief allocates all neccessary fields and fills them with data, potentially from restart info
|
||||||
|
!--------------------------------------------------------------------------------------------------
|
||||||
|
subroutine spectral_thermal_init(temperature0)
|
||||||
|
use, intrinsic :: iso_fortran_env ! to get compiler_version and compiler_options (at least for gfortran >4.6 at the moment)
|
||||||
|
use IO, only: &
|
||||||
|
IO_intOut, &
|
||||||
|
IO_read_realFile, &
|
||||||
|
IO_timeStamp
|
||||||
|
use DAMASK_spectral_Utilities, only: &
|
||||||
|
wgt
|
||||||
|
use mesh, only: &
|
||||||
|
gridLocal, &
|
||||||
|
gridGlobal
|
||||||
|
use thermal_conduction, only: &
|
||||||
|
thermal_conduction_getConductivity33, &
|
||||||
|
thermal_conduction_getMassDensity, &
|
||||||
|
thermal_conduction_getSpecificHeat
|
||||||
|
|
||||||
|
implicit none
|
||||||
|
real(pReal), intent(inOut) :: temperature0
|
||||||
|
integer(pInt), dimension(:), allocatable :: localK
|
||||||
|
integer(pInt) :: proc
|
||||||
|
integer(pInt) :: i, j, k, cell
|
||||||
|
DM :: thermal_grid
|
||||||
|
PetscErrorCode :: ierr
|
||||||
|
PetscObject :: dummy
|
||||||
|
|
||||||
|
mainProcess: if (worldrank == 0_pInt) then
|
||||||
|
write(6,'(/,a)') ' <<<+- spectral_thermal init -+>>>'
|
||||||
|
write(6,'(a)') ' $Id: spectral_thermal.f90 4082 2015-04-11 20:28:07Z MPIE\m.diehl $'
|
||||||
|
write(6,'(a15,a)') ' Current time: ',IO_timeStamp()
|
||||||
|
#include "compilation_info.f90"
|
||||||
|
endif mainProcess
|
||||||
|
|
||||||
|
!--------------------------------------------------------------------------------------------------
|
||||||
|
! initialize solver specific parts of PETSc
|
||||||
|
call SNESCreate(PETSC_COMM_WORLD,thermal_snes,ierr); CHKERRQ(ierr)
|
||||||
|
call SNESSetOptionsPrefix(thermal_snes,'thermal_',ierr);CHKERRQ(ierr)
|
||||||
|
allocate(localK(worldsize), source = 0); localK(worldrank+1) = gridLocal(3)
|
||||||
|
do proc = 1, worldsize
|
||||||
|
call MPI_Bcast(localK(proc),1,MPI_INTEGER,proc-1,PETSC_COMM_WORLD,ierr)
|
||||||
|
enddo
|
||||||
|
call DMDACreate3d(PETSC_COMM_WORLD, &
|
||||||
|
DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, & ! cut off stencil at boundary
|
||||||
|
DMDA_STENCIL_BOX, & ! Moore (26) neighborhood around central point
|
||||||
|
gridGlobal(1),gridGlobal(2),gridGlobal(3), & ! global grid
|
||||||
|
1, 1, worldsize, &
|
||||||
|
1, 0, & ! #dof (temperature field), ghost boundary width (domain overlap)
|
||||||
|
gridLocal (1),gridLocal (2),localK, & ! local grid
|
||||||
|
thermal_grid,ierr) ! handle, error
|
||||||
|
CHKERRQ(ierr)
|
||||||
|
call SNESSetDM(thermal_snes,thermal_grid,ierr); CHKERRQ(ierr) ! connect snes to da
|
||||||
|
call DMCreateGlobalVector(thermal_grid,solution ,ierr); CHKERRQ(ierr) ! global solution vector (grid x 1, i.e. every def grad tensor)
|
||||||
|
call DMDASNESSetFunctionLocal(thermal_grid,INSERT_VALUES,spectral_thermal_formResidual,dummy,ierr) ! residual vector of same shape as solution vector
|
||||||
|
CHKERRQ(ierr)
|
||||||
|
call SNESSetFromOptions(thermal_snes,ierr); CHKERRQ(ierr) ! pull it all together with additional cli arguments
|
||||||
|
|
||||||
|
!--------------------------------------------------------------------------------------------------
|
||||||
|
! init fields
|
||||||
|
call DMDAGetCorners(thermal_grid,xstart,ystart,zstart,xend,yend,zend,ierr)
|
||||||
|
CHKERRQ(ierr)
|
||||||
|
xend = xstart + xend - 1
|
||||||
|
yend = ystart + yend - 1
|
||||||
|
zend = zstart + zend - 1
|
||||||
|
call VecSet(solution,temperature0,ierr); CHKERRQ(ierr)
|
||||||
|
allocate(temperature_current(gridLocal(1),gridLocal(2),gridLocal(3)), source=temperature0)
|
||||||
|
allocate(temperature_lastInc(gridLocal(1),gridLocal(2),gridLocal(3)), source=temperature0)
|
||||||
|
allocate(temperature_stagInc(gridLocal(1),gridLocal(2),gridLocal(3)), source=temperature0)
|
||||||
|
|
||||||
|
cell = 0_pInt
|
||||||
|
D_ref = 0.0_pReal
|
||||||
|
mobility_ref = 0.0_pReal
|
||||||
|
do k = 1_pInt, gridLocal(3); do j = 1_pInt, gridLocal(2); do i = 1_pInt,gridLocal(1)
|
||||||
|
cell = cell + 1_pInt
|
||||||
|
D_ref = D_ref + thermal_conduction_getConductivity33(1,cell)
|
||||||
|
mobility_ref = mobility_ref + thermal_conduction_getMassDensity(1,cell)* &
|
||||||
|
thermal_conduction_getSpecificHeat(1,cell)
|
||||||
|
enddo; enddo; enddo
|
||||||
|
D_ref = D_ref*wgt
|
||||||
|
call MPI_Allreduce(MPI_IN_PLACE,D_ref,9,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
|
||||||
|
mobility_ref = mobility_ref*wgt
|
||||||
|
call MPI_Allreduce(MPI_IN_PLACE,mobility_ref,1,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
|
||||||
|
|
||||||
|
end subroutine spectral_thermal_init
|
||||||
|
|
||||||
|
!--------------------------------------------------------------------------------------------------
|
||||||
|
!> @brief solution for the Basic PETSC scheme with internal iterations
|
||||||
|
!--------------------------------------------------------------------------------------------------
|
||||||
|
type(tSolutionState) function spectral_thermal_solution(guess,timeinc,timeinc_old,loadCaseTime)
|
||||||
|
use numerics, only: &
|
||||||
|
itmax, &
|
||||||
|
err_thermal_tolAbs, &
|
||||||
|
err_thermal_tolRel
|
||||||
|
use DAMASK_spectral_Utilities, only: &
|
||||||
|
tBoundaryCondition, &
|
||||||
|
Utilities_maskedCompliance, &
|
||||||
|
Utilities_updateGamma
|
||||||
|
use mesh, only: &
|
||||||
|
gridLocal
|
||||||
|
use thermal_conduction, only: &
|
||||||
|
thermal_conduction_putTemperatureAndItsRate
|
||||||
|
|
||||||
|
implicit none
|
||||||
|
|
||||||
|
!--------------------------------------------------------------------------------------------------
|
||||||
|
! input data for solution
|
||||||
|
real(pReal), intent(in) :: &
|
||||||
|
timeinc, & !< increment in time for current solution
|
||||||
|
timeinc_old, & !< increment in time of last increment
|
||||||
|
loadCaseTime !< remaining time of current load case
|
||||||
|
logical, intent(in) :: guess
|
||||||
|
integer(pInt) :: i, j, k, cell
|
||||||
|
PetscInt :: position
|
||||||
|
PetscReal :: minTemperature, maxTemperature, stagNorm, solnNorm
|
||||||
|
|
||||||
|
!--------------------------------------------------------------------------------------------------
|
||||||
|
! PETSc Data
|
||||||
|
PetscErrorCode :: ierr
|
||||||
|
SNESConvergedReason :: reason
|
||||||
|
|
||||||
|
spectral_thermal_solution%converged =.false.
|
||||||
|
|
||||||
|
!--------------------------------------------------------------------------------------------------
|
||||||
|
! set module wide availabe data
|
||||||
|
params%timeinc = timeinc
|
||||||
|
params%timeincOld = timeinc_old
|
||||||
|
|
||||||
|
call SNESSolve(thermal_snes,PETSC_NULL_OBJECT,solution,ierr); CHKERRQ(ierr)
|
||||||
|
call SNESGetConvergedReason(thermal_snes,reason,ierr); CHKERRQ(ierr)
|
||||||
|
|
||||||
|
if (reason < 1) then
|
||||||
|
spectral_thermal_solution%converged = .false.
|
||||||
|
spectral_thermal_solution%iterationsNeeded = itmax
|
||||||
|
else
|
||||||
|
spectral_thermal_solution%converged = .true.
|
||||||
|
spectral_thermal_solution%iterationsNeeded = totalIter
|
||||||
|
endif
|
||||||
|
stagNorm = maxval(abs(temperature_current - temperature_stagInc))
|
||||||
|
solnNorm = maxval(abs(temperature_current))
|
||||||
|
call MPI_Allreduce(MPI_IN_PLACE,stagNorm,1,MPI_DOUBLE,MPI_MAX,PETSC_COMM_WORLD,ierr)
|
||||||
|
call MPI_Allreduce(MPI_IN_PLACE,solnNorm,1,MPI_DOUBLE,MPI_MAX,PETSC_COMM_WORLD,ierr)
|
||||||
|
temperature_stagInc = temperature_current
|
||||||
|
spectral_thermal_solution%stagConverged = stagNorm < err_thermal_tolAbs &
|
||||||
|
.or. stagNorm < err_thermal_tolRel*solnNorm
|
||||||
|
|
||||||
|
!--------------------------------------------------------------------------------------------------
|
||||||
|
! updating thermal state
|
||||||
|
cell = 0_pInt !< material point = 0
|
||||||
|
do k = 1_pInt, gridLocal(3); do j = 1_pInt, gridLocal(2); do i = 1_pInt,gridLocal(1)
|
||||||
|
cell = cell + 1_pInt !< material point increase
|
||||||
|
call thermal_conduction_putTemperatureAndItsRate(temperature_current(i,j,k), &
|
||||||
|
(temperature_current(i,j,k)-temperature_lastInc(i,j,k))/params%timeinc, &
|
||||||
|
1,cell)
|
||||||
|
enddo; enddo; enddo
|
||||||
|
|
||||||
|
call VecMin(solution,position,minTemperature,ierr); CHKERRQ(ierr)
|
||||||
|
call VecMax(solution,position,maxTemperature,ierr); CHKERRQ(ierr)
|
||||||
|
if (worldrank == 0) then
|
||||||
|
if (spectral_thermal_solution%converged) &
|
||||||
|
write(6,'(/,a)') ' ... thermal conduction converged ..................................'
|
||||||
|
write(6,'(/,a,f8.4,2x,f8.4,2x,f8.4,/)',advance='no') ' Minimum|Maximum|Delta Temperature = ',&
|
||||||
|
minTemperature, maxTemperature, stagNorm
|
||||||
|
write(6,'(/,a)') ' ==========================================================================='
|
||||||
|
flush(6)
|
||||||
|
endif
|
||||||
|
|
||||||
|
end function spectral_thermal_solution
|
||||||
|
|
||||||
|
|
||||||
|
!--------------------------------------------------------------------------------------------------
|
||||||
|
!> @brief forms the spectral thermal residual vector
|
||||||
|
!--------------------------------------------------------------------------------------------------
|
||||||
|
subroutine spectral_thermal_formResidual(in,x_scal,f_scal,dummy,ierr)
|
||||||
|
use mesh, only: &
|
||||||
|
gridLocal
|
||||||
|
use math, only: &
|
||||||
|
math_mul33x3
|
||||||
|
use DAMASK_spectral_Utilities, only: &
|
||||||
|
scalarField_realMPI, &
|
||||||
|
vectorField_realMPI, &
|
||||||
|
utilities_FFTvectorForward, &
|
||||||
|
utilities_FFTvectorBackward, &
|
||||||
|
utilities_FFTscalarForward, &
|
||||||
|
utilities_FFTscalarBackward, &
|
||||||
|
utilities_fourierGreenConvolution, &
|
||||||
|
utilities_fourierScalarGradient, &
|
||||||
|
utilities_fourierVectorDivergence
|
||||||
|
use thermal_conduction, only: &
|
||||||
|
thermal_conduction_getSourceAndItsTangent, &
|
||||||
|
thermal_conduction_getConductivity33, &
|
||||||
|
thermal_conduction_getMassDensity, &
|
||||||
|
thermal_conduction_getSpecificHeat
|
||||||
|
|
||||||
|
implicit none
|
||||||
|
DMDALocalInfo, dimension(DMDA_LOCAL_INFO_SIZE) :: &
|
||||||
|
in
|
||||||
|
PetscScalar, dimension( &
|
||||||
|
XG_RANGE,YG_RANGE,ZG_RANGE) :: &
|
||||||
|
x_scal
|
||||||
|
PetscScalar, dimension( &
|
||||||
|
X_RANGE,Y_RANGE,Z_RANGE) :: &
|
||||||
|
f_scal
|
||||||
|
PetscObject :: dummy
|
||||||
|
PetscErrorCode :: ierr
|
||||||
|
integer(pInt) :: i, j, k, cell
|
||||||
|
real(pReal) :: Tdot, dTdot_dT
|
||||||
|
|
||||||
|
temperature_current = x_scal
|
||||||
|
!--------------------------------------------------------------------------------------------------
|
||||||
|
! evaluate polarization field
|
||||||
|
scalarField_realMPI = 0.0_pReal
|
||||||
|
scalarField_realMPI(1:gridLocal(1),1:gridLocal(2),1:gridLocal(3)) = temperature_current
|
||||||
|
call utilities_FFTscalarForward()
|
||||||
|
call utilities_fourierScalarGradient() !< calculate gradient of damage field
|
||||||
|
call utilities_FFTvectorBackward()
|
||||||
|
cell = 0_pInt
|
||||||
|
do k = 1_pInt, gridLocal(3); do j = 1_pInt, gridLocal(2); do i = 1_pInt,gridLocal(1)
|
||||||
|
cell = cell + 1_pInt
|
||||||
|
vectorField_realMPI(1:3,i,j,k) = math_mul33x3(thermal_conduction_getConductivity33(1,cell) - D_ref, &
|
||||||
|
vectorField_realMPI(1:3,i,j,k))
|
||||||
|
enddo; enddo; enddo
|
||||||
|
call utilities_FFTvectorForward()
|
||||||
|
call utilities_fourierVectorDivergence() !< calculate damage divergence in fourier field
|
||||||
|
call utilities_FFTscalarBackward()
|
||||||
|
 cell = 0_pInt
 do k = 1_pInt, gridLocal(3); do j = 1_pInt, gridLocal(2); do i = 1_pInt,gridLocal(1)
   cell = cell + 1_pInt
   call thermal_conduction_getSourceAndItsTangent(Tdot, dTdot_dT, temperature_current(i,j,k), 1, cell)
   scalarField_realMPI(i,j,k) = params%timeinc*scalarField_realMPI(i,j,k) + &
                                params%timeinc*Tdot + &
                                thermal_conduction_getMassDensity (1,cell)* &
                                thermal_conduction_getSpecificHeat(1,cell)*(temperature_lastInc(i,j,k) - &
                                                                            temperature_current(i,j,k)) + &
                                mobility_ref*temperature_current(i,j,k)
 enddo; enddo; enddo

!--------------------------------------------------------------------------------------------------
! convolution of temperature field with Green operator of the homogeneous reference problem
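! (effectively the inverse of the reference operator mobility_ref*T - timeinc*div(D_ref*grad(T)),
!  applied in Fourier space)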
 call utilities_FFTscalarForward()
 call utilities_fourierGreenConvolution(D_ref, mobility_ref, params%timeinc)
 call utilities_FFTscalarBackward()

!--------------------------------------------------------------------------------------------------
! constructing residual
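! (the residual vanishes when the temperature field equals the Green-preconditioned
!  backward-Euler update assembled above)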
 f_scal = temperature_current - scalarField_realMPI(1:gridLocal(1),1:gridLocal(2),1:gridLocal(3))

end subroutine spectral_thermal_formResidual

!--------------------------------------------------------------------------------------------------
!> @brief forwards the thermal field state and updates the reference material properties
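!> @details On a cut back, the temperature field and the PETSc solution vector are reverted to the
!> last converged increment; otherwise the converged field becomes the new last increment and the
!> reference conductivity (D_ref) and volumetric heat capacity (mobility_ref) are recomputed as
!> volume averages over all material points, followed by an MPI reduction across ranks.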
!--------------------------------------------------------------------------------------------------
subroutine spectral_thermal_forward(guess,timeinc,timeinc_old,loadCaseTime)
 use mesh, only: &
   gridLocal
 use DAMASK_spectral_Utilities, only: &
   cutBack, &
   wgt
 use thermal_conduction, only: &
   thermal_conduction_putTemperatureAndItsRate, &
   thermal_conduction_getConductivity33, &
   thermal_conduction_getMassDensity, &
   thermal_conduction_getSpecificHeat

 implicit none
 real(pReal), intent(in) :: &
   timeinc_old, &
   timeinc, &
   loadCaseTime                                                                                     !< remaining time of current load case
 logical, intent(in) :: guess
 integer(pInt) :: i, j, k, cell
 DM :: dm_local
 PetscScalar, pointer :: x_scal(:,:,:)
 PetscErrorCode :: ierr

 if (cutBack) then
   temperature_current = temperature_lastInc
   temperature_stagInc = temperature_lastInc

!--------------------------------------------------------------------------------------------------
! reverting thermal field state
   cell = 0_pInt                                                                                    !< reset material point counter
   call SNESGetDM(thermal_snes,dm_local,ierr); CHKERRQ(ierr)
   call DMDAVecGetArrayF90(dm_local,solution,x_scal,ierr); CHKERRQ(ierr)                            !< get the data out of PETSc to work with
   x_scal(xstart:xend,ystart:yend,zstart:zend) = temperature_current
   call DMDAVecRestoreArrayF90(dm_local,solution,x_scal,ierr); CHKERRQ(ierr)
   do k = 1_pInt, gridLocal(3); do j = 1_pInt, gridLocal(2); do i = 1_pInt,gridLocal(1)
     cell = cell + 1_pInt                                                                           !< advance material point counter
     call thermal_conduction_putTemperatureAndItsRate(temperature_current(i,j,k), &
                                                      (temperature_current(i,j,k) - &
                                                       temperature_lastInc(i,j,k))/params%timeinc, &
                                                      1,cell)
   enddo; enddo; enddo
 else
!--------------------------------------------------------------------------------------------------
! forward last inc and update reference material properties
   temperature_lastInc = temperature_current
   cell = 0_pInt
   D_ref = 0.0_pReal
   mobility_ref = 0.0_pReal
   do k = 1_pInt, gridLocal(3); do j = 1_pInt, gridLocal(2); do i = 1_pInt,gridLocal(1)
     cell = cell + 1_pInt
     D_ref = D_ref + thermal_conduction_getConductivity33(1,cell)
     mobility_ref = mobility_ref + thermal_conduction_getMassDensity(1,cell)* &
                                   thermal_conduction_getSpecificHeat(1,cell)
   enddo; enddo; enddo
   D_ref = D_ref*wgt
   call MPI_Allreduce(MPI_IN_PLACE,D_ref,9,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
   mobility_ref = mobility_ref*wgt
   call MPI_Allreduce(MPI_IN_PLACE,mobility_ref,1,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
 endif

end subroutine spectral_thermal_forward

!--------------------------------------------------------------------------------------------------
!> @brief destroys the PETSc data structures of the spectral thermal solver
!--------------------------------------------------------------------------------------------------
subroutine spectral_thermal_destroy()

 implicit none
 PetscErrorCode :: ierr

 call VecDestroy(solution,ierr); CHKERRQ(ierr)
 call SNESDestroy(thermal_snes,ierr); CHKERRQ(ierr)

end subroutine spectral_thermal_destroy
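
!--------------------------------------------------------------------------------------------------
! Illustrative call sequence from a spectral driver (a sketch only; argument lists other than
! spectral_thermal_forward are abbreviated, and the exact ordering is handled by the driver):
!
!   do (for each increment / staggered iteration)
!     call spectral_thermal_forward(guess,timeinc,timeinc_old,loadCaseTime)
!     solres = spectral_thermal_solution(...)                                                       ! check solres%converged
!   enddo
!   call spectral_thermal_destroy()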

end module spectral_thermal