!--------------------------------------------------------------------------------------------------
! $Id$
!--------------------------------------------------------------------------------------------------
!> @author Pratheek Shanthraj, Max-Planck-Institut für Eisenforschung GmbH
!> @author Martin Diehl, Max-Planck-Institut für Eisenforschung GmbH
!> @author Philip Eisenlohr, Max-Planck-Institut für Eisenforschung GmbH
!> @brief Utilities used by the different spectral solver variants
!--------------------------------------------------------------------------------------------------
module DAMASK_spectral_utilities
 use, intrinsic :: iso_c_binding
 use prec, only: &
   pReal, &
   pInt

 implicit none
 private
#ifdef PETSc
#include <petsc-finclude/petscsys.h>
#endif
 include 'fftw3-mpi.f03'

 logical,       public            :: cutBack = .false.                                              !< cut back of BVP solver in case convergence is not achieved or a material point is terminally ill
 integer(pInt), public, parameter :: maxPhaseFields = 2_pInt
!--------------------------------------------------------------------------------------------------
! grid related information
 real(pReal),   public            :: wgt                                                            !< weighting factor 1/Nelems
!--------------------------------------------------------------------------------------------------
! variables storing information for spectral method and FFTW
 integer(pInt), public :: grid1Red                                                                  !< grid(1)/2+1
 real(C_DOUBLE),            public,  dimension(:,:,:,:,:),     pointer     :: field_realMPI         !< real representation (some stress or deformation) of field_fourier
 complex(C_DOUBLE_COMPLEX), public,  dimension(:,:,:,:,:),     pointer     :: field_fourierMPI      !< field on which the Fourier transform operates
 real(pReal),               private, dimension(:,:,:,:,:,:,:), allocatable :: gamma_hat             !< gamma operator (field) for spectral method
 real(pReal),               private, dimension(:,:,:,:),       allocatable :: xi                    !< wave vector field for divergence and for gamma operator
 real(pReal),               private, dimension(3,3,3,3)                    :: C_ref                 !< reference stiffness
 real(pReal),               private, dimension(3)                          :: scaledGeomSize        !< scaled geometry size for calculation of divergence (Basic, Basic PETSc)
!--------------------------------------------------------------------------------------------------
! debug fftw
 real(C_DOUBLE),            private, dimension(:,:,:), pointer :: scalarField_realMPI               !< scalar field real representation for debugging fftw
 complex(C_DOUBLE_COMPLEX), private, dimension(:,:,:), pointer :: scalarField_fourierMPI            !< scalar field fourier representation for debugging fftw

!--------------------------------------------------------------------------------------------------
! geometry reconstruction
 real(C_DOUBLE),            private, dimension(:,:,:,:), pointer :: coords_realMPI                  !< vector field real representation for geometry reconstruction
 complex(C_DOUBLE_COMPLEX), private, dimension(:,:,:,:), pointer :: coords_fourierMPI               !< vector field fourier representation for geometry reconstruction
!--------------------------------------------------------------------------------------------------
! debug divergence
 real(C_DOUBLE),            private, dimension(:,:,:,:), pointer :: divRealMPI                      !< vector field real representation for debugging divergence calculation
 complex(C_DOUBLE_COMPLEX), private, dimension(:,:,:,:), pointer :: divFourierMPI                   !< vector field fourier representation for debugging divergence calculation

!--------------------------------------------------------------------------------------------------
! plans for FFTW
 type(C_PTR), private :: &
   planForthMPI, &                                                                                  !< FFTW MPI plan P(x) to P(k)
   planBackMPI, &                                                                                   !< FFTW MPI plan F(k) to F(x)
   planDebugForthMPI, &                                                                             !< FFTW MPI plan for scalar field
   planDebugBackMPI, &                                                                              !< FFTW MPI plan for scalar field inverse
   planDivMPI, &                                                                                    !< FFTW MPI plan for FFTW in case of debugging divergence calculation
   planCoordsMPI                                                                                    !< FFTW MPI plan for geometry reconstruction
!--------------------------------------------------------------------------------------------------
! variables controlling debugging
 logical, private :: &
   debugGeneral, &                                                                                  !< general debugging of spectral solver
   debugDivergence, &                                                                               !< debugging of divergence calculation (comparison to function used for post processing)
   debugFFTW, &                                                                                     !< doing additional FFT on scalar field and compare to results of strided 3D FFT
   debugRotation, &                                                                                 !< also printing out results in lab frame
   debugPETSc                                                                                       !< use some in debug defined options for more verbose PETSc solution
!--------------------------------------------------------------------------------------------------
! derived types
 type, public :: tSolutionState                                                                     !< return type of solution from spectral solver variants
   logical       :: converged        = .true.
   logical       :: regrid           = .false.
   logical       :: termIll          = .false.
   integer(pInt) :: iterationsNeeded = 0_pInt
 end type tSolutionState

 type, public :: tBoundaryCondition                                                                 !< set of parameters defining a boundary condition
   real(pReal), dimension(3,3) :: values      = 0.0_pReal
   real(pReal), dimension(3,3) :: maskFloat   = 0.0_pReal
   logical,     dimension(3,3) :: maskLogical = .false.
   character(len=64)           :: myType      = 'None'
 end type tBoundaryCondition

 type, public :: phaseFieldDataBin                                                                  !< set of parameters defining a phase field
   real(pReal) :: diffusion   = 0.0_pReal, &                                                        !< thermal conductivity
                  mobility    = 0.0_pReal, &                                                        !< thermal mobility
                  phaseField0 = 0.0_pReal                                                           !< homogeneous damage field starting condition
   logical           :: active = .false.
   character(len=64) :: label  = ''
 end type phaseFieldDataBin
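
! A minimal usage sketch of the derived types above (illustrative only; the variable names are
! hypothetical and not part of this module): a spectral solver variant typically fills a
! tBoundaryCondition from the load case and inspects the tSolutionState its solution routine
! returns, e.g.
!
!   type(tBoundaryCondition) :: P_BC
!   type(tSolutionState)     :: solution
!   P_BC%myType      = 'p'
!   P_BC%maskLogical = mask                                  ! .true. where stress is prescribed
!   P_BC%maskFloat   = merge(1.0_pReal, 0.0_pReal, mask)
!   ...
!   if (.not. solution%converged .or. solution%termIll) cutBack = .true.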
 public :: &
   utilities_init, &
   utilities_updateGamma, &
   utilities_FFTforward, &
   utilities_FFTbackward, &
   utilities_fourierConvolution, &
   utilities_inverseLaplace, &
   utilities_divergenceRMS, &
   utilities_curlRMS, &
   utilities_maskedCompliance, &
   utilities_constitutiveResponse, &
   utilities_calculateRate, &
   utilities_forwardField, &
   utilities_destroy, &
   utilities_updateIPcoords
 private :: &
   utilities_getFilter

contains

!--------------------------------------------------------------------------------------------------
!> @brief allocates all necessary fields, sets debug flags, creates plans for FFTW
!> @details Sets the debug levels for general, divergence, restart, and FFTW from the bitwise
!> coding provided by the debug module to logicals.
!> Allocates all fields used by FFTW and creates the corresponding plans depending on the debug
!> level chosen.
!> Initializes FFTW.
!--------------------------------------------------------------------------------------------------
subroutine utilities_init()
 use, intrinsic :: iso_fortran_env                                                                  ! to get compiler_version and compiler_options (at least for gfortran >4.6 at the moment)
 use DAMASK_interface, only: &
   geometryFile
 use IO, only: &
   IO_error, &
   IO_warning, &
   IO_timeStamp, &
   IO_open_file
 use numerics, only: &
   DAMASK_NumThreadsInt, &
   fftw_planner_flag, &
   fftw_timelimit, &
   memory_efficient, &
   petsc_options, &
   divergence_correction, &
   worldrank
 use debug, only: &
   debug_level, &
   debug_SPECTRAL, &
   debug_LEVELBASIC, &
   debug_SPECTRALDIVERGENCE, &
   debug_SPECTRALFFTW, &
   debug_SPECTRALPETSC, &
   debug_SPECTRALROTATION
#ifdef PETSc
 use debug, only: &
   PETSCDEBUG
#endif
 use math
 use mesh, only: &
   gridGlobal, &
   gridLocal, &
   gridOffset, &
   geomSizeGlobal, &
   geomSizeLocal, &
   geomSizeOffset

 implicit none
#ifdef PETSc
 external :: &
   PETScOptionsClear, &
   PETScOptionsInsertString, &
   MPI_Abort
 PetscErrorCode :: ierr
#endif
 integer(pInt)               :: i, j, k
 integer(pInt), parameter    :: fileUnit = 228_pInt
 integer(pInt), dimension(3) :: k_s
 type(C_PTR) :: &
   tensorFieldMPI, &                                                                                !< field containing data for FFTW in real and fourier space (in place)
   scalarFieldMPI, &                                                                                !< field containing data for FFTW in real space when debugging FFTW (not in place)
   div, &                                                                                           !< field containing data for FFTW in real and fourier space when debugging divergence (in place)
   coordsMPI
 integer(C_INTPTR_T) :: gridFFTW(3), alloc_local, local_K, local_K_offset
 integer(C_INTPTR_T), parameter :: &
   scalarSize = 1_C_INTPTR_T, &
   vecSize    = 3_C_INTPTR_T, &
   tensorSize = 9_C_INTPTR_T

 mainProcess: if (worldrank == 0) then
   write(6,'(/,a)')   ' <<<+-  DAMASK_spectral_utilities init  -+>>>'
   write(6,'(a)')     ' $Id$'
   write(6,'(a15,a)') ' Current time: ', IO_timeStamp()
#include "compilation_info.f90"
 endif mainProcess

!--------------------------------------------------------------------------------------------------
! set debugging parameters
 debugGeneral    = iand(debug_level(debug_SPECTRAL), debug_LEVELBASIC)         /= 0
 debugDivergence = iand(debug_level(debug_SPECTRAL), debug_SPECTRALDIVERGENCE) /= 0
 debugFFTW       = iand(debug_level(debug_SPECTRAL), debug_SPECTRALFFTW)       /= 0
 debugRotation   = iand(debug_level(debug_SPECTRAL), debug_SPECTRALROTATION)   /= 0
 debugPETSc      = iand(debug_level(debug_SPECTRAL), debug_SPECTRALPETSC)      /= 0

 if (debugPETSc .and. worldrank == 0_pInt) write(6,'(3(/,a),/)') &
   ' Initializing PETSc with debug options: ', &
   trim(PETScDebug), &
   ' add more using the PETSc_Options keyword in numerics.config '
 flush(6)
 call PetscOptionsClear(ierr); CHKERRQ(ierr)
 if (debugPETSc) call PetscOptionsInsertString(trim(PETSCDEBUG), ierr); CHKERRQ(ierr)
 call PetscOptionsInsertString(trim(petsc_options), ierr); CHKERRQ(ierr)

 grid1Red = gridLocal(1)/2_pInt + 1_pInt
 wgt = 1.0/real(product(gridGlobal),pReal)

 if (worldrank == 0) then
   write(6,'(a,3(i12))')    ' grid     a b c: ', gridGlobal
   write(6,'(a,3(es12.5))') ' size     x y z: ', geomSizeGlobal
 endif

!--------------------------------------------------------------------------------------------------
! scale dimension to calculate either uncorrected, dimension-independent, or dimension- and
! resolution-independent divergence
 if (divergence_correction == 1_pInt) then
   do j = 1_pInt, 3_pInt
     if (j /= minloc(geomSizeGlobal,1) .and. j /= maxloc(geomSizeGlobal,1)) &
       scaledGeomSize = geomSizeGlobal/geomSizeGlobal(j)
   enddo
 elseif (divergence_correction == 2_pInt) then
   do j = 1_pInt, 3_pInt
     if (j /= minloc(geomSizeGlobal/gridGlobal,1) .and. j /= maxloc(geomSizeGlobal/gridGlobal,1)) &
       scaledGeomSize = geomSizeGlobal/geomSizeGlobal(j)*gridGlobal(j)
   enddo
 else
   scaledGeomSize = geomSizeGlobal
 endif
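
! Worked example (illustrative): with divergence_correction == 1 and geomSizeGlobal = [4.0, 2.0, 1.0],
! the intermediate dimension is j = 2, so scaledGeomSize = geomSizeGlobal/2.0 = [2.0, 1.0, 0.5];
! all frequencies below are then computed with respect to this normalized geometry.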

!--------------------------------------------------------------------------------------------------
! MPI allocation
 gridFFTW = int(gridGlobal,C_INTPTR_T)
 alloc_local = fftw_mpi_local_size_3d(gridFFTW(3), gridFFTW(2), gridFFTW(1)/2 + 1, &
                                      MPI_COMM_WORLD, local_K, local_K_offset)
 tensorFieldMPI = fftw_alloc_complex(9*alloc_local)
 call c_f_pointer(tensorFieldMPI, field_realMPI,    [3_C_INTPTR_T, 3_C_INTPTR_T, &
                  2_C_INTPTR_T*(gridFFTW(1)/2_C_INTPTR_T + 1_C_INTPTR_T), gridFFTW(2), local_K])
 call c_f_pointer(tensorFieldMPI, field_fourierMPI, [3_C_INTPTR_T, 3_C_INTPTR_T, &
                  gridFFTW(1)/2_C_INTPTR_T + 1_C_INTPTR_T, gridFFTW(2), local_K])
 allocate (xi(3, grid1Red, gridLocal(2), gridLocal(3)), source = 0.0_pReal)                         ! frequencies, only half the size for first dimension
 coordsMPI = fftw_alloc_complex(3*alloc_local)
 call c_f_pointer(coordsMPI, coords_realMPI,    [3_C_INTPTR_T, &
                  2_C_INTPTR_T*(gridFFTW(1)/2_C_INTPTR_T + 1_C_INTPTR_T), gridFFTW(2), local_K])    ! place a pointer for the real representation on coordsMPI
 call c_f_pointer(coordsMPI, coords_fourierMPI, [3_C_INTPTR_T, &
                  gridFFTW(1)/2_C_INTPTR_T + 1_C_INTPTR_T, gridFFTW(2), local_K])                   ! place a pointer for the fourier representation on coordsMPI

!--------------------------------------------------------------------------------------------------
! MPI fftw plans
 planForthMPI = fftw_mpi_plan_many_dft_r2c(3, [gridFFTW(3), gridFFTW(2), gridFFTW(1)], tensorSize, &! dimension, logical length in each dimension in reversed order, no. of transforms
                                           FFTW_MPI_DEFAULT_BLOCK, FFTW_MPI_DEFAULT_BLOCK, &        ! default iblock and oblock
                                           field_realMPI, field_fourierMPI, &                       ! input data, output data
                                           MPI_COMM_WORLD, fftw_planner_flag)                       ! use all processors, planner precision
 planBackMPI  = fftw_mpi_plan_many_dft_c2r(3, [gridFFTW(3), gridFFTW(2), gridFFTW(1)], tensorSize, &! dimension, logical length in each dimension in reversed order, no. of transforms
                                           FFTW_MPI_DEFAULT_BLOCK, FFTW_MPI_DEFAULT_BLOCK, &        ! default iblock and oblock
                                           field_fourierMPI, field_realMPI, &                       ! input data, output data
                                           MPI_COMM_WORLD, fftw_planner_flag)                       ! use all processors, planner precision

!--------------------------------------------------------------------------------------------------
! Coordinates MPI fftw plans
 planCoordsMPI = fftw_mpi_plan_many_dft_c2r(3, [gridFFTW(3), gridFFTW(2), gridFFTW(1)], vecSize, &  ! dimension, logical length in each dimension in reversed order, no. of transforms
                                           FFTW_MPI_DEFAULT_BLOCK, FFTW_MPI_DEFAULT_BLOCK, &        ! default iblock and oblock
                                           coords_fourierMPI, coords_realMPI, &                     ! input data, output data
                                           MPI_COMM_WORLD, fftw_planner_flag)                       ! use all processors, planner precision

!--------------------------------------------------------------------------------------------------
! depending on debug options, allocate more memory and create additional plans
 if (debugDivergence) then
   div = fftw_alloc_complex(3*alloc_local)
   call c_f_pointer(div, divRealMPI,    [3_C_INTPTR_T, &
                    2_C_INTPTR_T*(gridFFTW(1)/2_C_INTPTR_T + 1_C_INTPTR_T), gridFFTW(2), local_K])
   call c_f_pointer(div, divFourierMPI, [3_C_INTPTR_T, &
                    gridFFTW(1)/2_C_INTPTR_T + 1_C_INTPTR_T, gridFFTW(2), local_K])
   planDivMPI = fftw_mpi_plan_many_dft_c2r(3, [gridFFTW(3), gridFFTW(2), gridFFTW(1)], vecSize, &
                                           FFTW_MPI_DEFAULT_BLOCK, FFTW_MPI_DEFAULT_BLOCK, &
                                           divFourierMPI, divRealMPI, &
                                           MPI_COMM_WORLD, fftw_planner_flag)
 endif
 if (debugFFTW) then
   scalarFieldMPI = fftw_alloc_complex(alloc_local)                                                 ! allocate data for real representation (not an in-place transform)
   call c_f_pointer(scalarFieldMPI, scalarField_realMPI, &
                    [2*(gridFFTW(1)/2 + 1), gridFFTW(2), local_K])                                  ! place a pointer for the real representation
   call c_f_pointer(scalarFieldMPI, scalarField_fourierMPI, &
                    [gridFFTW(1)/2 + 1, gridFFTW(2), local_K])                                      ! place a pointer for the fourier representation
   planDebugForthMPI = fftw_mpi_plan_many_dft_r2c(3, [gridFFTW(3), gridFFTW(2), gridFFTW(1)], &
                                           scalarSize, FFTW_MPI_DEFAULT_BLOCK, FFTW_MPI_DEFAULT_BLOCK, &
                                           scalarField_realMPI, scalarField_fourierMPI, &
                                           MPI_COMM_WORLD, fftw_planner_flag)
   planDebugBackMPI  = fftw_mpi_plan_many_dft_c2r(3, [gridFFTW(3), gridFFTW(2), gridFFTW(1)], &
                                           scalarSize, FFTW_MPI_DEFAULT_BLOCK, FFTW_MPI_DEFAULT_BLOCK, &
                                           scalarField_fourierMPI, scalarField_realMPI, &
                                           MPI_COMM_WORLD, fftw_planner_flag)
 endif

!--------------------------------------------------------------------------------------------------
! general initialization of FFTW (see manual on fftw.org for more details)
 if (pReal /= C_DOUBLE .or. pInt /= C_INT) call IO_error(0_pInt, ext_msg='Fortran to C')            ! check for correct precision in C
 call fftw_set_timelimit(fftw_timelimit)                                                            ! set timelimit for plan creation

 if (debugGeneral .and. worldrank == 0_pInt) write(6,'(/,a)') ' FFTW initialized'
 flush(6)

!--------------------------------------------------------------------------------------------------
! calculation of discrete angular frequencies, ordered as in FFTW (wrap around)
 do k = gridOffset + 1_pInt, gridOffset + gridLocal(3)
   k_s(3) = k - 1_pInt
   if (k > gridGlobal(3)/2_pInt + 1_pInt) k_s(3) = k_s(3) - gridGlobal(3)                           ! running from 0,1,...,N/2,-N/2+1,...,-1
   do j = 1_pInt, gridLocal(2)
     k_s(2) = j - 1_pInt
     if (j > gridGlobal(2)/2_pInt + 1_pInt) k_s(2) = k_s(2) - gridGlobal(2)                         ! running from 0,1,...,N/2,-N/2+1,...,-1
     do i = 1_pInt, grid1Red
       k_s(1) = i - 1_pInt                                                                          ! symmetry, just running from 0,1,...,N/2
       xi(1:3,i,j,k-gridOffset) = real(k_s, pReal)/scaledGeomSize                                   ! if divergence_correction is set, frequencies are calculated on unit length
 enddo; enddo; enddo
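
! Example of the resulting ordering (illustrative, assuming gridGlobal(3) == 6): along the third
! dimension, k_s(3) follows the FFTW wrap-around sequence 0, 1, 2, 3, -2, -1.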

 if (memory_efficient) then                                                                         ! allocate just a single fourth order tensor
   allocate (gamma_hat(3,3,3,3,1,1,1), source = 0.0_pReal)
 else                                                                                               ! precalculation of gamma_hat field
   allocate (gamma_hat(3,3,3,3,grid1Red,gridLocal(2),gridLocal(3)), source = 0.0_pReal)
 endif

end subroutine utilities_init

!--------------------------------------------------------------------------------------------------
!> @brief updates reference stiffness and potentially precalculated gamma operator
!> @details Sets the current reference stiffness to the stiffness given as an argument.
!> If the gamma operator is precalculated, it is recalculated with this stiffness.
!> In case of an on-the-fly calculation, only the reference stiffness is updated.
!> The gamma operator is filtered depending on the filter selected in numerics.
!> Also writes out the current reference stiffness for restart.
!--------------------------------------------------------------------------------------------------
subroutine utilities_updateGamma(C, saveReference)
 use IO, only: &
   IO_write_jobRealFile
 use numerics, only: &
   memory_efficient, &
   worldrank
 use mesh, only: &
   gridOffset, &
   gridLocal
 use math, only: &
   math_inv33

 implicit none
 real(pReal), intent(in), dimension(3,3,3,3) :: C                                                   !< input stiffness to store as reference stiffness
 logical,     intent(in)                     :: saveReference                                       !< save reference stiffness to file for restart
 real(pReal), dimension(3,3) :: temp33_Real, xiDyad
 integer(pInt) :: &
   i, j, k, &
   l, m, n, o

 C_ref = C
 if (saveReference) then
   if (worldrank == 0_pInt) then
     write(6,'(/,a)') ' writing reference stiffness to file'
     flush(6)
     call IO_write_jobRealFile(777, 'C_ref', size(C_ref))
     write(777, rec=1) C_ref
     close(777)
   endif
 endif
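
! Sketch of the relation implemented below: with the acoustic-type tensor
!   A(l,m) = sum_{k,o} C_ref(l,k,m,o)*xi(k)*xi(o)
! the gamma operator at each nonzero frequency xi is assembled as
!   gamma_hat(l,m,n,o) = inv(A)(l,n)*xi(m)*xi(o)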

 if (.not. memory_efficient) then
   do k = gridOffset + 1_pInt, gridOffset + gridLocal(3); do j = 1_pInt, gridLocal(2); do i = 1_pInt, grid1Red
     if (any([i,j,k] /= 1_pInt)) then                                                               ! singular point at xi=(0.0,0.0,0.0) i.e. i=j=k=1
       forall (l = 1_pInt:3_pInt, m = 1_pInt:3_pInt) &
         xiDyad(l,m) = xi(l,i,j,k-gridOffset)*xi(m,i,j,k-gridOffset)
       forall (l = 1_pInt:3_pInt, m = 1_pInt:3_pInt) &
         temp33_Real(l,m) = sum(C_ref(l,1:3,m,1:3)*xiDyad)
       temp33_Real = math_inv33(temp33_Real)
       forall (l = 1_pInt:3_pInt, m = 1_pInt:3_pInt, n = 1_pInt:3_pInt, o = 1_pInt:3_pInt) &
         gamma_hat(l,m,n,o,i,j,k-gridOffset) = temp33_Real(l,n)*xiDyad(m,o)
     endif
   enddo; enddo; enddo
 endif

end subroutine utilities_updateGamma
2012-07-26 19:28:47 +05:30
2012-08-03 14:55:48 +05:30
!--------------------------------------------------------------------------------------------------
!> @brief forward FFT of data in field_real to field_fourier with highest freqs. removed
2013-06-29 00:29:21 +05:30
!> @details Does an unweighted FFT transform from real to complex.
2012-08-09 18:34:56 +05:30
!> In case of debugging the FFT, also one component of the tensor (specified by row and column)
!> is independetly transformed complex to complex and compared to the whole tensor transform
2012-08-03 14:55:48 +05:30
!--------------------------------------------------------------------------------------------------
2013-06-11 22:05:04 +05:30
subroutine utilities_FFTforward ( )
 use math
 use numerics, only: &
   worldrank
 use mesh, only: &
   gridLocal

 implicit none
 external :: &
   MPI_Bcast, &
   MPI_reduce
 integer(pInt) :: row, column                                                                       ! if debug FFTW, compare 3D array field of row and column
 real(pReal), dimension(2) :: myRand, maxScalarField                                                ! random numbers
 integer(pInt) :: i, j, k
 PetscErrorCode :: ierr

!--------------------------------------------------------------------------------------------------
! copy one component of the stress field to a single FT and check for mismatch
 if (debugFFTW) then
   if (worldrank == 0_pInt) then
     call random_number(myRand)                                                                     ! two numbers: 0 <= x < 1
     row    = nint(myRand(1)*2_pReal + 1_pReal, pInt)
     column = nint(myRand(2)*2_pReal + 1_pReal, pInt)
   endif
   call MPI_Bcast(row,    1, MPI_INTEGER, 0, PETSC_COMM_WORLD, ierr)
   call MPI_Bcast(column, 1, MPI_INTEGER, 0, PETSC_COMM_WORLD, ierr)
   scalarField_realMPI = 0.0_pReal
   scalarField_realMPI(1:gridLocal(1),1:gridLocal(2),1:gridLocal(3)) = &
     field_realMPI(row,column,1:gridLocal(1),1:gridLocal(2),1:gridLocal(3))                         ! store the selected component
 endif

!--------------------------------------------------------------------------------------------------
! doing the FFT
 call fftw_mpi_execute_dft_r2c(planForthMPI, field_realMPI, field_fourierMPI)

!--------------------------------------------------------------------------------------------------
! comparing 1 and 3x3 FT results
 if (debugFFTW) then
   call fftw_mpi_execute_dft_r2c(planDebugForthMPI, scalarField_realMPI, scalarField_fourierMPI)
   where (abs(scalarField_fourierMPI(1:grid1Red,1:gridLocal(2),1:gridLocal(3))) > tiny(1.0_pReal))  ! avoid division by zero
     scalarField_fourierMPI(1:grid1Red,1:gridLocal(2),1:gridLocal(3)) = &
       (scalarField_fourierMPI(1:grid1Red,1:gridLocal(2),1:gridLocal(3)) - &
        field_fourierMPI(row,column,1:grid1Red,1:gridLocal(2),1:gridLocal(3)))/ &
       scalarField_fourierMPI(1:grid1Red,1:gridLocal(2),1:gridLocal(3))
   elsewhere
     scalarField_fourierMPI = cmplx(0.0, 0.0, pReal)
   end where
   maxScalarField(1) = maxval(real (scalarField_fourierMPI(1:grid1Red,1:gridLocal(2),1:gridLocal(3))))
   maxScalarField(2) = maxval(aimag(scalarField_fourierMPI(1:grid1Red,1:gridLocal(2),1:gridLocal(3))))
   call MPI_reduce(MPI_IN_PLACE, maxScalarField, 2, MPI_DOUBLE, MPI_MAX, 0, PETSC_COMM_WORLD, ierr)
   if (worldrank == 0_pInt) then
     write(6,'(/,a,i1,1x,i1,a)')   ' .. checking FT results of component ', row, column, ' ..'
     write(6,'(/,a,2(es11.4,1x))') ' max FT relative error = ', &                                   ! print real and imaginary part separately
       maxScalarField(1), maxScalarField(2)
     flush(6)
   endif
 endif

!--------------------------------------------------------------------------------------------------
! applying filter
 do k = 1_pInt, gridLocal(3); do j = 1_pInt, gridLocal(2); do i = 1_pInt, grid1Red
   field_fourierMPI(1:3,1:3,i,j,k) = cmplx(utilities_getFilter(xi(1:3,i,j,k)), 0.0, pReal)* &
                                     field_fourierMPI(1:3,1:3,i,j,k)
 enddo; enddo; enddo

end subroutine utilities_FFTforward

!--------------------------------------------------------------------------------------------------
!> @brief backward FFT of data in field_fourier to field_real
!> @details Does an inverse FFT transform from complex to real.
!> In case of debugging the FFT, one component of the tensor (specified by row and column)
!> is additionally transformed independently and compared to the full tensor transform.
!> The result is weighted by the number of points stored in wgt.
!--------------------------------------------------------------------------------------------------
subroutine utilities_FFTbackward()
 use math
 use numerics, only: &
   worldrank
 use mesh, only: &
   gridLocal

 implicit none
 external :: &
   MPI_Bcast, &
   MPI_reduce
 integer(pInt) :: row, column                                                                       !< if debug FFTW, compare 3D array field of row and column
 real(pReal), dimension(2) :: myRand                                                                ! random numbers
 real(pReal) :: maxScalarField
 PetscErrorCode :: ierr

!--------------------------------------------------------------------------------------------------
! unpack FFT data for conj complex symmetric part; this data is not transformed when using c2r
 if (debugFFTW) then
   if (worldrank == 0_pInt) then
     call random_number(myRand)                                                                     ! two numbers: 0 <= x < 1
     row    = nint(myRand(1)*2_pReal + 1_pReal, pInt)
     column = nint(myRand(2)*2_pReal + 1_pReal, pInt)
   endif
   call MPI_Bcast(row,    1, MPI_INTEGER, 0, PETSC_COMM_WORLD, ierr)
   call MPI_Bcast(column, 1, MPI_INTEGER, 0, PETSC_COMM_WORLD, ierr)
   scalarField_fourierMPI(1:grid1Red,1:gridLocal(2),1:gridLocal(3)) = &
     field_fourierMPI(row,column,1:grid1Red,1:gridLocal(2),1:gridLocal(3))
 endif

!--------------------------------------------------------------------------------------------------
! doing the iFFT
 call fftw_mpi_execute_dft_c2r(planBackMPI, field_fourierMPI, field_realMPI)                        ! back transform of fluct deformation gradient

!--------------------------------------------------------------------------------------------------
! comparing 1 and 3x3 inverse FT results
 if (debugFFTW) then
   call fftw_mpi_execute_dft_c2r(planDebugBackMPI, scalarField_fourierMPI, scalarField_realMPI)
   where (abs(real(scalarField_realMPI, pReal)) > tiny(1.0_pReal))                                  ! avoid division by zero
     scalarField_realMPI(1:gridLocal(1),1:gridLocal(2),1:gridLocal(3)) = &
       (scalarField_realMPI(1:gridLocal(1),1:gridLocal(2),1:gridLocal(3)) &
        - field_realMPI(row,column,1:gridLocal(1),1:gridLocal(2),1:gridLocal(3)))/ &
       scalarField_realMPI(1:gridLocal(1),1:gridLocal(2),1:gridLocal(3))
   elsewhere
     scalarField_realMPI = 0.0_pReal
   end where
   maxScalarField = maxval(real(scalarField_realMPI(1:gridLocal(1),1:gridLocal(2),1:gridLocal(3))))
   call MPI_reduce(MPI_IN_PLACE, maxScalarField, 1, MPI_DOUBLE, MPI_MAX, 0, PETSC_COMM_WORLD, ierr)
   if (worldrank == 0_pInt) then
     write(6,'(/,a,i1,1x,i1,a)') ' ... checking iFT results of component ', row, column, ' ..'
     write(6,'(/,a,es11.4)')     ' max iFT relative error = ', maxScalarField
     flush(6)
   endif
 endif

 field_realMPI = field_realMPI * wgt                                                                ! normalize the result by number of elements

end subroutine utilities_FFTbackward


!--------------------------------------------------------------------------------------------------
!> @brief doing convolution with inverse laplace kernel
!--------------------------------------------------------------------------------------------------
subroutine utilities_inverseLaplace()
 use math, only: &
   math_inv33, &
   PI
 use numerics, only: &
   worldrank
 use mesh, only: &
   gridLocal, &
   gridOffset, &
   geomSizeGlobal

 implicit none
 integer(pInt) :: i, j, k
 real(pReal), dimension(3) :: k_s

 if (worldrank == 0_pInt) then
   write(6,'(/,a)') ' ... doing inverse laplace .................................................'
   flush(6)
 endif

 do k = 1_pInt, gridLocal(3); do j = 1_pInt, gridLocal(2); do i = 1_pInt, grid1Red
   k_s = xi(1:3,i,j,k)*scaledGeomSize
   if (any(k_s /= 0_pInt)) field_fourierMPI(1:3,1:3,i,j,k-gridOffset) = &
                           field_fourierMPI(1:3,1:3,i,j,k-gridOffset)/ &
                           cmplx(-sum((2.0_pReal*PI*k_s/geomSizeGlobal)* &
                                      (2.0_pReal*PI*k_s/geomSizeGlobal)), 0.0_pReal, pReal)
 enddo; enddo; enddo
 if (gridOffset == 0_pInt) &
   field_fourierMPI(1:3,1:3,1,1,1) = cmplx(0.0_pReal, 0.0_pReal, pReal)

end subroutine utilities_inverseLaplace

!--------------------------------------------------------------------------------------------------
!> @brief doing convolution gamma_hat * field_real, ensuring that average value = fieldAim
!--------------------------------------------------------------------------------------------------
subroutine utilities_fourierConvolution(fieldAim)
 use numerics, only: &
   memory_efficient, &
   worldrank
 use math, only: &
   math_inv33
 use mesh, only: &
   gridLocal, &
   gridOffset

 implicit none
 real(pReal), intent(in), dimension(3,3) :: fieldAim                                                !< desired average value of the field after convolution
 real(pReal),    dimension(3,3) :: xiDyad, temp33_Real
 complex(pReal), dimension(3,3) :: temp33_complex
 integer(pInt) :: &
   i, j, k, &
   l, m, n, o

 if (worldrank == 0_pInt) then
   write(6,'(/,a)') ' ... doing convolution .....................................................'
   flush(6)
 endif
!--------------------------------------------------------------------------------------------------
! do the actual spectral method calculation (mechanical equilibrium)
 if (memory_efficient) then                                                                         ! memory saving version, on-the-fly calculation of gamma_hat
   do k = 1_pInt, gridLocal(3); do j = 1_pInt, gridLocal(2); do i = 1_pInt, grid1Red
     if (any([i,j,k+gridOffset] /= 1_pInt)) then                                                    ! singular point at xi=(0.0,0.0,0.0) i.e. i=j=k=1
       forall (l = 1_pInt:3_pInt, m = 1_pInt:3_pInt) &
         xiDyad(l,m) = xi(l,i,j,k)*xi(m,i,j,k)
       forall (l = 1_pInt:3_pInt, m = 1_pInt:3_pInt) &
         temp33_Real(l,m) = sum(C_ref(l,1:3,m,1:3)*xiDyad)
       temp33_Real = math_inv33(temp33_Real)
       forall (l = 1_pInt:3_pInt, m = 1_pInt:3_pInt, n = 1_pInt:3_pInt, o = 1_pInt:3_pInt) &
         gamma_hat(l,m,n,o,1,1,1) = temp33_Real(l,n)*xiDyad(m,o)
       forall (l = 1_pInt:3_pInt, m = 1_pInt:3_pInt) &
         temp33_Complex(l,m) = sum(gamma_hat(l,m,1:3,1:3,1,1,1)*field_fourierMPI(1:3,1:3,i,j,k))
       field_fourierMPI(1:3,1:3,i,j,k) = temp33_Complex
     endif
   enddo; enddo; enddo
 else                                                                                               ! use precalculated gamma-operator
   do k = 1_pInt, gridLocal(3); do j = 1_pInt, gridLocal(2); do i = 1_pInt, grid1Red
     forall (l = 1_pInt:3_pInt, m = 1_pInt:3_pInt) &
       temp33_Complex(l,m) = sum(gamma_hat(l,m,1:3,1:3,i,j,k)*field_fourierMPI(1:3,1:3,i,j,k))
     field_fourierMPI(1:3,1:3,i,j,k) = temp33_Complex
   enddo; enddo; enddo
 endif
 if (gridOffset == 0_pInt) &
   field_fourierMPI(1:3,1:3,1,1,1) = cmplx(fieldAim/wgt, 0.0_pReal, pReal)                          ! singular point at xi=(0.0,0.0,0.0) i.e. i=j=k=1
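
! Note on the normalization: the zero-frequency (DC) term of the unnormalized FFT equals the sum
! over all points, i.e. N times the average; setting it to fieldAim/wgt = fieldAim*N therefore
! yields an average of exactly fieldAim after the weighted backward transform.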

end subroutine utilities_fourierConvolution


!--------------------------------------------------------------------------------------------------
!> @brief calculate root mean square of divergence of field_fourier
!--------------------------------------------------------------------------------------------------
real(pReal) function utilities_divergenceRMS()
 use math
 use numerics, only: &
   worldrank
 use mesh, only: &
   gridLocal, &
   gridGlobal

 implicit none
 external :: &
   MPI_reduce, &
   MPI_Allreduce
 integer(pInt) :: i, j, k
 real(pReal) :: &
   err_real_div_RMS, &                                                                              !< RMS of divergence in real space
   err_div_max, &                                                                                   !< maximum value of divergence in Fourier space
   err_real_div_max                                                                                 !< maximum value of divergence in real space
 complex(pReal), dimension(3) :: temp3_complex
 PetscErrorCode :: ierr

 if (worldrank == 0_pInt) then
   write(6,'(/,a)') ' ... calculating divergence ................................................'
   flush(6)
 endif

!--------------------------------------------------------------------------------------------------
! calculating RMS divergence criterion in Fourier space
 utilities_divergenceRMS = 0.0_pReal
 do k = 1_pInt, gridLocal(3); do j = 1_pInt, gridLocal(2)
   do i = 2_pInt, grid1Red - 1_pInt                                                                 ! has a conjugate complex counterpart somewhere, therefore count it twice
     utilities_divergenceRMS = utilities_divergenceRMS &
           + 2.0_pReal*(sum (real(math_mul33x3_complex(field_fourierMPI(1:3,1:3,i,j,k), &           ! (sqrt(real(a)**2 + aimag(a)**2))**2 = real(a)**2 + aimag(a)**2, so do not take square root and square again
                                                       xi(1:3,i,j,k))*TWOPIIMG)**2.0_pReal) &       ! --> sum squared L_2 norm of vector
                       + sum(aimag(math_mul33x3_complex(field_fourierMPI(1:3,1:3,i,j,k), &
                                                       xi(1:3,i,j,k))*TWOPIIMG)**2.0_pReal))
   enddo
   utilities_divergenceRMS = utilities_divergenceRMS &                                              ! these two layers (DC and Nyquist) do not have a conjugate complex counterpart (if grid(1) /= 1)
                + sum( real(math_mul33x3_complex(field_fourierMPI(1:3,1:3,1,j,k), &
                                                 xi(1:3,1,j,k))*TWOPIIMG)**2.0_pReal) &
                + sum(aimag(math_mul33x3_complex(field_fourierMPI(1:3,1:3,1,j,k), &
                                                 xi(1:3,1,j,k))*TWOPIIMG)**2.0_pReal) &
                + sum( real(math_mul33x3_complex(field_fourierMPI(1:3,1:3,grid1Red,j,k), &
                                                 xi(1:3,grid1Red,j,k))*TWOPIIMG)**2.0_pReal) &
                + sum(aimag(math_mul33x3_complex(field_fourierMPI(1:3,1:3,grid1Red,j,k), &
                                                 xi(1:3,grid1Red,j,k))*TWOPIIMG)**2.0_pReal)
 enddo; enddo
 if (gridGlobal(1) == 1_pInt) utilities_divergenceRMS = utilities_divergenceRMS*0.5_pReal           ! counted twice in case of grid(1) == 1
 call MPI_Allreduce(MPI_IN_PLACE, utilities_divergenceRMS, 1, MPI_DOUBLE, MPI_SUM, PETSC_COMM_WORLD, ierr)
 utilities_divergenceRMS = sqrt(utilities_divergenceRMS)*wgt                                        ! RMS in real space calculated with Parseval's theorem from Fourier space
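
! Parseval's identity as used above: sum_x |div(x)|**2 = (1/N)*sum_k |div_hat(k)|**2, so the real
! space RMS sqrt(sum_x |div(x)|**2 / N) equals sqrt(sum_k |div_hat(k)|**2)/N, i.e. sqrt(...)*wgt.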

!--------------------------------------------------------------------------------------------------
! calculate additional divergence criteria and report
 if (debugDivergence) then                                                                          ! calculate divergence again
   err_div_max = 0.0_pReal
   do k = 1_pInt, gridLocal(3); do j = 1_pInt, gridLocal(2); do i = 1_pInt, grid1Red
     temp3_Complex = math_mul33x3_complex(field_fourierMPI(1:3,1:3,i,j,k)*wgt, &                    ! weighting P_fourier
                                          xi(1:3,i,j,k))*TWOPIIMG
     err_div_max = max(err_div_max, sum(abs(temp3_Complex)**2.0_pReal))
     divFourierMPI(1:3,i,j,k) = temp3_Complex                                                       ! need divergence NOT squared
   enddo; enddo; enddo

   call fftw_mpi_execute_dft_c2r(planDivMPI, divFourierMPI, divRealMPI)                             ! already weighted

   err_real_div_RMS = sum(divRealMPI**2.0_pReal)
   call MPI_reduce(MPI_IN_PLACE, err_real_div_RMS, 1, MPI_DOUBLE, MPI_SUM, 0, PETSC_COMM_WORLD, ierr)
   err_real_div_RMS = sqrt(wgt*err_real_div_RMS)                                                    ! RMS in real space
   err_real_div_max = maxval(sum(divRealMPI**2.0_pReal, dim=1))                                     ! max in real space (sum over the three vector components)
   call MPI_reduce(MPI_IN_PLACE, err_real_div_max, 1, MPI_DOUBLE, MPI_MAX, 0, PETSC_COMM_WORLD, ierr)
   err_real_div_max = sqrt(err_real_div_max)
   call MPI_reduce(MPI_IN_PLACE, err_div_max, 1, MPI_DOUBLE, MPI_MAX, 0, PETSC_COMM_WORLD, ierr)
   err_div_max = sqrt(err_div_max)                                                                  ! max in Fourier space

   if (worldrank == 0_pInt) then
     write(6,'(/,1x,a,es11.4)') 'error divergence  FT  RMS = ', utilities_divergenceRMS
     write(6,'(1x,a,es11.4)')   'error divergence Real RMS = ', err_real_div_RMS
     write(6,'(1x,a,es11.4)')   'error divergence  FT  max = ', err_div_max
     write(6,'(1x,a,es11.4)')   'error divergence Real max = ', err_real_div_max
     flush(6)
   endif
 endif

end function utilities_divergenceRMS

!--------------------------------------------------------------------------------------------------
!> @brief calculate root mean square of curl of field_fourier
!--------------------------------------------------------------------------------------------------
real(pReal) function utilities_curlRMS()
 use math
 use numerics, only: &
   worldrank
 use mesh, only: &
   gridLocal, &
   gridGlobal

 implicit none
 external :: &
   MPI_Allreduce
 integer(pInt) :: i, j, k, l
 complex(pReal), dimension(3,3) :: curl_fourier
 PetscErrorCode :: ierr

 if (worldrank == 0_pInt) then
   write(6,'(/,a)') ' ... calculating curl ......................................................'
   flush(6)
 endif

!--------------------------------------------------------------------------------------------------
! calculating RMS curl criterion in Fourier space
 utilities_curlRMS = 0.0_pReal
 do k = 1_pInt, gridLocal(3); do j = 1_pInt, gridLocal(2)
   do i = 2_pInt, grid1Red - 1_pInt                                                                 ! has a conjugate complex counterpart somewhere, therefore count it twice
     do l = 1_pInt, 3_pInt
       curl_fourier(l,1) = (+field_fourierMPI(l,3,i,j,k)*xi(2,i,j,k) &
                            -field_fourierMPI(l,2,i,j,k)*xi(3,i,j,k))*TWOPIIMG
       curl_fourier(l,2) = (+field_fourierMPI(l,1,i,j,k)*xi(3,i,j,k) &
                            -field_fourierMPI(l,3,i,j,k)*xi(1,i,j,k))*TWOPIIMG
       curl_fourier(l,3) = (+field_fourierMPI(l,2,i,j,k)*xi(1,i,j,k) &
                            -field_fourierMPI(l,1,i,j,k)*xi(2,i,j,k))*TWOPIIMG
     enddo
     utilities_curlRMS = utilities_curlRMS + &
                         2.0_pReal*sum(real(curl_fourier)**2.0_pReal + aimag(curl_fourier)**2.0_pReal)
   enddo
   do l = 1_pInt, 3_pInt                                                                            ! DC layer (i = 1)
     curl_fourier(l,1) = (+field_fourierMPI(l,3,1,j,k)*xi(2,1,j,k) &
                          -field_fourierMPI(l,2,1,j,k)*xi(3,1,j,k))*TWOPIIMG
     curl_fourier(l,2) = (+field_fourierMPI(l,1,1,j,k)*xi(3,1,j,k) &
                          -field_fourierMPI(l,3,1,j,k)*xi(1,1,j,k))*TWOPIIMG
     curl_fourier(l,3) = (+field_fourierMPI(l,2,1,j,k)*xi(1,1,j,k) &
                          -field_fourierMPI(l,1,1,j,k)*xi(2,1,j,k))*TWOPIIMG
   enddo
   utilities_curlRMS = utilities_curlRMS + &
                       2.0_pReal*sum(real(curl_fourier)**2.0_pReal + aimag(curl_fourier)**2.0_pReal)
   do l = 1_pInt, 3_pInt                                                                            ! Nyquist layer (i = grid1Red)
     curl_fourier(l,1) = (+field_fourierMPI(l,3,grid1Red,j,k)*xi(2,grid1Red,j,k) &
                          -field_fourierMPI(l,2,grid1Red,j,k)*xi(3,grid1Red,j,k))*TWOPIIMG
     curl_fourier(l,2) = (+field_fourierMPI(l,1,grid1Red,j,k)*xi(3,grid1Red,j,k) &
                          -field_fourierMPI(l,3,grid1Red,j,k)*xi(1,grid1Red,j,k))*TWOPIIMG
     curl_fourier(l,3) = (+field_fourierMPI(l,2,grid1Red,j,k)*xi(1,grid1Red,j,k) &
                          -field_fourierMPI(l,1,grid1Red,j,k)*xi(2,grid1Red,j,k))*TWOPIIMG
   enddo
   utilities_curlRMS = utilities_curlRMS + &
                       2.0_pReal*sum(real(curl_fourier)**2.0_pReal + aimag(curl_fourier)**2.0_pReal)
 enddo; enddo

 call MPI_Allreduce(MPI_IN_PLACE, utilities_curlRMS, 1, MPI_DOUBLE, MPI_SUM, PETSC_COMM_WORLD, ierr)
 utilities_curlRMS = sqrt(utilities_curlRMS)*wgt
 if (gridGlobal(1) == 1_pInt) utilities_curlRMS = utilities_curlRMS*0.5_pReal                       ! counted twice in case of grid(1) == 1

end function utilities_curlRMS

!--------------------------------------------------------------------------------------------------
!> @brief calculates masked compliance tensor used to adjust F to fulfill stress BC
!--------------------------------------------------------------------------------------------------
function utilities_maskedCompliance(rot_BC, mask_stress, C)
 use IO, only: &
   IO_error
 use numerics, only: &
   worldrank
 use math, only: &
   math_Plain3333to99, &
   math_plain99to3333, &
   math_rotate_forward3333, &
   math_rotate_forward33, &
   math_invert

 implicit none
 real(pReal), dimension(3,3,3,3)             :: utilities_maskedCompliance                          !< masked compliance
 real(pReal), intent(in), dimension(3,3,3,3) :: C                                                   !< current average stiffness
 real(pReal), intent(in), dimension(3,3)     :: rot_BC                                              !< rotation of load frame
 logical,     intent(in), dimension(3,3)     :: mask_stress                                         !< mask of stress BC
 integer(pInt) :: j, k, m, n
 logical, dimension(9) :: mask_stressVector
 real(pReal), dimension(9,9) :: temp99_Real
 integer(pInt) :: size_reduced = 0_pInt
 real(pReal), dimension(:,:), allocatable :: &
   s_reduced, &                                                                                     !< reduced compliance matrix (depending on number of stress BC)
   c_reduced, &                                                                                     !< reduced stiffness (depending on number of stress BC)
   sTimesC                                                                                          !< temp variable to check inversion
 logical :: errmatinv
 character(len=1024) :: formatString

 mask_stressVector = reshape(transpose(mask_stress), [9])
 size_reduced = int(count(mask_stressVector), pInt)
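
! Illustrative example: for a stress BC prescribing only the (1,1) component, mask_stress is
! .true. only at (1,1), so mask_stressVector has a single .true. entry and size_reduced = 1;
! the reduced stiffness below is then the single entry temp99_Real(1,1), i.e. the rotated
! C(1,1,1,1), and its reciprocal becomes the only nonzero entry of the masked compliance.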
 if (size_reduced > 0_pInt) then
   allocate (c_reduced(size_reduced,size_reduced), source = 0.0_pReal)
   allocate (s_reduced(size_reduced,size_reduced), source = 0.0_pReal)
   allocate (sTimesC  (size_reduced,size_reduced), source = 0.0_pReal)

   temp99_Real = math_Plain3333to99(math_rotate_forward3333(C, rot_BC))

   if (debugGeneral .and. worldrank == 0_pInt) then
     write(6,'(/,a)') ' ... updating masked compliance ............................................'
     write(6,'(/,a,/,9(9(2x,f12.7,1x)/))', advance='no') ' Stiffness C (load) / GPa =', &
       transpose(temp99_Real)/1.e9_pReal
     flush(6)
   endif
   k = 0_pInt                                                                                       ! calculate reduced stiffness
   do n = 1_pInt, 9_pInt
     if (mask_stressVector(n)) then
       k = k + 1_pInt
       j = 0_pInt
       do m = 1_pInt, 9_pInt
         if (mask_stressVector(m)) then
           j = j + 1_pInt
           c_reduced(k,j) = temp99_Real(n,m)
   endif; enddo; endif; enddo
   call math_invert(size_reduced, c_reduced, s_reduced, errmatinv)                                  ! invert reduced stiffness
   if (errmatinv) call IO_error(error_ID=400_pInt, ext_msg='utilities_maskedCompliance')
   temp99_Real = 0.0_pReal                                                                          ! fill up compliance with zeros
   k = 0_pInt
   do n = 1_pInt, 9_pInt
     if (mask_stressVector(n)) then
       k = k + 1_pInt
       j = 0_pInt
       do m = 1_pInt, 9_pInt
         if (mask_stressVector(m)) then
           j = j + 1_pInt
           temp99_Real(n,m) = s_reduced(k,j)
   endif; enddo; endif; enddo

!--------------------------------------------------------------------------------------------------
! check if inversion was successful
   sTimesC = matmul(c_reduced, s_reduced)
   do m = 1_pInt, size_reduced
     do n = 1_pInt, size_reduced
       if (m == n .and. abs(sTimesC(m,n)) > (1.0_pReal + 10.0e-12_pReal)) errmatinv = .true.        ! diagonal elements of S*C should be 1
       if (m /= n .and. abs(sTimesC(m,n)) > (0.0_pReal + 10.0e-12_pReal)) errmatinv = .true.        ! off-diagonal elements of S*C should be 0
     enddo
   enddo
   if ((debugGeneral .or. errmatinv) .and. (worldrank == 0_pInt)) then                              ! report
     write(formatString, '(I16.16)') size_reduced
     formatString = '(/,a,/,'//trim(formatString)//'('//trim(formatString)//'(2x,es9.2,1x)/))'
     write(6, trim(formatString), advance='no') ' C * S (load) ', &
       transpose(matmul(c_reduced, s_reduced))
     write(6, trim(formatString), advance='no') ' S (load) ', transpose(s_reduced)
   endif
   if (errmatinv) call IO_error(error_ID=400_pInt, ext_msg='utilities_maskedCompliance')
   deallocate(c_reduced)
   deallocate(s_reduced)
   deallocate(sTimesC)
 else
   temp99_real = 0.0_pReal
 endif
 if (debugGeneral .and. worldrank == 0_pInt) &                                                      ! report
   write(6,'(/,a,/,9(9(2x,f12.7,1x)/),/)', advance='no') ' Masked Compliance (load) * GPa =', &
     transpose(temp99_Real*1.e9_pReal)
 flush(6)
 utilities_maskedCompliance = math_Plain99to3333(temp99_Real)

end function utilities_maskedCompliance


!--------------------------------------------------------------------------------------------------
!> @brief calculates constitutive response
!--------------------------------------------------------------------------------------------------
subroutine utilities_constitutiveResponse(F_lastInc, F, temperature, timeinc, &
                                          P, C_volAvg, C_minmaxAvg, P_av, forwardData, rotation_BC)
 use debug, only: &
   debug_reset, &
   debug_info
 use numerics, only: &
   worldrank
 use math, only: &
   math_transpose33, &
   math_rotate_forward33, &
   math_det33
 use mesh, only: &
   gridLocal
 use FEsolving, only: &
   restartWrite
 use CPFEM, only: &
   CPFEM_general, &
   CPFEM_COLLECT, &
   CPFEM_CALCRESULTS, &
   CPFEM_AGERESULTS
 use homogenization, only: &
   materialpoint_F0, &
   materialpoint_F, &
   materialpoint_P, &
   materialpoint_dPdF
!use thermal_isothermal, only: &
!  thermal_isothermal_temperature

 implicit none
 external :: &
   MPI_reduce, &
   MPI_Allreduce
 real(pReal), intent(in) :: temperature                                                             !< temperature (no field)
 real(pReal), intent(in), dimension(3,3,gridLocal(1),gridLocal(2),gridLocal(3)) :: &
   F_lastInc, &                                                                                     !< previous deformation gradient
   F                                                                                                !< target deformation gradient
 real(pReal), intent(in) :: timeinc                                                                 !< loading time
 logical,     intent(in) :: forwardData                                                             !< age results
 real(pReal), intent(in), dimension(3,3) :: rotation_BC                                             !< rotation of load frame

 real(pReal), intent(out), dimension(3,3,3,3) :: C_volAvg, C_minmaxAvg                              !< average stiffness
 real(pReal), intent(out), dimension(3,3)     :: P_av                                               !< average PK stress
 real(pReal), intent(out), dimension(3,3,gridLocal(1),gridLocal(2),gridLocal(3)) :: P               !< PK stress
 integer(pInt) :: &
   calcMode, &                                                                                      !< CPFEM mode for calculation
   j, k
 real(pReal), dimension(3,3,3,3) :: max_dPdF, min_dPdF
 real(pReal) :: max_dPdF_norm, min_dPdF_norm, defgradDetMin, defgradDetMax, defgradDet
 PetscErrorCode :: ierr

 if (worldrank == 0_pInt) then
   write(6,'(/,a)') ' ... evaluating constitutive response ......................................'
   flush(6)
 endif
 calcMode = CPFEM_CALCRESULTS

 if (forwardData) then                                                                             ! aging results
   calcMode = ior(calcMode,CPFEM_AGERESULTS)
   materialpoint_F0 = reshape(F_lastInc,[3,3,1,product(gridLocal)])
 endif
 if (cutBack) then                                                                                 ! restore saved variables
   calcMode = iand(calcMode,not(CPFEM_AGERESULTS))
 endif
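! note: the CPFEM modes are bit flags, so ior sets the aging request on top of
! CPFEM_CALCRESULTS while iand with the complement clears it again; a cut back therefore
! reverts the aging that forwardData switched on.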
 call CPFEM_general(CPFEM_COLLECT,F_lastInc(1:3,1:3,1,1,1),F(1:3,1:3,1,1,1), &
                    temperature,timeinc,1_pInt,1_pInt)
! thermal_isothermal_temperature(:) = temperature
 materialpoint_F = reshape(F,[3,3,1,product(gridLocal)])
 call debug_reset()

!--------------------------------------------------------------------------------------------------
! calculate bounds of det(F) and report
 if (debugGeneral) then
   defgradDetMax = -huge(1.0_pReal)
   defgradDetMin = +huge(1.0_pReal)
   do j = 1_pInt, product(gridLocal)
     defgradDet = math_det33(materialpoint_F(1:3,1:3,1,j))
     defgradDetMax = max(defgradDetMax,defgradDet)
     defgradDetMin = min(defgradDetMin,defgradDet)
   end do
   call MPI_Allreduce(MPI_IN_PLACE,defgradDetMax,1,MPI_DOUBLE,MPI_MAX,PETSC_COMM_WORLD,ierr)       ! MPI_IN_PLACE is valid only at the root of MPI_reduce, hence reduce on all ranks
   call MPI_Allreduce(MPI_IN_PLACE,defgradDetMin,1,MPI_DOUBLE,MPI_MIN,PETSC_COMM_WORLD,ierr)
   if (worldrank == 0_pInt) then
     write(6,'(a,1x,es11.4)') ' max determinant of deformation =', defgradDetMax
     write(6,'(a,1x,es11.4)') ' min determinant of deformation =', defgradDetMin
     flush(6)
   endif
 endif
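! note: det(F) measures the local volume change; bounds drifting far from 1 hint at a
! diverging solution or an ill-behaved material point before the expensive CALCRESULTS sweep.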

 call CPFEM_general(calcMode,F_lastInc(1:3,1:3,1,1,1),F(1:3,1:3,1,1,1), &                          ! first call calculates everything
                    temperature,timeinc,1_pInt,1_pInt)

 max_dPdF = 0.0_pReal
 max_dPdF_norm = 0.0_pReal
 min_dPdF = huge(1.0_pReal)
 min_dPdF_norm = huge(1.0_pReal)
 do k = 1_pInt, product(gridLocal)
   if (max_dPdF_norm < sum(materialpoint_dPdF(1:3,1:3,1:3,1:3,1,k)**2.0_pReal)) then
     max_dPdF = materialpoint_dPdF(1:3,1:3,1:3,1:3,1,k)
     max_dPdF_norm = sum(materialpoint_dPdF(1:3,1:3,1:3,1:3,1,k)**2.0_pReal)
   endif
   if (min_dPdF_norm > sum(materialpoint_dPdF(1:3,1:3,1:3,1:3,1,k)**2.0_pReal)) then
     min_dPdF = materialpoint_dPdF(1:3,1:3,1:3,1:3,1,k)
     min_dPdF_norm = sum(materialpoint_dPdF(1:3,1:3,1:3,1:3,1,k)**2.0_pReal)
   endif
 end do
 call MPI_Allreduce(MPI_IN_PLACE,max_dPdF,81,MPI_DOUBLE,MPI_MAX,PETSC_COMM_WORLD,ierr)
 call MPI_Allreduce(MPI_IN_PLACE,min_dPdF,81,MPI_DOUBLE,MPI_MIN,PETSC_COMM_WORLD,ierr)
 C_minmaxAvg = 0.5_pReal*(max_dPdF + min_dPdF)

 C_volAvg = sum(sum(materialpoint_dPdF,dim=6),dim=5)*wgt
 call MPI_Allreduce(MPI_IN_PLACE,C_volAvg,81,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
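! note: C_volAvg is the volume average of dP/dF (wgt = 1/Nelems; the MPI sum completes the
! average over all ranks), while C_minmaxAvg is the mean of the stiffest and most compliant
! tangents (selected per rank by Frobenius norm, reduced component-wise across ranks),
! typically used by the solver variants to update the reference stiffness.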

 call debug_info()
 restartWrite = .false.                                                                            ! reset restartWrite status
 cutBack = .false.                                                                                 ! reset cutBack status
 P = reshape(materialpoint_P,[3,3,gridLocal(1),gridLocal(2),gridLocal(3)])
 P_av = sum(sum(sum(P,dim=5),dim=4),dim=3)*wgt                                                     ! average of P
 call MPI_Allreduce(MPI_IN_PLACE,P_av,9,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
 if (debugRotation .and. worldrank == 0_pInt) &
   write(6,'(/,a,/,3(3(2x,f12.4,1x)/))',advance='no') ' Piola--Kirchhoff stress (lab) / MPa =', &
     math_transpose33(P_av)*1.e-6_pReal
 P_av = math_rotate_forward33(P_av,rotation_BC)
 if (worldrank == 0_pInt) then
   write(6,'(/,a,/,3(3(2x,f12.4,1x)/))',advance='no') ' Piola--Kirchhoff stress / MPa =', &
     math_transpose33(P_av)*1.e-6_pReal
   flush(6)
 endif

end subroutine utilities_constitutiveResponse
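! note: the response is evaluated in two CPFEM sweeps: the COLLECT call distributes the
! deformation gradients to the material points, the subsequent CALCRESULTS call returns
! P and dP/dF everywhere, from which the averages above are assembled.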

!--------------------------------------------------------------------------------------------------
!> @brief calculates the forward rate, either by guessing from the former trajectory
!> (delta of the last increment divided by its time step) or by spreading the given average rate
!--------------------------------------------------------------------------------------------------
pure function utilities_calculateRate(avRate,timeinc_old,guess,field_lastInc,field)
 use mesh, only: &
   gridLocal

 implicit none
 real(pReal), intent(in), dimension(3,3) :: avRate                                                 !< homogeneous addon
 real(pReal), intent(in) :: &
   timeinc_old                                                                                     !< timeinc of last step
 logical, intent(in) :: &
   guess                                                                                           !< guess along former trajectory
 real(pReal), intent(in), dimension(3,3,gridLocal(1),gridLocal(2),gridLocal(3)) :: &
   field_lastInc, &                                                                                !< data of previous step
   field                                                                                           !< data of current step
 real(pReal), dimension(3,3,gridLocal(1),gridLocal(2),gridLocal(3)) :: &
   utilities_calculateRate

 if (guess) then
   utilities_calculateRate = (field - field_lastInc)/timeinc_old
 else
   utilities_calculateRate = spread(spread(spread(avRate,3,gridLocal(1)),4,gridLocal(2)), &
                                                                        5,gridLocal(3))
 endif

end function utilities_calculateRate
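! hypothetical usage sketch (names assumed, not part of this module): a solver variant would
! forward its deformation gradient field via
!   Fdot = utilities_calculateRate(avgRate, timeinc_old, guess, F_lastInc, F)
! reusing the last trajectory when guess is true and spreading the prescribed average rate
! otherwise.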

!--------------------------------------------------------------------------------------------------
!> @brief forwards a field with a pointwise given rate; if an aim is given,
!> ensures that the average matches the aim
!--------------------------------------------------------------------------------------------------
function utilities_forwardField(timeinc,field_lastInc,rate,aim)
 use mesh, only: &
   gridLocal

 implicit none
 external :: &
   MPI_Allreduce

 real(pReal), intent(in) :: &
   timeinc                                                                                         !< timeinc of current step
 real(pReal), intent(in), dimension(3,3,gridLocal(1),gridLocal(2),gridLocal(3)) :: &
   field_lastInc, &                                                                                !< initial field
   rate                                                                                            !< rate by which to forward
 real(pReal), intent(in), optional, dimension(3,3) :: &
   aim                                                                                             !< average field value aim
 real(pReal), dimension(3,3,gridLocal(1),gridLocal(2),gridLocal(3)) :: &
   utilities_forwardField
 real(pReal), dimension(3,3) :: fieldDiff                                                          !< <a + adot*t> - aim
 PetscErrorCode :: ierr

 utilities_forwardField = field_lastInc + rate*timeinc
 if (present(aim)) then                                                                            ! correct to match average
   fieldDiff = sum(sum(sum(utilities_forwardField,dim=5),dim=4),dim=3)*wgt
   call MPI_Allreduce(MPI_IN_PLACE,fieldDiff,9,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
   fieldDiff = fieldDiff - aim
   utilities_forwardField = utilities_forwardField - &
                            spread(spread(spread(fieldDiff,3,gridLocal(1)),4,gridLocal(2)), &
                                                                          5,gridLocal(3))
 endif

end function utilities_forwardField
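! note: the update reads field = field_lastInc + rate*timeinc; when aim is present, the
! deviation of the globally averaged field from aim is subtracted pointwise, so that
! <field> = aim holds exactly after the correction.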

!--------------------------------------------------------------------------------------------------
!> @brief calculates filter for Fourier convolution depending on type given in numerics.config
!--------------------------------------------------------------------------------------------------
real(pReal) function utilities_getFilter(k)
 use IO, only: &
   IO_error
 use numerics, only: &
   spectral_filter
 use math, only: &
   PI
 use mesh, only: &
   gridGlobal

 implicit none
 real(pReal), intent(in), dimension(3) :: k                                                        !< indices of frequency

 utilities_getFilter = 1.0_pReal

 select case (spectral_filter)
   case ('none')                                                                                   ! default, no weighting
   case ('cosine')                                                                                 ! cosine curve with 1 for avg and zero for highest freq
     utilities_getFilter = product(1.0_pReal + cos(PI*k*scaledGeomSize/gridGlobal))/8.0_pReal
   case ('gradient')                                                                               ! gradient, might need grid scaling as for cosine filter
     utilities_getFilter = 1.0_pReal/(1.0_pReal + &
                                      (k(1)*k(1) + k(2)*k(2) + k(3)*k(3)))
   case default
     call IO_error(error_ID=892_pInt,ext_msg=trim(spectral_filter))
 end select

 if (gridGlobal(1) /= 1_pInt .and. k(1) == real(grid1Red - 1_pInt,pReal)/scaledGeomSize(1)) &
   utilities_getFilter = 0.0_pReal
 if (gridGlobal(2) /= 1_pInt .and. k(2) == real(gridGlobal(2)/2_pInt,pReal)/scaledGeomSize(2)) &
   utilities_getFilter = 0.0_pReal                                                                 ! do not delete the whole slice in case of 2D calculation
 if (gridGlobal(2) /= 1_pInt .and. &
     k(2) == real(gridGlobal(2)/2_pInt + mod(gridGlobal(2),2_pInt),pReal)/scaledGeomSize(2)) &
   utilities_getFilter = 0.0_pReal                                                                 ! do not delete the whole slice in case of 2D calculation
 if (gridGlobal(3) /= 1_pInt .and. k(3) == real(gridGlobal(3)/2_pInt,pReal)/scaledGeomSize(3)) &
   utilities_getFilter = 0.0_pReal                                                                 ! do not delete the whole slice in case of 2D calculation
 if (gridGlobal(3) /= 1_pInt .and. &
     k(3) == real(gridGlobal(3)/2_pInt + mod(gridGlobal(3),2_pInt),pReal)/scaledGeomSize(3)) &
   utilities_getFilter = 0.0_pReal                                                                 ! do not delete the whole slice in case of 2D calculation

end function utilities_getFilter
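! note: the cosine option weights each frequency by prod_i (1 + cos(pi*k_i*s_i/N_i))/2,
! dropping smoothly from 1 at the zero frequency; independent of the chosen filter, the
! trailing checks explicitly zero the highest (Nyquist) frequencies, but skip directions
! with only one grid point so that a 2D calculation does not lose its single slice.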

!--------------------------------------------------------------------------------------------------
!> @brief cleans up by destroying all FFTW plans
!--------------------------------------------------------------------------------------------------
subroutine utilities_destroy()
 use math

 implicit none

 if (debugDivergence) call fftw_destroy_plan(planDivMPI)
 if (debugFFTW) call fftw_destroy_plan(planDebugForthMPI)
 if (debugFFTW) call fftw_destroy_plan(planDebugBackMPI)
 call fftw_destroy_plan(planForthMPI)
 call fftw_destroy_plan(planBackMPI)
 call fftw_destroy_plan(planCoordsMPI)

end subroutine utilities_destroy

!--------------------------------------------------------------------------------------------------
!> @brief calculates the coordinates in the current configuration for a given defgrad field
!> using integration in Fourier space. Similar to mesh.f90, but uses the data already defined
!> for the convolution
!--------------------------------------------------------------------------------------------------
subroutine utilities_updateIPcoords(F)
 use math
 use mesh, only: &
   gridGlobal, &
   gridLocal, &
   gridOffset, &
   geomSizeGlobal, &
   mesh_ipCoordinates

 implicit none
 external :: &
   MPI_Bcast

 real(pReal), dimension(3,3,gridLocal(1),gridLocal(2),gridLocal(3)), intent(in) :: F
 integer(pInt) :: i, j, k, m
 real(pReal), dimension(3) :: step, offset_coords, integrator
 real(pReal), dimension(3,3) :: Favg
 PetscErrorCode :: ierr

 field_realMPI = 0.0_pReal
 field_realMPI(1:3,1:3,1:gridLocal(1),1:gridLocal(2),1:gridLocal(3)) = F
 call utilities_FFTforward()

 integrator = geomSizeGlobal*0.5_pReal/PI
 step = geomSizeGlobal/real(gridGlobal,pReal)

!--------------------------------------------------------------------------------------------------
! average F
 if (gridOffset == 0_pInt) Favg = real(field_fourierMPI(1:3,1:3,1,1,1),pReal)*wgt
 call MPI_Bcast(Favg,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)

!--------------------------------------------------------------------------------------------------
! integration in Fourier space
 coords_fourierMPI = cmplx(0.0_pReal,0.0_pReal,pReal)
 do k = 1_pInt, gridLocal(3); do j = 1_pInt, gridLocal(2); do i = 1_pInt, grid1Red
   do m = 1_pInt, 3_pInt
     coords_fourierMPI(m,i,j,k) = sum(field_fourierMPI(m,1:3,i,j,k)* &
                                      cmplx(0.0_pReal,xi(1:3,i,j,k)*scaledGeomSize*integrator,pReal))
   enddo
   if (any(xi(1:3,i,j,k) /= 0.0_pReal)) coords_fourierMPI(1:3,i,j,k) = &
     coords_fourierMPI(1:3,i,j,k)/cmplx(-sum(xi(1:3,i,j,k)*scaledGeomSize*xi(1:3,i,j,k)* &
                                             scaledGeomSize),0.0_pReal,pReal)
 enddo; enddo; enddo
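! note: per frequency the loop evaluates x~ = -i F~ . (xi' * L/(2 pi)) / |xi'|^2 with
! xi' = xi*scaledGeomSize, i.e. the antiderivative of the deformation gradient fluctuation;
! the zero frequency (the average) drops out here and is re-added in real space below.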
 call fftw_mpi_execute_dft_c2r(planCoordsMPI,coords_fourierMPI,coords_realMPI)

!--------------------------------------------------------------------------------------------------
! add average to fluctuation and put (0,0,0) on (0,0,0)
 if (gridOffset == 0_pInt) offset_coords = coords_realMPI(1:3,1,1,1)
 call MPI_Bcast(offset_coords,3,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
 offset_coords = math_mul33x3(Favg,step/2.0_pReal) - offset_coords
 m = 1_pInt
 do k = 1_pInt, gridLocal(3); do j = 1_pInt, gridLocal(2); do i = 1_pInt, gridLocal(1)
   mesh_ipCoordinates(1:3,1,m) = coords_realMPI(1:3,i,j,k) &
                               + offset_coords &
                               + math_mul33x3(Favg,step*real([i,j,k+gridOffset] - 1_pInt,pReal))
   m = m + 1_pInt
 enddo; enddo; enddo

end subroutine utilities_updateIPcoords

end module DAMASK_spectral_utilities