! Copyright 2011-13 Max-Planck-Institut für Eisenforschung GmbH
!
! This file is part of DAMASK,
! the Düsseldorf Advanced MAterial Simulation Kit.
!
! DAMASK is free software: you can redistribute it and/or modify
! it under the terms of the GNU General Public License as published by
! the Free Software Foundation, either version 3 of the License, or
! (at your option) any later version.
!
! DAMASK is distributed in the hope that it will be useful,
! but WITHOUT ANY WARRANTY; without even the implied warranty of
! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
! GNU General Public License for more details.
!
! You should have received a copy of the GNU General Public License
! along with DAMASK. If not, see <http://www.gnu.org/licenses/>.
!
!--------------------------------------------------------------------------------------------------
! $Id$
!--------------------------------------------------------------------------------------------------
!> @author Denny Tjahjanto, Max-Planck-Institut für Eisenforschung GmbH
!> @author Franz Roters, Max-Planck-Institut für Eisenforschung GmbH
!> @author Philip Eisenlohr, Max-Planck-Institut für Eisenforschung GmbH
!> @brief Relaxed grain cluster (RGC) homogenization scheme
!> Ngrains is defined as p x q x r (cluster)
!--------------------------------------------------------------------------------------------------
module homogenization_RGC
 use prec, only: &
   pReal, &
   pInt

 implicit none
 private
 character(len=*), parameter, public :: &
   HOMOGENIZATION_RGC_label = 'rgc'

 integer(pInt), dimension(:), allocatable, public :: &
   homogenization_RGC_sizeState, &
   homogenization_RGC_sizePostResults
 integer(pInt), dimension(:,:), allocatable, target, public :: &
   homogenization_RGC_sizePostResult
 character(len=64), dimension(:,:), allocatable, target, public :: &
   homogenization_RGC_output                                        ! name of each post result output
 integer(pInt), dimension(:,:), allocatable, private :: &
   homogenization_RGC_Ngrains
 real(pReal), dimension(:,:), allocatable, private :: &
   homogenization_RGC_dAlpha, &
   homogenization_RGC_angles
 real(pReal), dimension(:,:,:,:), allocatable, private :: &
   homogenization_RGC_orientation
 real(pReal), dimension(:), allocatable, private :: &
   homogenization_RGC_xiAlpha, &
   homogenization_RGC_ciAlpha

 public :: &
   homogenization_RGC_init, &
   homogenization_RGC_partitionDeformation, &
   homogenization_RGC_averageStressAndItsTangent, &
   homogenization_RGC_updateState, &
   homogenization_RGC_postResults
 private :: &
   homogenization_RGC_stressPenalty, &
   homogenization_RGC_volumePenalty, &
   homogenization_RGC_grainDeformation, &
   homogenization_RGC_surfaceCorrection, &
   homogenization_RGC_equivalentModuli, &
   homogenization_RGC_relaxationVector, &
   homogenization_RGC_interfaceNormal, &
   homogenization_RGC_getInterface, &
   homogenization_RGC_grain1to3, &
   homogenization_RGC_grain3to1, &
   homogenization_RGC_interface4to1, &
   homogenization_RGC_interface1to4

contains

!--------------------------------------------------------------------------------------------------
!> @brief allocates all necessary fields, reads information from material configuration file
!--------------------------------------------------------------------------------------------------
subroutine homogenization_RGC_init(myUnit)
 use, intrinsic :: iso_fortran_env                                  ! to get compiler_version and compiler_options (at least for gfortran 4.6 at the moment)
 use debug, only: &
   debug_level, &
   debug_homogenization, &
   debug_levelBasic, &
   debug_levelExtensive
 use math, only: &
   math_Mandel3333to66, &
   math_Voigt66to3333, &
   math_I3, &
   math_sampleRandomOri, &
   math_EulerToR, &
   INRAD
 use mesh, only: &
   mesh_maxNips, &
   mesh_NcpElems, &
   mesh_element, &
   FE_Nips, &
   FE_geomtype
 use IO
 use material

 implicit none
 integer(pInt), intent(in) :: myUnit                                !< file pointer to material configuration
 integer(pInt), parameter :: MAXNCHUNKS = 4_pInt
 integer(pInt), dimension(1_pInt+2_pInt*MAXNCHUNKS) :: positions
 integer(pInt) :: section = 0_pInt, maxNinstance, i, j, e, output = -1_pInt, mySize, myInstance
 character(len=65536) :: tag
 character(len=65536) :: line = ''

 write(6,'(/,a)') ' <<<+- homogenization_'//HOMOGENIZATION_RGC_label//' init -+>>>'
 write(6,'(a)') ' $Id$'
 write(6,'(a15,a)') ' Current time: ',IO_timeStamp()
#include "compilation_info.f90"

 maxNinstance = int(count(homogenization_type == HOMOGENIZATION_RGC_label),pInt)
 if (maxNinstance == 0_pInt) return

 allocate(homogenization_RGC_sizeState(maxNinstance));       homogenization_RGC_sizeState = 0_pInt
 allocate(homogenization_RGC_sizePostResults(maxNinstance)); homogenization_RGC_sizePostResults = 0_pInt
 allocate(homogenization_RGC_Ngrains(3,maxNinstance));       homogenization_RGC_Ngrains = 0_pInt
 allocate(homogenization_RGC_ciAlpha(maxNinstance));         homogenization_RGC_ciAlpha = 0.0_pReal
 allocate(homogenization_RGC_xiAlpha(maxNinstance));         homogenization_RGC_xiAlpha = 0.0_pReal
 allocate(homogenization_RGC_dAlpha(3,maxNinstance));        homogenization_RGC_dAlpha = 0.0_pReal
 allocate(homogenization_RGC_angles(3,maxNinstance));        homogenization_RGC_angles = 400.0_pReal
 allocate(homogenization_RGC_output(maxval(homogenization_Noutput),maxNinstance))
 homogenization_RGC_output = ''
 allocate(homogenization_RGC_sizePostResult(maxval(homogenization_Noutput),maxNinstance))
 homogenization_RGC_sizePostResult = 0_pInt
 allocate(homogenization_RGC_orientation(3,3,mesh_maxNips,mesh_NcpElems))
 homogenization_RGC_orientation = spread(spread(math_I3,3,mesh_maxNips),4,mesh_NcpElems)            ! initialize to identity

 rewind(myUnit)
 do while (trim(line) /= '#EOF#' .and. IO_lc(IO_getTag(line,'<','>')) /= material_partHomogenization) ! wind forward to <homogenization>
   line = IO_read(myUnit)
 enddo

 do while (trim(line) /= '#EOF#')
   line = IO_read(myUnit)
   if (IO_isBlank(line)) cycle                                      ! skip empty lines
   if (IO_getTag(line,'<','>') /= '') exit                          ! stop at next part
   if (IO_getTag(line,'[',']') /= '') then                          ! next section
     section = section + 1_pInt
     output = 0_pInt                                                ! reset output counter
   endif
   if (section > 0_pInt) then                                       ! do not short-circuit here (.and. with next if-statement). It's not safe in Fortran
     if (trim(homogenization_type(section)) == HOMOGENIZATION_RGC_label) then ! one of my sections
       i = homogenization_typeInstance(section)                     ! which instance of my type is the current one
       positions = IO_stringPos(line,MAXNCHUNKS)
       tag = IO_lc(IO_stringValue(line,positions,1_pInt))           ! extract key
       select case(tag)
         case ('(output)')
           output = output + 1_pInt
           homogenization_RGC_output(output,i) = IO_lc(IO_stringValue(line,positions,2_pInt))
         case ('clustersize')
           homogenization_RGC_Ngrains(1,i) = IO_intValue(line,positions,2_pInt)
           homogenization_RGC_Ngrains(2,i) = IO_intValue(line,positions,3_pInt)
           homogenization_RGC_Ngrains(3,i) = IO_intValue(line,positions,4_pInt)
         case ('scalingparameter')
           homogenization_RGC_xiAlpha(i) = IO_floatValue(line,positions,2_pInt)
         case ('overproportionality')
           homogenization_RGC_ciAlpha(i) = IO_floatValue(line,positions,2_pInt)
         case ('grainsize')
           homogenization_RGC_dAlpha(1,i) = IO_floatValue(line,positions,2_pInt)
           homogenization_RGC_dAlpha(2,i) = IO_floatValue(line,positions,3_pInt)
           homogenization_RGC_dAlpha(3,i) = IO_floatValue(line,positions,4_pInt)
         case ('clusterorientation')
           homogenization_RGC_angles(1,i) = IO_floatValue(line,positions,2_pInt)
           homogenization_RGC_angles(2,i) = IO_floatValue(line,positions,3_pInt)
           homogenization_RGC_angles(3,i) = IO_floatValue(line,positions,4_pInt)
       end select
     endif
   endif
 enddo
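
!--------------------------------------------------------------------------------------------------
! For reference, a material.config <homogenization> section using the keys parsed above might look
! like the following (an illustrative sketch only; section name and values are placeholders, and
! the type/Ngrains keys are handled elsewhere, in material.f90):
!
!   [clusteredGrains]
!   type                 rgc
!   Ngrains              8                      ! = p*q*r of clustersize
!   clustersize          2 2 2                  ! p x q x r grains per cluster
!   scalingparameter     10.0                   ! xiAlpha
!   overproportionality  1.0e-4                 ! ciAlpha
!   grainsize            1.0e-5 1.0e-5 1.0e-5   ! dAlpha per cluster direction
!   clusterorientation   0.0 0.0 0.0            ! Euler angles in degree; omit to get a random orientation
!   (output)             constitutivework
!--------------------------------------------------------------------------------------------------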
!--------------------------------------------------------------------------------------------------
! assigning cluster orientations
 elementLooping: do e = 1_pInt,mesh_NcpElems
   if (homogenization_type(mesh_element(3,e)) == HOMOGENIZATION_RGC_label) then
     myInstance = homogenization_typeInstance(mesh_element(3,e))
     if (all(homogenization_RGC_angles(:,myInstance) >= 399.9_pReal)) then ! still at the 400.0 default, i.e. no clusterorientation given: use random orientation
       homogenization_RGC_orientation(1:3,1:3,1,e) = math_EulerToR(math_sampleRandomOri())
       do i = 1_pInt,FE_Nips(FE_geomtype(mesh_element(2,e)))
         if (microstructure_elemhomo(mesh_element(4,e))) then       ! homogeneous element: all IPs get the orientation of the first IP
           homogenization_RGC_orientation(1:3,1:3,i,e) = homogenization_RGC_orientation(1:3,1:3,1,e)
         else
           homogenization_RGC_orientation(1:3,1:3,i,e) = math_EulerToR(math_sampleRandomOri())
         endif
       enddo
     else
       do i = 1_pInt,FE_Nips(FE_geomtype(mesh_element(2,e)))
         homogenization_RGC_orientation(1:3,1:3,i,e) = &
           math_EulerToR(homogenization_RGC_angles(1:3,myInstance)*inRad)
       enddo
     endif
   endif
 enddo elementLooping
 if (iand(debug_level(debug_homogenization),debug_levelExtensive) /= 0_pInt) then
   do i = 1_pInt,maxNinstance
     write(6,'(a15,1x,i4,/)')     'instance: ',i
     write(6,'(a25,3(1x,i8))')    'cluster size: ',(homogenization_RGC_Ngrains(j,i),j=1_pInt,3_pInt)
     write(6,'(a25,1x,e10.3)')    'scaling parameter: ',homogenization_RGC_xiAlpha(i)
     write(6,'(a25,1x,e10.3)')    'over-proportionality: ',homogenization_RGC_ciAlpha(i)
     write(6,'(a25,3(1x,e10.3))') 'grain size: ',(homogenization_RGC_dAlpha(j,i),j=1_pInt,3_pInt)
     write(6,'(a25,3(1x,e10.3))') 'cluster orientation: ',(homogenization_RGC_angles(j,i),j=1_pInt,3_pInt)
   enddo
 endif
 do i = 1_pInt,maxNinstance
   do j = 1_pInt,maxval(homogenization_Noutput)
     select case(homogenization_RGC_output(j,i))
       case('constitutivework')
         mySize = 1_pInt
       case('magnitudemismatch')
         mySize = 3_pInt
       case('penaltyenergy')
         mySize = 1_pInt
       case('volumediscrepancy')
         mySize = 1_pInt
       case('averagerelaxrate')
         mySize = 1_pInt
       case('maximumrelaxrate')
         mySize = 1_pInt
       case default
         mySize = 0_pInt
     end select

     outputFound: if (mySize > 0_pInt) then
       homogenization_RGC_sizePostResult(j,i) = mySize
       homogenization_RGC_sizePostResults(i) = &
         homogenization_RGC_sizePostResults(i) + mySize
     endif outputFound
   enddo
   homogenization_RGC_sizeState(i) &
     = 3_pInt*(homogenization_RGC_Ngrains(1,i)-1_pInt)*homogenization_RGC_Ngrains(2,i)*homogenization_RGC_Ngrains(3,i) &
     + 3_pInt*homogenization_RGC_Ngrains(1,i)*(homogenization_RGC_Ngrains(2,i)-1_pInt)*homogenization_RGC_Ngrains(3,i) &
     + 3_pInt*homogenization_RGC_Ngrains(1,i)*homogenization_RGC_Ngrains(2,i)*(homogenization_RGC_Ngrains(3,i)-1_pInt) &
     + 8_pInt                                                       ! (1) average constitutive work, (2-4) overall mismatch, (5) average penalty energy,
                                                                    ! (6) volume discrepancy, (7) avg relaxation rate component, (8) max relaxation rate component
 enddo
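! As a worked example (illustrative only): a 2 x 2 x 2 cluster has 3*(1*2*2 + 2*1*2 + 2*2*1) = 36
! relaxation components (three per interior interface), so sizeState = 36 + 8 = 44.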
end subroutine homogenization_RGC_init
!--------------------------------------------------------------------------------------------------
!> @brief partitions the deformation gradient onto the constituents
!--------------------------------------------------------------------------------------------------
subroutine homogenization_RGC_partitionDeformation(F,avgF,state,ip,el)
 use prec, only: &
   p_vec
 use debug, only: &
   debug_level, &
   debug_homogenization, &
   debug_levelExtensive
 use mesh, only: &
   mesh_element
 use material, only: &
   homogenization_maxNgrains, &
   homogenization_Ngrains, &
   homogenization_typeInstance
 use FEsolving, only: &
   theInc, &
   cycleCounter

 implicit none
 real(pReal), dimension(3,3,homogenization_maxNgrains), intent(out) :: F !< partitioned F per grain
 real(pReal), dimension(3,3), intent(in) :: avgF                    !< averaged F
 type(p_vec), intent(in) :: state
 integer(pInt), intent(in) :: &
   ip, &                                                            !< integration point number
   el                                                               !< element number
 real(pReal), dimension(3) :: aVect,nVect
 integer(pInt), dimension(4) :: intFace
 integer(pInt), dimension(3) :: iGrain3
 integer(pInt) :: homID,iGrain,iFace,i,j
 integer(pInt), parameter :: nFace = 6_pInt

!--------------------------------------------------------------------------------------------------
! debugging the overall deformation gradient
 if (iand(debug_level(debug_homogenization),debug_levelExtensive) /= 0_pInt) then
   !$OMP CRITICAL (write2out)
   write(6,'(1x,a,i3,a,i3,a)') '========== Increment: ',theInc,' Cycle: ',cycleCounter,' =========='
   write(6,'(1x,a32)') 'Overall deformation gradient: '
   do i = 1_pInt,3_pInt
     write(6,'(1x,3(e15.8,1x))') (avgF(i,j), j = 1_pInt,3_pInt)
   enddo
   write(6,*) ' '
   flush(6)
   !$OMP END CRITICAL (write2out)
 endif

!--------------------------------------------------------------------------------------------------
! compute the deformation gradient of individual grains due to relaxations
 homID = homogenization_typeInstance(mesh_element(3,el))
 F = 0.0_pReal
 do iGrain = 1_pInt,homogenization_Ngrains(mesh_element(3,el))
   iGrain3 = homogenization_RGC_grain1to3(iGrain,homID)
   do iFace = 1_pInt,nFace
     intFace = homogenization_RGC_getInterface(iFace,iGrain3)       ! identifying 6 interfaces of each grain
     aVect = homogenization_RGC_relaxationVector(intFace,state,homID) ! get the relaxation vectors for each interface from global relaxation vector array
     nVect = homogenization_RGC_interfaceNormal(intFace,ip,el)      ! get the normal of each interface
     forall (i=1_pInt:3_pInt,j=1_pInt:3_pInt) &
       F(i,j,iGrain) = F(i,j,iGrain) + aVect(i)*nVect(j)            ! calculating deformation relaxations due to interface relaxation
   enddo
   F(1:3,1:3,iGrain) = F(1:3,1:3,iGrain) + avgF                     ! resulting relaxed deformation gradient
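   ! in effect each grain deforms as F^g = avgF + sum_over_faces a^f (x) n^f, i.e. the average
   ! deformation gradient plus the dyads of interface relaxation vector and interface normal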

!--------------------------------------------------------------------------------------------------
! debugging the grain deformation gradients
   if (iand(debug_level(debug_homogenization),debug_levelExtensive) /= 0_pInt) then
     !$OMP CRITICAL (write2out)
     write(6,'(1x,a32,1x,i3)') 'Deformation gradient of grain: ',iGrain
     do i = 1_pInt,3_pInt
       write(6,'(1x,3(e15.8,1x))') (F(i,j,iGrain), j = 1_pInt,3_pInt)
     enddo
     write(6,*) ' '
     flush(6)
     !$OMP END CRITICAL (write2out)
   endif
 enddo
end subroutine homogenization_RGC_partitionDeformation


!--------------------------------------------------------------------------------------------------
!> @brief update the internal state of the homogenization scheme and tell whether "done" and
!> "happy" with result
!--------------------------------------------------------------------------------------------------
function homogenization_RGC_updateState(state,state0,P,F,F0,avgF,dt,dPdF,ip,el)
 use prec, only: &
   p_vec
 use debug, only: &
   debug_level, &
   debug_homogenization, &
   debug_levelExtensive, &
   debug_e, &
   debug_i
 use math, only: &
   math_invert
 use mesh, only: &
   mesh_element
 use material, only: &
   homogenization_maxNgrains, &
   homogenization_typeInstance, &
   homogenization_Ngrains
 use numerics, only: &
   absTol_RGC, &
   relTol_RGC, &
   absMax_RGC, &
   relMax_RGC, &
   pPert_RGC, &
   maxdRelax_RGC, &
   viscPower_RGC, &
   viscModus_RGC, &
   refRelaxRate_RGC

 implicit none
 type(p_vec), intent(inout) :: state                                !< current state
 type(p_vec), intent(in)    :: state0                               !< initial state
 real(pReal), dimension(3,3,homogenization_maxNgrains), intent(in) :: &
   P, &                                                             !< array of P
   F, &                                                             !< array of F
   F0                                                               !< array of initial F
 real(pReal), dimension(3,3,3,3,homogenization_maxNgrains), intent(in) :: dPdF !< array of current grain stiffness
 real(pReal), dimension(3,3), intent(in) :: avgF                    !< average F
 real(pReal), intent(in) :: dt                                      !< time increment
 integer(pInt), intent(in) :: &
   ip, &                                                            !< integration point number
   el                                                               !< element number
 logical, dimension(2) :: homogenization_RGC_updateState
 integer(pInt), dimension(4) :: intFaceN,intFaceP,faceID
 integer(pInt), dimension(3) :: nGDim,iGr3N,iGr3P,stresLoc
 integer(pInt), dimension(2) :: residLoc
 integer(pInt) :: homID,iNum,i,j,nIntFaceTot,iGrN,iGrP,iMun,iFace,k,l,ipert,iGrain,nGrain
 real(pReal), dimension(3,3,homogenization_maxNgrains) :: R,pF,pR,D,pD
 real(pReal), dimension(3,homogenization_maxNgrains) :: NN,pNN
 real(pReal), dimension(3) :: normP,normN,mornP,mornN
 real(pReal) :: residMax,stresMax,constitutiveWork,penaltyEnergy,volDiscrep
 logical :: error
 integer(pInt), parameter :: nFace = 6_pInt
 real(pReal), dimension(:,:), allocatable :: tract,jmatrix,jnverse,smatrix,pmatrix,rmatrix
 real(pReal), dimension(:),   allocatable :: resid,relax,p_relax,p_resid,drelax

 if (dt < tiny(1.0_pReal)) then                                     ! zero time step
   homogenization_RGC_updateState = .true.                          ! pretend everything is fine and return
   return
 endif

!--------------------------------------------------------------------------------------------------
! get the dimension of the cluster (grains and interfaces)
 homID = homogenization_typeInstance(mesh_element(3,el))
 nGDim = homogenization_RGC_Ngrains(1:3,homID)
 nGrain = homogenization_Ngrains(mesh_element(3,el))
 nIntFaceTot = (nGDim(1)-1_pInt)*nGDim(2)*nGDim(3) + nGDim(1)*(nGDim(2)-1_pInt)*nGDim(3) &
             +  nGDim(1)*nGDim(2)*(nGDim(3)-1_pInt)
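 ! e.g. (illustrative only) for a 2 x 2 x 2 cluster: nIntFaceTot = 1*2*2 + 2*1*2 + 2*2*1 = 12
 ! interior interfaces, giving 3*nIntFaceTot = 36 relaxation degrees of freedom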

!--------------------------------------------------------------------------------------------------
! allocate the size of the global relaxation arrays/jacobian matrices depending on the size of the cluster
 allocate(resid(3_pInt*nIntFaceTot)); resid = 0.0_pReal
 allocate(tract(nIntFaceTot,3));      tract = 0.0_pReal
 allocate(relax(3_pInt*nIntFaceTot)); relax = state%p(1:3_pInt*nIntFaceTot)
 allocate(drelax(3_pInt*nIntFaceTot))
 drelax = state%p(1:3_pInt*nIntFaceTot) - state0%p(1:3_pInt*nIntFaceTot) ! relaxation increment over the current time step

!--------------------------------------------------------------------------------------------------
! debugging the obtained state
 if (iand(debug_level(debug_homogenization),debug_levelExtensive) /= 0_pInt) then
   !$OMP CRITICAL (write2out)
   write(6,'(1x,a30)') 'Obtained state: '
   do i = 1_pInt,3_pInt*nIntFaceTot
     write(6,'(1x,2(e15.8,1x))') state%p(i)
   enddo
   write(6,*) ' '
   !$OMP END CRITICAL (write2out)
 endif

!--------------------------------------------------------------------------------------------------
! computing interface mismatch and stress penalty tensor for all interfaces of all grains
 call homogenization_RGC_stressPenalty(R,NN,avgF,F,ip,el,homID)

!--------------------------------------------------------------------------------------------------
! calculating volume discrepancy and stress penalty related to overall volume discrepancy
 call homogenization_RGC_volumePenalty(D,volDiscrep,F,avgF,ip,el)

!--------------------------------------------------------------------------------------------------
! debugging the mismatch, stress and penalties of grains
 if (iand(debug_level(debug_homogenization),debug_levelExtensive) /= 0_pInt) then
   !$OMP CRITICAL (write2out)
   do iGrain = 1_pInt,nGrain
     write(6,'(1x,a30,1x,i3,1x,a4,3(1x,e15.8))') 'Mismatch magnitude of grain(',iGrain,') :', &
       NN(1,iGrain),NN(2,iGrain),NN(3,iGrain)
     write(6,'(/,1x,a30,1x,i3)') 'Stress and penalties of grain: ',iGrain
     do i = 1_pInt,3_pInt
       write(6,'(1x,3(e15.8,1x),1x,3(e15.8,1x),1x,3(e15.8,1x))') (P(i,j,iGrain), j = 1_pInt,3_pInt), &
                                                                 (R(i,j,iGrain), j = 1_pInt,3_pInt), &
                                                                 (D(i,j,iGrain), j = 1_pInt,3_pInt)
     enddo
     write(6,*) ' '
   enddo
   !$OMP END CRITICAL (write2out)
 endif

!!!------------------------------------------------------------------------------------------------
! computing the residual stress from the balance of traction at all (interior) interfaces
 do iNum = 1_pInt,nIntFaceTot
   faceID = homogenization_RGC_interface1to4(iNum,homID)            ! identifying the interface ID in local coordinate system (4-dimensional index)

!--------------------------------------------------------------------------------------------------
! identify the left/bottom/back grain (-|N)
   iGr3N = faceID(2:4)                                              ! identifying the grain ID in local coordinate system (3-dimensional index)
   iGrN = homogenization_RGC_grain3to1(iGr3N,homID)                 ! translate the local grain ID into global coordinate system (1-dimensional index)
   intFaceN = homogenization_RGC_getInterface(2_pInt*faceID(1),iGr3N)
   normN = homogenization_RGC_interfaceNormal(intFaceN,ip,el)       ! get the interface normal

!--------------------------------------------------------------------------------------------------
! identify the right/up/front grain (+|P)
   iGr3P = iGr3N
   iGr3P(faceID(1)) = iGr3N(faceID(1)) + 1_pInt                     ! identifying the grain ID in local coordinate system (3-dimensional index)
   iGrP = homogenization_RGC_grain3to1(iGr3P,homID)                 ! translate the local grain ID into global coordinate system (1-dimensional index)
   intFaceP = homogenization_RGC_getInterface(2_pInt*faceID(1)-1_pInt,iGr3P)
   normP = homogenization_RGC_interfaceNormal(intFaceP,ip,el)       ! get the interface normal

!--------------------------------------------------------------------------------------------------
! compute the residual of traction at the interface (in local system, 4-dimensional index)
   do i = 1_pInt,3_pInt
     tract(iNum,i) = sign(viscModus_RGC*(abs(drelax(i+3*(iNum-1_pInt)))/(refRelaxRate_RGC*dt))**viscPower_RGC, &
                          drelax(i+3*(iNum-1_pInt)))                ! contribution from the relaxation viscosity
     do j = 1_pInt,3_pInt
       tract(iNum,i) = tract(iNum,i) + (P(i,j,iGrP) + R(i,j,iGrP) + D(i,j,iGrP))*normP(j) &         ! contribution from material stress P, mismatch penalty R, and volume penalty D projected into the interface
                                     + (P(i,j,iGrN) + R(i,j,iGrN) + D(i,j,iGrN))*normN(j)
       resid(i+3_pInt*(iNum-1_pInt)) = tract(iNum,i)                ! translate the local residual into global 1-dimensional residual array
     enddo
   enddo
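   ! traction equilibrium at this interface thus reads
   !   t_i = [P+R+D]^P_ij n^P_j + [P+R+D]^N_ij n^N_j + (viscous drag)_i -> 0
   ! and the update below drives this residual to zero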

!--------------------------------------------------------------------------------------------------
! debugging the residual stress
   if (iand(debug_level(debug_homogenization),debug_levelExtensive) /= 0_pInt) then
     !$OMP CRITICAL (write2out)
     write(6,'(1x,a30,1x,i3)') 'Traction at interface: ',iNum
     write(6,'(1x,3(e15.8,1x))') (tract(iNum,j), j = 1_pInt,3_pInt)
     write(6,*) ' '
     !$OMP END CRITICAL (write2out)
   endif
 enddo

!!!------------------------------------------------------------------------------------------------
! convergence check for stress residual
 stresMax = maxval(abs(P))                                          ! get the maximum of first Piola-Kirchhoff (material) stress
 stresLoc = int(maxloc(abs(P)),pInt)                                ! get the location of the maximum stress
 residMax = maxval(abs(tract))                                      ! get the maximum of the residual
 residLoc = int(maxloc(abs(tract)),pInt)                            ! get the position of the maximum residual

!--------------------------------------------------------------------------------------------------
! debugging the convergence criteria
 if (iand(debug_level(debug_homogenization),debug_levelExtensive) /= 0_pInt &
     .and. debug_e == el .and. debug_i == ip) then
   !$OMP CRITICAL (write2out)
   write(6,'(1x,a)') ' '
   write(6,'(1x,a,1x,i2,1x,i4)') 'RGC residual check ...',ip,el
   write(6,'(1x,a15,1x,e15.8,1x,a7,i3,1x,a12,i2,i2)') 'Max stress: ',stresMax, &
     '@ grain',stresLoc(3),'in component',stresLoc(1),stresLoc(2)
   write(6,'(1x,a15,1x,e15.8,1x,a7,i3,1x,a12,i2)') 'Max residual: ',residMax, &
     '@ iface',residLoc(1),'in direction',residLoc(2)
   flush(6)
   !$OMP END CRITICAL (write2out)
 endif

 homogenization_RGC_updateState = .false.

!--------------------------------------------------------------------------------------------------
! if convergence is reached => done and happy
 if (residMax < relTol_RGC*stresMax .or. residMax < absTol_RGC) then
   homogenization_RGC_updateState = .true.

   if (iand(debug_level(debug_homogenization),debug_levelExtensive) /= 0_pInt &
       .and. debug_e == el .and. debug_i == ip) then
     !$OMP CRITICAL (write2out)
     write(6,'(1x,a55,/)') '... done and happy'
     flush(6)
     !$OMP END CRITICAL (write2out)
   endif
! write(6,'(1x,a,1x,i3,1x,a6,1x,i3,1x,a12)')'RGC_updateState: ip',ip,'| el',el,'converged :)'

!--------------------------------------------------------------------------------------------------
! compute/update the state for postResult, i.e., all energy densities computed by time-integration
   constitutiveWork = state%p(3*nIntFaceTot+1)
   penaltyEnergy    = state%p(3*nIntFaceTot+5)
   do iGrain = 1_pInt,homogenization_Ngrains(mesh_element(3,el))    ! time-integration loop for calculating the work and energy
     do i = 1_pInt,3_pInt
       do j = 1_pInt,3_pInt
         constitutiveWork = constitutiveWork + P(i,j,iGrain)*(F(i,j,iGrain) - F0(i,j,iGrain))/real(nGrain,pReal)
         penaltyEnergy    = penaltyEnergy    + R(i,j,iGrain)*(F(i,j,iGrain) - F0(i,j,iGrain))/real(nGrain,pReal)
       enddo
     enddo
   enddo
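   ! i.e. constitutiveWork += (1/nGrain) sum_g P^g : (F^g - F0^g), and analogously for
   ! penaltyEnergy with the mismatch penalty R^g, integrated over the current increment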
   state%p(3*nIntFaceTot+1) = constitutiveWork                      ! the bulk mechanical/constitutive work
   state%p(3*nIntFaceTot+2) = sum(NN(1,:))/real(nGrain,pReal)       ! the overall mismatch of all interfaces normal to e1-direction
   state%p(3*nIntFaceTot+3) = sum(NN(2,:))/real(nGrain,pReal)       ! the overall mismatch of all interfaces normal to e2-direction
   state%p(3*nIntFaceTot+4) = sum(NN(3,:))/real(nGrain,pReal)       ! the overall mismatch of all interfaces normal to e3-direction
   state%p(3*nIntFaceTot+5) = penaltyEnergy                         ! the overall penalty energy
   state%p(3*nIntFaceTot+6) = volDiscrep                            ! the overall volume discrepancy
   state%p(3*nIntFaceTot+7) = sum(abs(drelax))/dt/real(3_pInt*nIntFaceTot,pReal) ! the average rate of relaxation vectors
   state%p(3*nIntFaceTot+8) = maxval(abs(drelax))/dt                ! the maximum rate of relaxation vectors

   if (iand(debug_level(debug_homogenization),debug_levelExtensive) /= 0_pInt &
       .and. debug_e == el .and. debug_i == ip) then
     !$OMP CRITICAL (write2out)
     write(6,'(1x,a30,1x,e15.8)')    'Constitutive work: ',constitutiveWork
     write(6,'(1x,a30,3(1x,e15.8))') 'Magnitude mismatch: ',sum(NN(1,:))/real(nGrain,pReal), &
                                                            sum(NN(2,:))/real(nGrain,pReal), &
                                                            sum(NN(3,:))/real(nGrain,pReal)
     write(6,'(1x,a30,1x,e15.8)')    'Penalty energy: ',penaltyEnergy
     write(6,'(1x,a30,1x,e15.8,/)')  'Volume discrepancy: ',volDiscrep
     write(6,'(1x,a30,1x,e15.8)')    'Maximum relaxation rate: ',maxval(abs(drelax))/dt
     write(6,'(1x,a30,1x,e15.8,/)')  'Average relaxation rate: ',sum(abs(drelax))/dt/real(3_pInt*nIntFaceTot,pReal)
     flush(6)
     !$OMP END CRITICAL (write2out)
   endif

   deallocate(tract,resid,relax,drelax)
   return

!--------------------------------------------------------------------------------------------------
! if the residual blows up => done but unhappy
 elseif (residMax > relMax_RGC*stresMax .or. residMax > absMax_RGC) then ! try to restart when residual blows up exceeding maximum bound
   homogenization_RGC_updateState = [.true.,.false.]                ! with direct cut-back

   if (iand(debug_level(debug_homogenization),debug_levelExtensive) /= 0_pInt &
       .and. debug_e == el .and. debug_i == ip) then
     !$OMP CRITICAL (write2out)
     write(6,'(1x,a55,/)') '... broken'
     flush(6)
     !$OMP END CRITICAL (write2out)
   endif

   deallocate(tract,resid,relax,drelax)
   return

 else                                                               ! proceed with computing the Jacobian and state update

   if (iand(debug_level(debug_homogenization),debug_levelExtensive) /= 0_pInt &
       .and. debug_e == el .and. debug_i == ip) then
     !$OMP CRITICAL (write2out)
     write(6,'(1x,a55,/)') '... not yet done'
     flush(6)
     !$OMP END CRITICAL (write2out)
   endif
endif

!!!------------------------------------------------------------------------------------------------
! construct the global Jacobian matrix for updating the global relaxation vector array when
! convergence is not yet reached ...

!--------------------------------------------------------------------------------------------------
! ... of the constitutive stress tangent, assembled from dPdF or material constitutive model "smatrix"
 allocate(smatrix(3*nIntFaceTot,3*nIntFaceTot)); smatrix = 0.0_pReal
 do iNum = 1_pInt,nIntFaceTot
   faceID = homogenization_RGC_interface1to4(iNum,homID)            ! assembling of local dPdF into global Jacobian matrix

!--------------------------------------------------------------------------------------------------
! identify the left/bottom/back grain (-|N)
   iGr3N = faceID(2:4)                                              ! identifying the grain ID in local coordinate system
   iGrN = homogenization_RGC_grain3to1(iGr3N,homID)                 ! translate into global grain ID
   intFaceN = homogenization_RGC_getInterface(2_pInt*faceID(1),iGr3N) ! identifying the connecting interface in local coordinate system
   normN = homogenization_RGC_interfaceNormal(intFaceN,ip,el)       ! get the interface normal
   do iFace = 1_pInt,nFace
     intFaceN = homogenization_RGC_getInterface(iFace,iGr3N)        ! identifying all interfaces that influence relaxation of the above interface
     mornN = homogenization_RGC_interfaceNormal(intFaceN,ip,el)     ! get normal of the interfaces
     iMun = homogenization_RGC_interface4to1(intFaceN,homID)        ! translate the interface ID into local 4-dimensional index
     if (iMun > 0) then                                             ! get the corresponding tangent
       do i=1_pInt,3_pInt; do j=1_pInt,3_pInt; do k=1_pInt,3_pInt; do l=1_pInt,3_pInt
         smatrix(3*(iNum-1)+i,3*(iMun-1)+j) = smatrix(3*(iNum-1)+i,3*(iMun-1)+j) + dPdF(i,k,j,l,iGrN)*normN(k)*mornN(l)
       enddo; enddo; enddo; enddo
! projecting the material tangent dPdF into the interface
! to obtain the Jacobian matrix contribution of dPdF
     endif
   enddo

!--------------------------------------------------------------------------------------------------
! identify the right/up/front grain (+|P)
   iGr3P = iGr3N
   iGr3P(faceID(1)) = iGr3N(faceID(1)) + 1_pInt                     ! identifying the grain ID in local coordinate system
   iGrP = homogenization_RGC_grain3to1(iGr3P,homID)                 ! translate into global grain ID
   intFaceP = homogenization_RGC_getInterface(2_pInt*faceID(1)-1_pInt,iGr3P) ! identifying the connecting interface in local coordinate system
   normP = homogenization_RGC_interfaceNormal(intFaceP,ip,el)       ! get the interface normal
   do iFace = 1_pInt,nFace
     intFaceP = homogenization_RGC_getInterface(iFace,iGr3P)        ! identifying all interfaces that influence relaxation of the above interface
     mornP = homogenization_RGC_interfaceNormal(intFaceP,ip,el)     ! get normal of the interfaces
     iMun = homogenization_RGC_interface4to1(intFaceP,homID)        ! translate the interface ID into local 4-dimensional index
     if (iMun > 0_pInt) then                                        ! get the corresponding tangent
       do i=1_pInt,3_pInt; do j=1_pInt,3_pInt; do k=1_pInt,3_pInt; do l=1_pInt,3_pInt
         smatrix(3*(iNum-1)+i,3*(iMun-1)+j) = smatrix(3*(iNum-1)+i,3*(iMun-1)+j) + dPdF(i,k,j,l,iGrP)*normP(k)*mornP(l)
       enddo; enddo; enddo; enddo
     endif
   enddo
 enddo

!--------------------------------------------------------------------------------------------------
! debugging the global Jacobian matrix of stress tangent
 if (iand(debug_level(debug_homogenization),debug_levelExtensive) /= 0_pInt) then
   !$OMP CRITICAL (write2out)
   write(6,'(1x,a30)') 'Jacobian matrix of stress'
   do i = 1_pInt,3_pInt*nIntFaceTot
     write(6,'(1x,100(e11.4,1x))') (smatrix(i,j), j = 1_pInt,3_pInt*nIntFaceTot)
   enddo
   write(6,*) ' '
   flush(6)
   !$OMP END CRITICAL (write2out)
 endif

!--------------------------------------------------------------------------------------------------
! ... of the stress penalty tangent (mismatch penalty and volume penalty, computed using numerical
! perturbation method) "pmatrix"
allocate ( pmatrix ( 3 * nIntFaceTot , 3 * nIntFaceTot ) ) ; pmatrix = 0.0_pReal
allocate ( p_relax ( 3 * nIntFaceTot ) ) ; p_relax = 0.0_pReal
allocate ( p_resid ( 3 * nIntFaceTot ) ) ; p_resid = 0.0_pReal
do ipert = 1_pInt , 3_pInt * nIntFaceTot
p_relax = relax
p_relax ( ipert ) = relax ( ipert ) + pPert_RGC ! perturb the relaxation vector
state % p ( 1 : 3 * nIntFaceTot ) = p_relax
call homogenization_RGC_grainDeformation ( pF , avgF , state , ip , el ) ! compute the grains deformation from perturbed state
call homogenization_RGC_stressPenalty ( pR , pNN , avgF , pF , ip , el , homID ) ! compute stress penalty due to interface mismatch from perturbed state
call homogenization_RGC_volumePenalty ( pD , volDiscrep , pF , avgF , ip , el ) ! compute stress penalty due to volume discrepancy from perturbed state

!--------------------------------------------------------------------------------------------------
! computing the global stress residual array from the perturbed state
p_resid = 0.0_pReal
do iNum = 1_pInt , nIntFaceTot
faceID = homogenization_RGC_interface1to4 ( iNum , homID ) ! identifying the interface ID in local coordinate system (4-dimensional index)
!--------------------------------------------------------------------------------------------------
! identify the left/bottom/back grain (-|N)
iGr3N = faceID ( 2 : 4 ) ! identifying the grain ID in local coordinate system (3-dimensional index)
iGrN = homogenization_RGC_grain3to1 ( iGr3N , homID ) ! translate the local grain ID into global coordinate system (1-dimensional index)
intFaceN = homogenization_RGC_getInterface ( 2_pInt * faceID ( 1 ) , iGr3N ) ! identifying the interface ID of the grain
normN = homogenization_RGC_interfaceNormal ( intFaceN , ip , el ) ! get the corresponding interface normal
2010-11-26 17:20:20 +05:30
2013-02-21 03:36:15 +05:30
!--------------------------------------------------------------------------------------------------
! identify the right/up/front grain (+|P)
2009-10-12 21:31:49 +05:30
iGr3P = iGr3N
iGr3P ( faceID ( 1 ) ) = iGr3N ( faceID ( 1 ) ) + 1_pInt ! identifying the grain ID in local coordinate system (3-dimensional index)
iGrP = homogenization_RGC_grain3to1 ( iGr3P , homID ) ! translate the local grain ID into global coordinate system (1-dimensional index)
intFaceP = homogenization_RGC_getInterface ( 2_pInt * faceID ( 1 ) - 1_pInt , iGr3P ) ! identifying the interface ID of the grain
normP = homogenization_RGC_interfaceNormal ( intFaceP , ip , el ) ! get the corresponding normal

!--------------------------------------------------------------------------------------------------
! compute the residual stress (contribution of mismatch and volume penalties) from perturbed state
! at all interfaces
do i = 1_pInt , 3_pInt ; do j = 1_pInt , 3_pInt
p_resid ( i + 3 * ( iNum - 1 ) ) = p_resid ( i + 3 * ( iNum - 1 ) ) + ( pR ( i , j , iGrP ) - R ( i , j , iGrP ) ) * normP ( j ) &
                                                                    + ( pR ( i , j , iGrN ) - R ( i , j , iGrN ) ) * normN ( j ) &
                                                                    + ( pD ( i , j , iGrP ) - D ( i , j , iGrP ) ) * normP ( j ) &
                                                                    + ( pD ( i , j , iGrN ) - D ( i , j , iGrN ) ) * normN ( j )
enddo ; enddo
enddo
pmatrix ( : , ipert ) = p_resid / pPert_RGC
enddo
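! since the perturbed quantities enter only as differences (pR - R, pD - D), each column of
! pmatrix is the forward difference (resid(relax + pPert_RGC*e_ipert) - resid(relax))/pPert_RGC,
! i.e. a numerical approximation of the penalty tangent d(resid)/d(relax)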

!--------------------------------------------------------------------------------------------------
! debugging the global Jacobian matrix of penalty tangent
if ( iand ( debug_level ( debug_homogenization ) , debug_levelExtensive ) /= 0_pInt ) then
!$OMP CRITICAL (write2out)
write ( 6 , '(1x,a30)' ) 'Jacobian matrix of penalty'
do i = 1_pInt , 3_pInt * nIntFaceTot
write ( 6 , '(1x,100(e11.4,1x))' ) ( pmatrix ( i , j ) , j = 1_pInt , 3_pInt * nIntFaceTot )
enddo
write ( 6 , * ) ' '
flush ( 6 )
!$OMP END CRITICAL (write2out)
endif

!--------------------------------------------------------------------------------------------------
! ... of the numerical viscosity traction "rmatrix"
allocate ( rmatrix ( 3 * nIntFaceTot , 3 * nIntFaceTot ) ) ; rmatrix = 0.0_pReal
forall ( i = 1_pInt : 3_pInt * nIntFaceTot ) &
rmatrix ( i , i ) = viscModus_RGC * viscPower_RGC / ( refRelaxRate_RGC * dt ) * & ! tangent due to numerical viscosity traction appears
( abs ( drelax ( i ) ) / ( refRelaxRate_RGC * dt ) ) ** ( viscPower_RGC - 1.0_pReal ) ! only in the main diagonal term
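! rationale: the viscous traction is viscModus_RGC*(|drelax|/(refRelaxRate_RGC*dt))**viscPower_RGC,
! so its derivative with respect to the relaxation vector is exactly the diagonal expression
! above, obtained by differentiating the power law once (chain rule, with drelax = relax - relax_old)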

!--------------------------------------------------------------------------------------------------
! debugging the global Jacobian matrix of numerical viscosity tangent
if ( iand ( debug_level ( debug_homogenization ) , debug_levelExtensive ) /= 0_pInt ) then
!$OMP CRITICAL (write2out)
write ( 6 , '(1x,a30)' ) 'Jacobian matrix of viscosity'
do i = 1_pInt , 3_pInt * nIntFaceTot
write ( 6 , '(1x,100(e11.4,1x))' ) ( rmatrix ( i , j ) , j = 1_pInt , 3_pInt * nIntFaceTot )
enddo
write ( 6 , * ) ' '
flush ( 6 )
!$OMP END CRITICAL (write2out)
endif

!--------------------------------------------------------------------------------------------------
! The overall Jacobian matrix summarizing contributions of smatrix, pmatrix, rmatrix
allocate ( jmatrix ( 3 * nIntFaceTot , 3 * nIntFaceTot ) ) ; jmatrix = smatrix + pmatrix + rmatrix

if ( iand ( debug_level ( debug_homogenization ) , debug_levelExtensive ) /= 0_pInt ) then
!$OMP CRITICAL (write2out)
write ( 6 , '(1x,a30)' ) 'Jacobian matrix (total)'
do i = 1_pInt , 3_pInt * nIntFaceTot
write ( 6 , '(1x,100(e11.4,1x))' ) ( jmatrix ( i , j ) , j = 1_pInt , 3_pInt * nIntFaceTot )
enddo
write ( 6 , * ) ' '
flush ( 6 )
!$OMP END CRITICAL (write2out)
endif

!--------------------------------------------------------------------------------------------------
! computing the update of the state variable (relaxation vectors) using the Jacobian matrix
allocate ( jnverse ( 3_pInt * nIntFaceTot , 3_pInt * nIntFaceTot ) ) ; jnverse = 0.0_pReal
call math_invert ( size ( jmatrix , 1 ) , jmatrix , jnverse , error ) ! compute the inverse of the overall Jacobian matrix

!--------------------------------------------------------------------------------------------------
! debugging the inverse Jacobian matrix
if ( iand ( debug_level ( debug_homogenization ) , debug_levelExtensive ) /= 0_pInt ) then
!$OMP CRITICAL (write2out)
write ( 6 , '(1x,a30)' ) 'Jacobian inverse'
do i = 1_pInt , 3_pInt * nIntFaceTot
write ( 6 , '(1x,100(e11.4,1x))' ) ( jnverse ( i , j ) , j = 1_pInt , 3_pInt * nIntFaceTot )
enddo
write ( 6 , * ) ' '
flush ( 6 )
!$OMP END CRITICAL (write2out)
endif

!--------------------------------------------------------------------------------------------------
! calculate the state update (global relaxation vectors) for the next Newton-Raphson iteration
drelax = 0.0_pReal
do i = 1_pInt , 3_pInt * nIntFaceTot
do j = 1_pInt , 3_pInt * nIntFaceTot
drelax ( i ) = drelax ( i ) - jnverse ( i , j ) * resid ( j ) ! calculate the correction for the state variable
enddo
enddo
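! the double loop above is the Newton-Raphson step drelax = -jnverse*resid, written out as an
! explicit matrix-vector product with the inverted overall Jacobian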
relax = relax + drelax ! updating the state variable for the next iteration
state % p ( 1 : 3 * nIntFaceTot ) = relax
if ( any ( abs ( drelax ) > maxdRelax_RGC ) ) then ! forcing cutback when the incremental change of relaxation vector becomes too large
homogenization_RGC_updateState = [ .true. , .false. ]
!$OMP CRITICAL (write2out)
write ( 6 , '(1x,a,1x,i3,1x,a,1x,i3,1x,a)' ) 'RGC_updateState: ip' , ip , '| el' , el , 'enforces cutback'
write ( 6 , '(1x,a,1x,e15.8)' ) 'due to large relaxation change =' , maxval ( abs ( drelax ) )
flush ( 6 )
!$OMP END CRITICAL (write2out)
endif

!--------------------------------------------------------------------------------------------------
! debugging the return state
if ( iand ( debug_level ( debug_homogenization ) , debug_levelExtensive ) /= 0_pInt ) then
!$OMP CRITICAL (write2out)
write ( 6 , '(1x,a30)' ) 'Returned state: '
do i = 1_pInt , 3_pInt * nIntFaceTot
write ( 6 , '(1x,2(e15.8,1x))' ) state % p ( i )
enddo
write ( 6 , * ) ' '
flush ( 6 )
!$OMP END CRITICAL (write2out)
endif
deallocate ( tract , resid , jmatrix , jnverse , relax , drelax , pmatrix , smatrix , p_relax , p_resid )
end function homogenization_RGC_updateState
!--------------------------------------------------------------------------------------------------
!> @brief derive average stress and stiffness from constituent quantities
!--------------------------------------------------------------------------------------------------
subroutine homogenization_RGC_averageStressAndItsTangent ( avgP , dAvgPdAvgF , P , dPdF , el )
use debug , only : &
debug_level , &
debug_homogenization , &
debug_levelExtensive
use mesh , only : mesh_element
use material , only : homogenization_maxNgrains , homogenization_Ngrains , homogenization_typeInstance
use math , only : math_Plain3333to99

implicit none
real ( pReal ) , dimension ( 3 , 3 ) , intent ( out ) :: avgP !< average stress at material point
real ( pReal ) , dimension ( 3 , 3 , 3 , 3 ) , intent ( out ) :: dAvgPdAvgF !< average stiffness at material point
real ( pReal ) , dimension ( 3 , 3 , homogenization_maxNgrains ) , intent ( in ) :: P !< array of current grain stresses
real ( pReal ) , dimension ( 3 , 3 , 3 , 3 , homogenization_maxNgrains ) , intent ( in ) :: dPdF !< array of current grain stiffnesses
integer ( pInt ) , intent ( in ) :: el !< element number
real ( pReal ) , dimension ( 9 , 9 ) :: dPdF99
integer ( pInt ) :: homID , i , j , Ngrains , iGrain

homID = homogenization_typeInstance ( mesh_element ( 3 , el ) )
Ngrains = homogenization_Ngrains ( mesh_element ( 3 , el ) )

!--------------------------------------------------------------------------------------------------
! debugging the grain tangent
if ( iand ( debug_level ( debug_homogenization ) , debug_levelExtensive ) /= 0_pInt ) then
!$OMP CRITICAL (write2out)
do iGrain = 1_pInt , Ngrains
dPdF99 = math_Plain3333to99 ( dPdF ( 1 : 3 , 1 : 3 , 1 : 3 , 1 : 3 , iGrain ) )
write ( 6 , '(1x,a30,1x,i3)' ) 'Stress tangent of grain: ' , iGrain
do i = 1_pInt , 9_pInt
write ( 6 , '(1x,(e15.8,1x))' ) ( dPdF99 ( i , j ) , j = 1_pInt , 9_pInt )
enddo
write ( 6 , * ) ' '
enddo
flush ( 6 )
!$OMP END CRITICAL (write2out)
endif

!--------------------------------------------------------------------------------------------------
! computing the average first Piola-Kirchhoff stress P and the average tangent dPdF
avgP = sum ( P , 3 ) / real ( Ngrains , pReal )
dAvgPdAvgF = sum ( dPdF , 5 ) / real ( Ngrains , pReal )
end subroutine homogenization_RGC_averageStressAndItsTangent


!--------------------------------------------------------------------------------------------------
!> @brief return array of homogenization results for post file inclusion
!--------------------------------------------------------------------------------------------------
pure function homogenization_RGC_postResults ( state , el )
use prec , only : &
p_vec
use mesh , only : &
mesh_element
use material , only : &
homogenization_typeInstance , &
homogenization_Noutput

implicit none
type ( p_vec ) , intent ( in ) :: state ! my state
integer ( pInt ) , intent ( in ) :: el ! element number
integer ( pInt ) :: homID , o , c , nIntFaceTot
real ( pReal ) , dimension ( homogenization_RGC_sizePostResults ( homogenization_typeInstance ( mesh_element ( 3 , el ) ) ) ) :: &
homogenization_RGC_postResults

homID = homogenization_typeInstance ( mesh_element ( 3 , el ) )
nIntFaceTot = ( homogenization_RGC_Ngrains ( 1 , homID ) - 1_pInt ) * homogenization_RGC_Ngrains ( 2 , homID ) * homogenization_RGC_Ngrains ( 3 , homID ) &
+ homogenization_RGC_Ngrains ( 1 , homID ) * ( homogenization_RGC_Ngrains ( 2 , homID ) - 1_pInt ) * homogenization_RGC_Ngrains ( 3 , homID ) &
+ homogenization_RGC_Ngrains ( 1 , homID ) * homogenization_RGC_Ngrains ( 2 , homID ) * ( homogenization_RGC_Ngrains ( 3 , homID ) - 1_pInt )
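! e.g. a 2 x 2 x 2 cluster has 1*2*2 + 2*1*2 + 2*2*1 = 12 internal interfaces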

c = 0_pInt
homogenization_RGC_postResults = 0.0_pReal
do o = 1_pInt , homogenization_Noutput ( mesh_element ( 3 , el ) )
select case ( homogenization_RGC_output ( o , homID ) )
case ( 'constitutivework' )
homogenization_RGC_postResults ( c + 1 ) = state % p ( 3 * nIntFaceTot + 1 )
c = c + 1_pInt
case ( 'magnitudemismatch' )
homogenization_RGC_postResults ( c + 1 ) = state % p ( 3 * nIntFaceTot + 2 )
homogenization_RGC_postResults ( c + 2 ) = state % p ( 3 * nIntFaceTot + 3 )
homogenization_RGC_postResults ( c + 3 ) = state % p ( 3 * nIntFaceTot + 4 )
c = c + 3_pInt
case ( 'penaltyenergy' )
homogenization_RGC_postResults ( c + 1 ) = state % p ( 3 * nIntFaceTot + 5 )
c = c + 1_pInt
case ( 'volumediscrepancy' )
homogenization_RGC_postResults ( c + 1 ) = state % p ( 3 * nIntFaceTot + 6 )
c = c + 1_pInt
case ( 'averagerelaxrate' )
homogenization_RGC_postResults ( c + 1 ) = state % p ( 3 * nIntFaceTot + 7 )
c = c + 1_pInt
case ( 'maximumrelaxrate' )
homogenization_RGC_postResults ( c + 1 ) = state % p ( 3 * nIntFaceTot + 8 )
c = c + 1_pInt
end select
enddo
end function homogenization_RGC_postResults
!--------------------------------------------------------------------------------------------------
!> @brief calculate stress-like penalty due to deformation mismatch
!--------------------------------------------------------------------------------------------------
subroutine homogenization_RGC_stressPenalty ( rPen , nMis , avgF , fDef , ip , el , homID )
use debug , only : &
debug_level , &
debug_homogenization , &
debug_levelExtensive , &
debug_e , &
debug_i
use mesh , only : &
mesh_element
use constitutive , only : &
constitutive_homogenizedC
use math , only : &
math_civita , &
math_invert33
use material , only : &
homogenization_maxNgrains , &
homogenization_Ngrains
use numerics , only : &
xSmoo_RGC

implicit none
real ( pReal ) , dimension ( 3 , 3 , homogenization_maxNgrains ) , intent ( out ) :: rPen !< stress-like penalty
real ( pReal ) , dimension ( 3 , homogenization_maxNgrains ) , intent ( out ) :: nMis !< total amount of mismatch
real ( pReal ) , dimension ( 3 , 3 , homogenization_maxNgrains ) , intent ( in ) :: fDef !< deformation gradients
real ( pReal ) , dimension ( 3 , 3 ) , intent ( in ) :: avgF !< initial effective stretch tensor
integer ( pInt ) , intent ( in ) :: ip , el
integer ( pInt ) , intent ( in ) :: homID !< homogenization ID
integer ( pInt ) , dimension ( 4 ) :: intFace
integer ( pInt ) , dimension ( 3 ) :: iGrain3 , iGNghb3 , nGDim
real ( pReal ) , dimension ( 3 , 3 ) :: gDef , nDef
real ( pReal ) , dimension ( 3 ) :: nVect , surfCorr
real ( pReal ) , dimension ( 2 ) :: Gmoduli
integer ( pInt ) :: iGrain , iGNghb , iFace , i , j , k , l
real ( pReal ) :: muGrain , muGNghb , nDefNorm , bgGrain , bgGNghb
integer ( pInt ) , parameter :: nFace = 6_pInt
real ( pReal ) , parameter :: nDefToler = 1.0e-10_pReal

nGDim = homogenization_RGC_Ngrains ( 1 : 3 , homID )
rPen = 0.0_pReal
nMis = 0.0_pReal

!--------------------------------------------------------------------------------------------------
! get the correction factor for the modulus of penalty stress representing the evolution of area of
! the interfaces due to deformations
surfCorr = homogenization_RGC_surfaceCorrection ( avgF , ip , el )

!--------------------------------------------------------------------------------------------------
! debugging the surface correction factor
if ( iand ( debug_level ( debug_homogenization ) , debug_levelExtensive ) /= 0_pInt &
.and. debug_e == el .and. debug_i == ip ) then
!$OMP CRITICAL (write2out)
write ( 6 , '(1x,a20,2(1x,i3))' ) 'Correction factor: ' , ip , el
write ( 6 , '(1x,3(e11.4,1x))' ) ( surfCorr ( i ) , i = 1 , 3 )
!$OMP END CRITICAL (write2out)
endif

!--------------------------------------------------------------------------------------------------
! computing the mismatch and penalty stress tensor of all grains
do iGrain = 1_pInt , homogenization_Ngrains ( mesh_element ( 3 , el ) )
Gmoduli = homogenization_RGC_equivalentModuli ( iGrain , ip , el )
muGrain = Gmoduli ( 1 ) ! collecting the equivalent shear modulus of grain
bgGrain = Gmoduli ( 2 ) ! and the length of Burgers vector
iGrain3 = homogenization_RGC_grain1to3 ( iGrain , homID ) ! get the grain ID in local 3-dimensional index (x,y,z)-position
!* Looping over all six interfaces of each grain
do iFace = 1_pInt , nFace
intFace = homogenization_RGC_getInterface ( iFace , iGrain3 ) ! get the 4-dimensional index of the interface in local numbering system of the grain
nVect = homogenization_RGC_interfaceNormal ( intFace , ip , el ) ! get the interface normal
iGNghb3 = iGrain3 ! identify the neighboring grain across the interface
iGNghb3 ( abs ( intFace ( 1 ) ) ) = iGNghb3 ( abs ( intFace ( 1 ) ) ) + int ( real ( intFace ( 1 ) , pReal ) / real ( abs ( intFace ( 1 ) ) , pReal ) , pInt )
if ( iGNghb3 ( 1 ) < 1 ) iGNghb3 ( 1 ) = nGDim ( 1 ) ! with periodicity along e1 direction
if ( iGNghb3 ( 1 ) > nGDim ( 1 ) ) iGNghb3 ( 1 ) = 1_pInt
if ( iGNghb3 ( 2 ) < 1 ) iGNghb3 ( 2 ) = nGDim ( 2 ) ! with periodicity along e2 direction
if ( iGNghb3 ( 2 ) > nGDim ( 2 ) ) iGNghb3 ( 2 ) = 1_pInt
if ( iGNghb3 ( 3 ) < 1 ) iGNghb3 ( 3 ) = nGDim ( 3 ) ! with periodicity along e3 direction
if ( iGNghb3 ( 3 ) > nGDim ( 3 ) ) iGNghb3 ( 3 ) = 1_pInt
iGNghb = homogenization_RGC_grain3to1 ( iGNghb3 , homID ) ! get the ID of the neighboring grain
Gmoduli = homogenization_RGC_equivalentModuli ( iGNghb , ip , el ) ! collecting the shear modulus and Burgers vector of the neighbor
muGNghb = Gmoduli ( 1 )
bgGNghb = Gmoduli ( 2 )
gDef = 0.5_pReal * ( fDef ( 1 : 3 , 1 : 3 , iGNghb ) - fDef ( 1 : 3 , 1 : 3 , iGrain ) ) ! compute the difference/jump in deformation gradient across the neighbor

!--------------------------------------------------------------------------------------------------
! compute the mismatch tensor of all interfaces
nDefNorm = 0.0_pReal
nDef = 0.0_pReal
do i = 1_pInt , 3_pInt ; do j = 1_pInt , 3_pInt
do k = 1_pInt , 3_pInt ; do l = 1_pInt , 3_pInt
nDef ( i , j ) = nDef ( i , j ) - nVect ( k ) * gDef ( i , l ) * math_civita ( j , k , l ) ! compute the interface mismatch tensor from the jump of deformation gradient
enddo ; enddo
nDefNorm = nDefNorm + nDef ( i , j ) * nDef ( i , j ) ! compute the norm of the mismatch tensor
enddo ; enddo
nDefNorm = max ( nDefToler , sqrt ( nDefNorm ) ) ! approximation to zero mismatch if mismatch is zero (singularity)
nMis ( abs ( intFace ( 1 ) ) , iGrain ) = nMis ( abs ( intFace ( 1 ) ) , iGrain ) + nDefNorm ! total amount of mismatch experienced by the grain (at all six interfaces)
!--------------------------------------------------------------------------------------------------
! debugging the mismatch tensor
if ( iand ( debug_level ( debug_homogenization ) , debug_levelExtensive ) /= 0_pInt &
.and. debug_e == el .and. debug_i == ip ) then
!$OMP CRITICAL (write2out)
write ( 6 , '(1x,a20,i2,1x,a20,1x,i3)' ) 'Mismatch to face: ' , intFace ( 1 ) , 'neighbor grain: ' , iGNghb
do i = 1 , 3
write ( 6 , '(1x,3(e11.4,1x))' ) ( nDef ( i , j ) , j = 1 , 3 )
enddo
write ( 6 , '(1x,a20,e11.4)' ) 'with magnitude: ' , nDefNorm
!$OMP END CRITICAL (write2out)
endif
!--------------------------------------------------------------------------------------------------
! compute the stress penalty of all interfaces
do i = 1_pInt , 3_pInt ; do j = 1_pInt , 3_pInt
do k = 1_pInt , 3_pInt ; do l = 1_pInt , 3_pInt
rPen ( i , j , iGrain ) = rPen ( i , j , iGrain ) + 0.5_pReal * ( muGrain * bgGrain + muGNghb * bgGNghb ) * homogenization_RGC_xiAlpha ( homID ) &
* surfCorr ( abs ( intFace ( 1 ) ) ) / homogenization_RGC_dAlpha ( abs ( intFace ( 1 ) ) , homID ) &
* cosh ( homogenization_RGC_ciAlpha ( homID ) * nDefNorm ) &
* 0.5_pReal * nVect ( l ) * nDef ( i , k ) / nDefNorm * math_civita ( k , l , j ) &
* tanh ( nDefNorm / xSmoo_RGC )
enddo ; enddo
enddo ; enddo
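! the expression above scales the mismatch-driven penalty with the interface moduli (mu*b), the
! user parameter xiAlpha, and the deformed interface area (surfCorr/dAlpha); the factor
! tanh(nDefNorm/xSmoo_RGC) switches the penalty off smoothly as the mismatch vanishes, which
! (together with nDefToler) avoids the 1/nDefNorm singularity at zero mismatch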
enddo

!--------------------------------------------------------------------------------------------------
! debugging the stress-like penalty
if ( iand ( debug_level ( debug_homogenization ) , debug_levelExtensive ) /= 0_pInt &
.and. debug_e == el .and. debug_i == ip ) then
!$OMP CRITICAL (write2out)
write ( 6 , '(1x,a20,i2)' ) 'Penalty of grain: ' , iGrain
do i = 1 , 3
write ( 6 , '(1x,3(e11.4,1x))' ) ( rPen ( i , j , iGrain ) , j = 1 , 3 )
enddo
!$OMP END CRITICAL (write2out)
endif

enddo

end subroutine homogenization_RGC_stressPenalty
!--------------------------------------------------------------------------------------------------
!> @brief calculate stress-like penalty due to volume discrepancy
!--------------------------------------------------------------------------------------------------
subroutine homogenization_RGC_volumePenalty ( vPen , vDiscrep , fDef , fAvg , ip , el )
use debug , only : &
debug_level , &
debug_homogenization , &
debug_levelExtensive , &
debug_e , &
debug_i
use mesh , only : &
mesh_element
use math , only : &
math_det33 , &
math_inv33
use material , only : &
homogenization_maxNgrains , &
homogenization_Ngrains
use numerics , only : &
maxVolDiscr_RGC , &
volDiscrMod_RGC , &
volDiscrPow_RGC

implicit none
real ( pReal ) , dimension ( 3 , 3 , homogenization_maxNgrains ) , intent ( out ) :: vPen ! stress-like penalty due to volume
real ( pReal ) , intent ( out ) :: vDiscrep ! total volume discrepancy
real ( pReal ) , dimension ( 3 , 3 , homogenization_maxNgrains ) , intent ( in ) :: fDef ! deformation gradients
real ( pReal ) , dimension ( 3 , 3 ) , intent ( in ) :: fAvg ! overall deformation gradient
integer ( pInt ) , intent ( in ) :: ip , & ! integration point
el
real ( pReal ) , dimension ( homogenization_maxNgrains ) :: gVol
integer ( pInt ) :: iGrain , nGrain , i , j

nGrain = homogenization_Ngrains ( mesh_element ( 3 , el ) )

!--------------------------------------------------------------------------------------------------
! compute the volumes of grains and of cluster
vDiscrep = math_det33 ( fAvg ) ! compute the volume of the cluster
do iGrain = 1_pInt , nGrain
gVol ( iGrain ) = math_det33 ( fDef ( 1 : 3 , 1 : 3 , iGrain ) ) ! compute the volume of individual grains
vDiscrep = vDiscrep - gVol ( iGrain ) / real ( nGrain , pReal ) ! calculate the difference/discrepancy between
! the volume of the cluster and the total volume of grains
enddo

!--------------------------------------------------------------------------------------------------
! calculate the stress and penalty due to volume discrepancy
vPen = 0.0_pReal
do iGrain = 1_pInt , nGrain
vPen ( : , : , iGrain ) = - 1.0_pReal / real ( nGrain , pReal ) * volDiscrMod_RGC * volDiscrPow_RGC / maxVolDiscr_RGC * &
sign ( ( abs ( vDiscrep ) / maxVolDiscr_RGC ) ** ( volDiscrPow_RGC - 1.0_pReal ) , vDiscrep ) * &
gVol ( iGrain ) * transpose ( math_inv33 ( fDef ( : , : , iGrain ) ) )
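! note: gVol(iGrain) * transpose(math_inv33(fDef(:,:,iGrain))) equals d(det F)/dF = det(F)*F**(-T),
! so vPen acts as the work-conjugate driving stress of the volume-discrepancy power law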
!--------------------------------------------------------------------------------------------------
! debugging the stress-like penalty
if ( iand ( debug_level ( debug_homogenization ) , debug_levelExtensive ) /= 0_pInt &
.and. debug_e == el .and. debug_i == ip ) then
!$OMP CRITICAL (write2out)
write ( 6 , '(1x,a30,i2)' ) 'Volume penalty of grain: ' , iGrain
do i = 1 , 3
write ( 6 , '(1x,3(e11.4,1x))' ) ( vPen ( i , j , iGrain ) , j = 1 , 3 )
enddo
!$OMP END CRITICAL (write2out)
endif
enddo

end subroutine homogenization_RGC_volumePenalty


!--------------------------------------------------------------------------------------------------
!> @brief compute the correction factor accounting for surface evolution (area change) due to
! deformation
!--------------------------------------------------------------------------------------------------
function homogenization_RGC_surfaceCorrection ( avgF , ip , el )
use math , only : &
math_invert33 , &
math_mul33x33
implicit none
real ( pReal ) , dimension ( 3 ) :: homogenization_RGC_surfaceCorrection
real ( pReal ) , dimension ( 3 , 3 ) , intent ( in ) :: avgF !< average F
integer ( pInt ) , intent ( in ) :: ip , & !< integration point number
el !< element number
real ( pReal ) , dimension ( 3 , 3 ) :: invC , avgC
real ( pReal ) , dimension ( 3 ) :: nVect
real ( pReal ) :: detF
integer ( pInt ) , dimension ( 4 ) :: intFace
integer ( pInt ) :: i , j , iBase
logical :: error

avgC = math_mul33x33 ( transpose ( avgF ) , avgF )
call math_invert33 ( avgC , invC , detF , error )
homogenization_RGC_surfaceCorrection = 0.0_pReal
do iBase = 1_pInt , 3_pInt
intFace = [ iBase , 1_pInt , 1_pInt , 1_pInt ]
nVect = homogenization_RGC_interfaceNormal ( intFace , ip , el ) ! get the normal of the interface
do i = 1_pInt , 3_pInt ; do j = 1_pInt , 3_pInt
homogenization_RGC_surfaceCorrection ( iBase ) = & ! compute the component of (the inverse of) the stretch in the direction of the normal
homogenization_RGC_surfaceCorrection ( iBase ) + invC ( i , j ) * nVect ( i ) * nVect ( j )
enddo ; enddo
homogenization_RGC_surfaceCorrection ( iBase ) = & ! get the surface correction factor (area contraction/enlargement)
sqrt ( homogenization_RGC_surfaceCorrection ( iBase ) ) * detF
enddo
end function homogenization_RGC_surfaceCorrection


!--------------------------------------------------------------------------------------------------
!> @brief compute the equivalent shear and bulk moduli from the elasticity tensor
!--------------------------------------------------------------------------------------------------
function homogenization_RGC_equivalentModuli ( grainID , ip , el )
use constitutive , only : &
constitutive_homogenizedC

implicit none
integer ( pInt ) , intent ( in ) :: &
grainID , &
ip , & !< integration point number
el !< element number
real ( pReal ) , dimension ( 6 , 6 ) :: elasTens
real ( pReal ) , dimension ( 2 ) :: homogenization_RGC_equivalentModuli
real ( pReal ) :: cEquiv_11 , cEquiv_12 , cEquiv_44

elasTens = constitutive_homogenizedC ( grainID , ip , el )
!--------------------------------------------------------------------------------------------------
! compute the equivalent shear modulus after Turteltaub and Suiker, JMPS (2005)
cEquiv_11 = ( elasTens ( 1 , 1 ) + elasTens ( 2 , 2 ) + elasTens ( 3 , 3 ) ) / 3.0_pReal
cEquiv_12 = ( elasTens ( 1 , 2 ) + elasTens ( 2 , 3 ) + elasTens ( 3 , 1 ) + &
elasTens ( 1 , 3 ) + elasTens ( 2 , 1 ) + elasTens ( 3 , 2 ) ) / 6.0_pReal
cEquiv_44 = ( elasTens ( 4 , 4 ) + elasTens ( 5 , 5 ) + elasTens ( 6 , 6 ) ) / 3.0_pReal
homogenization_RGC_equivalentModuli ( 1 ) = 0.2_pReal * ( cEquiv_11 - cEquiv_12 ) + 0.6_pReal * cEquiv_44
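! note: 0.2*(cEquiv_11 - cEquiv_12) + 0.6*cEquiv_44 = (C11 - C12 + 3*C44)/5, i.e. the Voigt
! (uniform strain) average shear modulus of a cubic crystal, applied here to the
! direction-averaged entries of the elasticity matrix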

!--------------------------------------------------------------------------------------------------
! obtain the length of Burgers vector (could be model dependent)
homogenization_RGC_equivalentModuli ( 2 ) = 2.5e-10_pReal

end function homogenization_RGC_equivalentModuli


!--------------------------------------------------------------------------------------------------
!> @brief collect relaxation vectors of an interface
!--------------------------------------------------------------------------------------------------
function homogenization_RGC_relaxationVector ( intFace , state , homID )
use prec , only : &
p_vec

implicit none
real ( pReal ) , dimension ( 3 ) :: homogenization_RGC_relaxationVector
integer ( pInt ) , dimension ( 4 ) , intent ( in ) :: intFace !< set of interface ID in 4D array (normal and position)
type ( p_vec ) , intent ( in ) :: state !< set of global relaxation vectors
integer ( pInt ) , intent ( in ) :: homID !< homogenization ID
integer ( pInt ) , dimension ( 3 ) :: nGDim
integer ( pInt ) :: iNum

!--------------------------------------------------------------------------------------------------
! collect the interface relaxation vector from the global state array
homogenization_RGC_relaxationVector = 0.0_pReal
nGDim = homogenization_RGC_Ngrains ( 1 : 3 , homID )
iNum = homogenization_RGC_interface4to1 ( intFace , homID ) ! identify the position of the interface in global state array
if ( iNum > 0_pInt ) homogenization_RGC_relaxationVector = state % p ( ( 3 * iNum - 2 ) : ( 3 * iNum ) ) ! get the corresponding entries
end function homogenization_RGC_relaxationVector
!--------------------------------------------------------------------------------------------------
!> @brief identify the normal of an interface
!--------------------------------------------------------------------------------------------------
function homogenization_RGC_interfaceNormal ( intFace , ip , el )
use debug , only : &
debug_level , &
debug_homogenization , &
debug_levelExtensive , &
debug_e , &
debug_i
use math , only : &
math_mul33x3

implicit none
real ( pReal ) , dimension ( 3 ) :: homogenization_RGC_interfaceNormal
integer ( pInt ) , dimension ( 4 ) , intent ( in ) :: intFace !< interface ID in 4D array (normal and position)
integer ( pInt ) , intent ( in ) :: &
ip , & !< integration point number
el !< element number
integer ( pInt ) :: nPos

!--------------------------------------------------------------------------------------------------
! get the normal of the interface, identified from the value of intFace(1)
2010-11-26 17:20:20 +05:30
homogenization_RGC_interfaceNormal = 0.0_pReal
nPos = abs ( intFace ( 1 ) ) ! identify the position of the interface in global state array
homogenization_RGC_interfaceNormal ( nPos ) = real ( intFace ( 1 ) / abs ( intFace ( 1 ) ) , pReal ) ! get the normal vector w.r.t. cluster axis

homogenization_RGC_interfaceNormal = &
math_mul33x3 ( homogenization_RGC_orientation ( 1 : 3 , 1 : 3 , ip , el ) , &
homogenization_RGC_interfaceNormal ) ! map the normal vector into sample coordinate system (basis)

end function homogenization_RGC_interfaceNormal
!--------------------------------------------------------------------------------------------------
!> @brief collect six faces of a grain in 4D (normal and position)
!--------------------------------------------------------------------------------------------------
function homogenization_RGC_getInterface ( iFace , iGrain3 )
implicit none
integer ( pInt ) , dimension ( 4 ) :: homogenization_RGC_getInterface
integer ( pInt ) , dimension ( 3 ) , intent ( in ) :: iGrain3 !< grain ID in 3D array
integer ( pInt ) , intent ( in ) :: iFace !< face index (1..6) mapped like (-e1,+e1,-e2,+e2,-e3,+e3), i.e. iDir = (-1,+1,-2,+2,-3,+3)
integer ( pInt ) :: iDir
!* Direction of interface normal
iDir = ( int ( real ( iFace - 1_pInt , pReal ) / 2.0_pReal , pInt ) + 1_pInt ) * ( - 1_pInt ) ** iFace
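! worked out: iFace = 1,2,3,4,5,6 yields iDir = -1,+1,-2,+2,-3,+3 in turn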
homogenization_RGC_getInterface ( 1 ) = iDir

!--------------------------------------------------------------------------------------------------
! identify the interface position by the direction of its normal
homogenization_RGC_getInterface ( 2 : 4 ) = iGrain3
if ( iDir < 0_pInt ) & ! to have a correlation with coordinate/position in real space
homogenization_RGC_getInterface ( 1_pInt - iDir ) = homogenization_RGC_getInterface ( 1_pInt - iDir ) - 1_pInt

end function homogenization_RGC_getInterface

!--------------------------------------------------------------------------------------------------
!> @brief map grain ID from in 1D (global array) to in 3D (local position)
!--------------------------------------------------------------------------------------------------
function homogenization_RGC_grain1to3 ( grain1 , homID )

implicit none
integer ( pInt ) , intent ( in ) :: &
grain1 , & !< grain ID in 1D array
homID !< homogenization ID
integer ( pInt ) , dimension ( 3 ) :: homogenization_RGC_grain1to3
integer ( pInt ) , dimension ( 3 ) :: nGDim

!--------------------------------------------------------------------------------------------------
! get the grain position
nGDim = homogenization_RGC_Ngrains ( 1 : 3 , homID )
homogenization_RGC_grain1to3 ( 3 ) = 1_pInt + ( grain1 - 1_pInt ) / ( nGDim ( 1 ) * nGDim ( 2 ) )
homogenization_RGC_grain1to3 ( 2 ) = 1_pInt + mod ( ( grain1 - 1_pInt ) / nGDim ( 1 ) , nGDim ( 2 ) )
homogenization_RGC_grain1to3 ( 1 ) = 1_pInt + mod ( ( grain1 - 1_pInt ) , nGDim ( 1 ) )
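! e.g. with nGDim = [3,2,1], grain1 = 5 gives (x,y,z) = (1+mod(4,3), 1+mod(4/3,2), 1+4/6) = (2,2,1)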

end function homogenization_RGC_grain1to3

!--------------------------------------------------------------------------------------------------
!> @brief map grain ID from in 3D (local position) to in 1D (global array)
!--------------------------------------------------------------------------------------------------
pure function homogenization_RGC_grain3to1 ( grain3 , homID )

implicit none
integer ( pInt ) , dimension ( 3 ) , intent ( in ) :: grain3 !< grain ID in 3D array (pos.x,pos.y,pos.z)
integer ( pInt ) :: homogenization_RGC_grain3to1
integer ( pInt ) , dimension ( 3 ) :: nGDim
integer ( pInt ) , intent ( in ) :: homID !< homogenization ID

!--------------------------------------------------------------------------------------------------
! get the grain ID
nGDim = homogenization_RGC_Ngrains ( 1 : 3 , homID )
homogenization_RGC_grain3to1 = grain3 ( 1 ) + nGDim ( 1 ) * ( grain3 ( 2 ) - 1_pInt ) + nGDim ( 1 ) * nGDim ( 2 ) * ( grain3 ( 3 ) - 1_pInt )
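! inverse of grain1to3, e.g. with nGDim = [3,2,1], grain3 = (2,2,1) gives 2 + 3*1 + 6*0 = 5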

end function homogenization_RGC_grain3to1

!--------------------------------------------------------------------------------------------------
!> @brief maps interface ID from 4D (normal and local position) into 1D (global array)
!--------------------------------------------------------------------------------------------------
integer ( pInt ) pure function homogenization_RGC_interface4to1 ( iFace4D , homID )

implicit none
integer ( pInt ) , dimension ( 4 ) , intent ( in ) :: iFace4D !< interface ID in 4D array (n.dir,pos.x,pos.y,pos.z)
integer ( pInt ) , dimension ( 3 ) :: nGDim , nIntFace
integer ( pInt ) , intent ( in ) :: homID !< homogenization ID

nGDim = homogenization_RGC_Ngrains ( 1 : 3 , homID )
!--------------------------------------------------------------------------------------------------
! compute the total number of interfaces, which ...
nIntFace ( 1 ) = ( nGDim ( 1 ) - 1_pInt ) * nGDim ( 2 ) * nGDim ( 3 ) ! ... normal //e1
nIntFace ( 2 ) = nGDim ( 1 ) * ( nGDim ( 2 ) - 1_pInt ) * nGDim ( 3 ) ! ... normal //e2
nIntFace ( 3 ) = nGDim ( 1 ) * nGDim ( 2 ) * ( nGDim ( 3 ) - 1_pInt ) ! ... normal //e3
homogenization_RGC_interface4to1 = - 1_pInt
!--------------------------------------------------------------------------------------------------
! get the corresponding interface ID in 1D global array
if ( abs ( iFace4D ( 1 ) ) == 1_pInt ) then ! interface with normal //e1
homogenization_RGC_interface4to1 = iFace4D ( 3 ) + nGDim ( 2 ) * ( iFace4D ( 4 ) - 1_pInt ) &
+ nGDim ( 2 ) * nGDim ( 3 ) * ( iFace4D ( 2 ) - 1_pInt )
if ( ( iFace4D ( 2 ) == 0_pInt ) .or. ( iFace4D ( 2 ) == nGDim ( 1 ) ) ) homogenization_RGC_interface4to1 = 0_pInt
elseif ( abs ( iFace4D ( 1 ) ) == 2_pInt ) then ! interface with normal //e2
homogenization_RGC_interface4to1 = iFace4D ( 4 ) + nGDim ( 3 ) * ( iFace4D ( 2 ) - 1_pInt ) &
+ nGDim ( 3 ) * nGDim ( 1 ) * ( iFace4D ( 3 ) - 1_pInt ) + nIntFace ( 1 )
if ( ( iFace4D ( 3 ) == 0_pInt ) .or. ( iFace4D ( 3 ) == nGDim ( 2 ) ) ) homogenization_RGC_interface4to1 = 0_pInt
elseif ( abs ( iFace4D ( 1 ) ) == 3_pInt ) then ! interface with normal //e3
homogenization_RGC_interface4to1 = iFace4D ( 2 ) + nGDim ( 1 ) * ( iFace4D ( 3 ) - 1_pInt ) &
+ nGDim ( 1 ) * nGDim ( 2 ) * ( iFace4D ( 4 ) - 1_pInt ) + nIntFace ( 1 ) + nIntFace ( 2 )
if ( ( iFace4D ( 4 ) == 0_pInt ) .or. ( iFace4D ( 4 ) == nGDim ( 3 ) ) ) homogenization_RGC_interface4to1 = 0_pInt
endif
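! interfaces are numbered consecutively: all interfaces with normal //e1 first, then //e2 (offset
! nIntFace(1)), then //e3 (offset nIntFace(1)+nIntFace(2)); faces on the cluster boundary map to 0,
! while the initial value -1_pInt flags an invalid 4D index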

end function homogenization_RGC_interface4to1

!--------------------------------------------------------------------------------------------------
!> @brief maps interface ID from 1D (global array) into 4D (normal and local position)
!--------------------------------------------------------------------------------------------------
function homogenization_RGC_interface1to4 ( iFace1D , homID )

implicit none
integer ( pInt ) , dimension ( 4 ) :: homogenization_RGC_interface1to4
integer ( pInt ) , intent ( in ) :: iFace1D !< interface ID in 1D array
integer ( pInt ) , dimension ( 3 ) :: nGDim , nIntFace
integer ( pInt ) , intent ( in ) :: homID !< homogenization ID

nGDim = homogenization_RGC_Ngrains ( : , homID )
!--------------------------------------------------------------------------------------------------
! compute the total number of interfaces, which ...
nIntFace ( 1 ) = ( nGDim ( 1 ) - 1_pInt ) * nGDim ( 2 ) * nGDim ( 3 ) ! ... normal //e1
nIntFace ( 2 ) = nGDim ( 1 ) * ( nGDim ( 2 ) - 1_pInt ) * nGDim ( 3 ) ! ... normal //e2
nIntFace ( 3 ) = nGDim ( 1 ) * nGDim ( 2 ) * ( nGDim ( 3 ) - 1_pInt ) ! ... normal //e3
!--------------------------------------------------------------------------------------------------
! get the corresponding interface ID in 4D (normal and local position)
if ( iFace1D > 0 .and. iFace1D <= nIntFace ( 1 ) ) then ! interface with normal //e1
homogenization_RGC_interface1to4 ( 1 ) = 1_pInt
homogenization_RGC_interface1to4 ( 3 ) = mod ( ( iFace1D - 1_pInt ) , nGDim ( 2 ) ) + 1_pInt
homogenization_RGC_interface1to4 ( 4 ) = mod ( &
int ( &
real ( iFace1D - 1_pInt , pReal ) / &
real ( nGDim ( 2 ) , pReal ) &
, pInt ) &
, nGDim ( 3 ) ) + 1_pInt
homogenization_RGC_interface1to4 ( 2 ) = int ( &
real ( iFace1D - 1_pInt , pReal ) / &
real ( nGDim ( 2 ) , pReal ) / &
real ( nGDim ( 3 ) , pReal ) &
, pInt ) + 1_pInt
2013-02-21 03:36:15 +05:30
elseif ( iFace1D > nIntFace ( 1 ) . and . iFace1D < = ( nIntFace ( 2 ) + nIntFace ( 1 ) ) ) then ! interface with normal //e2
2012-02-21 22:01:37 +05:30
homogenization_RGC_interface1to4 ( 1 ) = 2_pInt
homogenization_RGC_interface1to4 ( 4 ) = mod ( ( iFace1D - nIntFace ( 1 ) - 1_pInt ) , nGDim ( 3 ) ) + 1_pInt
homogenization_RGC_interface1to4 ( 2 ) = mod ( &
int ( &
real ( iFace1D - nIntFace ( 1 ) - 1_pInt , pReal ) / &
real ( nGDim ( 3 ) , pReal ) &
, pInt ) &
, nGDim ( 1 ) ) + 1_pInt
homogenization_RGC_interface1to4 ( 3 ) = int ( &
real ( iFace1D - nIntFace ( 1 ) - 1_pInt , pReal ) / &
real ( nGDim ( 3 ) , pReal ) / &
real ( nGDim ( 1 ) , pReal ) &
, pInt ) + 1_pInt
2013-02-21 03:36:15 +05:30
elseif ( iFace1D > nIntFace ( 2 ) + nIntFace ( 1 ) . and . iFace1D < = ( nIntFace ( 3 ) + nIntFace ( 2 ) + nIntFace ( 1 ) ) ) then ! interface with normal //e3
2012-02-21 22:01:37 +05:30
homogenization_RGC_interface1to4 ( 1 ) = 3_pInt
homogenization_RGC_interface1to4 ( 2 ) = mod ( ( iFace1D - nIntFace ( 2 ) - nIntFace ( 1 ) - 1_pInt ) , nGDim ( 1 ) ) + 1_pInt
homogenization_RGC_interface1to4 ( 3 ) = mod ( &
int ( &
real ( iFace1D - nIntFace ( 2 ) - nIntFace ( 1 ) - 1_pInt , pReal ) / &
real ( nGDim ( 1 ) , pReal ) &
, pInt ) &
, nGDim ( 2 ) ) + 1_pInt
homogenization_RGC_interface1to4 ( 4 ) = int ( &
real ( iFace1D - nIntFace ( 2 ) - nIntFace ( 1 ) - 1_pInt , pReal ) / &
real ( nGDim ( 1 ) , pReal ) / &
real ( nGDim ( 2 ) , pReal ) &
, pInt ) + 1_pInt
2009-10-12 21:31:49 +05:30
endif
2013-02-21 03:36:15 +05:30
end function homogenization_RGC_interface1to4
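
! Worked example (added for illustration; the 2 x 2 x 2 cluster is hypothetical):
! for nGDim = (2,2,2), nIntFace = (4,4,4), the ID iFace1D = 3 falls in the //e1 range, giving
! (1) = 1, (3) = mod(2,2)+1 = 1, (4) = mod(int(2/2),2)+1 = 2, (2) = int(2/(2*2))+1 = 1,
! i.e., iFace4D = (1,1,1,2); feeding this back into homogenization_RGC_interface4to1
! recovers 1 + 2*(2-1) + 2*2*(1-1) = 3, confirming that the two mappings are inverses.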

!--------------------------------------------------------------------------------------------------
!> @brief calculates the grain deformation gradient (the same as
!> homogenization_RGC_partitionDeformation, but used only for the perturbation scheme)
!--------------------------------------------------------------------------------------------------
subroutine homogenization_RGC_grainDeformation(F, avgF, state, ip, el)
 use prec, only: &
   p_vec
 use mesh, only: &
   mesh_element
 use material, only: &
   homogenization_maxNgrains, &
   homogenization_Ngrains, &
   homogenization_typeInstance

 implicit none
 real(pReal),   dimension (3,3,homogenization_maxNgrains), intent(out) :: F                         !< partitioned F per grain
 real(pReal),   dimension (3,3), intent(in) :: avgF                                                 !< averaged F of the cluster
 type(p_vec),   intent(in) :: state                                                                 !< state (holds the interface relaxation vectors)
 integer(pInt), intent(in) :: &
   el, &                                                                                            !< element number
   ip                                                                                               !< integration point number
 real(pReal),   dimension (3) :: aVect,nVect
 integer(pInt), dimension (4) :: intFace
 integer(pInt), dimension (3) :: iGrain3
 integer(pInt) :: homID, iGrain, iFace, i, j
 integer(pInt), parameter :: nFace = 6_pInt

!--------------------------------------------------------------------------------------------------
! compute the deformation gradient of individual grains due to relaxations
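! In effect (added note), the loop below realizes, for every grain g of the cluster,
!   F^g_ij = avgF_ij + sum over the grain's six faces of aVect_i * nVect_j
! i.e., the average deformation gradient plus the dyads of the interface relaxation
! vectors (aVect) and the interface normals (nVect).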
 homID = homogenization_typeInstance(mesh_element(3,el))
 F = 0.0_pReal
 do iGrain = 1_pInt,homogenization_Ngrains(mesh_element(3,el))
   iGrain3 = homogenization_RGC_grain1to3(iGrain,homID)
   do iFace = 1_pInt,nFace
     intFace = homogenization_RGC_getInterface(iFace,iGrain3)
     aVect = homogenization_RGC_relaxationVector(intFace,state,homID)
     nVect = homogenization_RGC_interfaceNormal(intFace,ip,el)
     forall (i=1_pInt:3_pInt,j=1_pInt:3_pInt) &
       F(i,j,iGrain) = F(i,j,iGrain) + aVect(i)*nVect(j)                                            ! effective relaxations
   enddo
   F(1:3,1:3,iGrain) = F(1:3,1:3,iGrain) + avgF                                                     ! relaxed deformation gradient
 enddo

end subroutine homogenization_RGC_grainDeformation

end module homogenization_RGC