! Copyright 2011 Max-Planck-Institut für Eisenforschung GmbH
!
! This file is part of DAMASK,
! the Düsseldorf Advanced MAterial Simulation Kit.
!
! DAMASK is free software: you can redistribute it and/or modify
! it under the terms of the GNU General Public License as published by
! the Free Software Foundation, either version 3 of the License, or
! (at your option) any later version.
!
! DAMASK is distributed in the hope that it will be useful,
! but WITHOUT ANY WARRANTY; without even the implied warranty of
! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
! GNU General Public License for more details.
!
! You should have received a copy of the GNU General Public License
! along with DAMASK. If not, see <http://www.gnu.org/licenses/>.
!
!##############################################################
!* $Id$
!***************************************
!* Module: CRYSTALLITE                 *
!***************************************
!* contains:                           *
!* - _init                             *
!* - materialpoint_stressAndItsTangent *
!* - _partitionDeformation             *
!* - _updateState                      *
!* - _stressAndItsTangent              *
!* - _postResults                      *
!***************************************

module crystallite

use prec, only: pReal, pInt

implicit none
private :: crystallite_integrateStateFPI, &
           crystallite_integrateStateEuler, &
           crystallite_integrateStateAdaptiveEuler, &
           crystallite_integrateStateRK4, &
           crystallite_integrateStateRKCK45, &
           crystallite_integrateStress, &
           crystallite_stateJump

external :: dgesv

! ****************************************************************
! *** General variables for the crystallite calculation ***
! ****************************************************************
integer(pInt) crystallite_maxSizePostResults
integer(pInt), dimension(:), allocatable ::   crystallite_sizePostResults
integer(pInt), dimension(:,:), allocatable :: crystallite_sizePostResult
character(len=64), dimension(:,:), allocatable :: crystallite_output        !< name of each post result output
integer(pInt), dimension(:,:,:), allocatable :: &
    crystallite_symmetryID                  !< crystallographic symmetry 1=cubic 2=hexagonal, needed in all orientation calcs
real(pReal), dimension(:,:,:), allocatable :: &
    crystallite_dt, &                       !< requested time increment of each grain
    crystallite_subdt, &                    !< substepped time increment of each grain
    crystallite_subFrac, &                  !< already calculated fraction of increment
    crystallite_subStep, &                  !< size of next integration step
    crystallite_Temperature, &              !< temperature of each grain
    crystallite_partionedTemperature0, &    !< temperature of each grain at start of homog inc
    crystallite_subTemperature0, &          !< temperature of each grain at start of crystallite inc
    crystallite_dotTemperature              !< evolution of temperature of each grain
real(pReal), dimension(:,:,:,:), allocatable :: &
    crystallite_Tstar_v, &                  !< current 2nd Piola-Kirchhoff stress vector (end of converged time step)
    crystallite_Tstar0_v, &                 !< 2nd Piola-Kirchhoff stress vector at start of FE inc
    crystallite_partionedTstar0_v, &        !< 2nd Piola-Kirchhoff stress vector at start of homog inc
    crystallite_subTstar0_v, &              !< 2nd Piola-Kirchhoff stress vector at start of crystallite inc
    crystallite_orientation, &              !< orientation as quaternion
    crystallite_orientation0, &             !< initial orientation as quaternion
    crystallite_rotation                    !< grain rotation away from initial orientation as axis-angle (in degrees) in crystal reference frame
real(pReal), dimension(:,:,:,:,:), allocatable :: &
    crystallite_Fe, &                       !< current "elastic" def grad (end of converged time step)
    crystallite_subFe0, &                   !< "elastic" def grad at start of crystallite inc
    crystallite_Fp, &                       !< current plastic def grad (end of converged time step)
    crystallite_invFp, &                    !< inverse of current plastic def grad (end of converged time step)
    crystallite_Fp0, &                      !< plastic def grad at start of FE inc
    crystallite_partionedFp0, &             !< plastic def grad at start of homog inc
    crystallite_subFp0, &                   !< plastic def grad at start of crystallite inc
    crystallite_F0, &                       !< def grad at start of FE inc
    crystallite_partionedF, &               !< def grad to be reached at end of homog inc
    crystallite_partionedF0, &              !< def grad at start of homog inc
    crystallite_subF, &                     !< def grad to be reached at end of crystallite inc
    crystallite_subF0, &                    !< def grad at start of crystallite inc
    crystallite_Lp, &                       !< current plastic velocity grad (end of converged time step)
    crystallite_Lp0, &                      !< plastic velocity grad at start of FE inc
    crystallite_partionedLp0, &             !< plastic velocity grad at start of homog inc
    crystallite_subLp0, &                   !< plastic velocity grad at start of crystallite inc
    crystallite_P, &                        !< 1st Piola-Kirchhoff stress per grain
    crystallite_disorientation              !< disorientation between two neighboring ips (only calculated for single grain IPs)
real(pReal), dimension(:,:,:,:,:,:,:), allocatable :: &
    crystallite_dPdF, &                     !< current individual dPdF per grain (end of converged time step)
    crystallite_dPdF0, &                    !< individual dPdF per grain at start of FE inc
    crystallite_partioneddPdF0, &           !< individual dPdF per grain at start of homog inc
    crystallite_fallbackdPdF                !< dPdF fallback for non-converged grains (elastic prediction)
logical, dimension(:,:,:), allocatable :: &
    crystallite_localPlasticity, &          !< indicates this grain to have purely local constitutive law
    crystallite_requested, &                !< flag to request crystallite calculation
    crystallite_todo, &                     !< flag to indicate need for further computation
    crystallite_converged                   !< convergence flag
logical, dimension(:,:), allocatable :: &
    crystallite_clearToWindForward, &
    crystallite_clearToCutback, &
    crystallite_syncSubFrac, &
    crystallite_syncSubFracCompleted, &
    crystallite_neighborEnforcedCutback
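
! Indexing convention (summarizing the comments above): per-grain arrays are indexed as
! (g,i,e) = (grain, integration point, element). The *0 suffix marks snapshots at the
! start of the FE increment, partioned* at the start of the homogenization increment,
! and sub* at the start of the current crystallite substep.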

contains


!********************************************************************
! allocate and initialize per grain variables
!********************************************************************
subroutine crystallite_init(Temperature)

!*** variables and functions from other modules ***!
use, intrinsic :: iso_fortran_env                   ! to get compiler_version and compiler_options (at least for gfortran 4.6 at the moment)
use debug, only:     debug_info, &
                     debug_reset, &
                     debug_level, &
                     debug_crystallite, &
                     debug_levelBasic
use math, only:      math_I3, &
                     math_EulerToR, &
                     math_inv33, &
                     math_transpose33, &
                     math_mul33xx33, &
                     math_mul33x33
use FEsolving, only: FEsolving_execElem, &
                     FEsolving_execIP
use mesh, only:      mesh_element, &
                     mesh_NcpElems, &
                     mesh_maxNips, &
                     mesh_maxNipNeighbors
use IO
use material
use lattice, only:   lattice_symmetryType
use constitutive, only: constitutive_microstructure
use constitutive_phenopowerlaw, only: constitutive_phenopowerlaw_label, &
                                      constitutive_phenopowerlaw_structureName
use constitutive_titanmod, only:      constitutive_titanmod_label, &
                                      constitutive_titanmod_structureName
use constitutive_dislotwin, only:     constitutive_dislotwin_label, &
                                      constitutive_dislotwin_structureName
use constitutive_nonlocal, only:      constitutive_nonlocal_label, &
                                      constitutive_nonlocal_structureName

implicit none
integer(pInt), parameter :: myFile = 200_pInt, &
                            maxNchunks = 2_pInt

!*** input variables ***!
real(pReal) Temperature

!*** local variables ***!
integer(pInt), dimension(1+2*maxNchunks) :: positions
integer(pInt) g, &                                  ! grain number
              i, &                                  ! integration point number
              e, &                                  ! element number
              gMax, &                               ! maximum number of grains
              iMax, &                               ! maximum number of integration points
              eMax, &                               ! maximum number of elements
              nMax, &                               ! maximum number of ip neighbors
              myNgrains, &                          ! number of grains in current IP
              section, &
              j, &
              p, &
              output, &
              mySize, &
              myPhase, &
              myMat
character(len=64) tag
character(len=1024) line
!$OMP CRITICAL (write2out)
  write(6,*)
  write(6,*) '<<<+- crystallite init -+>>>'
  write(6,*) '$Id$'
#include "compilation_info.f90"
!$OMP END CRITICAL (write2out)
gMax = homogenization_maxNgrains
iMax = mesh_maxNips
eMax = mesh_NcpElems
nMax = mesh_maxNipNeighbors

allocate(crystallite_Temperature(gMax,iMax,eMax));            crystallite_Temperature = Temperature
allocate(crystallite_partionedTemperature0(gMax,iMax,eMax));  crystallite_partionedTemperature0 = 0.0_pReal
allocate(crystallite_subTemperature0(gMax,iMax,eMax));        crystallite_subTemperature0 = 0.0_pReal
allocate(crystallite_dotTemperature(gMax,iMax,eMax));         crystallite_dotTemperature = 0.0_pReal
allocate(crystallite_Tstar0_v(6,gMax,iMax,eMax));             crystallite_Tstar0_v = 0.0_pReal
allocate(crystallite_partionedTstar0_v(6,gMax,iMax,eMax));    crystallite_partionedTstar0_v = 0.0_pReal
allocate(crystallite_subTstar0_v(6,gMax,iMax,eMax));          crystallite_subTstar0_v = 0.0_pReal
allocate(crystallite_Tstar_v(6,gMax,iMax,eMax));              crystallite_Tstar_v = 0.0_pReal
allocate(crystallite_P(3,3,gMax,iMax,eMax));                  crystallite_P = 0.0_pReal
allocate(crystallite_F0(3,3,gMax,iMax,eMax));                 crystallite_F0 = 0.0_pReal
allocate(crystallite_partionedF0(3,3,gMax,iMax,eMax));        crystallite_partionedF0 = 0.0_pReal
allocate(crystallite_partionedF(3,3,gMax,iMax,eMax));         crystallite_partionedF = 0.0_pReal
allocate(crystallite_subF0(3,3,gMax,iMax,eMax));              crystallite_subF0 = 0.0_pReal
allocate(crystallite_subF(3,3,gMax,iMax,eMax));               crystallite_subF = 0.0_pReal
allocate(crystallite_Fp0(3,3,gMax,iMax,eMax));                crystallite_Fp0 = 0.0_pReal
allocate(crystallite_partionedFp0(3,3,gMax,iMax,eMax));       crystallite_partionedFp0 = 0.0_pReal
allocate(crystallite_subFp0(3,3,gMax,iMax,eMax));             crystallite_subFp0 = 0.0_pReal
allocate(crystallite_Fp(3,3,gMax,iMax,eMax));                 crystallite_Fp = 0.0_pReal
allocate(crystallite_invFp(3,3,gMax,iMax,eMax));              crystallite_invFp = 0.0_pReal
allocate(crystallite_Fe(3,3,gMax,iMax,eMax));                 crystallite_Fe = 0.0_pReal
allocate(crystallite_subFe0(3,3,gMax,iMax,eMax));             crystallite_subFe0 = 0.0_pReal
allocate(crystallite_Lp0(3,3,gMax,iMax,eMax));                crystallite_Lp0 = 0.0_pReal
allocate(crystallite_partionedLp0(3,3,gMax,iMax,eMax));       crystallite_partionedLp0 = 0.0_pReal
allocate(crystallite_subLp0(3,3,gMax,iMax,eMax));             crystallite_subLp0 = 0.0_pReal
allocate(crystallite_Lp(3,3,gMax,iMax,eMax));                 crystallite_Lp = 0.0_pReal
allocate(crystallite_dPdF(3,3,3,3,gMax,iMax,eMax));           crystallite_dPdF = 0.0_pReal
allocate(crystallite_dPdF0(3,3,3,3,gMax,iMax,eMax));          crystallite_dPdF0 = 0.0_pReal
allocate(crystallite_partioneddPdF0(3,3,3,3,gMax,iMax,eMax)); crystallite_partioneddPdF0 = 0.0_pReal
allocate(crystallite_fallbackdPdF(3,3,3,3,gMax,iMax,eMax));   crystallite_fallbackdPdF = 0.0_pReal
allocate(crystallite_dt(gMax,iMax,eMax));                     crystallite_dt = 0.0_pReal
allocate(crystallite_subdt(gMax,iMax,eMax));                  crystallite_subdt = 0.0_pReal
allocate(crystallite_subFrac(gMax,iMax,eMax));                crystallite_subFrac = 0.0_pReal
allocate(crystallite_subStep(gMax,iMax,eMax));                crystallite_subStep = 0.0_pReal
allocate(crystallite_orientation(4,gMax,iMax,eMax));          crystallite_orientation = 0.0_pReal
allocate(crystallite_orientation0(4,gMax,iMax,eMax));         crystallite_orientation0 = 0.0_pReal
allocate(crystallite_rotation(4,gMax,iMax,eMax));             crystallite_rotation = 0.0_pReal
allocate(crystallite_disorientation(4,nMax,gMax,iMax,eMax));  crystallite_disorientation = 0.0_pReal
allocate(crystallite_symmetryID(gMax,iMax,eMax));             crystallite_symmetryID = 0_pInt
allocate(crystallite_localPlasticity(gMax,iMax,eMax));        crystallite_localPlasticity = .true.
allocate(crystallite_requested(gMax,iMax,eMax));              crystallite_requested = .false.
allocate(crystallite_todo(gMax,iMax,eMax));                   crystallite_todo = .false.
allocate(crystallite_converged(gMax,iMax,eMax));              crystallite_converged = .true.
allocate(crystallite_clearToWindForward(iMax,eMax));          crystallite_clearToWindForward = .true.
allocate(crystallite_syncSubFrac(iMax,eMax));                 crystallite_syncSubFrac = .false.
allocate(crystallite_syncSubFracCompleted(iMax,eMax));        crystallite_syncSubFracCompleted = .false.
allocate(crystallite_clearToCutback(iMax,eMax));              crystallite_clearToCutback = .true.
allocate(crystallite_neighborEnforcedCutback(iMax,eMax));     crystallite_neighborEnforcedCutback = .false.
allocate(crystallite_output(maxval(crystallite_Noutput), &
                            material_Ncrystallite));          crystallite_output = ''
allocate(crystallite_sizePostResults(material_Ncrystallite)); crystallite_sizePostResults = 0_pInt
allocate(crystallite_sizePostResult(maxval(crystallite_Noutput), &
                                    material_Ncrystallite));  crystallite_sizePostResult = 0_pInt

if (.not. IO_open_jobFile_stat(myFile,material_localFileExt)) then      ! no local material configuration present...
  call IO_open_file(myFile,material_configFile)                         ! ...open material.config file
endif

line = ''
section = 0_pInt

do while (IO_lc(IO_getTag(line,'<','>')) /= material_partCrystallite)   ! wind forward to <crystallite>
  read(myFile,'(a1024)',END=100) line
enddo

do                                                                      ! read through sections of crystallite part
  read(myFile,'(a1024)',END=100) line
  if (IO_isBlank(line)) cycle                                           ! skip empty lines
  if (IO_getTag(line,'<','>') /= '') exit                               ! stop at next part
  if (IO_getTag(line,'[',']') /= '') then                               ! next section
    section = section + 1_pInt
    output = 0_pInt                                                     ! reset output counter
  endif
  if (section > 0_pInt) then
    positions = IO_stringPos(line,maxNchunks)
    tag = IO_lc(IO_stringValue(line,positions,1_pInt))                  ! extract key
    select case(tag)
      case ('(output)')
        output = output + 1_pInt
        crystallite_output(output,section) = IO_lc(IO_stringValue(line,positions,2_pInt))
    end select
  endif
enddo

100 close(myFile)
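
! The parser above expects a <crystallite> part in material.config shaped roughly like
! the following (illustrative sketch; section and output names are user choices, only
! the '(output)' keyword is interpreted here):
!
!   <crystallite>
!   [myCrystallite]
!   (output) orientation
!   (output) eulerangles
!   (output) defgrad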
do i = 1_pInt,material_Ncrystallite                 ! sanity checks (currently none implemented)
enddo

do i = 1_pInt,material_Ncrystallite
  do j = 1_pInt,crystallite_Noutput(i)
    select case(crystallite_output(j,i))
      case ('phase','texture','volume','grainrotationx','grainrotationy','grainrotationz')
        mySize = 1_pInt
      case ('orientation','grainrotation')          ! orientation as quaternion, or deviation from initial grain orientation in axis-angle form (angle in degrees)
        mySize = 4_pInt
      case ('eulerangles')                          ! Bunge (3-1-3) Euler angles
        mySize = 3_pInt
      case ('defgrad','f','fe','fp','lp','e','ee','p','firstpiola','1stpiola','s','tstar','secondpiola','2ndpiola')
        mySize = 9_pInt
      case ('elasmatrix')
        mySize = 36_pInt
      case default
        mySize = 0_pInt
    end select
    if (mySize > 0_pInt) then                       ! any meaningful output found
      crystallite_sizePostResult(j,i) = mySize
      crystallite_sizePostResults(i) = crystallite_sizePostResults(i) + mySize
    endif
  enddo
enddo

crystallite_maxSizePostResults = 0_pInt
do j = 1_pInt,material_Nmicrostructure
  if (microstructure_active(j)) &
    crystallite_maxSizePostResults = max(crystallite_maxSizePostResults, &
                                         crystallite_sizePostResults(microstructure_crystallite(j)))
enddo
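
! Sizing sketch: crystallite_sizePostResults(i) is the plain sum of the per-output
! sizes above; e.g. requesting 'orientation' (4 values) plus 'eulerangles' (3 values)
! gives crystallite_sizePostResults = 7 reals for that crystallite section.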

! write description file for crystallite output
call IO_write_jobFile(myFile,'outputCrystallite')
do p = 1_pInt,material_Ncrystallite
  write(myFile,*)
  write(myFile,'(a)') '['//trim(crystallite_name(p))//']'
  write(myFile,*)
  do e = 1_pInt,crystallite_Noutput(p)
    write(myFile,'(a,i4)') trim(crystallite_output(e,p))//char(9),crystallite_sizePostResult(e,p)
  enddo
enddo
close(myFile)

do e = FEsolving_execElem(1),FEsolving_execElem(2)                      ! iterate over all cp elements
  myNgrains = homogenization_Ngrains(mesh_element(3,e))                 ! look up homogenization-->grainCount
  forall (i = FEsolving_execIP(1,e):FEsolving_execIP(2,e), g = 1_pInt:myNgrains)
    crystallite_Fp0(1:3,1:3,g,i,e) = math_EulerToR(material_EulerAngles(1:3,g,i,e))  ! plastic def gradient reflects init orientation
    crystallite_F0(1:3,1:3,g,i,e) = math_I3
    crystallite_localPlasticity(g,i,e) = phase_localPlasticity(material_phase(g,i,e))
    crystallite_Fe(1:3,1:3,g,i,e) = math_transpose33(crystallite_Fp0(1:3,1:3,g,i,e))
    crystallite_Fp(1:3,1:3,g,i,e) = crystallite_Fp0(1:3,1:3,g,i,e)
    crystallite_requested(g,i,e) = .true.
  endforall
enddo
crystallite_partionedTemperature0 = Temperature                         ! isothermal assumption
crystallite_partionedFp0 = crystallite_Fp0
crystallite_partionedF0 = crystallite_F0
crystallite_partionedF = crystallite_F0

! Initialize crystallite_symmetryID(g,i,e)
do e = FEsolving_execElem(1),FEsolving_execElem(2)
  myNgrains = homogenization_Ngrains(mesh_element(3,e))
  do i = FEsolving_execIP(1,e),FEsolving_execIP(2,e)
    do g = 1_pInt,myNgrains
      myPhase = material_phase(g,i,e)
      myMat = phase_plasticityInstance(myPhase)
      select case (phase_plasticity(myPhase))
        case (constitutive_phenopowerlaw_label)
          crystallite_symmetryID(g,i,e) = lattice_symmetryType(constitutive_phenopowerlaw_structureName(myMat))
        case (constitutive_titanmod_label)
          crystallite_symmetryID(g,i,e) = lattice_symmetryType(constitutive_titanmod_structureName(myMat))
        case (constitutive_dislotwin_label)
          crystallite_symmetryID(g,i,e) = lattice_symmetryType(constitutive_dislotwin_structureName(myMat))
        case (constitutive_nonlocal_label)
          crystallite_symmetryID(g,i,e) = lattice_symmetryType(constitutive_nonlocal_structureName(myMat))
        case default
          crystallite_symmetryID(g,i,e) = 0_pInt                        ! no symmetry; e.g. isotropic (j2) plasticity without a lattice structure
      end select
    enddo
  enddo
enddo

call crystallite_orientations()
crystallite_orientation0 = crystallite_orientation                      ! store initial orientations for calculation of grain rotations

!$OMP PARALLEL DO PRIVATE(myNgrains)
  do e = FEsolving_execElem(1),FEsolving_execElem(2)
    myNgrains = homogenization_Ngrains(mesh_element(3,e))
    do i = FEsolving_execIP(1,e),FEsolving_execIP(2,e)
      do g = 1_pInt,myNgrains
        call constitutive_microstructure(crystallite_Temperature(g,i,e), crystallite_Fe(1:3,1:3,g,i,e), &
                                         crystallite_Fp(1:3,1:3,g,i,e), g, i, e)   ! update dependent state variables to be consistent with basic states
      enddo
    enddo
  enddo
!$OMP END PARALLEL DO

call crystallite_stressAndItsTangent(.true.,.false.)                    ! request elastic answers
crystallite_fallbackdPdF = crystallite_dPdF                             ! use initial elastic stiffness as fallback

! *** Output to MARC output file ***
if (iand(debug_level(debug_crystallite),debug_levelBasic) /= 0_pInt) then
  !$OMP CRITICAL (write2out)
    write(6,'(a35,1x,7(i8,1x))') 'crystallite_Temperature: ', shape(crystallite_Temperature)
    write(6,'(a35,1x,7(i8,1x))') 'crystallite_dotTemperature: ', shape(crystallite_dotTemperature)
    write(6,'(a35,1x,7(i8,1x))') 'crystallite_Fe: ', shape(crystallite_Fe)
    write(6,'(a35,1x,7(i8,1x))') 'crystallite_Fp: ', shape(crystallite_Fp)
    write(6,'(a35,1x,7(i8,1x))') 'crystallite_Lp: ', shape(crystallite_Lp)
    write(6,'(a35,1x,7(i8,1x))') 'crystallite_F0: ', shape(crystallite_F0)
    write(6,'(a35,1x,7(i8,1x))') 'crystallite_Fp0: ', shape(crystallite_Fp0)
    write(6,'(a35,1x,7(i8,1x))') 'crystallite_Lp0: ', shape(crystallite_Lp0)
    write(6,'(a35,1x,7(i8,1x))') 'crystallite_partionedF: ', shape(crystallite_partionedF)
    write(6,'(a35,1x,7(i8,1x))') 'crystallite_partionedTemp0: ', shape(crystallite_partionedTemperature0)
    write(6,'(a35,1x,7(i8,1x))') 'crystallite_partionedF0: ', shape(crystallite_partionedF0)
    write(6,'(a35,1x,7(i8,1x))') 'crystallite_partionedFp0: ', shape(crystallite_partionedFp0)
    write(6,'(a35,1x,7(i8,1x))') 'crystallite_partionedLp0: ', shape(crystallite_partionedLp0)
    write(6,'(a35,1x,7(i8,1x))') 'crystallite_subF: ', shape(crystallite_subF)
    write(6,'(a35,1x,7(i8,1x))') 'crystallite_subTemperature0: ', shape(crystallite_subTemperature0)
    write(6,'(a35,1x,7(i8,1x))') 'crystallite_symmetryID: ', shape(crystallite_symmetryID)
    write(6,'(a35,1x,7(i8,1x))') 'crystallite_subF0: ', shape(crystallite_subF0)
    write(6,'(a35,1x,7(i8,1x))') 'crystallite_subFe0: ', shape(crystallite_subFe0)
    write(6,'(a35,1x,7(i8,1x))') 'crystallite_subFp0: ', shape(crystallite_subFp0)
    write(6,'(a35,1x,7(i8,1x))') 'crystallite_subLp0: ', shape(crystallite_subLp0)
    write(6,'(a35,1x,7(i8,1x))') 'crystallite_P: ', shape(crystallite_P)
    write(6,'(a35,1x,7(i8,1x))') 'crystallite_Tstar_v: ', shape(crystallite_Tstar_v)
    write(6,'(a35,1x,7(i8,1x))') 'crystallite_Tstar0_v: ', shape(crystallite_Tstar0_v)
    write(6,'(a35,1x,7(i8,1x))') 'crystallite_partionedTstar0_v: ', shape(crystallite_partionedTstar0_v)
    write(6,'(a35,1x,7(i8,1x))') 'crystallite_subTstar0_v: ', shape(crystallite_subTstar0_v)
    write(6,'(a35,1x,7(i8,1x))') 'crystallite_dPdF: ', shape(crystallite_dPdF)
    write(6,'(a35,1x,7(i8,1x))') 'crystallite_dPdF0: ', shape(crystallite_dPdF0)
    write(6,'(a35,1x,7(i8,1x))') 'crystallite_partioneddPdF0: ', shape(crystallite_partioneddPdF0)
    write(6,'(a35,1x,7(i8,1x))') 'crystallite_fallbackdPdF: ', shape(crystallite_fallbackdPdF)
    write(6,'(a35,1x,7(i8,1x))') 'crystallite_orientation: ', shape(crystallite_orientation)
    write(6,'(a35,1x,7(i8,1x))') 'crystallite_orientation0: ', shape(crystallite_orientation0)
    write(6,'(a35,1x,7(i8,1x))') 'crystallite_rotation: ', shape(crystallite_rotation)
    write(6,'(a35,1x,7(i8,1x))') 'crystallite_disorientation: ', shape(crystallite_disorientation)
    write(6,'(a35,1x,7(i8,1x))') 'crystallite_dt: ', shape(crystallite_dt)
    write(6,'(a35,1x,7(i8,1x))') 'crystallite_subdt: ', shape(crystallite_subdt)
    write(6,'(a35,1x,7(i8,1x))') 'crystallite_subFrac: ', shape(crystallite_subFrac)
    write(6,'(a35,1x,7(i8,1x))') 'crystallite_subStep: ', shape(crystallite_subStep)
    write(6,'(a35,1x,7(i8,1x))') 'crystallite_localPlasticity: ', shape(crystallite_localPlasticity)
    write(6,'(a35,1x,7(i8,1x))') 'crystallite_requested: ', shape(crystallite_requested)
    write(6,'(a35,1x,7(i8,1x))') 'crystallite_todo: ', shape(crystallite_todo)
    write(6,'(a35,1x,7(i8,1x))') 'crystallite_converged: ', shape(crystallite_converged)
    write(6,'(a35,1x,7(i8,1x))') 'crystallite_sizePostResults: ', shape(crystallite_sizePostResults)
    write(6,'(a35,1x,7(i8,1x))') 'crystallite_sizePostResult: ', shape(crystallite_sizePostResult)
    write(6,*)
    write(6,*) 'Number of nonlocal grains: ',count(.not. crystallite_localPlasticity)
    flush(6)
  !$OMP END CRITICAL (write2out)
endif

call debug_info
call debug_reset

end subroutine crystallite_init


!********************************************************************
! calculate stress (P) and tangent (dPdF) for crystallites
!********************************************************************
subroutine crystallite_stressAndItsTangent(updateJaco,rate_sensitivity)

!*** variables and functions from other modules ***!
use numerics, only:     subStepMinCryst, &
                        subStepSizeCryst, &
                        stepIncreaseCryst, &
                        pert_Fg, &
                        pert_method, &
                        nCryst, &
                        numerics_integrator, &
                        numerics_integrationMode, &
                        numerics_timeSyncing, &
                        relevantStrain, &
                        analyticJaco
use debug, only:        debug_level, &
                        debug_crystallite, &
                        debug_levelBasic, &
                        debug_levelExtensive, &
                        debug_levelSelective, &
                        debug_e, &
                        debug_i, &
                        debug_g, &
                        debug_CrystalliteLoopDistribution
use IO, only:           IO_warning
use math, only:         math_inv33, &
                        math_identity2nd, &
                        math_transpose33, &
                        math_mul33x33, &
                        math_mul66x6, &
                        math_Mandel6to33, &
                        math_Mandel33to6, &
                        math_I3, &
                        math_mul3333xx3333
use FEsolving, only:    FEsolving_execElem, &
                        FEsolving_execIP
use mesh, only:         mesh_element, &
                        mesh_NcpElems, &
                        mesh_maxNips, &
                        mesh_ipNeighborhood, &
                        FE_NipNeighbors, &
                        FE_geomtype
use material, only:     homogenization_Ngrains, &
                        homogenization_maxNgrains
use constitutive, only: constitutive_sizeState, &
                        constitutive_sizeDotState, &
                        constitutive_state, &
                        constitutive_state_backup, &
                        constitutive_subState0, &
                        constitutive_partionedState0, &
                        constitutive_homogenizedC, &
                        constitutive_dotState, &
                        constitutive_dotState_backup, &
                        constitutive_TandItsTangent

implicit none

!*** input variables ***!
logical, intent(in) :: updateJaco, rate_sensitivity          ! flag indicating whether we want to update the Jacobian (stiffness) or not

!*** local variables ***!
real(pReal) myPert, &                                        ! perturbation with correct sign
            formerSubStep, &
            subFracIntermediate
real(pReal), dimension(3,3) :: invFp, &                      ! inverse of the plastic deformation gradient
                               Fe_guess, &                   ! guess for elastic deformation gradient
                               Tstar                         ! 2nd Piola-Kirchhoff stress tensor
real(pReal), dimension(3,3,3,3,homogenization_maxNgrains,mesh_maxNips,mesh_NcpElems) :: &
            dPdF_perturbation1, &
            dPdF_perturbation2
real(pReal), dimension(3,3,homogenization_maxNgrains,mesh_maxNips,mesh_NcpElems) :: &
            F_backup, &
            Fp_backup, &
            InvFp_backup, &
            Fe_backup, &
            Lp_backup, &
            P_backup
real(pReal), dimension(6,homogenization_maxNgrains,mesh_maxNips,mesh_NcpElems) :: &
            Tstar_v_backup
real(pReal), dimension(homogenization_maxNgrains,mesh_maxNips,mesh_NcpElems) :: &
            Temperature_backup
integer(pInt) NiterationCrystallite, &                       ! number of iterations in crystallite loop
            e, &                                             ! element index
            i, &                                             ! integration point index
            g, &                                             ! grain index
            k, &
            l, &
            n, &
            neighboring_e, &
            neighboring_i, &
            o, &
            p, &
            perturbation, &                                  ! loop counter for forward,backward perturbation mode
            myNgrains
logical, dimension(homogenization_maxNgrains,mesh_maxNips,mesh_NcpElems) :: &
            convergenceFlag_backup

! local variables used for calculating analytic Jacobian
real(pReal), dimension(3,3) ::     Fpinv_rate, &
                                   FDot_inv, &
                                   junk
real(pReal), dimension(3,3,3,3) :: dSdFe, &
                                   dFedF, &
                                   dFedFdot, &
                                   dSdF, &
                                   dSdFdot, &
                                   dFp_invdFdot, &
                                   junk2
real(pReal) :: counter

! --+>> INITIALIZE TO STARTING CONDITION <<+--

if (iand(debug_level(debug_crystallite),debug_levelBasic) /= 0_pInt &
    .and. debug_e > 0 .and. debug_e <= mesh_NcpElems &
    .and. debug_i > 0 .and. debug_i <= mesh_maxNips &
    .and. debug_g > 0 .and. debug_g <= homogenization_maxNgrains) then
  !$OMP CRITICAL (write2out)
    write(6,*)
    write(6,'(a,i8,1x,i2,1x,i3)') '<< CRYST >> crystallite start at el ip g ', debug_e, debug_i, debug_g
    write(6,'(a,/,12x,f14.9)') '<< CRYST >> Temp0', crystallite_partionedTemperature0(debug_g,debug_i,debug_e)
    write(6,'(a,/,3(12x,3(f14.9,1x)/))') '<< CRYST >> F0 ', &
                                         math_transpose33(crystallite_partionedF0(1:3,1:3,debug_g,debug_i,debug_e))
    write(6,'(a,/,3(12x,3(f14.9,1x)/))') '<< CRYST >> Fp0', &
                                         math_transpose33(crystallite_partionedFp0(1:3,1:3,debug_g,debug_i,debug_e))
    write(6,'(a,/,3(12x,3(f14.9,1x)/))') '<< CRYST >> Lp0', &
                                         math_transpose33(crystallite_partionedLp0(1:3,1:3,debug_g,debug_i,debug_e))
  !$OMP END CRITICAL (write2out)
endif

crystallite_subStep = 0.0_pReal

do e = FEsolving_execElem(1),FEsolving_execElem(2)                      ! iterate over elements to be processed
  myNgrains = homogenization_Ngrains(mesh_element(3,e))
  forall (i = FEsolving_execIP(1,e):FEsolving_execIP(2,e), g = 1_pInt:myNgrains, crystallite_requested(g,i,e))
    crystallite_subTemperature0(g,i,e) = crystallite_partionedTemperature0(g,i,e)                ! ...temperature
    constitutive_subState0(g,i,e)%p = constitutive_partionedState0(g,i,e)%p                      ! ...microstructure
    crystallite_subFp0(1:3,1:3,g,i,e) = crystallite_partionedFp0(1:3,1:3,g,i,e)                  ! ...plastic def grad
    crystallite_subLp0(1:3,1:3,g,i,e) = crystallite_partionedLp0(1:3,1:3,g,i,e)                  ! ...plastic velocity grad
    crystallite_dPdF0(1:3,1:3,1:3,1:3,g,i,e) = crystallite_partioneddPdF0(1:3,1:3,1:3,1:3,g,i,e) ! ...stiffness
    crystallite_subF0(1:3,1:3,g,i,e) = crystallite_partionedF0(1:3,1:3,g,i,e)                    ! ...def grad
    crystallite_subTstar0_v(1:6,g,i,e) = crystallite_partionedTstar0_v(1:6,g,i,e)                ! ...2nd PK stress
    crystallite_subFe0(1:3,1:3,g,i,e) = math_mul33x33(crystallite_subF0(1:3,1:3,g,i,e), &
                                                      math_inv33(crystallite_subFp0(1:3,1:3,g,i,e))) ! only needed later on for stiffness calculation
    crystallite_subFrac(g,i,e) = 0.0_pReal
    crystallite_subStep(g,i,e) = 1.0_pReal/subStepSizeCryst
    crystallite_todo(g,i,e) = .true.
    crystallite_converged(g,i,e) = .false.                              ! pretend failed step of twice the required size
  endforall
enddo
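
! Substep bookkeeping sketch (assuming the documented defaults subStepSizeCryst = 0.25
! and stepIncreaseCryst = 1.5 from numerics.config; actual values may differ per run):
! subStep starts at 1/0.25 = 4 with converged = .false., so the first "cutback"
! (subStep * subStepSizeCryst) yields exactly 1.0, i.e. one attempt at the full
! increment; each further cutback shrinks the step by that factor, and each successful
! wind forward grows it again via min(1 - subFrac, stepIncreaseCryst * subStep).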

! --+>> CRYSTALLITE CUTBACK LOOP <<+--

NiterationCrystallite = 0_pInt
numerics_integrationMode = 1_pInt

do while (any(crystallite_todo(:,:,FEsolving_execElem(1):FEsolving_execElem(2))))  ! cutback loop for crystallites

  if (any(.not. crystallite_localPlasticity) .and. numerics_timeSyncing) then

    ! Time synchronization can only be used for nonlocal calculations, and only there it makes sense.
    ! The idea is that in nonlocal calculations often the vast majority of the ips
    ! converges in one iteration, whereas a small fraction of ips has to do a lot of cutbacks.
    ! Hence, we try to minimize the computational effort by just doing a lot of cutbacks
    ! in the vicinity of the "bad" ips and leave the easily converged volume more or less as it is.
    ! However, some synchronization of the time step has to be done at the border between "bad" ips
    ! and the ones that immediately converged.

    if (iand(debug_level(debug_crystallite),debug_levelExtensive) /= 0_pInt) then
      !$OMP CRITICAL (write2out)
        write(6,'(a,i6)') '<< CRYST >> crystallite iteration ',NiterationCrystallite
      !$OMP END CRITICAL (write2out)
    endif

    if (any(crystallite_syncSubFrac)) then

      ! Just did a time synchronization.
      ! If all synchronizers converged, then do nothing else than winding them forward.
      ! If any of the synchronizers did not converge, something went completely wrong,
      ! and it's not clear how to fix this, so all nonlocals become terminally ill.

      if (any(crystallite_syncSubFrac .and. .not. crystallite_converged(1,:,:))) then
        if (iand(debug_level(debug_crystallite),debug_levelExtensive) /= 0_pInt) then
          do e = FEsolving_execElem(1),FEsolving_execElem(2)
            myNgrains = homogenization_Ngrains(mesh_element(3,e))
            do i = FEsolving_execIP(1,e),FEsolving_execIP(2,e)
              if (crystallite_syncSubFrac(i,e) .and. .not. crystallite_converged(1,i,e)) then
                !$OMP CRITICAL (write2out)
                  write(6,'(a,i8,1x,i2)') '<< CRYST >> time synchronization: failed at el,ip ',e,i
                !$OMP END CRITICAL (write2out)
              endif
            enddo
          enddo
        endif
        crystallite_syncSubFrac = .false.
        where (.not. crystallite_localPlasticity)
          crystallite_subStep = 0.0_pReal
          crystallite_todo = .false.
        endwhere
      else
        crystallite_clearToWindForward = crystallite_localPlasticity(1,:,:) .or. crystallite_syncSubFrac
        crystallite_clearToCutback = crystallite_localPlasticity(1,:,:)
        if (iand(debug_level(debug_crystallite),debug_levelExtensive) /= 0_pInt) then
          !$OMP CRITICAL (write2out)
            write(6,'(a,i6)') '<< CRYST >> time synchronization: wind forward'
          !$OMP END CRITICAL (write2out)
        endif
      endif

    elseif (any(crystallite_syncSubFracCompleted)) then

      ! Just completed a time synchronization.
      ! Make sure that the ips that synchronized their time step start out non-converged.

      where (crystallite_syncSubFracCompleted) &
        crystallite_converged(1,:,:) = .false.
      crystallite_syncSubFracCompleted = .false.
      crystallite_clearToWindForward = crystallite_localPlasticity(1,:,:)
      crystallite_clearToCutback = crystallite_localPlasticity(1,:,:) .or. .not. crystallite_converged(1,:,:)
      if (iand(debug_level(debug_crystallite),debug_levelExtensive) /= 0_pInt) then
        !$OMP CRITICAL (write2out)
          write(6,'(a,i6)') '<< CRYST >> time synchronization: done, proceed with cutback'
        !$OMP END CRITICAL (write2out)
      endif

    else

      ! Normal calculation.
      ! If all converged and are at the end of the time increment, then just do a final wind forward.
      ! If all converged, but not all reached the end of the time increment, then we only wind
      ! those forward that are still on their way; all others have to wait.
      ! If some did not converge and all are still at the start of the time increment,
      ! then all non-convergers force their converged neighbors to also do a cutback.
      ! In case that some ips have already wound forward to an intermediate time (subFrac > 0),
      ! then all those ips that converged in the first iteration, but now have a non-converged
      ! neighbor, have to synchronize their time step to the same intermediate time. If such a
      ! synchronization takes place, all other ips have to wait and only the synchronizers do a
      ! cutback. In the next iteration those will do a wind forward while all others still wait.
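
      ! Illustrative scenario (sketch): nonlocal ip A converged over the full increment in
      ! the first iteration (subFrac = 0), while its neighbor B cut back and is now at
      ! subFrac = 0.5. A then gets crystallite_syncSubFrac = .true. and redoes its step
      ! with subStep = subFracIntermediate = 0.5, so that A and B meet at the same
      ! intermediate time; purely local ips are never held back this way.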

      crystallite_clearToWindForward = crystallite_localPlasticity(1,:,:)
      crystallite_clearToCutback = crystallite_localPlasticity(1,:,:)

      if (all(crystallite_localPlasticity .or. crystallite_converged)) then
        if (all(crystallite_localPlasticity .or. crystallite_subStep + crystallite_subFrac >= 1.0_pReal)) then
          crystallite_clearToWindForward = .true.                       ! final wind forward
          if (iand(debug_level(debug_crystallite),debug_levelExtensive) /= 0_pInt) then
            !$OMP CRITICAL (write2out)
              write(6,'(a,i6)') '<< CRYST >> final wind forward'
            !$OMP END CRITICAL (write2out)
          endif
        else
          crystallite_clearToWindForward = crystallite_localPlasticity(1,:,:) .or. crystallite_subStep(1,:,:) < 1.0_pReal
          if (iand(debug_level(debug_crystallite),debug_levelExtensive) /= 0_pInt) then
            !$OMP CRITICAL (write2out)
              write(6,'(a,i6)') '<< CRYST >> wind forward'
            !$OMP END CRITICAL (write2out)
          endif
        endif
      else
        subFracIntermediate = maxval(crystallite_subFrac, mask=.not. crystallite_localPlasticity)
        if (subFracIntermediate == 0.0_pReal) then
          crystallite_neighborEnforcedCutback = .false.                 ! look for ips that require a cutback because of a nonconverged neighbor
          !$OMP PARALLEL DO PRIVATE(neighboring_e,neighboring_i)
            do e = FEsolving_execElem(1),FEsolving_execElem(2)
              do i = FEsolving_execIP(1,e),FEsolving_execIP(2,e)
                if (.not. crystallite_localPlasticity(1,i,e) .and. crystallite_converged(1,i,e)) then
                  do n = 1_pInt,FE_NipNeighbors(FE_geomtype(mesh_element(2,e)))
                    neighboring_e = mesh_ipNeighborhood(1,n,i,e)
                    neighboring_i = mesh_ipNeighborhood(2,n,i,e)
                    if (neighboring_e > 0_pInt .and. neighboring_i > 0_pInt) then
                      if (.not. crystallite_localPlasticity(1,neighboring_i,neighboring_e) &
                          .and. .not. crystallite_converged(1,neighboring_i,neighboring_e)) then
                        crystallite_neighborEnforcedCutback(i,e) = .true.
#ifndef _OPENMP
                        if (iand(debug_level(debug_crystallite),debug_levelExtensive) /= 0_pInt) &
                          write(6,'(a12,i5,1x,i2,a,i5,1x,i2)') '<< CRYST >> ', neighboring_e,neighboring_i, &
                                                               ' enforced cutback at ',e,i
#endif
                      endif
                    endif
                  enddo
                endif
              enddo
            enddo
          !$OMP END PARALLEL DO
          where (crystallite_neighborEnforcedCutback) &
            crystallite_converged(1,:,:) = .false.
        else
          crystallite_syncSubFrac = .false.                             ! look for ips that have to do a time synchronization because of a nonconverged neighbor
          !$OMP PARALLEL DO PRIVATE(neighboring_e,neighboring_i)
            do e = FEsolving_execElem(1),FEsolving_execElem(2)
              do i = FEsolving_execIP(1,e),FEsolving_execIP(2,e)
                if (.not. crystallite_localPlasticity(1,i,e) .and. crystallite_subFrac(1,i,e) == 0.0_pReal) then
                  do n = 1_pInt,FE_NipNeighbors(FE_geomtype(mesh_element(2,e)))
                    neighboring_e = mesh_ipNeighborhood(1,n,i,e)
                    neighboring_i = mesh_ipNeighborhood(2,n,i,e)
                    if (neighboring_e > 0_pInt .and. neighboring_i > 0_pInt) then
                      if (.not. crystallite_localPlasticity(1,neighboring_i,neighboring_e) &
                          .and. .not. crystallite_converged(1,neighboring_i,neighboring_e)) then
                        crystallite_syncSubFrac(i,e) = .true.
#ifndef _OPENMP
                        if (iand(debug_level(debug_crystallite),debug_levelExtensive) /= 0_pInt) &
                          write(6,'(a12,i5,1x,i2,a,i5,1x,i2)') '<< CRYST >> ', neighboring_e,neighboring_i, &
                                                               ' enforced time synchronization at ',e,i
#endif
                      endif
                    endif
                  enddo
                endif
              enddo
            enddo
          !$OMP END PARALLEL DO
          where (crystallite_syncSubFrac) &
            crystallite_converged(1,:,:) = .false.
        endif
        where (.not. crystallite_localPlasticity .and. crystallite_subStep < 1.0_pReal) &
          crystallite_converged = .false.
        if (any(crystallite_syncSubFrac)) then                          ! have to do syncing now, so all wait except for the synchronizers which do a cutback
          crystallite_clearToWindForward = crystallite_localPlasticity(1,:,:)
          crystallite_clearToCutback = crystallite_localPlasticity(1,:,:) .or. crystallite_syncSubFrac
          if (iand(debug_level(debug_crystallite),debug_levelExtensive) /= 0_pInt) then
            !$OMP CRITICAL (write2out)
              write(6,'(a,i6)') '<< CRYST >> time synchronization: cutback'
            !$OMP END CRITICAL (write2out)
          endif
        else
          where (.not. crystallite_converged(1,:,:)) &
            crystallite_clearToCutback = .true.
          if (iand(debug_level(debug_crystallite),debug_levelExtensive) /= 0_pInt) then
            !$OMP CRITICAL (write2out)
              write(6,'(a,i6)') '<< CRYST >> cutback'
            !$OMP END CRITICAL (write2out)
          endif
        endif
      endif

    endif

    ! Make sure that all cutbackers start with the same substep
    where (.not. crystallite_localPlasticity .and. .not. crystallite_converged) &
      crystallite_subStep = minval(crystallite_subStep, mask=.not. crystallite_localPlasticity &
                                                             .and. .not. crystallite_converged)

    ! Those that neither wind forward nor cut back have nothing to do
    where (.not. crystallite_clearToWindForward .and. .not. crystallite_clearToCutback) &
      crystallite_todo(1,:,:) = .false.

  endif

  !$OMP PARALLEL DO PRIVATE(myNgrains,formerSubStep)
    do e = FEsolving_execElem(1),FEsolving_execElem(2)                  ! iterate over elements to be processed
      myNgrains = homogenization_Ngrains(mesh_element(3,e))
      do i = FEsolving_execIP(1,e),FEsolving_execIP(2,e)                ! iterate over IPs of this element to be processed
        do g = 1,myNgrains

          ! --- wind forward ---

          if (crystallite_converged(g,i,e) .and. crystallite_clearToWindForward(i,e)) then
            formerSubStep = crystallite_subStep(g,i,e)
            crystallite_subFrac(g,i,e) = crystallite_subFrac(g,i,e) + crystallite_subStep(g,i,e)
            !$OMP FLUSH(crystallite_subFrac)
            crystallite_subStep(g,i,e) = min(1.0_pReal - crystallite_subFrac(g,i,e), &
                                             stepIncreaseCryst * crystallite_subStep(g,i,e))
            !$OMP FLUSH(crystallite_subStep)

            if (crystallite_subStep(g,i,e) > 0.0_pReal) then
              crystallite_subTemperature0(g,i,e) = crystallite_Temperature(g,i,e)                  ! wind forward...
              crystallite_subF0(1:3,1:3,g,i,e) = crystallite_subF(1:3,1:3,g,i,e)                   ! ...def grad
              !$OMP FLUSH(crystallite_subF0)
              crystallite_subFp0(1:3,1:3,g,i,e) = crystallite_Fp(1:3,1:3,g,i,e)                    ! ...plastic def grad
              crystallite_subFe0(1:3,1:3,g,i,e) = math_mul33x33(crystallite_subF(1:3,1:3,g,i,e), &
                                                                crystallite_invFp(1:3,1:3,g,i,e))  ! only needed later on for stiffness calculation
              crystallite_subLp0(1:3,1:3,g,i,e) = crystallite_Lp(1:3,1:3,g,i,e)                    ! ...plastic velocity gradient
              constitutive_subState0(g,i,e)%p = constitutive_state(g,i,e)%p                        ! ...microstructure
              crystallite_subTstar0_v(1:6,g,i,e) = crystallite_Tstar_v(1:6,g,i,e)                  ! ...2nd PK stress
              if (crystallite_syncSubFrac(i,e)) then                    ! if we just did a synchronization of states, then we wind forward without any further time integration
                crystallite_syncSubFracCompleted(i,e) = .true.
                crystallite_syncSubFrac(i,e) = .false.
                crystallite_todo(g,i,e) = .false.
              else
                crystallite_todo(g,i,e) = .true.
              endif
              !$OMP FLUSH(crystallite_todo)
#ifndef _OPENMP
              if (iand(debug_level(debug_crystallite),debug_levelBasic) /= 0_pInt &
                  .and. ((e == debug_e .and. i == debug_i .and. g == debug_g) &
                         .or. .not. iand(debug_level(debug_crystallite),debug_levelSelective) /= 0_pInt)) then
                write(6,'(a,f12.8,a,f12.8,a,i8,1x,i2,1x,i3)') '<< CRYST >> winding forward from ', &
                  crystallite_subFrac(g,i,e)-formerSubStep,' to current crystallite_subfrac ', &
                  crystallite_subFrac(g,i,e),' in crystallite_stressAndItsTangent at el ip g ',e,i,g
                write(6,*)
              endif
#endif
            elseif (formerSubStep > 0.0_pReal) then                     ! this crystallite just converged for the entire timestep
              crystallite_todo(g,i,e) = .false.                         ! so done here
              !$OMP FLUSH(crystallite_todo)
              if (iand(debug_level(debug_crystallite),debug_levelBasic) /= 0_pInt) then
                !$OMP CRITICAL (distributionCrystallite)
                  debug_CrystalliteLoopDistribution(min(nCryst+1_pInt,NiterationCrystallite)) = &
                    debug_CrystalliteLoopDistribution(min(nCryst+1_pInt,NiterationCrystallite)) + 1_pInt
                !$OMP END CRITICAL (distributionCrystallite)
              endif
            endif

          ! --- cutback ---

          elseif (.not. crystallite_converged(g,i,e) .and. crystallite_clearToCutback(i,e)) then
            if (crystallite_syncSubFrac(i,e)) then                      ! synchronize time
              crystallite_subStep(g,i,e) = subFracIntermediate
            else
              crystallite_subStep(g,i,e) = subStepSizeCryst * crystallite_subStep(g,i,e)           ! cut step by subStepSizeCryst and restore...
            endif
            !$OMP FLUSH(crystallite_subStep)
            crystallite_Temperature(g,i,e) = crystallite_subTemperature0(g,i,e)                    ! ...temperature
            crystallite_Fp(1:3,1:3,g,i,e) = crystallite_subFp0(1:3,1:3,g,i,e)                      ! ...plastic def grad
            !$OMP FLUSH(crystallite_Fp)
            crystallite_invFp(1:3,1:3,g,i,e) = math_inv33(crystallite_Fp(1:3,1:3,g,i,e))
            !$OMP FLUSH(crystallite_invFp)
            crystallite_Lp(1:3,1:3,g,i,e) = crystallite_subLp0(1:3,1:3,g,i,e)                      ! ...plastic velocity grad
            constitutive_state(g,i,e)%p = constitutive_subState0(g,i,e)%p                          ! ...microstructure
            crystallite_Tstar_v(1:6,g,i,e) = crystallite_subTstar0_v(1:6,g,i,e)                    ! ...2nd PK stress

            ! can't restore dotState here, since it is not yet calculated in the first cutback after initialization
            crystallite_todo(g,i,e) = crystallite_subStep(g,i,e) > subStepMinCryst                 ! still on track or already done (beyond repair)
            !$OMP FLUSH(crystallite_todo)
#ifndef _OPENMP
            if (iand(debug_level(debug_crystallite),debug_levelBasic) /= 0_pInt &
                .and. ((e == debug_e .and. i == debug_i .and. g == debug_g) &
                       .or. .not. iand(debug_level(debug_crystallite),debug_levelSelective) /= 0_pInt)) then
              if (crystallite_todo(g,i,e)) then
                write(6,'(a,f12.8,a,i8,1x,i2,1x,i3)') '<< CRYST >> cutback step in crystallite_stressAndItsTangent &
                                                      &with new crystallite_subStep: ', &
                                                      crystallite_subStep(g,i,e),' at el ip g ',e,i,g
              else
                write(6,'(a,i8,1x,i2,1x,i3)') '<< CRYST >> reached minimum step size &
                                              &in crystallite_stressAndItsTangent at el ip g ',e,i,g
              endif
              write(6,*)
            endif
#endif
          endif

          ! --- prepare for integration ---

          if (crystallite_todo(g,i,e) .and. (crystallite_clearToWindForward(i,e) .or. crystallite_clearToCutback(i,e))) then
            crystallite_subF(1:3,1:3,g,i,e) = crystallite_subF0(1:3,1:3,g,i,e) &
                                            + crystallite_subStep(g,i,e) &
                                              * (crystallite_partionedF(1:3,1:3,g,i,e) - crystallite_partionedF0(1:3,1:3,g,i,e))
            !$OMP FLUSH(crystallite_subF)
            crystallite_Fe(1:3,1:3,g,i,e) = math_mul33x33(crystallite_subF(1:3,1:3,g,i,e),crystallite_invFp(1:3,1:3,g,i,e))
            crystallite_subdt(g,i,e) = crystallite_subStep(g,i,e) * crystallite_dt(g,i,e)
            crystallite_converged(g,i,e) = .false.                      ! start out non-converged
          endif

        enddo    ! grains
      enddo      ! IPs
    enddo        ! elements
  !$OMP END PARALLEL DO
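
  ! Target kinematics sketch: crystallite_subF is the linear interpolation
  ! subF = subF0 + subStep * (partionedF - partionedF0), so each grain always aims at
  ! a deformation gradient consistent with the fraction of the increment it has reached.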

  if (numerics_timeSyncing) then
    if (any(.not. crystallite_localPlasticity .and. .not. crystallite_todo .and. .not. crystallite_converged &
            .and. crystallite_subStep <= subStepMinCryst)) then         ! no way of rescuing a nonlocal ip that violated the lower time step limit, ...
      if (iand(debug_level(debug_crystallite),debug_levelExtensive) /= 0_pInt) then
        do e = FEsolving_execElem(1),FEsolving_execElem(2)
          myNgrains = homogenization_Ngrains(mesh_element(3,e))
          do i = FEsolving_execIP(1,e),FEsolving_execIP(2,e)
            do g = 1,myNgrains
              if (.not. crystallite_localPlasticity(g,i,e) .and. .not. crystallite_todo(g,i,e) &
                  .and. .not. crystallite_converged(g,i,e) .and. crystallite_subStep(g,i,e) <= subStepMinCryst) then
                !$OMP CRITICAL (write2out)
                  write(6,'(a,i8,1x,i2,1x,i3)') '<< CRYST >> nonlocal violated minimum subStep at el,ip,g ',e,i,g
                !$OMP END CRITICAL (write2out)
              endif
            enddo
          enddo
        enddo
      endif
      where (.not. crystallite_localPlasticity)
        crystallite_todo = .false.                                      ! ... so let all nonlocal ips die peacefully
        crystallite_subStep = 0.0_pReal
      endwhere
    endif
  endif

  if (iand(debug_level(debug_crystallite),debug_levelExtensive) /= 0_pInt) then
    !$OMP CRITICAL (write2out)
      write(6,*)
      write(6,'(a,e12.5)') '<< CRYST >> min(subStep) ',minval(crystallite_subStep)
      write(6,'(a,e12.5)') '<< CRYST >> max(subStep) ',maxval(crystallite_subStep)
      write(6,'(a,e12.5)') '<< CRYST >> min(subFrac) ',minval(crystallite_subFrac)
      write(6,'(a,e12.5)') '<< CRYST >> max(subFrac) ',maxval(crystallite_subFrac)
      write(6,*)
    !$OMP END CRITICAL (write2out)
  endif

  ! --- integrate --- requires fully defined state array (basic + dependent state)

  if (any(crystallite_todo)) then
    select case (numerics_integrator(numerics_integrationMode))
      case (1_pInt)
        call crystallite_integrateStateFPI()
      case (2_pInt)
        call crystallite_integrateStateEuler()
      case (3_pInt)
        call crystallite_integrateStateAdaptiveEuler()
      case (4_pInt)
        call crystallite_integrateStateRK4()
      case (5_pInt)
        call crystallite_integrateStateRKCK45()
    end select
  endif

  where (.not. crystallite_converged .and. crystallite_subStep > subStepMinCryst) &   ! do not try non-converged & fully cutbacked any further
    crystallite_todo = .true.

  NiterationCrystallite = NiterationCrystallite + 1_pInt

enddo    ! cutback loop

! --+>> CHECK FOR NON-CONVERGED CRYSTALLITES <<+--

do e = FEsolving_execElem(1),FEsolving_execElem(2)                      ! iterate over elements to be processed
  myNgrains = homogenization_Ngrains(mesh_element(3,e))
  do i = FEsolving_execIP(1,e),FEsolving_execIP(2,e)                    ! iterate over IPs of this element to be processed
    do g = 1,myNgrains
      if (.not. crystallite_converged(g,i,e)) then                      ! respond fully elastically (might be not required due to becoming terminally ill anyway)
        if (iand(debug_level(debug_crystallite),debug_levelBasic) /= 0_pInt) then
          !$OMP CRITICAL (write2out)
            write(6,'(a,i8,1x,i2,1x,i3)') '<< CRYST >> no convergence: respond fully elastic at el ip g ',e,i,g
            write(6,*)
          !$OMP END CRITICAL (write2out)
        endif
        invFp = math_inv33(crystallite_partionedFp0(1:3,1:3,g,i,e))
        Fe_guess = math_mul33x33(crystallite_partionedF(1:3,1:3,g,i,e),invFp)
        call constitutive_TandItsTangent(Tstar,junk2,Fe_guess,g,i,e)
        crystallite_P(1:3,1:3,g,i,e) = math_mul33x33(Fe_guess,math_mul33x33(Tstar,transpose(invFp)))
      endif
if ( iand ( debug_level ( debug_crystallite ) , debug_levelBasic ) / = 0_pInt &
. and . ( ( e == debug_e . and . i == debug_i . and . g == debug_g ) &
. or . . not . iand ( debug_level ( debug_crystallite ) , debug_levelSelective ) / = 0_pInt ) ) then
!$OMP CRITICAL (write2out)
write ( 6 , '(a,i8,1x,i2,1x,i3)' ) '<< CRYST >> central solution of cryst_StressAndTangent at el ip g ' , e , i , g
write ( 6 , * )
write ( 6 , '(a,/,3(12x,3(f12.4,1x)/))' ) '<< CRYST >> P / MPa' , math_transpose33 ( crystallite_P ( 1 : 3 , 1 : 3 , g , i , e ) ) / 1.0e6_pReal
write ( 6 , '(a,/,3(12x,3(f14.9,1x)/))' ) '<< CRYST >> Fp' , math_transpose33 ( crystallite_Fp ( 1 : 3 , 1 : 3 , g , i , e ) )
write ( 6 , '(a,/,3(12x,3(f14.9,1x)/))' ) '<< CRYST >> Lp' , math_transpose33 ( crystallite_Lp ( 1 : 3 , 1 : 3 , g , i , e ) )
write ( 6 , * )
!$OMP END CRITICAL (write2out)
endif
2009-06-16 14:33:30 +05:30
enddo
2010-11-03 22:52:48 +05:30
enddo
2012-11-07 21:13:29 +05:30
enddo
2010-11-03 22:52:48 +05:30
2009-06-16 14:33:30 +05:30
2010-11-03 22:52:48 +05:30
! --+>> STIFFNESS CALCULATION <<+--
2009-06-16 14:33:30 +05:30
2010-11-03 22:52:48 +05:30
if ( updateJaco ) then ! Jacobian required
2012-02-23 01:41:09 +05:30
if ( . not . analyticJaco ) then ! Calculate Jacobian using perturbations
2011-02-23 13:59:51 +05:30
numerics_integrationMode = 2_pInt
2009-12-15 13:50:31 +05:30
2010-11-03 22:52:48 +05:30
! --- BACKUP ---
2012-11-07 21:13:29 +05:30
do e = FEsolving_execElem ( 1 ) , FEsolving_execElem ( 2 ) ! iterate over elements to be processed
myNgrains = homogenization_Ngrains ( mesh_element ( 3 , e ) )
forall ( i = FEsolving_execIP ( 1 , e ) : FEsolving_execIP ( 2 , e ) , g = 1 : myNgrains )
constitutive_state_backup ( g , i , e ) % p ( 1 : constitutive_sizeState ( g , i , e ) ) = &
constitutive_state ( g , i , e ) % p ( 1 : constitutive_sizeState ( g , i , e ) ) ! remember unperturbed, converged state, ...
constitutive_dotState_backup ( g , i , e ) % p ( 1 : constitutive_sizeDotState ( g , i , e ) ) = &
constitutive_dotState ( g , i , e ) % p ( 1 : constitutive_sizeDotState ( g , i , e ) ) ! ... dotStates, ...
endforall
enddo
2012-11-22 15:28:36 +05:30
Temperature_backup = crystallite_Temperature ! ... Temperature, ...
F_backup = crystallite_subF ! ... and kinematics
Fp_backup = crystallite_Fp
InvFp_backup = crystallite_invFp
Fe_backup = crystallite_Fe
Lp_backup = crystallite_Lp
Tstar_v_backup = crystallite_Tstar_v
P_backup = crystallite_P
2010-11-03 22:52:48 +05:30
convergenceFlag_backup = crystallite_converged
2009-12-15 13:50:31 +05:30
2010-11-03 22:52:48 +05:30
! --- CALCULATE STATE AND STRESS FOR PERTURBATION ---
dPdF_perturbation1 = crystallite_dPdF0 ! initialize stiffness with known good values from last increment
dPdF_perturbation2 = crystallite_dPdF0 ! initialize stiffness with known good values from last increment
do perturbation = 1 , 2 ! forward and backward perturbation
2012-11-22 15:28:36 +05:30
if ( iand ( pert_method , perturbation ) > 0_pInt ) then ! mask for desired direction
2010-11-03 22:52:48 +05:30
myPert = - pert_Fg * ( - 1.0_pReal ) ** perturbation ! set perturbation step
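! sign convention: perturbation == 1 gives myPert = +pert_Fg (forward
! difference), perturbation == 2 gives myPert = -pert_Fg (backward difference)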
do k = 1 , 3 ; do l = 1 , 3 ! ...alter individual components
2012-07-05 15:24:50 +05:30
if ( iand ( debug_level ( debug_crystallite ) , debug_levelExtensive ) / = 0_pInt ) then
2010-11-03 22:52:48 +05:30
!$OMP CRITICAL (write2out)
2012-02-02 01:50:05 +05:30
write ( 6 , '(a,2(1x,i1),1x,a)' ) '<< CRYST >> [[[[[[ Stiffness perturbation' , k , l , ']]]]]]'
2011-03-21 16:01:17 +05:30
write ( 6 , * )
2010-11-03 22:52:48 +05:30
!$OMP END CRITICAL (write2out)
endif
2011-11-04 18:14:50 +05:30
! --- INITIALIZE UNPERTURBED STATE ---
select case ( numerics_integrator ( numerics_integrationMode ) )
2012-02-13 19:48:07 +05:30
case ( 1_pInt ) ! fixed-point method: restore to last converged state at end of subinc, since this is probably closest to the perturbed state
2012-11-07 21:13:29 +05:30
do e = FEsolving_execElem ( 1 ) , FEsolving_execElem ( 2 )
myNgrains = homogenization_Ngrains ( mesh_element ( 3 , e ) )
forall ( i = FEsolving_execIP ( 1 , e ) : FEsolving_execIP ( 2 , e ) , g = 1 : myNgrains )
constitutive_state ( g , i , e ) % p ( 1 : constitutive_sizeState ( g , i , e ) ) = &
constitutive_state_backup ( g , i , e ) % p ( 1 : constitutive_sizeState ( g , i , e ) )
constitutive_dotState ( g , i , e ) % p ( 1 : constitutive_sizeDotState ( g , i , e ) ) = &
constitutive_dotState_backup ( g , i , e ) % p ( 1 : constitutive_sizeDotState ( g , i , e ) )
endforall
enddo
2011-11-04 18:14:50 +05:30
crystallite_Temperature = Temperature_backup
2012-11-22 15:28:36 +05:30
crystallite_Fp = Fp_backup
crystallite_invFp = InvFp_backup
crystallite_Fe = Fe_backup
crystallite_Lp = Lp_backup
crystallite_Tstar_v = Tstar_v_backup
2012-08-31 01:56:28 +05:30
case ( 2_pInt , 3_pInt ) ! explicit Euler methods: nothing to restore (except for F), since we are only doing a stress integration step
case ( 4_pInt , 5_pInt ) ! explicit Runge-Kutta methods: restore to start of subinc, since we are doing a full integration of state and stress
2012-11-07 21:13:29 +05:30
do e = FEsolving_execElem ( 1 ) , FEsolving_execElem ( 2 )
myNgrains = homogenization_Ngrains ( mesh_element ( 3 , e ) )
forall ( i = FEsolving_execIP ( 1 , e ) : FEsolving_execIP ( 2 , e ) , g = 1 : myNgrains )
constitutive_state ( g , i , e ) % p ( 1 : constitutive_sizeState ( g , i , e ) ) = &
constitutive_subState0 ( g , i , e ) % p ( 1 : constitutive_sizeState ( g , i , e ) )
constitutive_dotState ( g , i , e ) % p ( 1 : constitutive_sizeDotState ( g , i , e ) ) = &
constitutive_dotState_backup ( g , i , e ) % p ( 1 : constitutive_sizeDotState ( g , i , e ) )
endforall
enddo
2011-11-04 18:14:50 +05:30
crystallite_Temperature = crystallite_subTemperature0
2012-11-22 15:28:36 +05:30
crystallite_Fp = crystallite_subFp0
crystallite_Fe = crystallite_subFe0
2012-11-22 18:34:19 +05:30
crystallite_Lp = crystallite_subLp0
2012-11-22 15:28:36 +05:30
crystallite_Tstar_v = crystallite_subTstar0_v
2011-11-04 18:14:50 +05:30
end select
! --- PERTURB EITHER FORWARD OR BACKWARD ---
crystallite_subF = F_backup
crystallite_subF ( k , l , : , : , : ) = crystallite_subF ( k , l , : , : , : ) + myPert
crystallite_converged = convergenceFlag_backup
2010-11-03 22:52:48 +05:30
crystallite_todo = crystallite_requested . and . crystallite_converged
where ( crystallite_todo ) crystallite_converged = . false . ! start out non-converged
2011-02-23 13:59:51 +05:30
select case ( numerics_integrator ( numerics_integrationMode ) )
2012-02-13 19:48:07 +05:30
case ( 1_pInt )
2011-02-23 13:59:51 +05:30
call crystallite_integrateStateFPI ( )
2012-02-13 19:48:07 +05:30
case ( 2_pInt )
2011-02-23 13:59:51 +05:30
call crystallite_integrateStateEuler ( )
2012-02-13 19:48:07 +05:30
case ( 3_pInt )
2011-02-23 13:59:51 +05:30
call crystallite_integrateStateAdaptiveEuler ( )
2012-02-13 19:48:07 +05:30
case ( 4_pInt )
2011-02-23 13:59:51 +05:30
call crystallite_integrateStateRK4 ( )
2012-02-13 19:48:07 +05:30
case ( 5_pInt )
2011-02-23 13:59:51 +05:30
call crystallite_integrateStateRKCK45 ( )
2010-11-03 22:52:48 +05:30
end select
2012-11-07 21:13:29 +05:30
do e = FEsolving_execElem ( 1 ) , FEsolving_execElem ( 2 )
myNgrains = homogenization_Ngrains ( mesh_element ( 3 , e ) )
select case ( perturbation )
case ( 1_pInt )
forall ( i = FEsolving_execIP ( 1 , e ) : FEsolving_execIP ( 2 , e ) , g = 1 : myNgrains , &
crystallite_requested ( g , i , e ) . and . crystallite_converged ( g , i , e ) ) & ! converged state warrants stiffness update
dPdF_perturbation1 ( 1 : 3 , 1 : 3 , k , l , g , i , e ) = ( crystallite_P ( 1 : 3 , 1 : 3 , g , i , e ) - P_backup ( 1 : 3 , 1 : 3 , g , i , e ) ) / myPert ! tangent dP_ij/dFg_kl
case ( 2_pInt )
forall ( i = FEsolving_execIP ( 1 , e ) : FEsolving_execIP ( 2 , e ) , g = 1 : myNgrains , &
crystallite_requested ( g , i , e ) . and . crystallite_converged ( g , i , e ) ) & ! converged state warrants stiffness update
dPdF_perturbation2 ( 1 : 3 , 1 : 3 , k , l , g , i , e ) = ( crystallite_P ( 1 : 3 , 1 : 3 , g , i , e ) - P_backup ( 1 : 3 , 1 : 3 , g , i , e ) ) / myPert ! tangent dP_ij/dFg_kl
end select
enddo
2010-11-03 22:52:48 +05:30
2012-11-22 15:28:36 +05:30
enddo ; enddo ! k,l component perturbation loop
2010-11-03 22:52:48 +05:30
endif
enddo ! perturbation direction
2010-10-15 20:27:13 +05:30
2010-11-03 22:52:48 +05:30
! --- STIFFNESS ACCORDING TO PERTURBATION METHOD AND CONVERGENCE ---
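! informal sketch: the three perturbation methods realize the usual
! finite-difference stencils for the tangent (h = pert_Fg in component k,l):
!   1: forward difference   dP/dF ~ (P(F+h) - P(F)) / h
!   2: backward difference  dP/dF ~ (P(F) - P(F-h)) / h
!   3: central difference   dP/dF ~ average of both one-sided estimates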
2010-10-15 20:27:13 +05:30
2013-01-28 21:00:51 +05:30
elementLooping : do e = FEsolving_execElem ( 1 ) , FEsolving_execElem ( 2 )
2012-11-07 21:13:29 +05:30
myNgrains = homogenization_Ngrains ( mesh_element ( 3 , e ) )
select case ( pert_method )
case ( 1_pInt )
forall ( i = FEsolving_execIP ( 1 , e ) : FEsolving_execIP ( 2 , e ) , g = 1 : myNgrains , &
crystallite_requested ( g , i , e ) . and . convergenceFlag_backup ( g , i , e ) ) & ! perturbation mode 1: central solution converged
crystallite_dPdF ( 1 : 3 , 1 : 3 , 1 : 3 , 1 : 3 , g , i , e ) = dPdF_perturbation1 ( 1 : 3 , 1 : 3 , 1 : 3 , 1 : 3 , g , i , e )
case ( 2_pInt )
forall ( i = FEsolving_execIP ( 1 , e ) : FEsolving_execIP ( 2 , e ) , g = 1 : myNgrains , &
crystallite_requested ( g , i , e ) . and . convergenceFlag_backup ( g , i , e ) ) & ! perturbation mode 2: central solution converged
crystallite_dPdF ( 1 : 3 , 1 : 3 , 1 : 3 , 1 : 3 , g , i , e ) = dPdF_perturbation2 ( 1 : 3 , 1 : 3 , 1 : 3 , 1 : 3 , g , i , e )
case ( 3_pInt )
forall ( i = FEsolving_execIP ( 1 , e ) : FEsolving_execIP ( 2 , e ) , g = 1 : myNgrains , &
crystallite_requested ( g , i , e ) . and . convergenceFlag_backup ( g , i , e ) ) & ! perturbation mode 3: central solution converged
crystallite_dPdF ( 1 : 3 , 1 : 3 , 1 : 3 , 1 : 3 , g , i , e ) = 0.5_pReal * ( dPdF_perturbation1 ( 1 : 3 , 1 : 3 , 1 : 3 , 1 : 3 , g , i , e ) &
+ dPdF_perturbation2 ( 1 : 3 , 1 : 3 , 1 : 3 , 1 : 3 , g , i , e ) )
end select
forall ( i = FEsolving_execIP ( 1 , e ) : FEsolving_execIP ( 2 , e ) , g = 1 : myNgrains , &
2013-01-28 21:00:51 +05:30
crystallite_requested ( g , i , e ) . and . . not . convergenceFlag_backup ( g , i , e ) ) & ! for any perturbation mode: if the central solution did not converge...
2012-11-07 21:13:29 +05:30
crystallite_dPdF ( 1 : 3 , 1 : 3 , 1 : 3 , 1 : 3 , g , i , e ) = crystallite_fallbackdPdF ( 1 : 3 , 1 : 3 , 1 : 3 , 1 : 3 , g , i , e ) ! ...use (elastic) fallback
2013-01-28 21:00:51 +05:30
enddo elementLooping
2010-11-03 22:52:48 +05:30
2011-11-04 18:14:50 +05:30
! --- RESTORE ---
2012-11-07 21:13:29 +05:30
do e = FEsolving_execElem ( 1 ) , FEsolving_execElem ( 2 )
myNgrains = homogenization_Ngrains ( mesh_element ( 3 , e ) )
forall ( i = FEsolving_execIP ( 1 , e ) : FEsolving_execIP ( 2 , e ) , g = 1 : myNgrains )
constitutive_state ( g , i , e ) % p ( 1 : constitutive_sizeState ( g , i , e ) ) = &
constitutive_state_backup ( g , i , e ) % p ( 1 : constitutive_sizeState ( g , i , e ) )
constitutive_dotState ( g , i , e ) % p ( 1 : constitutive_sizeDotState ( g , i , e ) ) = &
constitutive_dotState_backup ( g , i , e ) % p ( 1 : constitutive_sizeDotState ( g , i , e ) )
endforall
enddo
2011-11-04 18:14:50 +05:30
crystallite_Temperature = Temperature_backup
2012-11-22 15:28:36 +05:30
crystallite_subF = F_backup
crystallite_Fp = Fp_backup
crystallite_invFp = InvFp_backup
crystallite_Fe = Fe_backup
crystallite_Lp = Lp_backup
crystallite_Tstar_v = Tstar_v_backup
crystallite_P = P_backup
crystallite_converged = convergenceFlag_backup
2012-02-23 01:41:09 +05:30
2012-03-21 20:30:36 +05:30
else ! Calculate Jacobian using analytical expression
2011-11-04 18:14:50 +05:30
2012-02-23 01:41:09 +05:30
! --- CALCULATE ANALYTIC dPdF ---
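! chain-rule sketch behind the loop below: with P = Fe * S * Fp^-T and
! Fp^-1 frozen at its current value,
!   dP/dF = dFe/dF * S * Fp^-T + Fe * (dS/dFe : dFe/dF) * Fp^-T
! where dFe/dF follows from Fe = F * Fp^-1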
2012-03-21 20:30:36 +05:30
!$OMP PARALLEL DO PRIVATE(dFedF,dSdF,dSdFe,myNgrains)
2012-03-14 19:26:50 +05:30
do e = FEsolving_execElem ( 1 ) , FEsolving_execElem ( 2 ) ! iterate over elements to be processed
myNgrains = homogenization_Ngrains ( mesh_element ( 3 , e ) )
do i = FEsolving_execIP ( 1 , e ) , FEsolving_execIP ( 2 , e ) ! iterate over IPs of this element to be processed
do g = 1_pInt , myNgrains
dFedF = 0.0_pReal
do p = 1_pInt , 3_pInt ; do o = 1_pInt , 3_pInt
2012-03-21 20:30:36 +05:30
dFedF ( p , o , o , 1 : 3 ) = crystallite_invFp ( 1 : 3 , p , g , i , e ) ! dFe^T_ij/dF_kl = delta_jk * (Fp current^-1)_li
enddo ; enddo
call constitutive_TandItsTangent ( junk , dSdFe , crystallite_subFe0 ( 1 : 3 , 1 : 3 , g , i , e ) , g , i , e ) ! call constitutive law to calculate 2nd Piola-Kirchhoff stress and its derivative
2012-03-14 19:26:50 +05:30
dSdF = math_mul3333xx3333 ( dSdFe , dFedF ) ! dS/dF = dS/dFe * dFe/dF
do p = 1_pInt , 3_pInt ; do o = 1_pInt , 3_pInt
crystallite_dPdF ( 1 : 3 , 1 : 3 , o , p , g , i , e ) = math_mul33x33 ( math_mul33x33 ( dFedF ( 1 : 3 , 1 : 3 , o , p ) , &
math_Mandel6to33 ( crystallite_Tstar_v ( 1 : 6 , g , i , e ) ) ) , math_transpose33 ( &
crystallite_invFp ( 1 : 3 , 1 : 3 , g , i , e ) ) ) & ! dP/dF = dFe/dF * S * Fp^-T...
+ math_mul33x33 ( crystallite_subFe0 ( 1 : 3 , 1 : 3 , g , i , e ) , &
math_mul33x33 ( dSdF ( 1 : 3 , 1 : 3 , o , p ) , math_transpose33 ( crystallite_invFp ( 1 : 3 , 1 : 3 , g , i , e ) ) ) ) ! + Fe * dS/dF * Fp^-T
enddo ; enddo
enddo ; enddo ; enddo
!$OMP END PARALLEL DO
endif
if ( rate_sensitivity ) then
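! informal sketch: the rate-sensitive correction subtracts dP/dFdot terms
! from the tangent, with FDot_inv approximating the componentwise reciprocal
! deformation rate dt / (F - F0); see the term-by-term comments in the loop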
2012-03-21 20:30:36 +05:30
!$OMP PARALLEL DO PRIVATE(dFedFdot,dSdFdot,dSdFe,Fpinv_rate,FDot_inv,counter,dFp_invdFdot,myNgrains)
2012-02-23 01:41:09 +05:30
do e = FEsolving_execElem ( 1 ) , FEsolving_execElem ( 2 ) ! iterate over elements to be processed
myNgrains = homogenization_Ngrains ( mesh_element ( 3 , e ) )
do i = FEsolving_execIP ( 1 , e ) , FEsolving_execIP ( 2 , e ) ! iterate over IPs of this element to be processed
2012-03-14 19:26:50 +05:30
do g = 1_pInt , myNgrains
Fpinv_rate = math_mul33x33 ( crystallite_invFp ( 1 : 3 , 1 : 3 , g , i , e ) , crystallite_Lp ( 1 : 3 , 1 : 3 , g , i , e ) ) ! dFp^-1 = dFp^-1/dt * dt ... dFp may overshoot dF by a small amount
FDot_inv = crystallite_subF ( 1 : 3 , 1 : 3 , g , i , e ) - crystallite_F0 ( 1 : 3 , 1 : 3 , g , i , e )
2012-02-27 23:10:28 +05:30
counter = 0.0_pReal
2012-03-14 19:26:50 +05:30
do p = 1_pInt , 3_pInt ; do o = 1_pInt , 3_pInt
2012-11-22 15:28:36 +05:30
if ( abs ( FDot_inv ( o , p ) ) < relevantStrain ) then
2012-03-14 19:26:50 +05:30
FDot_inv ( o , p ) = 0.0_pReal
2012-02-23 01:41:09 +05:30
else
2012-03-14 19:26:50 +05:30
counter = counter + 1.0_pReal
FDot_inv ( o , p ) = crystallite_dt ( g , i , e ) / FDot_inv ( o , p )
2012-02-23 01:41:09 +05:30
endif
enddo ; enddo
2012-11-22 15:28:36 +05:30
if ( counter > 0.0_pReal ) FDot_inv = FDot_inv / counter
2012-03-14 19:26:50 +05:30
do p = 1_pInt , 3_pInt ; do o = 1_pInt , 3_pInt
2012-03-21 20:30:36 +05:30
dFp_invdFdot ( o , p , 1 : 3 , 1 : 3 ) = Fpinv_rate ( o , p ) * FDot_inv
2012-03-02 01:12:43 +05:30
enddo ; enddo
2012-03-14 19:26:50 +05:30
do p = 1_pInt , 3_pInt ; do o = 1_pInt , 3_pInt
dFedFdot ( 1 : 3 , 1 : 3 , o , p ) = math_transpose33 ( math_mul33x33 ( crystallite_subF ( 1 : 3 , 1 : 3 , g , i , e ) , &
dFp_invdFdot ( 1 : 3 , 1 : 3 , o , p ) ) )
2012-02-23 01:41:09 +05:30
enddo ; enddo
2012-03-21 20:30:36 +05:30
call constitutive_TandItsTangent ( junk , dSdFe , crystallite_subFe0 ( 1 : 3 , 1 : 3 , g , i , e ) , g , i , e ) ! call constitutive law to calculate 2nd Piola-Kirchhoff stress and its derivative
2012-03-14 19:26:50 +05:30
dSdFdot = math_mul3333xx3333 ( dSdFe , dFedFdot )
do p = 1_pInt , 3_pInt ; do o = 1_pInt , 3_pInt
crystallite_dPdF ( 1 : 3 , 1 : 3 , o , p , g , i , e ) = crystallite_dPdF ( 1 : 3 , 1 : 3 , o , p , g , i , e ) - &
( math_mul33x33 ( math_mul33x33 ( dFedFdot ( 1 : 3 , 1 : 3 , o , p ) , &
math_Mandel6to33 ( crystallite_Tstar_v ( 1 : 6 , g , i , e ) ) ) , math_transpose33 ( &
2012-03-21 20:30:36 +05:30
crystallite_invFp ( 1 : 3 , 1 : 3 , g , i , e ) ) ) + & ! dP/dFdot = dFe/dFdot * S * Fp^-T...
2012-03-14 19:26:50 +05:30
math_mul33x33 ( math_mul33x33 ( crystallite_subFe0 ( 1 : 3 , 1 : 3 , g , i , e ) , &
2012-03-21 20:30:36 +05:30
math_Mandel6to33 ( crystallite_Tstar_v ( 1 : 6 , g , i , e ) ) ) , math_transpose33 ( dFp_invdFdot ( 1 : 3 , 1 : 3 , o , p ) ) ) & ! + Fe * S * dFp^-T/dFdot...
2012-03-14 19:26:50 +05:30
+ math_mul33x33 ( crystallite_subFe0 ( 1 : 3 , 1 : 3 , g , i , e ) , &
2012-03-21 20:30:36 +05:30
math_mul33x33 ( dSdFdot ( 1 : 3 , 1 : 3 , o , p ) , math_transpose33 ( crystallite_invFp ( 1 : 3 , 1 : 3 , g , i , e ) ) ) ) ) ! + Fe * dS/dFdot * Fp^-T
2012-02-23 01:41:09 +05:30
enddo ; enddo
2012-03-14 19:26:50 +05:30
enddo ; enddo ; enddo
2012-02-24 19:37:46 +05:30
!$OMP END PARALLEL DO
2012-02-23 01:41:09 +05:30
endif
2011-03-17 16:16:17 +05:30
endif ! jacobian calculation
2009-05-07 21:57:36 +05:30
2012-03-09 01:55:28 +05:30
end subroutine crystallite_stressAndItsTangent
2009-05-07 21:57:36 +05:30
2009-07-22 21:37:19 +05:30
!********************************************************************
2010-10-01 17:48:49 +05:30
! integrate stress, state and Temperature with
! 4th order explicit Runge-Kutta method
2009-07-22 21:37:19 +05:30
!********************************************************************
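! classical RK4 sketch for a state y with rate y' = f(y):
!   k1 = f(y_n)               k2 = f(y_n + dt/2 * k1)
!   k3 = f(y_n + dt/2 * k2)   k4 = f(y_n + dt * k3)
!   y_{n+1} = y_n + dt/6 * (k1 + 2*k2 + 2*k3 + k4)
! timeStepFraction = [1/2, 1/2, 1, 1] and weight = [1, 2, 2, 1] below encode
! these stage fractions and weights; the division by 6 happens in the
! n == 4 branch of the stage loop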
2011-02-23 13:59:51 +05:30
subroutine crystallite_integrateStateRK4 ( gg , ii , ee )
2009-07-22 21:37:19 +05:30
2010-10-01 17:48:49 +05:30
!*** variables and functions from other modules ***!
use prec , only : pInt , &
pReal
2011-02-23 13:59:51 +05:30
use numerics , only : numerics_integrationMode
2012-07-05 15:24:50 +05:30
use debug , only : debug_level , &
2012-03-09 01:55:28 +05:30
debug_crystallite , &
debug_levelBasic , &
debug_levelExtensive , &
debug_levelSelective , &
2010-10-01 17:48:49 +05:30
debug_e , &
debug_i , &
debug_g , &
debug_StateLoopDistribution
use FEsolving , only : FEsolving_execElem , &
FEsolving_execIP
use mesh , only : mesh_element , &
mesh_NcpElems , &
mesh_maxNips
use material , only : homogenization_Ngrains , &
homogenization_maxNgrains
use constitutive , only : constitutive_sizeDotState , &
constitutive_state , &
constitutive_subState0 , &
constitutive_dotState , &
constitutive_RK4dotState , &
constitutive_collectDotState , &
2012-05-17 20:55:21 +05:30
constitutive_deltaState , &
constitutive_collectDeltaState , &
2010-10-01 17:48:49 +05:30
constitutive_dotTemperature , &
constitutive_microstructure
2009-12-15 13:50:31 +05:30
2010-10-01 17:48:49 +05:30
implicit none
2009-07-22 21:37:19 +05:30
2012-08-31 01:56:28 +05:30
real ( pReal ) , dimension ( 4 ) , parameter :: timeStepFraction = [ 0.5_pReal , 0.5_pReal , 1.0_pReal , 1.0_pReal ] ! fraction of the original time step used in each Runge-Kutta stage
real ( pReal ) , dimension ( 4 ) , parameter :: weight = [ 1.0_pReal , 2.0_pReal , 2.0_pReal , 1.0_pReal ] ! weight of each slope in the Runge-Kutta integration
2009-07-22 21:37:19 +05:30
2010-10-01 17:48:49 +05:30
!*** input variables ***!
2012-08-31 01:56:28 +05:30
integer ( pInt ) , optional , intent ( in ) :: ee , & ! element index
ii , & ! integration point index
gg ! grain index
2009-07-22 21:37:19 +05:30
2010-10-01 17:48:49 +05:30
!*** output variables ***!
2009-07-22 21:37:19 +05:30
2010-10-01 17:48:49 +05:30
!*** local variables ***!
2012-08-31 01:56:28 +05:30
integer ( pInt ) e , & ! element index in element loop
i , & ! integration point index in ip loop
g , & ! grain index in grain loop
2010-10-01 17:48:49 +05:30
n , &
mySizeDotState
2012-08-31 01:56:28 +05:30
integer ( pInt ) , dimension ( 2 ) :: eIter ! bounds for element iteration
integer ( pInt ) , dimension ( 2 , mesh_NcpElems ) :: iIter , & ! bounds for ip iteration
gIter ! bounds for grain iteration
2010-10-01 17:48:49 +05:30
real ( pReal ) , dimension ( homogenization_maxNgrains , mesh_maxNips , mesh_NcpElems ) :: &
2012-08-31 01:56:28 +05:30
RK4dotTemperature ! evolution of Temperature of each grain for Runge Kutta integration
logical singleRun ! flag indicating computation for single (g,i,e) triple
2010-10-01 17:48:49 +05:30
if ( present ( ee ) . and . present ( ii ) . and . present ( gg ) ) then
eIter = ee
2011-03-17 16:16:17 +05:30
iIter ( 1 : 2 , ee ) = ii
gIter ( 1 : 2 , ee ) = gg
2010-10-01 17:48:49 +05:30
singleRun = . true .
else
eIter = FEsolving_execElem ( 1 : 2 )
do e = eIter ( 1 ) , eIter ( 2 )
2011-03-17 16:16:17 +05:30
iIter ( 1 : 2 , e ) = FEsolving_execIP ( 1 : 2 , e )
2012-02-13 19:48:07 +05:30
gIter ( 1 : 2 , e ) = [ 1_pInt , homogenization_Ngrains ( mesh_element ( 3 , e ) ) ]
2010-10-01 17:48:49 +05:30
enddo
singleRun = . false .
endif
! --- FIRST RUNGE KUTTA STEP ---
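! stage 1: collect k1 = f(y_n) for every grain still scheduled; grains whose
! rates come back as NaN are unscheduled in the second loop below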
2010-10-26 19:34:33 +05:30
RK4dotTemperature = 0.0_pReal ! initialize Runge-Kutta dotTemperature
2012-12-16 16:24:13 +05:30
!$OMP PARALLEL PRIVATE(mySizeDotState)
2010-11-03 22:52:48 +05:30
!$OMP DO
2011-03-17 16:16:17 +05:30
do e = eIter ( 1 ) , eIter ( 2 ) ; do i = iIter ( 1 , e ) , iIter ( 2 , e ) ; do g = gIter ( 1 , e ) , gIter ( 2 , e ) ! iterate over elements, ips and grains
2010-10-26 19:34:33 +05:30
constitutive_RK4dotState ( g , i , e ) % p = 0.0_pReal ! initialize Runge-Kutta dotState
2010-10-01 17:48:49 +05:30
if ( crystallite_todo ( g , i , e ) ) then
2011-03-17 16:16:17 +05:30
call constitutive_collectDotState ( crystallite_Tstar_v ( 1 : 6 , g , i , e ) , crystallite_Fe , crystallite_Fp , &
2012-11-30 00:14:00 +05:30
crystallite_Temperature ( g , i , e ) , crystallite_subdt ( g , i , e ) , crystallite_subFrac , g , i , e )
2011-03-17 16:16:17 +05:30
crystallite_dotTemperature ( g , i , e ) = constitutive_dotTemperature ( crystallite_Tstar_v ( 1 : 6 , g , i , e ) , &
2010-10-01 17:48:49 +05:30
crystallite_Temperature ( g , i , e ) , g , i , e )
2011-03-17 16:16:17 +05:30
endif
enddo ; enddo ; enddo
!$OMP ENDDO
!$OMP DO
do e = eIter ( 1 ) , eIter ( 2 ) ; do i = iIter ( 1 , e ) , iIter ( 2 , e ) ; do g = gIter ( 1 , e ) , gIter ( 2 , e ) ! iterate over elements, ips and grains
2012-12-16 16:24:13 +05:30
!$OMP FLUSH(crystallite_todo)
2011-03-17 16:16:17 +05:30
if ( crystallite_todo ( g , i , e ) ) then
if ( any ( constitutive_dotState ( g , i , e ) % p / = constitutive_dotState ( g , i , e ) % p ) & ! NaN occurred in dotState
. or . crystallite_dotTemperature ( g , i , e ) / = crystallite_dotTemperature ( g , i , e ) ) then ! NaN occurred in dotTemperature
2012-03-12 19:39:37 +05:30
if ( . not . crystallite_localPlasticity ( g , i , e ) ) then ! if broken non-local...
2010-11-03 22:52:48 +05:30
!$OMP CRITICAL (checkTodo)
2012-03-12 19:39:37 +05:30
crystallite_todo = crystallite_todo . and . crystallite_localPlasticity ! ...all non-locals skipped
2010-11-03 22:52:48 +05:30
!$OMP END CRITICAL (checkTodo)
2010-10-01 17:48:49 +05:30
else ! if broken local...
crystallite_todo ( g , i , e ) = . false . ! ... skip this one next time
endif
endif
endif
2010-11-03 22:52:48 +05:30
enddo ; enddo ; enddo
!$OMP ENDDO
2012-12-16 16:24:13 +05:30
!$OMP END PARALLEL
2010-10-01 17:48:49 +05:30
! --- SECOND TO FOURTH RUNGE KUTTA STEP PLUS FINAL INTEGRATION ---
2012-02-21 22:01:37 +05:30
do n = 1_pInt , 4_pInt
2010-10-01 17:48:49 +05:30
! --- state update ---
2012-12-16 16:24:13 +05:30
!$OMP PARALLEL PRIVATE(mySizeDotState)
2010-11-03 22:52:48 +05:30
!$OMP DO
2011-03-17 16:16:17 +05:30
do e = eIter ( 1 ) , eIter ( 2 ) ; do i = iIter ( 1 , e ) , iIter ( 2 , e ) ; do g = gIter ( 1 , e ) , gIter ( 2 , e ) ! iterate over elements, ips and grains
2010-10-01 17:48:49 +05:30
if ( crystallite_todo ( g , i , e ) ) then
mySizeDotState = constitutive_sizeDotState ( g , i , e )
2011-03-17 16:16:17 +05:30
if ( n < 4 ) then
constitutive_RK4dotState ( g , i , e ) % p = constitutive_RK4dotState ( g , i , e ) % p + weight ( n ) * constitutive_dotState ( g , i , e ) % p
RK4dotTemperature ( g , i , e ) = RK4dotTemperature ( g , i , e ) + weight ( n ) * crystallite_dotTemperature ( g , i , e )
elseif ( n == 4 ) then
2012-01-11 22:26:35 +05:30
constitutive_dotState ( g , i , e ) % p = ( constitutive_RK4dotState ( g , i , e ) % p + &
weight ( n ) * constitutive_dotState ( g , i , e ) % p ) / 6.0_pReal ! use weighted RKdotState for final integration
2011-03-17 16:16:17 +05:30
crystallite_dotTemperature ( g , i , e ) = ( RK4dotTemperature ( g , i , e ) + weight ( n ) * crystallite_dotTemperature ( g , i , e ) ) / 6.0_pReal
2010-10-26 19:34:33 +05:30
endif
2011-03-17 16:16:17 +05:30
endif
enddo ; enddo ; enddo
!$OMP ENDDO
!$OMP DO
do e = eIter ( 1 ) , eIter ( 2 ) ; do i = iIter ( 1 , e ) , iIter ( 2 , e ) ; do g = gIter ( 1 , e ) , gIter ( 2 , e ) ! iterate over elements, ips and grains
if ( crystallite_todo ( g , i , e ) ) then
mySizeDotState = constitutive_sizeDotState ( g , i , e )
2010-10-01 17:48:49 +05:30
constitutive_state ( g , i , e ) % p ( 1 : mySizeDotState ) = constitutive_subState0 ( g , i , e ) % p ( 1 : mySizeDotState ) &
+ constitutive_dotState ( g , i , e ) % p ( 1 : mySizeDotState ) * crystallite_subdt ( g , i , e ) * timeStepFraction ( n )
crystallite_Temperature ( g , i , e ) = crystallite_subTemperature0 ( g , i , e ) &
+ crystallite_dotTemperature ( g , i , e ) * crystallite_subdt ( g , i , e ) * timeStepFraction ( n )
2012-06-06 20:41:30 +05:30
if ( n == 4 ) then ! final integration step
#ifndef _OPENMP
2012-07-05 15:24:50 +05:30
if ( iand ( debug_level ( debug_crystallite ) , debug_levelExtensive ) / = 0_pInt &
2012-06-06 20:41:30 +05:30
. and . ( ( e == debug_e . and . i == debug_i . and . g == debug_g ) &
2012-07-05 15:24:50 +05:30
. or . . not . iand ( debug_level ( debug_crystallite ) , debug_levelSelective ) / = 0_pInt ) ) then
2012-06-06 20:41:30 +05:30
mySizeDotState = constitutive_sizeDotState ( g , i , e )
write ( 6 , '(a,i8,1x,i2,1x,i3)' ) '<< CRYST >> updateState at el ip g ' , e , i , g
write ( 6 , * )
2012-10-18 15:23:26 +05:30
write ( 6 , '(a,/,(12x,12(e12.6,1x)))' ) '<< CRYST >> dotState' , constitutive_dotState ( g , i , e ) % p ( 1 : mySizeDotState )
2012-06-06 20:41:30 +05:30
write ( 6 , * )
2012-10-18 15:23:26 +05:30
write ( 6 , '(a,/,(12x,12(e12.6,1x)))' ) '<< CRYST >> new state' , constitutive_state ( g , i , e ) % p ( 1 : mySizeDotState )
2012-06-06 20:41:30 +05:30
write ( 6 , * )
endif
#endif
endif
2010-10-01 17:48:49 +05:30
endif
enddo ; enddo ; enddo
2010-11-03 22:52:48 +05:30
!$OMP ENDDO
2010-10-01 17:48:49 +05:30
2012-11-22 18:34:19 +05:30
! --- state jump ---
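! apply any discontinuous state change requested by the constitutive law
! (crystallite_stateJump); a failed jump unschedules the grain and, for
! nonlocal plasticity, all nonlocal grains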
!$OMP DO
do e = eIter ( 1 ) , eIter ( 2 ) ; do i = iIter ( 1 , e ) , iIter ( 2 , e ) ; do g = gIter ( 1 , e ) , gIter ( 2 , e ) ! iterate over elements, ips and grains
2012-12-16 16:24:13 +05:30
!$OMP FLUSH(crystallite_todo)
2012-11-22 18:34:19 +05:30
if ( crystallite_todo ( g , i , e ) ) then
crystallite_todo ( g , i , e ) = crystallite_stateJump ( g , i , e )
2012-12-11 20:35:08 +05:30
!$OMP FLUSH(crystallite_todo)
2012-11-22 18:34:19 +05:30
if ( . not . crystallite_todo ( g , i , e ) . and . . not . crystallite_localPlasticity ( g , i , e ) ) then ! if broken non-local...
!$OMP CRITICAL (checkTodo)
crystallite_todo = crystallite_todo . and . crystallite_localPlasticity ! ...all non-locals skipped
!$OMP END CRITICAL (checkTodo)
endif
endif
enddo ; enddo ; enddo
!$OMP ENDDO
2010-10-01 17:48:49 +05:30
! --- update dependent states ---
2010-11-03 22:52:48 +05:30
!$OMP DO
2011-03-17 16:16:17 +05:30
do e = eIter ( 1 ) , eIter ( 2 ) ; do i = iIter ( 1 , e ) , iIter ( 2 , e ) ; do g = gIter ( 1 , e ) , gIter ( 2 , e ) ! iterate over elements, ips and grains
2010-10-01 17:48:49 +05:30
if ( crystallite_todo ( g , i , e ) ) then
2012-01-17 15:56:57 +05:30
call constitutive_microstructure ( crystallite_Temperature ( g , i , e ) , crystallite_Fe ( 1 : 3 , 1 : 3 , g , i , e ) , &
crystallite_Fp ( 1 : 3 , 1 : 3 , g , i , e ) , g , i , e ) ! update dependent state variables to be consistent with basic states
2010-10-01 17:48:49 +05:30
endif
2010-10-26 19:34:33 +05:30
enddo ; enddo ; enddo
2010-11-03 22:52:48 +05:30
!$OMP ENDDO
2010-10-01 17:48:49 +05:30
! --- stress integration ---
2010-11-03 22:52:48 +05:30
!$OMP DO
2011-03-17 16:16:17 +05:30
do e = eIter ( 1 ) , eIter ( 2 ) ; do i = iIter ( 1 , e ) , iIter ( 2 , e ) ; do g = gIter ( 1 , e ) , gIter ( 2 , e ) ! iterate over elements, ips and grains
2012-12-16 16:24:13 +05:30
!$OMP FLUSH(crystallite_todo)
2010-10-01 17:48:49 +05:30
if ( crystallite_todo ( g , i , e ) ) then
2012-11-09 01:30:29 +05:30
crystallite_todo ( g , i , e ) = crystallite_integrateStress ( g , i , e , timeStepFraction ( n ) ) ! fraction of original time step
2012-12-11 20:35:08 +05:30
!$OMP FLUSH(crystallite_todo)
2012-11-09 01:30:29 +05:30
if ( . not . crystallite_todo ( g , i , e ) . and . . not . crystallite_localPlasticity ( g , i , e ) ) then ! if broken non-local...
!$OMP CRITICAL (checkTodo)
crystallite_todo = crystallite_todo . and . crystallite_localPlasticity ! ...all non-locals skipped
!$OMP END CRITICAL (checkTodo)
2010-10-01 17:48:49 +05:30
endif
endif
enddo ; enddo ; enddo
2010-11-03 22:52:48 +05:30
!$OMP ENDDO
2010-10-01 17:48:49 +05:30
! --- dot state and RK dot state---
if ( n < 4 ) then
2010-11-03 22:52:48 +05:30
!$OMP DO
2011-03-17 16:16:17 +05:30
do e = eIter ( 1 ) , eIter ( 2 ) ; do i = iIter ( 1 , e ) , iIter ( 2 , e ) ; do g = gIter ( 1 , e ) , gIter ( 2 , e ) ! iterate over elements, ips and grains
2010-10-01 17:48:49 +05:30
if ( crystallite_todo ( g , i , e ) ) then
2011-03-17 16:16:17 +05:30
call constitutive_collectDotState ( crystallite_Tstar_v ( 1 : 6 , g , i , e ) , crystallite_Fe , crystallite_Fp , &
2011-02-25 15:23:20 +05:30
crystallite_Temperature ( g , i , e ) , timeStepFraction ( n ) * crystallite_subdt ( g , i , e ) , & ! fraction of original timestep
2012-11-30 00:14:00 +05:30
crystallite_subFrac , g , i , e )
2011-03-17 16:16:17 +05:30
crystallite_dotTemperature ( g , i , e ) = constitutive_dotTemperature ( crystallite_Tstar_v ( 1 : 6 , g , i , e ) , &
2010-10-01 17:48:49 +05:30
crystallite_Temperature ( g , i , e ) , g , i , e )
2011-03-17 16:16:17 +05:30
endif
enddo ; enddo ; enddo
!$OMP ENDDO
!$OMP DO
do e = eIter ( 1 ) , eIter ( 2 ) ; do i = iIter ( 1 , e ) , iIter ( 2 , e ) ; do g = gIter ( 1 , e ) , gIter ( 2 , e ) ! iterate over elements, ips and grains
2012-12-16 16:24:13 +05:30
!$OMP FLUSH(crystallite_todo)
2011-03-17 16:16:17 +05:30
if ( crystallite_todo ( g , i , e ) ) then
if ( any ( constitutive_dotState ( g , i , e ) % p / = constitutive_dotState ( g , i , e ) % p ) & ! NaN occurred in dotState
. or . crystallite_dotTemperature ( g , i , e ) / = crystallite_dotTemperature ( g , i , e ) ) then ! NaN occurred in dotTemperature
2012-03-12 19:39:37 +05:30
if ( . not . crystallite_localPlasticity ( g , i , e ) ) then ! if broken non-local...
2010-11-03 22:52:48 +05:30
!$OMP CRITICAL (checkTodo)
2012-03-12 19:39:37 +05:30
crystallite_todo = crystallite_todo . and . crystallite_localPlasticity ! ...all non-locals skipped
2010-11-03 22:52:48 +05:30
!$OMP END CRITICAL (checkTodo)
2010-10-01 17:48:49 +05:30
else ! if broken local...
crystallite_todo ( g , i , e ) = . false . ! ... skip this one next time
endif
endif
endif
enddo ; enddo ; enddo
2010-11-03 22:52:48 +05:30
!$OMP ENDDO
2010-10-01 17:48:49 +05:30
endif
2012-12-16 16:24:13 +05:30
!$OMP END PARALLEL
2010-10-01 17:48:49 +05:30
enddo
2012-06-06 20:41:30 +05:30
! --- SET CONVERGENCE FLAG ---
2012-12-16 16:24:13 +05:30
do e = eIter ( 1 ) , eIter ( 2 ) ; do i = iIter ( 1 , e ) , iIter ( 2 , e ) ; do g = gIter ( 1 , e ) , gIter ( 2 , e ) ! iterate over elements, ips and grains
if ( crystallite_todo ( g , i , e ) ) then
crystallite_converged ( g , i , e ) = . true . ! if still "to do" then converged per definitionem
if ( iand ( debug_level ( debug_crystallite ) , debug_levelBasic ) / = 0_pInt ) then
!$OMP CRITICAL (distributionState)
debug_StateLoopDistribution ( 4 , numerics_integrationMode ) = &
debug_StateLoopDistribution ( 4 , numerics_integrationMode ) + 1_pInt
!$OMP END CRITICAL (distributionState)
2012-06-06 20:41:30 +05:30
endif
2012-12-16 16:24:13 +05:30
endif
enddo ; enddo ; enddo
2010-11-03 22:52:48 +05:30
2010-10-01 17:48:49 +05:30
2012-06-06 20:41:30 +05:30
! --- CHECK NONLOCAL CONVERGENCE ---
2010-10-01 17:48:49 +05:30
2011-02-09 14:09:07 +05:30
if ( . not . singleRun ) then ! if not integrating just a single (g,i,e) triple
2012-03-12 19:39:37 +05:30
if ( any ( . not . crystallite_converged . and . . not . crystallite_localPlasticity ) ) then ! any non-local not yet converged (or broken)...
crystallite_converged = crystallite_converged . and . crystallite_localPlasticity ! ...restart all non-local as not converged
2010-10-15 20:27:13 +05:30
endif
2010-10-01 17:48:49 +05:30
endif
2012-03-09 01:55:28 +05:30
end subroutine crystallite_integrateStateRK4
2010-10-01 17:48:49 +05:30
!********************************************************************
! integrate stress, state and Temperature with
! 5th order Runge-Kutta Cash-Karp method with adaptive step size
! (use 5th order solution to advance = "local extrapolation")
!********************************************************************
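! embedded-pair sketch: the six stages yield a 5th order solution (weights b)
! and an embedded 4th order solution; db = b(5th) - b(4th) gives the error
! estimate err = sum_i db_i * k_i * dt, which is compared against the
! relative tolerances rTol_crystalliteState and rTol_crystalliteTemperature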
2011-02-23 13:59:51 +05:30
subroutine crystallite_integrateStateRKCK45 ( gg , ii , ee )
2010-10-01 17:48:49 +05:30
!*** variables and functions from other modules ***!
2012-07-05 15:24:50 +05:30
use debug , only : debug_level , &
2012-03-09 01:55:28 +05:30
debug_crystallite , &
debug_levelBasic , &
debug_levelExtensive , &
debug_levelSelective , &
2010-10-01 17:48:49 +05:30
debug_e , &
debug_i , &
debug_g , &
debug_StateLoopDistribution
use numerics , only : rTol_crystalliteState , &
rTol_crystalliteTemperature , &
2011-02-23 13:59:51 +05:30
numerics_integrationMode
2010-10-01 17:48:49 +05:30
use FEsolving , only : FEsolving_execElem , &
2012-02-21 22:01:37 +05:30
FEsolving_execIP
2010-10-01 17:48:49 +05:30
use mesh , only : mesh_element , &
mesh_NcpElems , &
mesh_maxNips
use material , only : homogenization_Ngrains , &
homogenization_maxNgrains
use constitutive , only : constitutive_sizeDotState , &
constitutive_maxSizeDotState , &
constitutive_state , &
2010-10-26 18:46:37 +05:30
constitutive_aTolState , &
2010-10-01 17:48:49 +05:30
constitutive_subState0 , &
constitutive_dotState , &
constitutive_RKCK45dotState , &
constitutive_collectDotState , &
2012-05-17 20:55:21 +05:30
constitutive_deltaState , &
constitutive_collectDeltaState , &
2010-10-01 17:48:49 +05:30
constitutive_dotTemperature , &
constitutive_microstructure
implicit none
!*** input variables ***!
integer ( pInt ) , optional , intent ( in ) :: ee , & ! element index
ii , & ! integration point index
gg ! grain index
!*** local variables ***!
integer ( pInt ) e , & ! element index in element loop
i , & ! integration point index in ip loop
g , & ! grain index in grain loop
n , & ! stage index in integration stage loop
2010-11-03 22:52:48 +05:30
mySizeDotState , & ! size of dot State
2010-10-01 17:48:49 +05:30
s ! state index
integer ( pInt ) , dimension ( 2 ) :: eIter ! bounds for element iteration
integer ( pInt ) , dimension ( 2 , mesh_NcpElems ) :: iIter , & ! bounds for ip iteration
gIter ! bounds for grain iteration
real ( pReal ) , dimension ( 6 , homogenization_maxNgrains , mesh_maxNips , mesh_NcpElems ) :: &
RKCK45dotTemperature ! evolution of Temperature of each grain for Runge Kutta Cash Karp integration
real ( pReal ) , dimension ( 5 , 5 ) :: a ! coefficients in Butcher tableau (used for preliminary integration in stages 2 to 6)
real ( pReal ) , dimension ( 6 ) :: b , db ! coefficients in Butcher tableau (used for final integration and error estimate)
real ( pReal ) , dimension ( 5 ) :: c ! coefficients in Butcher tableau (fractions of original time step in stages 2 to 6)
real ( pReal ) , dimension ( constitutive_maxSizeDotState , homogenization_maxNgrains , mesh_maxNips , mesh_NcpElems ) :: &
stateResiduum , & ! residuum from evolution in microstructure
relStateResiduum ! relative residuum from evolution in microstructure
real ( pReal ) , dimension ( homogenization_maxNgrains , mesh_maxNips , mesh_NcpElems ) :: &
temperatureResiduum , & ! residuum from evolution in temperature
relTemperatureResiduum ! relative residuum from evolution in temperature
logical singleRun ! flag indicating computation for single (g,i,e) triple
! --- FILL BUTCHER TABLEAU ---
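! Cash-Karp coefficients: a(:,n) weight the already computed stages when
! forming the trial state for stage n+1, c(n) are the stage time fractions,
! b holds the 5th order weights, and db the difference to the embedded
! 4th order weights used for the error estimate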
a = 0.0_pReal
b = 0.0_pReal
db = 0.0_pReal
c = 0.0_pReal
a ( 1 , 1 ) = 0.2_pReal
a ( 1 , 2 ) = 0.075_pReal
a ( 2 , 2 ) = 0.225_pReal
a ( 1 , 3 ) = 0.3_pReal
a ( 2 , 3 ) = - 0.9_pReal
a ( 3 , 3 ) = 1.2_pReal
a ( 1 , 4 ) = - 11.0_pReal / 54.0_pReal
a ( 2 , 4 ) = 2.5_pReal
a ( 3 , 4 ) = - 70.0_pReal / 27.0_pReal
a ( 4 , 4 ) = 35.0_pReal / 27.0_pReal
a ( 1 , 5 ) = 1631.0_pReal / 55296.0_pReal
a ( 2 , 5 ) = 175.0_pReal / 512.0_pReal
a ( 3 , 5 ) = 575.0_pReal / 13824.0_pReal
a ( 4 , 5 ) = 44275.0_pReal / 110592.0_pReal
a ( 5 , 5 ) = 253.0_pReal / 4096.0_pReal
b ( 1 ) = 37.0_pReal / 378.0_pReal
b ( 3 ) = 250.0_pReal / 621.0_pReal
b ( 4 ) = 125.0_pReal / 594.0_pReal
b ( 6 ) = 512.0_pReal / 1771.0_pReal
db ( 1 ) = b ( 1 ) - 2825.0_pReal / 27648.0_pReal
db ( 3 ) = b ( 3 ) - 18575.0_pReal / 48384.0_pReal
db ( 4 ) = b ( 4 ) - 13525.0_pReal / 55296.0_pReal
db ( 5 ) = - 277.0_pReal / 14336.0_pReal
db ( 6 ) = b ( 6 ) - 0.25_pReal
c ( 1 ) = 0.2_pReal
c ( 2 ) = 0.3_pReal
c ( 3 ) = 0.6_pReal
c ( 4 ) = 1.0_pReal
c ( 5 ) = 0.875_pReal
! --- LOOP ITERATOR FOR ELEMENT, GRAIN, IP ---
if ( present ( ee ) . and . present ( ii ) . and . present ( gg ) ) then
eIter = ee
2011-03-17 16:16:17 +05:30
iIter ( 1 : 2 , ee ) = ii
gIter ( 1 : 2 , ee ) = gg
2010-10-01 17:48:49 +05:30
singleRun = . true .
else
eIter = FEsolving_execElem ( 1 : 2 )
do e = eIter ( 1 ) , eIter ( 2 )
2011-03-17 16:16:17 +05:30
iIter ( 1 : 2 , e ) = FEsolving_execIP ( 1 : 2 , e )
2012-02-21 22:01:37 +05:30
gIter ( 1 : 2 , e ) = [ 1_pInt , homogenization_Ngrains ( mesh_element ( 3 , e ) ) ]
2010-10-01 17:48:49 +05:30
enddo
singleRun = . false .
endif
! --- FIRST RUNGE KUTTA STEP ---
2012-12-16 16:24:13 +05:30
2012-07-05 15:24:50 +05:30
if ( iand ( debug_level ( debug_crystallite ) , debug_levelExtensive ) / = 0_pInt ) then
2012-12-16 16:24:13 +05:30
!$OMP CRITICAL (write2out)
2012-02-01 00:48:55 +05:30
write ( 6 , '(a,1x,i1)' ) '<< CRYST >> RUNGE KUTTA STEP' , 1
2012-12-16 16:24:13 +05:30
!$OMP END CRITICAL (write2out)
2010-11-11 18:44:53 +05:30
endif
2012-12-16 16:24:13 +05:30
!$OMP PARALLEL PRIVATE(mySizeDotState)
2010-11-03 22:52:48 +05:30
!$OMP DO
2011-03-17 16:16:17 +05:30
do e = eIter ( 1 ) , eIter ( 2 ) ; do i = iIter ( 1 , e ) , iIter ( 2 , e ) ; do g = gIter ( 1 , e ) , gIter ( 2 , e ) ! iterate over elements, ips and grains
2010-10-01 17:48:49 +05:30
if ( crystallite_todo ( g , i , e ) ) then
2011-03-17 16:16:17 +05:30
call constitutive_collectDotState ( crystallite_Tstar_v ( 1 : 6 , g , i , e ) , crystallite_Fe , crystallite_Fp , &
2012-11-30 00:14:00 +05:30
crystallite_Temperature ( g , i , e ) , crystallite_subdt ( g , i , e ) , crystallite_subFrac , g , i , e )
2011-03-17 16:16:17 +05:30
crystallite_dotTemperature ( g , i , e ) = constitutive_dotTemperature ( crystallite_Tstar_v ( 1 : 6 , g , i , e ) , &
2010-10-01 17:48:49 +05:30
crystallite_Temperature ( g , i , e ) , g , i , e )
2011-03-17 16:16:17 +05:30
  endif
enddo; enddo; enddo
!$OMP ENDDO
!$OMP DO
do e = eIter(1),eIter(2); do i = iIter(1,e),iIter(2,e); do g = gIter(1,e),gIter(2,e)            ! iterate over elements, ips and grains
  !$OMP FLUSH(crystallite_todo)
  if (crystallite_todo(g,i,e)) then
    if ( any(constitutive_dotState(g,i,e)%p /= constitutive_dotState(g,i,e)%p) &                ! NaN occurred in dotState
        .or. crystallite_dotTemperature(g,i,e) /= crystallite_dotTemperature(g,i,e)) then       ! NaN occurred in dotTemperature
      if (.not. crystallite_localPlasticity(g,i,e)) then                                        ! if broken non-local...
        !$OMP CRITICAL (checkTodo)
        crystallite_todo = crystallite_todo .and. crystallite_localPlasticity                   ! ...all non-locals skipped
        !$OMP END CRITICAL (checkTodo)
      else                                                                                      ! if broken local...
        crystallite_todo(g,i,e) = .false.                                                       ! ...skip this one next time
      endif
    endif
  endif
enddo; enddo; enddo
!$OMP ENDDO
!$OMP END PARALLEL

! --- SECOND TO SIXTH RUNGE KUTTA STEP ---
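
! Reading aid (an interpretation of the loop below, not additional program logic):
! assuming a(:,:) holds the lower-triangular Butcher tableau of the Cash-Karp scheme
! and c(:) its nodes, pass n evaluates the rates for stage n+1 at
!
!   state = subState0 + dt * ( a(1,n)*dotState_1 + ... + a(n,n)*dotState_n )
!   time  = t0 + c(n)*dt
!
! The weighted sums are spelled out branch by branch because an OpenMP reduction
! clause cannot be applied to a pointer component of a user-defined type.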
do n = 1_pInt,5_pInt

! --- state update ---

!$OMP PARALLEL PRIVATE(mySizeDotState)
!$OMP DO
do e = eIter(1),eIter(2); do i = iIter(1,e),iIter(2,e); do g = gIter(1,e),gIter(2,e)            ! iterate over elements, ips and grains
  if (crystallite_todo(g,i,e)) then
    mySizeDotState = constitutive_sizeDotState(g,i,e)
    constitutive_RKCK45dotState(n,g,i,e)%p = constitutive_dotState(g,i,e)%p                     ! store Runge-Kutta dotState
    RKCK45dotTemperature(n,g,i,e) = crystallite_dotTemperature(g,i,e)                           ! store Runge-Kutta dotTemperature
  endif
enddo; enddo; enddo
!$OMP ENDDO

!$OMP DO
do e = eIter(1),eIter(2); do i = iIter(1,e),iIter(2,e); do g = gIter(1,e),gIter(2,e)            ! iterate over elements, ips and grains
  if (crystallite_todo(g,i,e)) then
    if (n == 1_pInt) then                                                                       ! NEED TO DO THE ADDITION IN THIS LENGTHY WAY BECAUSE OF PARALLELIZATION (CAN'T USE A REDUCTION CLAUSE ON A POINTER OR USER DEFINED TYPE)
      constitutive_dotState(g,i,e)%p = a(1,1) * constitutive_RKCK45dotState(1,g,i,e)%p
      crystallite_dotTemperature(g,i,e) = a(1,1) * RKCK45dotTemperature(1,g,i,e)
    elseif (n == 2_pInt) then
      constitutive_dotState(g,i,e)%p = a(1,2) * constitutive_RKCK45dotState(1,g,i,e)%p &
                                     + a(2,2) * constitutive_RKCK45dotState(2,g,i,e)%p
      crystallite_dotTemperature(g,i,e) = a(1,2) * RKCK45dotTemperature(1,g,i,e) &
                                        + a(2,2) * RKCK45dotTemperature(2,g,i,e)
    elseif (n == 3_pInt) then
      constitutive_dotState(g,i,e)%p = a(1,3) * constitutive_RKCK45dotState(1,g,i,e)%p &
                                     + a(2,3) * constitutive_RKCK45dotState(2,g,i,e)%p &
                                     + a(3,3) * constitutive_RKCK45dotState(3,g,i,e)%p
      crystallite_dotTemperature(g,i,e) = a(1,3) * RKCK45dotTemperature(1,g,i,e) &
                                        + a(2,3) * RKCK45dotTemperature(2,g,i,e) &
                                        + a(3,3) * RKCK45dotTemperature(3,g,i,e)
    elseif (n == 4_pInt) then
      constitutive_dotState(g,i,e)%p = a(1,4) * constitutive_RKCK45dotState(1,g,i,e)%p &
                                     + a(2,4) * constitutive_RKCK45dotState(2,g,i,e)%p &
                                     + a(3,4) * constitutive_RKCK45dotState(3,g,i,e)%p &
                                     + a(4,4) * constitutive_RKCK45dotState(4,g,i,e)%p
      crystallite_dotTemperature(g,i,e) = a(1,4) * RKCK45dotTemperature(1,g,i,e) &
                                        + a(2,4) * RKCK45dotTemperature(2,g,i,e) &
                                        + a(3,4) * RKCK45dotTemperature(3,g,i,e) &
                                        + a(4,4) * RKCK45dotTemperature(4,g,i,e)
    elseif (n == 5_pInt) then
      constitutive_dotState(g,i,e)%p = a(1,5) * constitutive_RKCK45dotState(1,g,i,e)%p &
                                     + a(2,5) * constitutive_RKCK45dotState(2,g,i,e)%p &
                                     + a(3,5) * constitutive_RKCK45dotState(3,g,i,e)%p &
                                     + a(4,5) * constitutive_RKCK45dotState(4,g,i,e)%p &
                                     + a(5,5) * constitutive_RKCK45dotState(5,g,i,e)%p
      crystallite_dotTemperature(g,i,e) = a(1,5) * RKCK45dotTemperature(1,g,i,e) &
                                        + a(2,5) * RKCK45dotTemperature(2,g,i,e) &
                                        + a(3,5) * RKCK45dotTemperature(3,g,i,e) &
                                        + a(4,5) * RKCK45dotTemperature(4,g,i,e) &
                                        + a(5,5) * RKCK45dotTemperature(5,g,i,e)
    endif
  endif
enddo; enddo; enddo
!$OMP ENDDO
!$OMP DO
do e = eIter(1),eIter(2); do i = iIter(1,e),iIter(2,e); do g = gIter(1,e),gIter(2,e)            ! iterate over elements, ips and grains
  if (crystallite_todo(g,i,e)) then
    mySizeDotState = constitutive_sizeDotState(g,i,e)
    constitutive_state(g,i,e)%p(1:mySizeDotState) = constitutive_subState0(g,i,e)%p(1:mySizeDotState) &
                                                  + constitutive_dotState(g,i,e)%p(1:mySizeDotState) * crystallite_subdt(g,i,e)
    crystallite_Temperature(g,i,e) = crystallite_subTemperature0(g,i,e) &
                                   + crystallite_dotTemperature(g,i,e) * crystallite_subdt(g,i,e)
  endif
enddo; enddo; enddo
!$OMP ENDDO

! --- state jump ---
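! Reading aid: crystallite_stateJump presumably applies an instantaneous (non-rate)
! change of the constitutive state; a .false. return marks the jump as failed, so the
! point is excluded from further integration like any other broken point.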
!$OMP DO
do e = eIter(1),eIter(2); do i = iIter(1,e),iIter(2,e); do g = gIter(1,e),gIter(2,e)            ! iterate over elements, ips and grains
  !$OMP FLUSH(crystallite_todo)
  if (crystallite_todo(g,i,e)) then
    crystallite_todo(g,i,e) = crystallite_stateJump(g,i,e)
    !$OMP FLUSH(crystallite_todo)
    if (.not. crystallite_todo(g,i,e) .and. .not. crystallite_localPlasticity(g,i,e)) then      ! if broken non-local...
      !$OMP CRITICAL (checkTodo)
      crystallite_todo = crystallite_todo .and. crystallite_localPlasticity                     ! ...all non-locals skipped
      !$OMP END CRITICAL (checkTodo)
    endif
  endif
enddo; enddo; enddo
!$OMP ENDDO
! --- update dependent states ---
!$OMP DO
do e = eIter(1),eIter(2); do i = iIter(1,e),iIter(2,e); do g = gIter(1,e),gIter(2,e)            ! iterate over elements, ips and grains
  if (crystallite_todo(g,i,e)) then
    call constitutive_microstructure(crystallite_Temperature(g,i,e), crystallite_Fe(1:3,1:3,g,i,e), &
                                     crystallite_Fp(1:3,1:3,g,i,e), g, i, e)                    ! update dependent state variables to be consistent with basic states
  endif
enddo; enddo; enddo
!$OMP ENDDO

! --- stress integration ---
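! Reading aid: crystallite_integrateStress receives the stage fraction c(n), so the
! stress is evaluated at the intermediate Runge-Kutta time rather than at the end of
! the substep.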
!$OMP DO
do e = eIter(1),eIter(2); do i = iIter(1,e),iIter(2,e); do g = gIter(1,e),gIter(2,e)            ! iterate over elements, ips and grains
  !$OMP FLUSH(crystallite_todo)
  if (crystallite_todo(g,i,e)) then
    crystallite_todo(g,i,e) = crystallite_integrateStress(g,i,e,c(n))                           ! fraction of original time step
    !$OMP FLUSH(crystallite_todo)
    if (.not. crystallite_todo(g,i,e) .and. .not. crystallite_localPlasticity(g,i,e)) then      ! if broken non-local...
      !$OMP CRITICAL (checkTodo)
      crystallite_todo = crystallite_todo .and. crystallite_localPlasticity                     ! ...all non-locals skipped
      !$OMP END CRITICAL (checkTodo)
    endif
  endif
enddo; enddo; enddo
!$OMP ENDDO

! --- dot state and RK dot state ---

#ifndef _OPENMP
if (iand(debug_level(debug_crystallite), debug_levelExtensive) /= 0_pInt) then
  write(6,'(a,1x,i1)') '<< CRYST >> Runge-Kutta step', n + 1_pInt
endif
#endif
!$OMP DO
do e = eIter(1),eIter(2); do i = iIter(1,e),iIter(2,e); do g = gIter(1,e),gIter(2,e)            ! iterate over elements, ips and grains
  if (crystallite_todo(g,i,e)) then
    call constitutive_collectDotState(crystallite_Tstar_v(1:6,g,i,e), crystallite_Fe, crystallite_Fp, &
                                      crystallite_Temperature(g,i,e), c(n)*crystallite_subdt(g,i,e), &  ! fraction of original timestep
                                      crystallite_subFrac, g, i, e)
    crystallite_dotTemperature(g,i,e) = constitutive_dotTemperature(crystallite_Tstar_v(1:6,g,i,e), &
                                                                    crystallite_Temperature(g,i,e), g, i, e)
  endif
enddo; enddo; enddo
!$OMP ENDDO

!$OMP DO
do e = eIter(1),eIter(2); do i = iIter(1,e),iIter(2,e); do g = gIter(1,e),gIter(2,e)            ! iterate over elements, ips and grains
  !$OMP FLUSH(crystallite_todo)
  if (crystallite_todo(g,i,e)) then
    if ( any(constitutive_dotState(g,i,e)%p /= constitutive_dotState(g,i,e)%p) &                ! NaN occurred in dotState
        .or. crystallite_dotTemperature(g,i,e) /= crystallite_dotTemperature(g,i,e)) then       ! NaN occurred in dotTemperature
      if (.not. crystallite_localPlasticity(g,i,e)) then                                        ! if broken non-local...
        !$OMP CRITICAL (checkTodo)
        crystallite_todo = crystallite_todo .and. crystallite_localPlasticity                   ! ...all non-locals skipped
        !$OMP END CRITICAL (checkTodo)
      else                                                                                      ! if broken local...
        crystallite_todo(g,i,e) = .false.                                                       ! ...skip this one next time
      endif
    endif
  endif
enddo; enddo; enddo
!$OMP ENDDO
!$OMP END PARALLEL

enddo

! --- STATE UPDATE WITH ERROR ESTIMATE FOR STATE AND TEMPERATURE ---
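
! Reading aid: assuming b(:) holds the 5th-order weights of the Cash-Karp pair and
! db(:) the difference to the embedded 4th-order weights, the residuum computed below,
!
!   residuum = dt * ( db(1)*dotState_1 + ... + db(6)*dotState_6 ),
!
! is the gap between the two embedded solutions and thus a direct local error estimate.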
relStateResiduum = 0.0_pReal
relTemperatureResiduum = 0.0_pReal
!$OMP PARALLEL PRIVATE(mySizeDotState)
!$OMP DO
do e = eIter(1),eIter(2); do i = iIter(1,e),iIter(2,e); do g = gIter(1,e),gIter(2,e)            ! iterate over elements, ips and grains
  if (crystallite_todo(g,i,e)) then
    mySizeDotState = constitutive_sizeDotState(g,i,e)
    constitutive_RKCK45dotState(6,g,i,e)%p = constitutive_dotState(g,i,e)%p                     ! store Runge-Kutta dotState
    RKCK45dotTemperature(6,g,i,e) = crystallite_dotTemperature(g,i,e)                           ! store Runge-Kutta dotTemperature
  endif
enddo; enddo; enddo
!$OMP ENDDO
!$OMP DO
do e = eIter(1),eIter(2); do i = iIter(1,e),iIter(2,e); do g = gIter(1,e),gIter(2,e)            ! iterate over elements, ips and grains
  if (crystallite_todo(g,i,e)) then
    mySizeDotState = constitutive_sizeDotState(g,i,e)

    ! --- absolute residuum in state and temperature ---
    ! NEED TO DO THE ADDITION IN THIS LENGTHY WAY BECAUSE OF PARALLELIZATION
    ! CAN'T USE A REDUCTION CLAUSE ON A POINTER OR USER DEFINED TYPE

    stateResiduum(1:mySizeDotState,g,i,e) = &
        ( db(1) * constitutive_RKCK45dotState(1,g,i,e)%p(1:mySizeDotState) &
        + db(2) * constitutive_RKCK45dotState(2,g,i,e)%p(1:mySizeDotState) &
        + db(3) * constitutive_RKCK45dotState(3,g,i,e)%p(1:mySizeDotState) &
        + db(4) * constitutive_RKCK45dotState(4,g,i,e)%p(1:mySizeDotState) &
        + db(5) * constitutive_RKCK45dotState(5,g,i,e)%p(1:mySizeDotState) &
        + db(6) * constitutive_RKCK45dotState(6,g,i,e)%p(1:mySizeDotState) ) &
        * crystallite_subdt(g,i,e)
    temperatureResiduum(g,i,e) = ( db(1) * RKCK45dotTemperature(1,g,i,e) &
                                 + db(2) * RKCK45dotTemperature(2,g,i,e) &
                                 + db(3) * RKCK45dotTemperature(3,g,i,e) &
                                 + db(4) * RKCK45dotTemperature(4,g,i,e) &
                                 + db(5) * RKCK45dotTemperature(5,g,i,e) &
                                 + db(6) * RKCK45dotTemperature(6,g,i,e) ) &
                                 * crystallite_subdt(g,i,e)

    ! --- dot state and dot temperature ---

    constitutive_dotState(g,i,e)%p = b(1) * constitutive_RKCK45dotState(1,g,i,e)%p &
                                   + b(2) * constitutive_RKCK45dotState(2,g,i,e)%p &
                                   + b(3) * constitutive_RKCK45dotState(3,g,i,e)%p &
                                   + b(4) * constitutive_RKCK45dotState(4,g,i,e)%p &
                                   + b(5) * constitutive_RKCK45dotState(5,g,i,e)%p &
                                   + b(6) * constitutive_RKCK45dotState(6,g,i,e)%p
    crystallite_dotTemperature(g,i,e) = b(1) * RKCK45dotTemperature(1,g,i,e) &
                                      + b(2) * RKCK45dotTemperature(2,g,i,e) &
                                      + b(3) * RKCK45dotTemperature(3,g,i,e) &
                                      + b(4) * RKCK45dotTemperature(4,g,i,e) &
                                      + b(5) * RKCK45dotTemperature(5,g,i,e) &
                                      + b(6) * RKCK45dotTemperature(6,g,i,e)
  endif
enddo; enddo; enddo
!$OMP ENDDO

! --- state and temperature update ---

!$OMP DO
do e = eIter(1),eIter(2); do i = iIter(1,e),iIter(2,e); do g = gIter(1,e),gIter(2,e)            ! iterate over elements, ips and grains
  if (crystallite_todo(g,i,e)) then
    mySizeDotState = constitutive_sizeDotState(g,i,e)
    constitutive_state(g,i,e)%p(1:mySizeDotState) = constitutive_subState0(g,i,e)%p(1:mySizeDotState) &
                                                  + constitutive_dotState(g,i,e)%p(1:mySizeDotState) * crystallite_subdt(g,i,e)
    crystallite_Temperature(g,i,e) = crystallite_subTemperature0(g,i,e) &
                                   + crystallite_dotTemperature(g,i,e) * crystallite_subdt(g,i,e)
  endif
enddo; enddo; enddo
!$OMP ENDDO

! --- relative residua and state convergence ---

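! Reading aid: a state component counts as converged when either its relative residuum
! is below rTol_crystalliteState or its absolute residuum is below the component-wise
! aTolState; the temperature residuum additionally has to meet rTol_crystalliteTemperature.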
!$OMP DO
do e = eIter(1),eIter(2); do i = iIter(1,e),iIter(2,e); do g = gIter(1,e),gIter(2,e)            ! iterate over elements, ips and grains
  if (crystallite_todo(g,i,e)) then
    mySizeDotState = constitutive_sizeDotState(g,i,e)
    forall (s = 1_pInt:mySizeDotState, abs(constitutive_state(g,i,e)%p(s)) > 0.0_pReal) &
      relStateResiduum(s,g,i,e) = stateResiduum(s,g,i,e) / constitutive_state(g,i,e)%p(s)
    if (crystallite_Temperature(g,i,e) > 0.0_pReal) then
      relTemperatureResiduum(g,i,e) = temperatureResiduum(g,i,e) / crystallite_Temperature(g,i,e)
    endif
!$OMP FLUSH(relStateResiduum,relTemperatureResiduum)
    crystallite_todo(g,i,e) = ( all( abs(relStateResiduum(:,g,i,e)) < rTol_crystalliteState &
                                     .or. abs(stateResiduum(1:mySizeDotState,g,i,e)) < constitutive_aTolState(g,i,e)%p(1:mySizeDotState) ) &
                                .and. abs(relTemperatureResiduum(g,i,e)) < rTol_crystalliteTemperature )
#ifndef _OPENMP
    if (iand(debug_level(debug_crystallite), debug_levelExtensive) /= 0_pInt &
        .and. ((e == debug_e .and. i == debug_i .and. g == debug_g) &
               .or. .not. iand(debug_level(debug_crystallite), debug_levelSelective) /= 0_pInt)) then
      write(6,'(a,i8,1x,i3,1x,i3)') '<< CRYST >> updateState at el ip g ', e, i, g
      write(6,*)
      write(6,'(a,/,(12x,12(f12.1,1x)))') '<< CRYST >> absolute residuum tolerance', &
        stateResiduum(1:mySizeDotState,g,i,e) / constitutive_aTolState(g,i,e)%p(1:mySizeDotState)
      write(6,*)
      write(6,'(a,/,(12x,12(f12.1,1x)))') '<< CRYST >> relative residuum tolerance', &
        relStateResiduum(1:mySizeDotState,g,i,e) / rTol_crystalliteState
      write(6,*)
      write(6,'(a,/,(12x,12(e12.5,1x)))') '<< CRYST >> dotState', constitutive_dotState(g,i,e)%p(1:mySizeDotState)
      write(6,*)
      write(6,'(a,/,(12x,12(e12.5,1x)))') '<< CRYST >> new state', constitutive_state(g,i,e)%p(1:mySizeDotState)
      write(6,*)
    endif
#endif
  endif
enddo; enddo; enddo
!$OMP ENDDO

! --- STATE JUMP ---

!$OMP DO
do e = eIter(1),eIter(2); do i = iIter(1,e),iIter(2,e); do g = gIter(1,e),gIter(2,e)            ! iterate over elements, ips and grains
  !$OMP FLUSH(crystallite_todo)
  if (crystallite_todo(g,i,e)) then
    crystallite_todo(g,i,e) = crystallite_stateJump(g,i,e)
    !$OMP FLUSH(crystallite_todo)
    if (.not. crystallite_todo(g,i,e) .and. .not. crystallite_localPlasticity(g,i,e)) then      ! if broken non-local...
      !$OMP CRITICAL (checkTodo)
      crystallite_todo = crystallite_todo .and. crystallite_localPlasticity                     ! ...all non-locals skipped
      !$OMP END CRITICAL (checkTodo)
    endif
  endif
enddo; enddo; enddo
!$OMP ENDDO

! --- UPDATE DEPENDENT STATES IF RESIDUUM BELOW TOLERANCE ---

!$OMP DO
do e = eIter(1),eIter(2); do i = iIter(1,e),iIter(2,e); do g = gIter(1,e),gIter(2,e)            ! iterate over elements, ips and grains
  if (crystallite_todo(g,i,e)) then
    call constitutive_microstructure(crystallite_Temperature(g,i,e), crystallite_Fe(1:3,1:3,g,i,e), &
                                     crystallite_Fp(1:3,1:3,g,i,e), g, i, e)                    ! update dependent state variables to be consistent with basic states
  endif
enddo; enddo; enddo
!$OMP ENDDO

! --- FINAL STRESS INTEGRATION STEP IF RESIDUUM BELOW TOLERANCE ---

!$OMP DO
do e = eIter(1),eIter(2); do i = iIter(1,e),iIter(2,e); do g = gIter(1,e),gIter(2,e)            ! iterate over elements, ips and grains
  !$OMP FLUSH(crystallite_todo)
  if (crystallite_todo(g,i,e)) then
    crystallite_todo(g,i,e) = crystallite_integrateStress(g,i,e)
    !$OMP FLUSH(crystallite_todo)
    if (.not. crystallite_todo(g,i,e) .and. .not. crystallite_localPlasticity(g,i,e)) then      ! if broken non-local...
      !$OMP CRITICAL (checkTodo)
      crystallite_todo = crystallite_todo .and. crystallite_localPlasticity                     ! ...all non-locals skipped
      !$OMP END CRITICAL (checkTodo)
    endif
  endif
enddo; enddo; enddo
!$OMP ENDDO
! --- SET CONVERGENCE FLAG ---
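! Reading aid: every point still flagged "todo" here has survived all six stages, the
! embedded error test, the state jump and the final stress integration, so it is
! converged by construction.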
!$OMP DO
do e = eIter(1),eIter(2); do i = iIter(1,e),iIter(2,e); do g = gIter(1,e),gIter(2,e)            ! iterate over elements, ips and grains
  if (crystallite_todo(g,i,e)) then
    crystallite_converged(g,i,e) = .true.                                                       ! if still "to do" then converged per definitionem
    if (iand(debug_level(debug_crystallite), debug_levelBasic) /= 0_pInt) then
      !$OMP CRITICAL (distributionState)
      debug_StateLoopDistribution(6,numerics_integrationMode) = &
        debug_StateLoopDistribution(6,numerics_integrationMode) + 1_pInt
      !$OMP END CRITICAL (distributionState)
    endif
  endif
enddo; enddo; enddo
!$OMP ENDDO
!$OMP END PARALLEL

! --- nonlocal convergence check ---
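! Reading aid: non-local models couple neighbouring material points, so one unconverged
! (or broken) non-local point below forces all non-local points back to "not converged",
! and they are re-integrated together.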
if (iand(debug_level(debug_crystallite), debug_levelExtensive) /= 0_pInt) then
  !$OMP CRITICAL (write2out)
  write(6,'(a,i8,a,i2)') '<< CRYST >> ', count(crystallite_converged(:,:,:)), ' grains converged'
  write(6,*)
  !$OMP END CRITICAL (write2out)
endif

if (.not. singleRun) then                                                                       ! if not requesting integration of just a single IP
  if (any(.not. crystallite_converged .and. .not. crystallite_localPlasticity)) then            ! any non-local not yet converged (or broken) ...
    crystallite_converged = crystallite_converged .and. crystallite_localPlasticity             ! ... restart all non-locals as not converged
  endif
endif

end subroutine crystallite_integrateStateRKCK45


!********************************************************************
! integrate stress, state and Temperature with
! 1st order Euler method with adaptive step size
!********************************************************************
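! Outline (a sketch of the scheme, not additional logic): an explicit Euler step with
! rate k1 acts as predictor; the rate k2 re-evaluated at the predicted state gives the
! Heun (trapezoidal) corrector, and the difference
!
!   residuum = dt/2 * (k2 - k1)
!
! between predictor and corrector serves as the local error measure for convergence.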
subroutine crystallite_integrateStateAdaptiveEuler(gg,ii,ee)

!*** variables and functions from other modules ***!
use debug, only: debug_level, &
                 debug_crystallite, &
                 debug_levelBasic, &
                 debug_levelExtensive, &
                 debug_levelSelective, &
                 debug_e, &
                 debug_i, &
                 debug_g, &
                 debug_StateLoopDistribution
use numerics, only: rTol_crystalliteState, &
                    rTol_crystalliteTemperature, &
                    numerics_integrationMode
use FEsolving, only: FEsolving_execElem, &
                     FEsolving_execIP
use mesh, only: mesh_element, &
                mesh_NcpElems, &
                mesh_maxNips
use material, only: homogenization_Ngrains, &
                    homogenization_maxNgrains
use constitutive, only: constitutive_sizeDotState, &
                        constitutive_maxSizeDotState, &
                        constitutive_state, &
                        constitutive_aTolState, &
                        constitutive_subState0, &
                        constitutive_dotState, &
                        constitutive_collectDotState, &
                        constitutive_dotTemperature, &
                        constitutive_microstructure
implicit none

!*** input variables ***!
integer(pInt), optional, intent(in) :: ee, &                  ! element index
                                       ii, &                  ! integration point index
                                       gg                     ! grain index

!*** local variables ***!
integer(pInt) e, &                                            ! element index in element loop
              i, &                                            ! integration point index in ip loop
              g, &                                            ! grain index in grain loop
              mySizeDotState, &                               ! size of dot state
              s                                               ! state index
integer(pInt), dimension(2) :: eIter                          ! bounds for element iteration
integer(pInt), dimension(2,mesh_NcpElems) :: iIter, &         ! bounds for ip iteration
                                             gIter            ! bounds for grain iteration
real(pReal), dimension(constitutive_maxSizeDotState,homogenization_maxNgrains,mesh_maxNips,mesh_NcpElems) :: &
              stateResiduum, &                                ! residuum from evolution in microstructure
              relStateResiduum                                ! relative residuum from evolution in microstructure
real(pReal), dimension(homogenization_maxNgrains,mesh_maxNips,mesh_NcpElems) :: &
              temperatureResiduum, &                          ! residuum from evolution in temperature
              relTemperatureResiduum                          ! relative residuum from evolution in temperature
logical singleRun                                             ! flag indicating computation for single (g,i,e) triple

! --- LOOP ITERATOR FOR ELEMENT, GRAIN, IP ---
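! Reading aid: if the optional triple (gg,ii,ee) is present, the iterators collapse to
! that single grain; otherwise they span the element/IP ranges requested by the FE
! solver, and singleRun records which of the two cases applies.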
if (present(ee) .and. present(ii) .and. present(gg)) then
  eIter = ee
  iIter(1:2,ee) = ii
  gIter(1:2,ee) = gg
  singleRun = .true.
else
  eIter = FEsolving_execElem(1:2)
  do e = eIter(1),eIter(2)
    iIter(1:2,e) = FEsolving_execIP(1:2,e)
    gIter(1:2,e) = [1_pInt, homogenization_Ngrains(mesh_element(3,e))]
  enddo
  singleRun = .false.
endif

stateResiduum = 0.0_pReal

!$OMP PARALLEL PRIVATE(mySizeDotState)

if (numerics_integrationMode == 1_pInt) then

! --- DOT STATE AND TEMPERATURE (EULER INTEGRATION) ---
!$OMP DO
do e = eIter(1),eIter(2); do i = iIter(1,e),iIter(2,e); do g = gIter(1,e),gIter(2,e)            ! iterate over elements, ips and grains
  if (crystallite_todo(g,i,e)) then
    call constitutive_collectDotState(crystallite_Tstar_v(1:6,g,i,e), crystallite_Fe, crystallite_Fp, &
                                      crystallite_Temperature(g,i,e), crystallite_subdt(g,i,e), &
                                      crystallite_subFrac, g, i, e)
    crystallite_dotTemperature(g,i,e) = constitutive_dotTemperature(crystallite_Tstar_v(1:6,g,i,e), &
                                                                    crystallite_Temperature(g,i,e), g, i, e)
  endif
enddo; enddo; enddo
!$OMP ENDDO
!$OMP DO
do e = eIter(1),eIter(2); do i = iIter(1,e),iIter(2,e); do g = gIter(1,e),gIter(2,e)            ! iterate over elements, ips and grains
  !$OMP FLUSH(crystallite_todo)
  if (crystallite_todo(g,i,e)) then
    if ( any(constitutive_dotState(g,i,e)%p /= constitutive_dotState(g,i,e)%p) &                ! NaN occurred in dotState
        .or. crystallite_dotTemperature(g,i,e) /= crystallite_dotTemperature(g,i,e)) then       ! NaN occurred in dotTemperature
      if (.not. crystallite_localPlasticity(g,i,e)) then                                        ! if broken non-local...
        !$OMP CRITICAL (checkTodo)
        crystallite_todo = crystallite_todo .and. crystallite_localPlasticity                   ! ...all non-locals skipped
        !$OMP END CRITICAL (checkTodo)
      else                                                                                      ! if broken local...
        crystallite_todo(g,i,e) = .false.                                                       ! ...skip this one next time
      endif
    endif
  endif
enddo; enddo; enddo
!$OMP ENDDO

! --- STATE UPDATE (EULER INTEGRATION) ---

!$OMP DO
do e = eIter(1),eIter(2); do i = iIter(1,e),iIter(2,e); do g = gIter(1,e),gIter(2,e)            ! iterate over elements, ips and grains
  if (crystallite_todo(g,i,e)) then
    mySizeDotState = constitutive_sizeDotState(g,i,e)
    stateResiduum(1:mySizeDotState,g,i,e) = - 0.5_pReal * constitutive_dotState(g,i,e)%p * crystallite_subdt(g,i,e)  ! contribution to absolute residuum in state and temperature
    temperatureResiduum(g,i,e) = - 0.5_pReal * crystallite_dotTemperature(g,i,e) * crystallite_subdt(g,i,e)
    constitutive_state(g,i,e)%p(1:mySizeDotState) = constitutive_state(g,i,e)%p(1:mySizeDotState) &
                                                  + constitutive_dotState(g,i,e)%p(1:mySizeDotState) * crystallite_subdt(g,i,e)
    crystallite_Temperature(g,i,e) = crystallite_subTemperature0(g,i,e) &
                                   + crystallite_dotTemperature(g,i,e) * crystallite_subdt(g,i,e)
  endif
enddo; enddo; enddo
!$OMP ENDDO

! --- STATE JUMP ---

!$OMP DO
do e = eIter(1),eIter(2); do i = iIter(1,e),iIter(2,e); do g = gIter(1,e),gIter(2,e)            ! iterate over elements, ips and grains
  !$OMP FLUSH(crystallite_todo)
  if (crystallite_todo(g,i,e)) then
    crystallite_todo(g,i,e) = crystallite_stateJump(g,i,e)
    !$OMP FLUSH(crystallite_todo)
    if (.not. crystallite_todo(g,i,e) .and. .not. crystallite_localPlasticity(g,i,e)) then      ! if broken non-local...
      !$OMP CRITICAL (checkTodo)
      crystallite_todo = crystallite_todo .and. crystallite_localPlasticity                     ! ...all non-locals skipped
      !$OMP END CRITICAL (checkTodo)
    endif
  endif
enddo; enddo; enddo
!$OMP ENDDO

! --- UPDATE DEPENDENT STATES (EULER INTEGRATION) ---

!$OMP DO
do e = eIter(1),eIter(2); do i = iIter(1,e),iIter(2,e); do g = gIter(1,e),gIter(2,e)            ! iterate over elements, ips and grains
  if (crystallite_todo(g,i,e)) then
    call constitutive_microstructure(crystallite_Temperature(g,i,e), crystallite_Fe(1:3,1:3,g,i,e), &
                                     crystallite_Fp(1:3,1:3,g,i,e), g, i, e)                    ! update dependent state variables to be consistent with basic states
  endif
enddo; enddo; enddo
!$OMP ENDDO
endif

! --- STRESS INTEGRATION (EULER INTEGRATION) ---

!$OMP DO
do e = eIter(1),eIter(2); do i = iIter(1,e),iIter(2,e); do g = gIter(1,e),gIter(2,e)            ! iterate over elements, ips and grains
  !$OMP FLUSH(crystallite_todo)
  if (crystallite_todo(g,i,e)) then
    crystallite_todo(g,i,e) = crystallite_integrateStress(g,i,e)
    !$OMP FLUSH(crystallite_todo)
    if (.not. crystallite_todo(g,i,e) .and. .not. crystallite_localPlasticity(g,i,e)) then      ! if broken non-local...
      !$OMP CRITICAL (checkTodo)
      crystallite_todo = crystallite_todo .and. crystallite_localPlasticity                     ! ...all non-locals skipped
      !$OMP END CRITICAL (checkTodo)
    endif
  endif
enddo; enddo; enddo
!$OMP ENDDO

if (numerics_integrationMode == 1_pInt) then

! --- DOT STATE AND TEMPERATURE (HEUN METHOD) ---
!$OMP DO
do e = eIter(1),eIter(2); do i = iIter(1,e),iIter(2,e); do g = gIter(1,e),gIter(2,e)            ! iterate over elements, ips and grains
  if (crystallite_todo(g,i,e)) then
    call constitutive_collectDotState(crystallite_Tstar_v(1:6,g,i,e), crystallite_Fe, crystallite_Fp, &
                                      crystallite_Temperature(g,i,e), crystallite_subdt(g,i,e), &
                                      crystallite_subFrac, g, i, e)
    crystallite_dotTemperature(g,i,e) = constitutive_dotTemperature(crystallite_Tstar_v(1:6,g,i,e), &
                                                                    crystallite_Temperature(g,i,e), g, i, e)
  endif
enddo; enddo; enddo
!$OMP ENDDO
!$OMP DO
do e = eIter(1),eIter(2); do i = iIter(1,e),iIter(2,e); do g = gIter(1,e),gIter(2,e)            ! iterate over elements, ips and grains
  !$OMP FLUSH(crystallite_todo)
  if (crystallite_todo(g,i,e)) then
    if ( any(constitutive_dotState(g,i,e)%p /= constitutive_dotState(g,i,e)%p) &                ! NaN occurred in dotState
        .or. crystallite_dotTemperature(g,i,e) /= crystallite_dotTemperature(g,i,e)) then       ! NaN occurred in dotTemperature
      if (.not. crystallite_localPlasticity(g,i,e)) then                                        ! if broken non-local...
        !$OMP CRITICAL (checkTodo)
        crystallite_todo = crystallite_todo .and. crystallite_localPlasticity                   ! ...all non-locals skipped
        !$OMP END CRITICAL (checkTodo)
      else                                                                                      ! if broken local...
        crystallite_todo(g,i,e) = .false.                                                       ! ...skip this one next time
      endif
    endif
  endif
enddo; enddo; enddo
!$OMP ENDDO

! --- ERROR ESTIMATE FOR STATE AND TEMPERATURE (HEUN METHOD) ---

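! Reading aid: stateResiduum already holds -dt/2 * k1 from the Euler predictor above,
! so adding +dt/2 * k2 below leaves dt/2 * (k2 - k1), the Euler/Heun error estimate
! sketched in the outline at the top of this subroutine.
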
!$OMP SINGLE
relStateResiduum = 0.0_pReal
relTemperatureResiduum = 0.0_pReal
!$OMP END SINGLE

!$OMP DO
do e = eIter(1),eIter(2); do i = iIter(1,e),iIter(2,e); do g = gIter(1,e),gIter(2,e)            ! iterate over elements, ips and grains
  if (crystallite_todo(g,i,e)) then
    mySizeDotState = constitutive_sizeDotState(g,i,e)

    ! --- contribution of heun step to absolute residua ---

    stateResiduum(1:mySizeDotState,g,i,e) = stateResiduum(1:mySizeDotState,g,i,e) &
                                          + 0.5_pReal * constitutive_dotState(g,i,e)%p * crystallite_subdt(g,i,e)  ! contribution to absolute residuum in state and temperature
    temperatureResiduum(g,i,e) = temperatureResiduum(g,i,e) &
                               + 0.5_pReal * crystallite_dotTemperature(g,i,e) * crystallite_subdt(g,i,e)
    !$OMP FLUSH(stateResiduum,temperatureResiduum)

    ! --- relative residua ---

    forall (s = 1_pInt:mySizeDotState, abs(constitutive_state(g,i,e)%p(s)) > 0.0_pReal) &
      relStateResiduum(s,g,i,e) = stateResiduum(s,g,i,e) / constitutive_state(g,i,e)%p(s)
    if (crystallite_Temperature(g,i,e) > 0.0_pReal) then
      relTemperatureResiduum(g,i,e) = temperatureResiduum(g,i,e) / crystallite_Temperature(g,i,e)
    endif
    !$OMP FLUSH(relStateResiduum,relTemperatureResiduum)
2011-03-29 12:57:19 +05:30
#ifndef _OPENMP
2012-11-22 18:34:19 +05:30
if ( iand ( debug_level ( debug_crystallite ) , debug_levelExtensive ) / = 0_pInt &
. and . ( ( e == debug_e . and . i == debug_i . and . g == debug_g ) &
. or . . not . iand ( debug_level ( debug_crystallite ) , debug_levelSelective ) / = 0_pInt ) ) then
write ( 6 , '(a,i8,1x,i2,1x,i3)' ) '<< CRYST >> updateState at el ip g ' , e , i , g
write ( 6 , * )
write ( 6 , '(a,/,(12x,12(f12.1,1x)))' ) '<< CRYST >> absolute residuum tolerance' , &
stateResiduum ( 1 : mySizeDotState , g , i , e ) / constitutive_aTolState ( g , i , e ) % p ( 1 : mySizeDotState )
write ( 6 , * )
write ( 6 , '(a,/,(12x,12(f12.1,1x)))' ) '<< CRYST >> relative residuum tolerance' , &
relStateResiduum ( 1 : mySizeDotState , g , i , e ) / rTol_crystalliteState
write ( 6 , * )
write ( 6 , '(a,/,(12x,12(e12.5,1x)))' ) '<< CRYST >> dotState' , constitutive_dotState ( g , i , e ) % p ( 1 : mySizeDotState ) &
- 2.0_pReal * stateResiduum ( 1 : mySizeDotState , g , i , e ) / crystallite_subdt ( g , i , e ) ! calculate former dotstate from higher order solution and state residuum
write ( 6 , * )
write ( 6 , '(a,/,(12x,12(e12.5,1x)))' ) '<< CRYST >> new state' , constitutive_state ( g , i , e ) % p ( 1 : mySizeDotState )
write ( 6 , * )
endif
#endif
! --- converged ? ---
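! a state component is accepted if either its relative residuum is below
! rTol_crystalliteState or its absolute residuum is below the constitutive
! aTolState; the temperature must additionally meet rTol_crystalliteTemperature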
if ( all ( abs ( relStateResiduum ( : , g , i , e ) ) < rTol_crystalliteState &
. or . abs ( stateResiduum ( 1 : mySizeDotState , g , i , e ) ) < constitutive_aTolState ( g , i , e ) % p ( 1 : mySizeDotState ) ) &
. and . abs ( relTemperatureResiduum ( g , i , e ) ) < rTol_crystalliteTemperature ) then
crystallite_converged ( g , i , e ) = . true . ! ... converged per definitionem
if ( iand ( debug_level ( debug_crystallite ) , debug_levelBasic ) / = 0_pInt ) then
!$OMP CRITICAL (distributionState)
debug_StateLoopDistribution ( 2 , numerics_integrationMode ) = &
debug_StateLoopDistribution ( 2 , numerics_integrationMode ) + 1_pInt
!$OMP END CRITICAL (distributionState)
endif
endif
endif
enddo ; enddo ; enddo
!$OMP ENDDO
elseif ( numerics_integrationMode > 1_pInt ) then ! stiffness calculation
!$OMP DO
do e = eIter ( 1 ) , eIter ( 2 ) ; do i = iIter ( 1 , e ) , iIter ( 2 , e ) ; do g = gIter ( 1 , e ) , gIter ( 2 , e ) ! iterate over elements, ips and grains
if ( crystallite_todo ( g , i , e ) ) then
crystallite_converged ( g , i , e ) = . true . ! ... converged per definitionem
if ( iand ( debug_level ( debug_crystallite ) , debug_levelBasic ) / = 0_pInt ) then
!$OMP CRITICAL (distributionState)
debug_StateLoopDistribution ( 2 , numerics_integrationMode ) = &
debug_StateLoopDistribution ( 2 , numerics_integrationMode ) + 1_pInt
!$OMP END CRITICAL (distributionState)
endif
endif
enddo ; enddo ; enddo
!$OMP ENDDO
endif
!$OMP END PARALLEL
! --- NONLOCAL CONVERGENCE CHECK ---
if ( iand ( debug_level ( debug_crystallite ) , debug_levelExtensive ) / = 0_pInt ) then
!$OMP CRITICAL (write2out)
write ( 6 , '(a,i8,a,i2)' ) '<< CRYST >> ' , count ( crystallite_converged ( : , : , : ) ) , ' grains converged'
write ( 6 , * )
!$OMP END CRITICAL (write2out)
endif
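! non-local grains interact with their neighborhood and therefore can only
! converge (or fail) as a group: a single unconverged non-local grain resets
! the converged flag of all non-local grains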
if ( . not . singleRun ) then ! if not requesting integration of just a single IP
if ( any ( . not . crystallite_converged . and . . not . crystallite_localPlasticity ) ) then ! any non-local not yet converged (or broken)...
crystallite_converged = crystallite_converged . and . crystallite_localPlasticity ! ...restart all non-local as not converged
endif
endif
end subroutine crystallite_integrateStateAdaptiveEuler
!********************************************************************
! integrate stress, state and Temperature with
! 1st order explicit Euler method
!********************************************************************
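! Note: the forward Euler update below, state <- state + dt * dotState (and
! temperature <- subTemperature0 + dt * dotTemperature), is 1st order accurate
! and needs one dotState evaluation per substep; accuracy is governed solely by
! the substepping of the caller, since the scheme itself has no error estimate.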
subroutine crystallite_integrateStateEuler ( gg , ii , ee )
!*** variables and functions from other modules ***!
use numerics , only : numerics_integrationMode , &
numerics_timeSyncing
use debug , only : debug_level , &
debug_crystallite , &
debug_levelBasic , &
debug_levelExtensive , &
debug_levelSelective , &
debug_e , &
debug_i , &
debug_g , &
debug_StateLoopDistribution
use FEsolving , only : FEsolving_execElem , &
FEsolving_execIP
use mesh , only : mesh_element , &
mesh_NcpElems
use material , only : homogenization_Ngrains
use constitutive , only : constitutive_sizeDotState , &
constitutive_state , &
constitutive_subState0 , &
constitutive_dotState , &
constitutive_collectDotState , &
constitutive_dotTemperature , &
constitutive_microstructure
implicit none
!*** input variables ***!
integer ( pInt ) , optional , intent ( in ) :: ee , & ! element index
ii , & ! integration point index
gg ! grain index
!*** local variables ***!
integer ( pInt ) e , & ! element index in element loop
i , & ! integration point index in ip loop
g , & ! grain index in grain loop
mySizeDotState
integer ( pInt ) , dimension ( 2 ) :: eIter ! bounds for element iteration
integer ( pInt ) , dimension ( 2 , mesh_NcpElems ) :: iIter , & ! bounds for ip iteration
gIter ! bounds for grain iteration
logical singleRun ! flag indicating computation for single (g,i,e) triple
if ( present ( ee ) . and . present ( ii ) . and . present ( gg ) ) then
eIter = ee
iIter ( 1 : 2 , ee ) = ii
gIter ( 1 : 2 , ee ) = gg
singleRun = . true .
else
eIter = FEsolving_execElem ( 1 : 2 )
do e = eIter ( 1 ) , eIter ( 2 )
iIter ( 1 : 2 , e ) = FEsolving_execIP ( 1 : 2 , e )
gIter ( 1 : 2 , e ) = [ 1_pInt , homogenization_Ngrains ( mesh_element ( 3 , e ) ) ]
enddo
singleRun = . false .
endif
!$OMP PARALLEL
if ( numerics_integrationMode == 1_pInt ) then
! --- DOT STATE AND TEMPERATURE ---
!$OMP DO
do e = eIter ( 1 ) , eIter ( 2 ) ; do i = iIter ( 1 , e ) , iIter ( 2 , e ) ; do g = gIter ( 1 , e ) , gIter ( 2 , e ) ! iterate over elements, ips and grains
if ( crystallite_todo ( g , i , e ) . and . . not . crystallite_converged ( g , i , e ) ) then
call constitutive_collectDotState ( crystallite_Tstar_v ( 1 : 6 , g , i , e ) , crystallite_Fe , crystallite_Fp , &
crystallite_Temperature ( g , i , e ) , crystallite_subdt ( g , i , e ) , crystallite_subFrac , g , i , e )
crystallite_dotTemperature ( g , i , e ) = constitutive_dotTemperature ( crystallite_Tstar_v ( 1 : 6 , g , i , e ) , &
crystallite_Temperature ( g , i , e ) , g , i , e )
endif
enddo ; enddo ; enddo
!$OMP ENDDO
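! note: a value compares unequal to itself only if it is NaN (IEEE 754), so the
! test ( x / = x ) below serves as a compiler-independent NaN check on the
! collected rates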
!$OMP DO
do e = eIter ( 1 ) , eIter ( 2 ) ; do i = iIter ( 1 , e ) , iIter ( 2 , e ) ; do g = gIter ( 1 , e ) , gIter ( 2 , e ) ! iterate over elements, ips and grains
!$OMP FLUSH(crystallite_todo)
if ( crystallite_todo ( g , i , e ) . and . . not . crystallite_converged ( g , i , e ) ) then
if ( any ( constitutive_dotState ( g , i , e ) % p / = constitutive_dotState ( g , i , e ) % p ) & ! NaN occurred in dotState
. or . crystallite_dotTemperature ( g , i , e ) / = crystallite_dotTemperature ( g , i , e ) ) then ! NaN occurred in dotTemperature
if ( . not . crystallite_localPlasticity ( g , i , e ) . and . . not . numerics_timeSyncing ) then ! if broken non-local...
!$OMP CRITICAL (checkTodo)
crystallite_todo = crystallite_todo . and . crystallite_localPlasticity ! ...all non-locals skipped
!$OMP END CRITICAL (checkTodo)
else ! if broken local...
crystallite_todo ( g , i , e ) = . false . ! ... skip this one next time
endif
endif
endif
enddo ; enddo ; enddo
!$OMP ENDDO
! --- UPDATE STATE AND TEMPERATURE ---
!$OMP DO PRIVATE(mySizeDotState)
do e = eIter ( 1 ) , eIter ( 2 ) ; do i = iIter ( 1 , e ) , iIter ( 2 , e ) ; do g = gIter ( 1 , e ) , gIter ( 2 , e ) ! iterate over elements, ips and grains
if ( crystallite_todo ( g , i , e ) . and . . not . crystallite_converged ( g , i , e ) ) then
mySizeDotState = constitutive_sizeDotState ( g , i , e )
constitutive_state ( g , i , e ) % p ( 1 : mySizeDotState ) = constitutive_state ( g , i , e ) % p ( 1 : mySizeDotState ) &
+ constitutive_dotState ( g , i , e ) % p ( 1 : mySizeDotState ) * crystallite_subdt ( g , i , e )
crystallite_Temperature ( g , i , e ) = crystallite_subTemperature0 ( g , i , e ) &
+ crystallite_dotTemperature ( g , i , e ) * crystallite_subdt ( g , i , e )
#ifndef _OPENMP
if ( iand ( debug_level ( debug_crystallite ) , debug_levelExtensive ) / = 0_pInt &
. and . ( ( e == debug_e . and . i == debug_i . and . g == debug_g ) &
. or . . not . iand ( debug_level ( debug_crystallite ) , debug_levelSelective ) / = 0_pInt ) ) then
write ( 6 , '(a,i8,1x,i2,1x,i3)' ) '<< CRYST >> update state at el ip g ' , e , i , g
write ( 6 , * )
write ( 6 , '(a,/,(12x,12(e12.6,1x)))' ) '<< CRYST >> dotState' , constitutive_dotState ( g , i , e ) % p ( 1 : mySizeDotState )
write ( 6 , * )
write ( 6 , '(a,/,(12x,12(e12.6,1x)))' ) '<< CRYST >> new state' , constitutive_state ( g , i , e ) % p ( 1 : mySizeDotState )
write ( 6 , * )
endif
#endif
endif
enddo ; enddo ; enddo
!$OMP ENDDO
! --- STATE JUMP ---
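! a state jump applies the finite, discontinuous state change requested by the
! constitutive law (see crystallite_stateJump below) on top of the integrated
! rates; failure excludes the grain from further integration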
!$OMP DO
do e = eIter ( 1 ) , eIter ( 2 ) ; do i = iIter ( 1 , e ) , iIter ( 2 , e ) ; do g = gIter ( 1 , e ) , gIter ( 2 , e ) ! iterate over elements, ips and grains
!$OMP FLUSH(crystallite_todo)
if ( crystallite_todo ( g , i , e ) . and . . not . crystallite_converged ( g , i , e ) ) then
crystallite_todo ( g , i , e ) = crystallite_stateJump ( g , i , e )
!$OMP FLUSH(crystallite_todo)
if ( . not . crystallite_todo ( g , i , e ) . and . . not . crystallite_localPlasticity ( g , i , e ) & ! if broken non-local...
. and . . not . numerics_timeSyncing ) then
!$OMP CRITICAL (checkTodo)
crystallite_todo = crystallite_todo . and . crystallite_localPlasticity ! ...all non-locals skipped
!$OMP END CRITICAL (checkTodo)
endif
endif
enddo ; enddo ; enddo
!$OMP ENDDO
! --- UPDATE DEPENDENT STATES ---
!$OMP DO
do e = eIter ( 1 ) , eIter ( 2 ) ; do i = iIter ( 1 , e ) , iIter ( 2 , e ) ; do g = gIter ( 1 , e ) , gIter ( 2 , e ) ! iterate over elements, ips and grains
if ( crystallite_todo ( g , i , e ) . and . . not . crystallite_converged ( g , i , e ) ) then
call constitutive_microstructure ( crystallite_Temperature ( g , i , e ) , crystallite_Fe ( 1 : 3 , 1 : 3 , g , i , e ) , &
crystallite_Fp ( 1 : 3 , 1 : 3 , g , i , e ) , g , i , e ) ! update dependent state variables to be consistent with basic states
endif
enddo ; enddo ; enddo
!$OMP ENDDO
endif
! --- STRESS INTEGRATION ---
!$OMP DO
do e = eIter ( 1 ) , eIter ( 2 ) ; do i = iIter ( 1 , e ) , iIter ( 2 , e ) ; do g = gIter ( 1 , e ) , gIter ( 2 , e ) ! iterate over elements, ips and grains
!$OMP FLUSH(crystallite_todo)
if ( crystallite_todo ( g , i , e ) . and . . not . crystallite_converged ( g , i , e ) ) then
crystallite_todo ( g , i , e ) = crystallite_integrateStress ( g , i , e )
!$OMP FLUSH(crystallite_todo)
if ( . not . crystallite_todo ( g , i , e ) . and . . not . crystallite_localPlasticity ( g , i , e ) & ! if broken non-local...
. and . . not . numerics_timeSyncing ) then
!$OMP CRITICAL (checkTodo)
crystallite_todo = crystallite_todo . and . crystallite_localPlasticity ! ...all non-locals skipped
!$OMP END CRITICAL (checkTodo)
endif
endif
enddo ; enddo ; enddo
!$OMP ENDDO
! --- SET CONVERGENCE FLAG ---
!$OMP DO
do e = eIter ( 1 ) , eIter ( 2 ) ; do i = iIter ( 1 , e ) , iIter ( 2 , e ) ; do g = gIter ( 1 , e ) , gIter ( 2 , e ) ! iterate over elements, ips and grains
if ( crystallite_todo ( g , i , e ) . and . . not . crystallite_converged ( g , i , e ) ) then
crystallite_converged ( g , i , e ) = . true . ! if still "to do" then converged per definitionem
if ( iand ( debug_level ( debug_crystallite ) , debug_levelBasic ) / = 0_pInt ) then
!$OMP CRITICAL (distributionState)
debug_StateLoopDistribution ( 1 , numerics_integrationMode ) = &
debug_StateLoopDistribution ( 1 , numerics_integrationMode ) + 1_pInt
!$OMP END CRITICAL (distributionState)
endif
endif
enddo ; enddo ; enddo
!$OMP ENDDO
!$OMP END PARALLEL
! --- CHECK NON-LOCAL CONVERGENCE ---
if ( . not . singleRun ) then ! if not requesting integration of just a single IP
if ( any ( . not . crystallite_converged . and . . not . crystallite_localPlasticity ) & ! any non-local not yet converged (or broken)...
. and . . not . numerics_timeSyncing ) then
crystallite_converged = crystallite_converged . and . crystallite_localPlasticity ! ...restart all non-local as not converged
endif
endif
end subroutine crystallite_integrateStateEuler
!********************************************************************
! integrate stress, state and Temperature with
! adaptive 1st order explicit Euler method,
! iterating the state update to a fixed point with adaptive damping
!********************************************************************
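! Sketch of the iteration below: after an explicit Euler predictor, each pass
! integrates the stress, re-collects dotState and applies the damped update
!   state = subState0 + dt * ( statedamper * dotState
!                            + ( 1 - statedamper ) * previousDotState )
! until the state residuum meets its absolute or relative tolerance. The
! damping factor statedamper is lowered when successive changes of dotState
! point in opposite directions (see "state damper" below).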
subroutine crystallite_integrateStateFPI ( gg , ii , ee )
!*** variables and functions from other modules ***!
use debug , only : debug_e , &
debug_i , &
debug_g , &
debug_level , &
debug_crystallite , &
debug_levelBasic , &
debug_levelExtensive , &
debug_levelSelective , &
debug_StateLoopDistribution
use numerics , only : nState , &
numerics_integrationMode , &
rTol_crystalliteState , &
rTol_crystalliteTemperature
use FEsolving , only : FEsolving_execElem , &
FEsolving_execIP
use mesh , only : mesh_element , &
mesh_NcpElems
use material , only : homogenization_Ngrains
use constitutive , only : constitutive_subState0 , &
constitutive_state , &
constitutive_sizeDotState , &
constitutive_maxSizeDotState , &
constitutive_dotState , &
constitutive_collectDotState , &
constitutive_dotTemperature , &
constitutive_microstructure , &
constitutive_previousDotState , &
constitutive_previousDotState2 , &
constitutive_aTolState
implicit none
!*** input variables ***!
integer ( pInt ) , optional , intent ( in ) :: ee , & ! element index
ii , & ! integration point index
gg ! grain index
!*** output variables ***!
!*** local variables ***!
integer ( pInt ) NiterationState , & ! number of iterations in state loop
e , & ! element index in element loop
i , & ! integration point index in ip loop
g , & ! grain index in grain loop
mySizeDotState
integer ( pInt ) , dimension ( 2 ) :: eIter ! bounds for element iteration
integer ( pInt ) , dimension ( 2 , mesh_NcpElems ) :: iIter , & ! bounds for ip iteration
gIter ! bounds for grain iteration
real ( pReal ) dot_prod12 , &
dot_prod22 , &
stateDamper , & ! damper for integration of state
temperatureResiduum
real ( pReal ) , dimension ( constitutive_maxSizeDotState ) :: &
stateResiduum , &
tempState
logical singleRun ! flag indicating computation for single (g,i,e) triple
singleRun = present ( ee ) . and . present ( ii ) . and . present ( gg )
if ( singleRun ) then
eIter = ee
iIter ( 1 : 2 , ee ) = ii
gIter ( 1 : 2 , ee ) = gg
else
eIter = FEsolving_execElem ( 1 : 2 )
do e = eIter ( 1 ) , eIter ( 2 )
iIter ( 1 : 2 , e ) = FEsolving_execIP ( 1 : 2 , e )
gIter ( 1 : 2 , e ) = [ 1_pInt , homogenization_Ngrains ( mesh_element ( 3 , e ) ) ]
enddo
endif
! --+>> PREGUESS FOR STATE <<+--
!$OMP PARALLEL
!$OMP DO
do e = eIter ( 1 ) , eIter ( 2 ) ; do i = iIter ( 1 , e ) , iIter ( 2 , e ) ; do g = gIter ( 1 , e ) , gIter ( 2 , e ) ! iterate over elements, ips and grains
constitutive_previousDotState ( g , i , e ) % p = 0.0_pReal
constitutive_previousDotState2 ( g , i , e ) % p = 0.0_pReal
enddo ; enddo ; enddo
!$OMP ENDDO
! --- DOT STATES ---
!$OMP DO
do e = eIter ( 1 ) , eIter ( 2 ) ; do i = iIter ( 1 , e ) , iIter ( 2 , e ) ; do g = gIter ( 1 , e ) , gIter ( 2 , e ) ! iterate over elements, ips and grains
if ( crystallite_todo ( g , i , e ) ) then
call constitutive_collectDotState ( crystallite_Tstar_v ( 1 : 6 , g , i , e ) , crystallite_Fe , crystallite_Fp , &
crystallite_Temperature ( g , i , e ) , crystallite_subdt ( g , i , e ) , crystallite_subFrac , g , i , e )
crystallite_dotTemperature ( g , i , e ) = constitutive_dotTemperature ( crystallite_Tstar_v ( 1 : 6 , g , i , e ) , &
crystallite_Temperature ( g , i , e ) , g , i , e )
endif
enddo ; enddo ; enddo
!$OMP ENDDO
!$OMP DO
do e = eIter ( 1 ) , eIter ( 2 ) ; do i = iIter ( 1 , e ) , iIter ( 2 , e ) ; do g = gIter ( 1 , e ) , gIter ( 2 , e ) ! iterate over elements, ips and grains
!$OMP FLUSH(crystallite_todo)
if ( crystallite_todo ( g , i , e ) ) then
if ( any ( constitutive_dotState ( g , i , e ) % p / = constitutive_dotState ( g , i , e ) % p ) & ! NaN occurred in dotState
. or . crystallite_dotTemperature ( g , i , e ) / = crystallite_dotTemperature ( g , i , e ) ) then ! NaN occurred in dotTemperature
if ( . not . crystallite_localPlasticity ( g , i , e ) ) then ! if the broken one is non-local...
!$OMP CRITICAL (checkTodo)
crystallite_todo = crystallite_todo . and . crystallite_localPlasticity ! ...all non-locals done (and broken)
!$OMP END CRITICAL (checkTodo)
else ! broken one was local...
crystallite_todo ( g , i , e ) = . false . ! ... done (and broken)
endif
endif
endif
enddo ; enddo ; enddo
!$OMP ENDDO
! --- UPDATE STATE AND TEMPERATURE ---
!$OMP DO PRIVATE(mySizeDotState)
do e = eIter ( 1 ) , eIter ( 2 ) ; do i = iIter ( 1 , e ) , iIter ( 2 , e ) ; do g = gIter ( 1 , e ) , gIter ( 2 , e ) ! iterate over elements, ips and grains
if ( crystallite_todo ( g , i , e ) ) then
mySizeDotState = constitutive_sizeDotState ( g , i , e )
constitutive_state ( g , i , e ) % p ( 1 : mySizeDotState ) = constitutive_subState0 ( g , i , e ) % p ( 1 : mySizeDotState ) &
+ constitutive_dotState ( g , i , e ) % p * crystallite_subdt ( g , i , e )
crystallite_Temperature ( g , i , e ) = crystallite_subTemperature0 ( g , i , e ) &
+ crystallite_dotTemperature ( g , i , e ) * crystallite_subdt ( g , i , e )
endif
enddo ; enddo ; enddo
!$OMP ENDDO
!$OMP END PARALLEL
! --+>> STATE LOOP <<+--
NiterationState = 0_pInt
do while ( any ( crystallite_todo . and . . not . crystallite_converged ) . and . NiterationState < nState ) ! convergence loop for crystallite
NiterationState = NiterationState + 1_pInt
!$OMP PARALLEL
! --- UPDATE DEPENDENT STATES ---
!$OMP DO
do e = eIter ( 1 ) , eIter ( 2 ) ; do i = iIter ( 1 , e ) , iIter ( 2 , e ) ; do g = gIter ( 1 , e ) , gIter ( 2 , e ) ! iterate over elements, ips and grains
if ( crystallite_todo ( g , i , e ) . and . . not . crystallite_converged ( g , i , e ) ) then
call constitutive_microstructure ( crystallite_Temperature ( g , i , e ) , crystallite_Fe ( 1 : 3 , 1 : 3 , g , i , e ) , &
crystallite_Fp ( 1 : 3 , 1 : 3 , g , i , e ) , g , i , e ) ! update dependent state variables to be consistent with basic states
endif
constitutive_previousDotState2 ( g , i , e ) % p = constitutive_previousDotState ( g , i , e ) % p ! remember previous dotState
constitutive_previousDotState ( g , i , e ) % p = constitutive_dotState ( g , i , e ) % p ! remember current dotState
enddo ; enddo ; enddo
!$OMP ENDDO
! --- STRESS INTEGRATION ---
!$OMP DO
do e = eIter ( 1 ) , eIter ( 2 ) ; do i = iIter ( 1 , e ) , iIter ( 2 , e ) ; do g = gIter ( 1 , e ) , gIter ( 2 , e ) ! iterate over elements, ips and grains
!$OMP FLUSH(crystallite_todo)
if ( crystallite_todo ( g , i , e ) . and . . not . crystallite_converged ( g , i , e ) ) then
crystallite_todo ( g , i , e ) = crystallite_integrateStress ( g , i , e )
!$OMP FLUSH(crystallite_todo)
if ( . not . crystallite_todo ( g , i , e ) . and . . not . crystallite_localPlasticity ( g , i , e ) ) then ! broken non-local...
!$OMP CRITICAL (checkTodo)
crystallite_todo = crystallite_todo . and . crystallite_localPlasticity ! ... then all non-locals skipped
!$OMP END CRITICAL (checkTodo)
endif
endif
enddo ; enddo ; enddo
!$OMP ENDDO
2010-10-01 17:48:49 +05:30
2012-03-09 01:55:28 +05:30
2012-12-16 16:24:13 +05:30
!$OMP SINGLE
2012-11-07 21:13:29 +05:30
!$OMP CRITICAL (write2out)
2012-07-05 15:24:50 +05:30
if ( iand ( debug_level ( debug_crystallite ) , debug_levelExtensive ) / = 0_pInt ) then
2011-03-29 12:57:19 +05:30
write ( 6 , '(a,i8,a)' ) '<< CRYST >> ' , count ( crystallite_todo ( : , : , : ) ) , ' grains todo after stress integration'
2010-10-01 17:48:49 +05:30
endif
2012-11-07 21:13:29 +05:30
!$OMP END CRITICAL (write2out)
2012-12-16 16:24:13 +05:30
!$OMP END SINGLE
2010-10-01 17:48:49 +05:30
2012-05-17 20:55:21 +05:30
! --- DOT STATE AND TEMPERATURE ---
!$OMP DO
do e = eIter ( 1 ) , eIter ( 2 ) ; do i = iIter ( 1 , e ) , iIter ( 2 , e ) ; do g = gIter ( 1 , e ) , gIter ( 2 , e ) ! iterate over elements, ips and grains
if ( crystallite_todo ( g , i , e ) . and . . not . crystallite_converged ( g , i , e ) ) then
call constitutive_collectDotState ( crystallite_Tstar_v ( 1 : 6 , g , i , e ) , crystallite_Fe , crystallite_Fp , &
crystallite_Temperature ( g , i , e ) , crystallite_subdt ( g , i , e ) , crystallite_subFrac , g , i , e )
crystallite_dotTemperature ( g , i , e ) = constitutive_dotTemperature ( crystallite_Tstar_v ( 1 : 6 , g , i , e ) , &
crystallite_Temperature ( g , i , e ) , g , i , e )
endif
enddo ; enddo ; enddo
!$OMP ENDDO
!$OMP DO
do e = eIter ( 1 ) , eIter ( 2 ) ; do i = iIter ( 1 , e ) , iIter ( 2 , e ) ; do g = gIter ( 1 , e ) , gIter ( 2 , e ) ! iterate over elements, ips and grains
!$OMP FLUSH(crystallite_todo)
if ( crystallite_todo ( g , i , e ) . and . . not . crystallite_converged ( g , i , e ) ) then
if ( any ( constitutive_dotState ( g , i , e ) % p / = constitutive_dotState ( g , i , e ) % p ) & ! NaN occurred in dotState
. or . crystallite_dotTemperature ( g , i , e ) / = crystallite_dotTemperature ( g , i , e ) ) then ! NaN occurred in dotTemperature
crystallite_todo ( g , i , e ) = . false . ! ... skip this one next time
if ( . not . crystallite_localPlasticity ( g , i , e ) ) then ! if this one is non-local...
!$OMP CRITICAL (checkTodo)
crystallite_todo = crystallite_todo . and . crystallite_localPlasticity ! ...all non-locals skipped
!$OMP END CRITICAL (checkTodo)
endif
endif
endif
enddo ; enddo ; enddo
!$OMP ENDDO
! --- UPDATE STATE AND TEMPERATURE ---
!$OMP DO PRIVATE(dot_prod12,dot_prod22,statedamper,mySizeDotState,stateResiduum,temperatureResiduum,tempState)
do e = eIter ( 1 ) , eIter ( 2 ) ; do i = iIter ( 1 , e ) , iIter ( 2 , e ) ; do g = gIter ( 1 , e ) , gIter ( 2 , e ) ! iterate over elements, ips and grains
if ( crystallite_todo ( g , i , e ) . and . . not . crystallite_converged ( g , i , e ) ) then
! --- state damper ---
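! dot_prod12 correlates the latest change of dotState with the previous change;
! dot_prod22 is the squared norm of the previous change. A negative correlation
! (oscillating dotState) triggers damping via
!   statedamper = 0.75 + 0.25 * tanh ( 2.0 + 4.0 * dot_prod12 / dot_prod22 ) ,
! which smoothly lowers the update weight towards a floor of 0.5; otherwise the
! undamped update ( statedamper = 1.0 ) is used.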
dot_prod12 = dot_product ( constitutive_dotState ( g , i , e ) % p - constitutive_previousDotState ( g , i , e ) % p , &
constitutive_previousDotState ( g , i , e ) % p - constitutive_previousDotState2 ( g , i , e ) % p )
dot_prod22 = dot_product ( constitutive_previousDotState ( g , i , e ) % p - constitutive_previousDotState2 ( g , i , e ) % p , &
constitutive_previousDotState ( g , i , e ) % p - constitutive_previousDotState2 ( g , i , e ) % p )
if ( dot_prod22 > 0.0_pReal &
. and . ( dot_prod12 < 0.0_pReal &
. or . dot_product ( constitutive_dotState ( g , i , e ) % p , constitutive_previousDotState ( g , i , e ) % p ) < 0.0_pReal ) ) then
statedamper = 0.75_pReal + 0.25_pReal * tanh ( 2.0_pReal + 4.0_pReal * dot_prod12 / dot_prod22 )
else
statedamper = 1.0_pReal
endif
! --- get residua ---
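! the residuum measures the deviation of the current state from a damped Euler
! update started at subState0; subtracting it below enforces exactly
!   state = subState0 + dt * ( statedamper * dotState
!                            + ( 1 - statedamper ) * previousDotState )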
mySizeDotState = constitutive_sizeDotState ( g , i , e )
stateResiduum ( 1 : mySizeDotState ) = constitutive_state ( g , i , e ) % p ( 1 : mySizeDotState ) &
- constitutive_subState0 ( g , i , e ) % p ( 1 : mySizeDotState ) &
- ( constitutive_dotState ( g , i , e ) % p * statedamper &
+ constitutive_previousDotState ( g , i , e ) % p * ( 1.0_pReal - statedamper ) ) * crystallite_subdt ( g , i , e )
temperatureResiduum = crystallite_Temperature ( g , i , e ) &
- crystallite_subTemperature0 ( g , i , e ) &
- crystallite_dotTemperature ( g , i , e ) * crystallite_subdt ( g , i , e )
! --- correct state and temperature with residuum ---
tempState ( 1 : mySizeDotState ) = constitutive_state ( g , i , e ) % p ( 1 : mySizeDotState ) - stateResiduum ( 1 : mySizeDotState ) ! need to copy to local variable, since we can't flush a pointer in openmp
crystallite_Temperature ( g , i , e ) = crystallite_Temperature ( g , i , e ) - temperatureResiduum
!$OMP FLUSH(crystallite_Temperature)
#ifndef _OPENMP
if ( iand ( debug_level ( debug_crystallite ) , debug_levelBasic ) / = 0_pInt &
. and . ( ( e == debug_e . and . i == debug_i . and . g == debug_g ) &
. or . . not . iand ( debug_level ( debug_crystallite ) , debug_levelSelective ) / = 0_pInt ) ) then
write ( 6 , '(a,i8,1x,i2,1x,i3)' ) '<< CRYST >> update state at el ip g ' , e , i , g
write ( 6 , * )
write ( 6 , '(a,f6.1)' ) '<< CRYST >> statedamper ' , statedamper
write ( 6 , * )
write ( 6 , '(a,/,(12x,12(e12.6,1x)))' ) '<< CRYST >> state residuum' , stateResiduum ( 1 : mySizeDotState )
write ( 6 , * )
write ( 6 , '(a,/,(12x,12(e12.6,1x)))' ) '<< CRYST >> new state' , tempState ( 1 : mySizeDotState )
write ( 6 , * )
endif
#endif
! --- store corrected dotState --- (cannot do this before state update, because not sure how to flush pointers in openmp)
constitutive_dotState ( g , i , e ) % p = constitutive_dotState ( g , i , e ) % p * statedamper &
+ constitutive_previousDotState ( g , i , e ) % p * ( 1.0_pReal - statedamper )
! --- converged ? ---
if ( all ( abs ( stateResiduum ( 1 : mySizeDotState ) ) < constitutive_aTolState ( g , i , e ) % p ( 1 : mySizeDotState ) &
. or . abs ( stateResiduum ( 1 : mySizeDotState ) ) < rTol_crystalliteState &
* abs ( tempState ( 1 : mySizeDotState ) ) ) &
. and . ( abs ( temperatureResiduum ) < rTol_crystalliteTemperature * crystallite_Temperature ( g , i , e ) &
. or . crystallite_Temperature ( g , i , e ) == 0.0_pReal ) ) then
crystallite_converged ( g , i , e ) = . true . ! ... converged per definitionem
if ( iand ( debug_level ( debug_crystallite ) , debug_levelBasic ) / = 0_pInt ) then
!$OMP CRITICAL (distributionState)
debug_StateLoopDistribution ( NiterationState , numerics_integrationMode ) = &
debug_StateLoopDistribution ( NiterationState , numerics_integrationMode ) + 1_pInt
!$OMP END CRITICAL (distributionState)
endif
endif
constitutive_state ( g , i , e ) % p ( 1 : mySizeDotState ) = tempState ( 1 : mySizeDotState ) ! copy local backup to global pointer
endif
enddo ; enddo ; enddo
!$OMP ENDDO
! --- STATE JUMP ---
!$OMP DO
do e = eIter ( 1 ) , eIter ( 2 ) ; do i = iIter ( 1 , e ) , iIter ( 2 , e ) ; do g = gIter ( 1 , e ) , gIter ( 2 , e ) ! iterate over elements, ips and grains
!$OMP FLUSH(crystallite_todo)
if ( crystallite_todo ( g , i , e ) . and . crystallite_converged ( g , i , e ) ) then ! converged and still alive...
crystallite_todo ( g , i , e ) = crystallite_stateJump ( g , i , e )
!$OMP FLUSH(crystallite_todo)
if ( . not . crystallite_todo ( g , i , e ) ) then ! if state jump fails, then convergence is broken
crystallite_converged ( g , i , e ) = . false .
if ( . not . crystallite_localPlasticity ( g , i , e ) ) then ! if broken non-local...
!$OMP CRITICAL (checkTodo)
crystallite_todo = crystallite_todo . and . crystallite_localPlasticity ! ...all non-locals skipped
!$OMP END CRITICAL (checkTodo)
endif
endif
endif
enddo ; enddo ; enddo
!$OMP ENDDO
!$OMP END PARALLEL
if ( iand ( debug_level ( debug_crystallite ) , debug_levelExtensive ) / = 0_pInt ) then
!$OMP CRITICAL(write2out)
write ( 6 , '(a,i8,a,i2)' ) '<< CRYST >> ' , count ( crystallite_converged ( : , : , : ) ) , &
' grains converged after state integration no. ' , NiterationState
write ( 6 , * )
!$OMP END CRITICAL(write2out)
endif
! --- NON-LOCAL CONVERGENCE CHECK ---
if ( . not . singleRun ) then ! if not requesting integration of just a single IP
if ( any ( . not . crystallite_converged . and . . not . crystallite_localPlasticity ) ) then ! any non-local not yet converged (or broken)...
crystallite_converged = crystallite_converged . and . crystallite_localPlasticity ! ...restart all non-local as not converged
endif
endif
if ( iand ( debug_level ( debug_crystallite ) , debug_levelExtensive ) / = 0_pInt ) then
!$OMP CRITICAL(write2out)
write ( 6 , '(a,i8,a)' ) '<< CRYST >> ' , count ( crystallite_converged ( : , : , : ) ) , ' grains converged after non-local check'
write ( 6 , '(a,i8,a,i2)' ) '<< CRYST >> ' , count ( crystallite_todo ( : , : , : ) ) , ' grains todo after state integration no. ' , &
NiterationState
write ( 6 , * )
!$OMP END CRITICAL(write2out)
endif
enddo ! crystallite convergence loop
end subroutine crystallite_integrateStateFPI
!***********************************************************
!* calculates a jump in the state according to the current *
!* state and the current stress *
!***********************************************************
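! The constitutive law may request a finite, discontinuous change of its state
! (deltaState) in addition to the continuous evolution by dotState, e.g. at
! sudden microstructural events. The jump is applied only if the collected
! deltaState is free of NaN; otherwise the function returns .false. and the
! caller treats the grain as broken.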
function crystallite_stateJump ( g , i , e )
!*** variables and functions from other modules ***!
use debug , only : debug_level , &
debug_crystallite , &
debug_levelExtensive , &
debug_levelSelective , &
debug_e , &
debug_i , &
debug_g
use FEsolving , only : FEsolving_execElem , &
FEsolving_execIP
use mesh , only : mesh_element , &
mesh_NcpElems
use material , only : homogenization_Ngrains
use constitutive , only : constitutive_sizeDotState , &
constitutive_state , &
constitutive_deltaState , &
constitutive_collectDeltaState , &
constitutive_microstructure
implicit none
!*** input variables ***!
integer ( pInt ) , intent ( in ) :: e , & ! element index
i , & ! integration point index
g ! grain index
!*** output variables ***!
logical crystallite_stateJump
!*** local variables ***!
integer ( pInt ) mySizeDotState
crystallite_stateJump = . false .
call constitutive_collectDeltaState ( crystallite_Tstar_v ( 1 : 6 , g , i , e ) , crystallite_Temperature ( g , i , e ) , g , i , e )
mySizeDotState = constitutive_sizeDotState ( g , i , e )
if ( any ( constitutive_deltaState ( g , i , e ) % p ( 1 : mySizeDotState ) / = constitutive_deltaState ( g , i , e ) % p ( 1 : mySizeDotState ) ) ) then
return
endif
constitutive_state ( g , i , e ) % p ( 1 : mySizeDotState ) = constitutive_state ( g , i , e ) % p ( 1 : mySizeDotState ) &
+ constitutive_deltaState ( g , i , e ) % p ( 1 : mySizeDotState )
#ifndef _OPENMP
if ( any ( constitutive_deltaState ( g , i , e ) % p ( 1 : mySizeDotState ) / = 0.0_pReal ) &
. and . iand ( debug_level ( debug_crystallite ) , debug_levelExtensive ) / = 0_pInt &
. and . ( ( e == debug_e . and . i == debug_i . and . g == debug_g ) &
. or . . not . iand ( debug_level ( debug_crystallite ) , debug_levelSelective ) / = 0_pInt ) ) then
write ( 6 , '(a,i8,1x,i2,1x,i3)' ) '<< CRYST >> update state at el ip g ' , e , i , g
write ( 6 , * )
write ( 6 , '(a,/,(12x,12(e12.6,1x)))' ) '<< CRYST >> deltaState' , constitutive_deltaState ( g , i , e ) % p ( 1 : mySizeDotState )
write ( 6 , * )
write ( 6 , '(a,/,(12x,12(e12.6,1x)))' ) '<< CRYST >> new state' , constitutive_state ( g , i , e ) % p ( 1 : mySizeDotState )
write ( 6 , * )
endif
#endif
crystallite_stateJump = . true .
end function crystallite_stateJump
!***********************************************************************
!*** calculation of stress (P) with time integration ***
!*** based on a residuum in Lp and intermediate ***
!*** acceleration of the Newton-Raphson correction ***
!***********************************************************************
function crystallite_integrateStress ( &
g , & ! grain number
i , & ! integration point number
e , & ! element number
timeFraction &
)
use prec , only : pLongInt
use numerics , only : nStress , &
aTol_crystalliteStress , &
rTol_crystalliteStress , &
iJacoLpresiduum , &
numerics_integrationMode
use debug , only : debug_level , &
debug_crystallite , &
debug_levelBasic , &
debug_levelExtensive , &
debug_levelSelective , &
debug_e , &
debug_i , &
debug_g , &
debug_cumLpCalls , &
debug_cumLpTicks , &
debug_StressLoopDistribution
use constitutive , only : constitutive_LpAndItsTangent , &
constitutive_TandItsTangent , &
constitutive_homogenizedC
use math , only : math_mul33x33 , &
math_mul33xx33 , &
math_mul66x6 , &
math_mul99x99 , &
math_transpose33 , &
math_inv33 , &
math_invert33 , &
math_invert , &
math_det33 , &
math_norm33 , &
math_I3 , &
math_identity2nd , &
math_Mandel66to3333 , &
math_Mandel6to33 , &
math_Mandel33to6 , &
math_Plain3333to99 , &
math_Plain33to9 , &
math_Plain9to33
implicit none
!*** input variables ***!
integer ( pInt ) , intent ( in ) :: e , & ! element index
i , & ! integration point index
g ! grain index
real ( pReal ) , optional , intent ( in ) :: timeFraction ! fraction of timestep
!*** output variables ***!
logical crystallite_integrateStress ! flag indicating if integration succeeded
!*** local variables ***!
 real(pReal), dimension(3,3) ::  Fg_new, &             ! deformation gradient at end of timestep
                                 Fp_current, &         ! plastic deformation gradient at start of timestep
                                 Fp_new, &             ! plastic deformation gradient at end of timestep
                                 Fe_new, &             ! elastic deformation gradient at end of timestep
                                 invFp_new, &          ! inverse of Fp_new
                                 invFp_current, &      ! inverse of Fp_current
                                 Lpguess, &            ! current guess for plastic velocity gradient
                                 Lpguess_old, &        ! known last good guess for plastic velocity gradient
                                 Lp_constitutive, &    ! plastic velocity gradient resulting from constitutive law
                                 residuum, &           ! current residuum of plastic velocity gradient
                                 residuum_old, &       ! last residuum of plastic velocity gradient
                                 deltaLp, &            ! direction of next guess
                                 Tstar, &              ! 2nd Piola-Kirchhoff stress
                                 A, &
                                 B, &
                                 Fe                    ! elastic deformation gradient
 real(pReal), dimension(6) ::    Tstar_v               ! 2nd Piola-Kirchhoff stress in Mandel notation
 real(pReal), dimension(9) ::    work                  ! needed for matrix inversion by LAPACK
 integer(pInt), dimension(9) ::  ipiv                  ! needed for matrix inversion by LAPACK
 real(pReal), dimension(9,9) ::  dLp_dT_constitutive, & ! partial derivative of plastic velocity gradient calculated by constitutive law
                                 dT_dFe_constitutive, & ! partial derivative of 2nd Piola-Kirchhoff stress calculated by constitutive law
                                 dFe_dLp, &            ! partial derivative of elastic deformation gradient
                                 dR_dLp, &             ! partial derivative of residuum (Jacobian for Newton-Raphson scheme)
                                 dR_dLp2               ! working copy of dR_dLp
 real(pReal), dimension(3,3,3,3) :: dT_dFe3333, &      ! partial derivative of 2nd Piola-Kirchhoff stress
                                    dFe_dLp3333        ! partial derivative of elastic deformation gradient
 real(pReal)                     p_hydro, &            ! volumetric part of 2nd Piola-Kirchhoff stress
                                 det, &                ! determinant
                                 steplength0, &
                                 steplength, &
                                 dt, &                 ! time increment
                                 aTol
 logical error                                         ! flag indicating an error
 integer(pInt)                   NiterationStress, &   ! number of stress integrations
                                 ierr, &               ! error indicator for LAPACK
                                 n, &
                                 o, &
                                 p, &
                                 jacoCounter           ! counter to check for Jacobian update
 integer(pLongInt)               tick, &
                                 tock, &
                                 tickrate, &
                                 maxticks
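
 ! Explanatory note (derived from the code below): this function finds Lp such that
 ! R(Lp) = Lp - Lp_constitutive(T(Fe(Lp))) = 0, using a Newton-Raphson scheme with a
 ! backtracking line search. With B = I - dt*Lp, the elastic deformation gradient is
 ! Fe = A*B, which corresponds to the implicit time discretization
 ! Fp_new = (I - dt*Lp)^-1 * Fp_current of the evolution equation Fpdot = Lp * Fp.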
 !* be pessimistic
 crystallite_integrateStress = .false.

#ifndef _OPENMP
 if (iand(debug_level(debug_crystallite), debug_levelExtensive) /= 0_pInt &
     .and. ((e == debug_e .and. i == debug_i .and. g == debug_g) &
            .or. .not. iand(debug_level(debug_crystallite), debug_levelSelective) /= 0_pInt)) then
   write(6,'(a,i8,1x,i2,1x,i3)') '<< CRYST >> integrateStress at el ip g ', e, i, g
 endif
#endif
 !* only integrate over fraction of timestep?
 if (present(timeFraction)) then
   dt = crystallite_subdt(g,i,e) * timeFraction
   Fg_new = crystallite_subF0(1:3,1:3,g,i,e) &
          + (crystallite_subF(1:3,1:3,g,i,e) - crystallite_subF0(1:3,1:3,g,i,e)) * timeFraction
 else
   dt = crystallite_subdt(g,i,e)
   Fg_new = crystallite_subF(1:3,1:3,g,i,e)
 endif
 !* feed local variables
 Fp_current = crystallite_subFp0(1:3,1:3,g,i,e)        ! "Fp_current" is only used as temp var here...
 Lpguess_old = crystallite_Lp(1:3,1:3,g,i,e)           ! consider present Lp good (i.e. worth remembering) ...
 Lpguess = crystallite_Lp(1:3,1:3,g,i,e)               ! ... and take it as first guess

 !* inversion of Fp_current...
 invFp_current = math_inv33(Fp_current)
 if (all(invFp_current == 0.0_pReal)) then             ! ... failed?
#ifndef _OPENMP
   if (iand(debug_level(debug_crystallite), debug_levelBasic) /= 0_pInt) then
     write(6,'(a,i8,1x,i2,1x,i3)') '<< CRYST >> integrateStress failed on inversion of Fp_current at el ip g ', e, i, g
     if (iand(debug_level(debug_crystallite), debug_levelExtensive) > 0_pInt) then
       write(6,*)
       write(6,'(a,/,3(12x,3(f12.7,1x)/))') '<< CRYST >> Fp_current', math_transpose33(Fp_current(1:3,1:3))
     endif
   endif
#endif
return
endif
 A = math_mul33x33(Fg_new, invFp_current)              ! intermediate tensor needed later to calculate dFe_dLp
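 ! Note: with B = I - dt*Lpguess, the trial elastic deformation gradient below is
 ! Fe = Fg_new * invFp_current * B = A*B, so A stays fixed throughout the iteration
 ! and only B changes with the current guess for Lp.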
!* start LpLoop with normal step length
 NiterationStress = 0_pInt
 jacoCounter = 0_pInt
 steplength0 = 1.0_pReal
 steplength = steplength0
 residuum_old = 0.0_pReal

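 ! Each pass of LpLoop below: (1) evaluate Tstar from the current Fe = A*(I - dt*Lpguess),
 ! (2) evaluate Lp_constitutive(Tstar), (3) form the residuum Lpguess - Lp_constitutive,
 ! (4) either accept the result, halve the step length, or take a new Newton step.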
 LpLoop: do
   NiterationStress = NiterationStress + 1_pInt
   !* too many loops required?
   if (NiterationStress > nStress) then
#ifndef _OPENMP
     if (iand(debug_level(debug_crystallite), debug_levelBasic) /= 0_pInt) then
       write(6,'(a,i3,a,i8,1x,i2,1x,i3)') '<< CRYST >> integrateStress reached loop limit', nStress, ' at el ip g ', e, i, g
       write(6,*)
     endif
#endif
return
endif

   !* calculate (elastic) 2nd Piola-Kirchhoff stress tensor and its tangent from constitutive law
   B = math_I3 - dt * Lpguess
   Fe = math_mul33x33(A, B)                            ! current elastic deformation tensor
   call constitutive_TandItsTangent(Tstar, dT_dFe3333, Fe, g, i, e) ! call constitutive law to calculate 2nd Piola-Kirchhoff stress and its derivative
   Tstar_v = math_Mandel33to6(Tstar)
   p_hydro = sum(Tstar_v(1:3)) / 3.0_pReal
   forall (n = 1_pInt:3_pInt) Tstar_v(n) = Tstar_v(n) - p_hydro ! get deviatoric stress tensor
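   ! The hydrostatic part p_hydro is split off here and added back after the loop; only
   ! the deviatoric stress is passed to constitutive_LpAndItsTangent below.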
!* calculate plastic velocity gradient and its tangent from constitutive law
   if (iand(debug_level(debug_crystallite), debug_levelBasic) /= 0_pInt) &
     call system_clock(count=tick, count_rate=tickrate, count_max=maxticks)

   call constitutive_LpAndItsTangent(Lp_constitutive, dLp_dT_constitutive, Tstar_v, crystallite_Temperature(g,i,e), g, i, e)

   if (iand(debug_level(debug_crystallite), debug_levelBasic) /= 0_pInt) then
     call system_clock(count=tock, count_rate=tickrate, count_max=maxticks)
     !$OMP CRITICAL (debugTimingLpTangent)
     debug_cumLpCalls = debug_cumLpCalls + 1_pInt
     debug_cumLpTicks = debug_cumLpTicks + tock - tick
     !$OMP FLUSH (debug_cumLpTicks)
     if (tock < tick) debug_cumLpTicks = debug_cumLpTicks + maxticks
     !$OMP END CRITICAL (debugTimingLpTangent)
   endif
#ifndef _OPENMP
   if (iand(debug_level(debug_crystallite), debug_levelExtensive) /= 0_pInt &
       .and. ((e == debug_e .and. i == debug_i .and. g == debug_g) &
              .or. .not. iand(debug_level(debug_crystallite), debug_levelSelective) /= 0_pInt)) then
     write(6,'(a,i3)') '<< CRYST >> iteration ', NiterationStress
     write(6,*)
     write(6,'(a,/,3(12x,3(e20.7,1x)/))') '<< CRYST >> Lp_constitutive', math_transpose33(Lp_constitutive)
     write(6,'(a,/,3(12x,3(e20.7,1x)/))') '<< CRYST >> Lpguess', math_transpose33(Lpguess)
   endif
#endif
!* update current residuum and check for convergence of loop
   aTol = max(rTol_crystalliteStress * max(math_norm33(Lpguess), math_norm33(Lp_constitutive)), & ! absolute tolerance from largest acceptable relative error
              aTol_crystalliteStress)                  ! minimum lower cutoff
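   ! Convergence test used below: ||residuum|| < aTol, i.e. a relative criterion on the
   ! norm of Lp, with aTol_crystalliteStress as an absolute lower cutoff.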
   residuum = Lpguess - Lp_constitutive

   if (any(residuum /= residuum)) then                 ! NaN in residuum...
#ifndef _OPENMP
     if (iand(debug_level(debug_crystallite), debug_levelBasic) /= 0_pInt) then
       write(6,'(a,i8,1x,i2,1x,i3,a,i3,a)') '<< CRYST >> integrateStress encountered NaN at el ip g ', e, i, g, &
                                            ' ; iteration ', NiterationStress, &
                                            ' >> returning..!'
     endif
#endif
     return                                            ! ...return with flag still .false. to inform integrator about problem
   elseif (math_norm33(residuum) < aTol) then          ! converged if below absolute tolerance
     exit LpLoop                                       ! ...leave iteration loop
   elseif (math_norm33(residuum) < math_norm33(residuum_old) .or. NiterationStress == 1_pInt) then ! not converged, but improved norm of residuum (always proceed in first iteration)...
     residuum_old = residuum                           ! ...remember old values and...
     Lpguess_old = Lpguess
     steplength = steplength0                          ! ...proceed with normal step length (calculate new search direction)
   else                                                ! not converged and residuum not improved...
     steplength = 0.5_pReal * steplength               ! ...try with smaller step length in same direction
     Lpguess = Lpguess_old + steplength * deltaLp
     cycle LpLoop
   endif
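
   ! Simple backtracking line search: a Newton step is accepted only if it reduces the
   ! residuum norm; otherwise the step length is repeatedly halved along the last search
   ! direction deltaLp, restarting from the last good guess Lpguess_old.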
!* calculate Jacobian for correction term
   if (mod(jacoCounter, iJacoLpresiduum) == 0_pInt) then
     dFe_dLp3333 = 0.0_pReal
     do o = 1_pInt,3_pInt; do p = 1_pInt,3_pInt
       dFe_dLp3333(p,o,1:3,p) = A(o,1:3)               ! dFe_dLp(i,j,k,l) = -dt * A(i,k) delta(j,l)
     enddo; enddo
     dFe_dLp3333 = -dt * dFe_dLp3333
     dFe_dLp = math_Plain3333to99(dFe_dLp3333)
     dT_dFe_constitutive = math_Plain3333to99(dT_dFe3333)
     dR_dLp = math_identity2nd(9_pInt) - &
              math_mul99x99(dLp_dT_constitutive, math_mul99x99(dT_dFe_constitutive, dFe_dLp))
     dR_dLp2 = dR_dLp                                  ! will be overwritten in first call to LAPACK routine
     work = math_Plain33to9(residuum)
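     ! Chain rule behind dR_dLp above: since R = Lp - Lp_constitutive(T(Fe(Lp))),
     ! dR/dLp = I - dLp/dT * dT/dFe * dFe/dLp, assembled in 9x9 plain-matrix notation.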
#if(FLOAT==8)
     call dgesv(9, 1, dR_dLp2, 9, ipiv, work, 9, ierr) ! solve dR/dLp * x = residuum for x (deltaLp = -x below)
#elif(FLOAT==4)
     call sgesv(9, 1, dR_dLp2, 9, ipiv, work, 9, ierr) ! solve dR/dLp * x = residuum for x (deltaLp = -x below)
#endif
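     ! LAPACK's ?gesv factorizes its matrix argument in place, which is why the working
     ! copy dR_dLp2 is passed; on return, work holds the solution of dR/dLp * x = residuum.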
     if (ierr /= 0_pInt) then
#ifndef _OPENMP
       if (iand(debug_level(debug_crystallite), debug_levelBasic) /= 0_pInt) then
         write(6,'(a,i8,1x,i2,1x,i3,a,i3)') '<< CRYST >> integrateStress failed on dR/dLp inversion at el ip g ', e, i, g
         if (iand(debug_level(debug_crystallite), debug_levelExtensive) /= 0_pInt &
             .and. ((e == debug_e .and. i == debug_i .and. g == debug_g) &
                    .or. .not. iand(debug_level(debug_crystallite), debug_levelSelective) /= 0_pInt)) then
           write(6,*)
           write(6,'(a,/,9(12x,9(e15.3,1x)/))') '<< CRYST >> dR_dLp', transpose(dR_dLp)
           write(6,'(a,/,9(12x,9(e15.3,1x)/))') '<< CRYST >> dFe_dLp', transpose(dFe_dLp)
           write(6,'(a,/,9(12x,9(e15.3,1x)/))') '<< CRYST >> dT_dFe_constitutive', transpose(dT_dFe_constitutive)
           write(6,'(a,/,9(12x,9(e15.3,1x)/))') '<< CRYST >> dLp_dT_constitutive', transpose(dLp_dT_constitutive)
           write(6,'(a,/,3(12x,3(e20.7,1x)/))') '<< CRYST >> A', math_transpose33(A)
           write(6,'(a,/,3(12x,3(e20.7,1x)/))') '<< CRYST >> B', math_transpose33(B)
           write(6,'(a,/,3(12x,3(e20.7,1x)/))') '<< CRYST >> Lp_constitutive', math_transpose33(Lp_constitutive)
           write(6,'(a,/,3(12x,3(e20.7,1x)/))') '<< CRYST >> Lpguess', math_transpose33(Lpguess)
         endif
endif
#endif
return
endif
     deltaLp = -math_Plain9to33(work)
endif
   jacoCounter = jacoCounter + 1_pInt                  ! increase counter for jaco update

   Lpguess = Lpguess + steplength * deltaLp
 enddo LpLoop

 !* calculate new plastic and elastic deformation gradient
 invFp_new = math_mul33x33(invFp_current, B)
 invFp_new = invFp_new / math_det33(invFp_new)**(1.0_pReal/3.0_pReal) ! regularize by det
 call math_invert33(invFp_new, Fp_new, det, error)
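 ! Dividing invFp_new by the cube root of its determinant rescales it to unit determinant,
 ! enforcing plastic incompressibility (det(Fp) = 1) against numerical drift.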
 if (error .or. any(Fp_new /= Fp_new)) then
#ifndef _OPENMP
   if (iand(debug_level(debug_crystallite), debug_levelBasic) /= 0_pInt) then
     write(6,'(a,i8,1x,i2,1x,i3,a,i3)') '<< CRYST >> integrateStress failed on invFp_new inversion at el ip g ', &
                                        e, i, g, ' ; iteration ', NiterationStress
     if (iand(debug_level(debug_crystallite), debug_levelExtensive) /= 0_pInt &
         .and. ((e == debug_e .and. i == debug_i .and. g == debug_g) &
                .or. .not. iand(debug_level(debug_crystallite), debug_levelSelective) /= 0_pInt)) then
       write(6,*)
       write(6,'(a,/,3(12x,3(f12.7,1x)/))') '<< CRYST >> invFp_new', math_transpose33(invFp_new)
     endif
   endif
#endif
   return
 endif
 Fe_new = math_mul33x33(Fg_new, invFp_new)             ! calc resulting Fe

 !* add volumetric component to 2nd Piola-Kirchhoff stress and calculate 1st Piola-Kirchhoff stress
 forall (n = 1_pInt:3_pInt) Tstar_v(n) = Tstar_v(n) + p_hydro
 crystallite_P(1:3,1:3,g,i,e) = math_mul33x33(Fe_new, math_mul33x33(math_Mandel6to33(Tstar_v), &
                                                                    math_transpose33(invFp_new)))
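 ! The line above assembles the 1st Piola-Kirchhoff stress from the 2nd Piola-Kirchhoff
 ! stress S of the intermediate configuration as P = Fe * S * Fp^-T.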
 !* store local values in global variables
 crystallite_Lp(1:3,1:3,g,i,e) = Lpguess
 crystallite_Tstar_v(1:6,g,i,e) = Tstar_v
 crystallite_Fp(1:3,1:3,g,i,e) = Fp_new
 crystallite_Fe(1:3,1:3,g,i,e) = Fe_new
 crystallite_invFp(1:3,1:3,g,i,e) = invFp_new

 !* set return flag to true
 crystallite_integrateStress = .true.
#ifndef _OPENMP
 if (iand(debug_level(debug_crystallite), debug_levelExtensive) /= 0_pInt &
     .and. ((e == debug_e .and. i == debug_i .and. g == debug_g) &
            .or. .not. iand(debug_level(debug_crystallite), debug_levelSelective) /= 0_pInt)) then
   write(6,'(a,/,3(12x,3(f12.7,1x)/))') '<< CRYST >> P / MPa', math_transpose33(crystallite_P(1:3,1:3,g,i,e)) / 1.0e6_pReal
   write(6,'(a,/,3(12x,3(f12.7,1x)/))') '<< CRYST >> Cauchy / MPa', &
     math_mul33x33(crystallite_P(1:3,1:3,g,i,e), math_transpose33(Fg_new)) / 1.0e6_pReal / math_det33(Fg_new)
   write(6,'(a,/,3(12x,3(f12.7,1x)/))') '<< CRYST >> Fe Lp Fe^-1', &
     math_transpose33(math_mul33x33(Fe_new, math_mul33x33(crystallite_Lp(1:3,1:3,g,i,e), math_inv33(Fe_new)))) ! transpose to get correct print out order
   write(6,'(a,/,3(12x,3(f12.7,1x)/))') '<< CRYST >> Fp', math_transpose33(crystallite_Fp(1:3,1:3,g,i,e))
endif
#endif
 if (iand(debug_level(debug_crystallite), debug_levelBasic) /= 0_pInt) then
   !$OMP CRITICAL (distributionStress)
   debug_StressLoopDistribution(NiterationStress, numerics_integrationMode) = &
     debug_StressLoopDistribution(NiterationStress, numerics_integrationMode) + 1_pInt
   !$OMP END CRITICAL (distributionStress)
 endif
end function crystallite_integrateStress
!********************************************************************
! calculates orientations and disorientations (in case of single grain ips)
!********************************************************************
subroutine crystallite_orientations

!*** variables and functions from other modules ***!
use math, only: math_pDecomposition, &
                math_RtoQ, &
                math_qDisorientation, &
                math_qConj
use FEsolving, only: FEsolving_execElem, &
                     FEsolving_execIP
use IO, only: IO_warning
use material, only: material_phase, &
                    homogenization_Ngrains, &
                    phase_localPlasticity, &
                    phase_plasticityInstance
use mesh, only: mesh_element, &
                mesh_ipNeighborhood, &
                FE_NipNeighbors, &
                FE_geomtype
use constitutive_nonlocal, only: constitutive_nonlocal_structure, &
                                 constitutive_nonlocal_updateCompatibility
implicit none
!*** input variables ***!
!*** output variables ***!
!*** local variables ***!
integer(pInt) e, &                    ! element index
              i, &                    ! integration point index
              g, &                    ! grain index
              n, &                    ! neighbor index
              neighboring_e, &        ! element index of my neighbor
              neighboring_i, &        ! integration point index of my neighbor
              myPhase, &              ! phase
              neighboringPhase, &
              myInstance, &           ! instance of plasticity
              neighboringInstance, &
              myStructure, &          ! lattice structure
              neighboringStructure
real(pReal), dimension(3,3) :: U, R
real(pReal), dimension(4) :: orientation
logical error

! --- CALCULATE ORIENTATION AND LATTICE ROTATION ---
!$OMP PARALLEL DO PRIVATE(error,U,R,orientation)
do e = FEsolving_execElem(1), FEsolving_execElem(2)
  do i = FEsolving_execIP(1,e), FEsolving_execIP(2,e)
    do g = 1_pInt, homogenization_Ngrains(mesh_element(3,e))
      call math_pDecomposition(crystallite_Fe(1:3,1:3,g,i,e), U, R, error) ! polar decomposition of Fe
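      ! Polar decomposition splits Fe into stretch U and rotation R; the lattice
      ! orientation quaternion is then taken from the rotation part (transposed below,
      ! i.e. its inverse).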
      if (error) then
        call IO_warning(650_pInt, e, i, g)
        orientation = [1.0_pReal, 0.0_pReal, 0.0_pReal, 0.0_pReal] ! fake orientation
      else
        orientation = math_RtoQ(transpose(R))
      endif
      crystallite_rotation(1:4,g,i,e) = math_qDisorientation(crystallite_orientation0(1:4,g,i,e), & ! active rotation from ori0
                                                             orientation, &                         ! to current orientation
                                                             0_pInt)                                ! we don't want symmetry here
      crystallite_orientation(1:4,g,i,e) = orientation
enddo
enddo
enddo
!$OMP END PARALLEL DO

! --- UPDATE SOME ADDITIONAL VARIABLES THAT ARE NEEDED FOR NONLOCAL MATERIAL ---
! --- we use crystallite_orientation from above, so need a separate loop
!$OMP PARALLEL DO PRIVATE(myPhase,myInstance,myStructure,neighboring_e,neighboring_i,neighboringPhase,&
!$OMP neighboringInstance,neighboringStructure)
do e = FEsolving_execElem(1), FEsolving_execElem(2)
  do i = FEsolving_execIP(1,e), FEsolving_execIP(2,e)
    myPhase = material_phase(1,i,e)                                  ! get my phase
    if (.not. phase_localPlasticity(myPhase)) then                   ! if nonlocal model
      myInstance = phase_plasticityInstance(myPhase)
      myStructure = constitutive_nonlocal_structure(myInstance)      ! get my crystal structure
! --- calculate disorientation between me and my neighbor ---
      do n = 1_pInt, FE_NipNeighbors(FE_geomtype(mesh_element(2,e))) ! loop through my neighbors
        neighboring_e = mesh_ipNeighborhood(1,n,i,e)
        neighboring_i = mesh_ipNeighborhood(2,n,i,e)
        if ((neighboring_e > 0) .and. (neighboring_i > 0)) then      ! if neighbor exists
          neighboringPhase = material_phase(1,neighboring_i,neighboring_e) ! get my neighbor's phase
          if (.not. phase_localPlasticity(neighboringPhase)) then    ! neighbor also has nonlocal plasticity
            neighboringInstance = phase_plasticityInstance(neighboringPhase)
            neighboringStructure = constitutive_nonlocal_structure(neighboringInstance) ! get my neighbor's crystal structure
            if (myStructure == neighboringStructure) then            ! if my neighbor has the same crystal structure as me
              crystallite_disorientation(:,n,1,i,e) = &
                math_qDisorientation(crystallite_orientation(1:4,1,i,e), &
                                     crystallite_orientation(1:4,1,neighboring_i,neighboring_e), &
                                     crystallite_symmetryID(1,i,e))  ! calculate disorientation
            else                                                     ! for neighbor with different phase
              crystallite_disorientation(:,n,1,i,e) = (/0.0_pReal, 1.0_pReal, 0.0_pReal, 0.0_pReal/) ! 180 degree rotation about 100 axis
            endif
          else                                                       ! for neighbor with local plasticity
            crystallite_disorientation(:,n,1,i,e) = (/-1.0_pReal, 0.0_pReal, 0.0_pReal, 0.0_pReal/)  ! homomorphic identity
          endif
        else                                                         ! no existing neighbor
          crystallite_disorientation(:,n,1,i,e) = (/-1.0_pReal, 0.0_pReal, 0.0_pReal, 0.0_pReal/)    ! homomorphic identity
endif
enddo
! --- calculate compatibility and transmissivity between me and my neighbor ---
call constitutive_nonlocal_updateCompatibility ( crystallite_orientation , i , e )
endif
enddo
enddo
!$OMP END PARALLEL DO
end subroutine crystallite_orientations
!********************************************************************
! return results of particular grain
!********************************************************************
function crystallite_postResults(dt, & ! time increment
                                 g, &  ! grain number
                                 i, &  ! integration point number
                                 e)    ! element number
!*** variables and functions from other modules ***!
use math, only: math_qToEuler, &
                math_qToAxisAngle, &
                math_mul33x33, &
                math_transpose33, &
                math_det33, &
                math_I3, &
                inDeg, &
                math_Mandel6to33, &
                math_qMul, &
                math_qConj
use mesh, only: mesh_element, &
                mesh_ipVolume
use material, only: microstructure_crystallite, &
                    crystallite_Noutput, &
                    material_phase, &
                    material_texture, &
                    homogenization_Ngrains
use constitutive, only: constitutive_sizePostResults, &
                        constitutive_postResults, &
                        constitutive_homogenizedC
implicit none
!*** input variables ***!
integer(pInt), intent(in) :: e, &     ! element index
                             i, &     ! integration point index
                             g        ! grain index
real(pReal), intent(in) :: dt         ! time increment
!*** output variables ***!
real(pReal), dimension(1 + crystallite_sizePostResults(microstructure_crystallite(mesh_element(4,e))) + &
                       1 + constitutive_sizePostResults(g,i,e)) :: crystallite_postResults
!*** local variables ***!
real(pReal), dimension(3,3) :: Ee
real(pReal), dimension(4) :: rotation
real(pReal) detF
integer(pInt) o, c, crystID, mySize

crystID = microstructure_crystallite(mesh_element(4,e))
crystallite_postResults = 0.0_pReal
c = 0_pInt
crystallite_postResults(c+1) = real(crystallite_sizePostResults(crystID), pReal)   ! size of results from cryst
c = c + 1_pInt

do o = 1_pInt, crystallite_Noutput(crystID)
  mySize = 0_pInt
  select case(crystallite_output(o,crystID))
    case ('phase')
      mySize = 1_pInt
      crystallite_postResults(c+1) = real(material_phase(g,i,e), pReal)            ! phaseID of grain
2011-03-22 19:10:27 +05:30
case ( 'texture' )
2011-08-01 23:40:55 +05:30
mySize = 1_pInt
2013-01-16 14:15:41 +05:30
crystallite_postResults ( c + 1 ) = real ( material_texture ( g , i , e ) , pReal ) ! textureID of grain
2010-02-25 23:09:11 +05:30
case ( 'volume' )
2011-08-01 23:40:55 +05:30
mySize = 1_pInt
2013-01-16 14:15:41 +05:30
detF = math_det33 ( crystallite_partionedF ( 1 : 3 , 1 : 3 , g , i , e ) ) ! V_current = det(F) * V_reference
crystallite_postResults ( c + 1 ) = detF * mesh_ipVolume ( i , e ) / homogenization_Ngrains ( mesh_element ( 3 , e ) ) ! grain volume (not fraction but absolute)
2010-02-25 23:09:11 +05:30
case ( 'orientation' )
2011-08-01 23:40:55 +05:30
mySize = 4_pInt
2013-01-16 14:15:41 +05:30
crystallite_postResults ( c + 1 : c + mySize ) = crystallite_orientation ( 1 : 4 , g , i , e ) ! grain orientation as quaternion
2010-03-18 17:53:17 +05:30
case ( 'eulerangles' )
2011-08-01 23:40:55 +05:30
mySize = 3_pInt
2013-01-31 21:58:08 +05:30
crystallite_postResults ( c + 1 : c + mySize ) = inDeg * math_qToEuler ( crystallite_orientation ( 1 : 4 , g , i , e ) ) ! grain orientation as Euler angles in degree
2010-04-12 16:44:36 +05:30
case ( 'grainrotation' )
2011-08-01 23:40:55 +05:30
mySize = 4_pInt
2013-01-31 21:58:08 +05:30
crystallite_postResults ( c + 1 : c + mySize ) = math_qToAxisAngle ( crystallite_rotation ( 1 : 4 , g , i , e ) ) ! grain rotation away from initial orientation as axis-angle in crystal reference coordinates
2013-01-16 14:15:41 +05:30
crystallite_postResults ( c + 4 ) = inDeg * crystallite_postResults ( c + 4 ) ! angle in degree
case ( 'grainrotationx' )
mySize = 1_pInt
2013-01-31 21:58:08 +05:30
rotation = math_qToAxisAngle ( math_qMul ( math_qMul ( crystallite_orientation ( 1 : 4 , g , i , e ) , &
2013-01-16 14:15:41 +05:30
crystallite_rotation ( 1 : 4 , g , i , e ) ) , &
math_qConj ( crystallite_orientation ( 1 : 4 , g , i , e ) ) ) ) ! grain rotation away from initial orientation as axis-angle in sample reference coordinates
crystallite_postResults ( c + 1 ) = inDeg * rotation ( 1 ) * rotation ( 4 ) ! rotation about sample x: axis component times angle, in degree
case ( 'grainrotationy' )
mySize = 1_pInt
2013-01-31 21:58:08 +05:30
rotation = math_qToAxisAngle ( math_qMul ( math_qMul ( crystallite_orientation ( 1 : 4 , g , i , e ) , &
2013-01-16 14:15:41 +05:30
crystallite_rotation ( 1 : 4 , g , i , e ) ) , &
math_qConj ( crystallite_orientation ( 1 : 4 , g , i , e ) ) ) ) ! grain rotation away from initial orientation as axis-angle in sample reference coordinates
crystallite_postResults ( c + 1 ) = inDeg * rotation ( 2 ) * rotation ( 4 ) ! rotation about sample y: axis component times angle, in degree
case ( 'grainrotationz' )
mySize = 1_pInt
2013-01-31 21:58:08 +05:30
rotation = math_qToAxisAngle ( math_qMul ( math_qMul ( crystallite_orientation ( 1 : 4 , g , i , e ) , &
2013-01-16 14:15:41 +05:30
crystallite_rotation ( 1 : 4 , g , i , e ) ) , &
math_qConj ( crystallite_orientation ( 1 : 4 , g , i , e ) ) ) ) ! grain rotation away from initial orientation as axis-angle in sample reference coordinates
crystallite_postResults ( c + 1 ) = inDeg * rotation ( 3 ) * rotation ( 4 ) ! rotation about sample z: axis component times angle, in degree
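! note: crystallite_rotation is the misorientation in crystal coordinates;
! conjugating it with the orientation quaternion, o * r * conj(o), expresses
! the same rotation in sample coordinates before the axis-angle decomposition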
2011-05-11 22:08:45 +05:30
! remark: tensor output is of the form 11,12,13, 21,22,23, 31,32,33
! thus row index i is slow, while column index j is fast. reminder: "row is slow"
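! example: for F with entries (11,12,13; 21,22,23; 31,32,33),
! reshape ( math_transpose33 ( F ) , [ 9 ] ) returns 11,12,13,21,22,23,31,32,33,
! since Fortran's reshape traverses the (transposed) tensor in column-major order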
2010-05-18 13:27:13 +05:30
case ( 'defgrad' , 'f' )
mySize = 9_pInt
2012-02-16 00:28:38 +05:30
crystallite_postResults ( c + 1 : c + mySize ) = reshape ( math_transpose33 ( crystallite_partionedF ( 1 : 3 , 1 : 3 , g , i , e ) ) , [ mySize ] )
2012-01-20 15:55:35 +05:30
case ( 'e' )
mySize = 9_pInt
crystallite_postResults ( c + 1 : c + mySize ) = 0.5_pReal * reshape ( ( math_mul33x33 ( &
2012-01-26 19:20:00 +05:30
math_transpose33 ( crystallite_partionedF ( 1 : 3 , 1 : 3 , g , i , e ) ) , &
2012-02-16 00:28:38 +05:30
crystallite_partionedF ( 1 : 3 , 1 : 3 , g , i , e ) ) - math_I3 ) , [ mySize ] ) ! Green-Lagrange strain 0.5*(F^T F - I) from the total deformation gradient
2010-05-18 13:27:13 +05:30
case ( 'fe' )
mySize = 9_pInt
2012-02-16 00:28:38 +05:30
crystallite_postResults ( c + 1 : c + mySize ) = reshape ( math_transpose33 ( crystallite_Fe ( 1 : 3 , 1 : 3 , g , i , e ) ) , [ mySize ] )
2010-05-18 13:27:13 +05:30
case ( 'ee' )
2012-01-26 19:20:00 +05:30
Ee = 0.5_pReal * ( math_mul33x33 ( math_transpose33 ( crystallite_Fe ( 1 : 3 , 1 : 3 , g , i , e ) ) , crystallite_Fe ( 1 : 3 , 1 : 3 , g , i , e ) ) - math_I3 )
2010-05-18 13:27:13 +05:30
mySize = 9_pInt
2012-02-16 00:28:38 +05:30
crystallite_postResults ( c + 1 : c + mySize ) = reshape ( Ee , [ mySize ] )
2010-05-18 13:27:13 +05:30
case ( 'fp' )
mySize = 9_pInt
2012-02-16 00:28:38 +05:30
crystallite_postResults ( c + 1 : c + mySize ) = reshape ( math_transpose33 ( crystallite_Fp ( 1 : 3 , 1 : 3 , g , i , e ) ) , [ mySize ] )
2011-02-25 13:45:26 +05:30
case ( 'lp' )
mySize = 9_pInt
2012-02-16 00:28:38 +05:30
crystallite_postResults ( c + 1 : c + mySize ) = reshape ( math_transpose33 ( crystallite_Lp ( 1 : 3 , 1 : 3 , g , i , e ) ) , [ mySize ] )
2010-05-18 13:27:13 +05:30
case ( 'p' , 'firstpiola' , '1stpiola' )
mySize = 9_pInt
2012-02-16 00:28:38 +05:30
crystallite_postResults ( c + 1 : c + mySize ) = reshape ( math_transpose33 ( crystallite_P ( 1 : 3 , 1 : 3 , g , i , e ) ) , [ mySize ] )
2010-05-18 13:27:13 +05:30
case ( 's' , 'tstar' , 'secondpiola' , '2ndpiola' )
mySize = 9_pInt
2012-02-16 00:28:38 +05:30
crystallite_postResults ( c + 1 : c + mySize ) = reshape ( math_Mandel6to33 ( crystallite_Tstar_v ( 1 : 6 , g , i , e ) ) , [ mySize ] )
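! note: Tstar is stored as a Mandel 6-vector; math_Mandel6to33 expands it to the
! full symmetric 3x3 tensor, so no transposition is needed for the output order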
2012-03-01 18:36:09 +05:30
case ( 'elasmatrix' )
mySize = 36_pInt
crystallite_postResults ( c + 1 : c + mySize ) = reshape ( constitutive_homogenizedC ( g , i , e ) , [ mySize ] )
2010-02-25 23:09:11 +05:30
end select
2011-08-01 23:40:55 +05:30
c = c + mySize
2010-02-25 23:09:11 +05:30
enddo
2011-05-11 22:08:45 +05:30
2012-02-16 00:28:38 +05:30
crystallite_postResults ( c + 1 ) = real ( constitutive_sizePostResults ( g , i , e ) , pReal ) ! size of constitutive results
2011-03-17 16:16:17 +05:30
c = c + 1_pInt
2012-05-11 13:08:20 +05:30
if ( constitutive_sizePostResults ( g , i , e ) > 0_pInt ) &
crystallite_postResults ( c + 1 : c + constitutive_sizePostResults ( g , i , e ) ) = constitutive_postResults ( crystallite_Tstar_v ( 1 : 6 , g , i , e ) , &
crystallite_Fe , &
crystallite_Temperature ( g , i , e ) , &
dt , g , i , e )
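! note: the full crystallite_Fe array is passed here (not just the g,i,e entry),
! presumably so that nonlocal constitutive models can access neighboring points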
2009-12-15 13:50:31 +05:30
c = c + constitutive_sizePostResults ( g , i , e )
2011-09-13 21:24:06 +05:30
2012-03-09 01:55:28 +05:30
end function crystallite_postResults
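! a minimal sketch (hypothetical caller; names "results", "pos", etc. assumed)
! of how the flat vector returned by crystallite_postResults can be unpacked:
!   pos = 1_pInt
!   nCryst = int ( results ( pos ) , pInt )             ! crystallite counter (c+1 above)
!   crystOut = results ( pos + 1_pInt : pos + nCryst )  ! crystallite outputs
!   pos = pos + 1_pInt + nCryst
!   nConst = int ( results ( pos ) , pInt )             ! constitutive counter
!   constOut = results ( pos + 1_pInt : pos + nConst )  ! constitutive outputs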
2009-05-07 21:57:36 +05:30
end module crystallite
2009-05-28 22:08:40 +05:30
!##############################################################