! Copyright 2011 Max-Planck-Institut für Eisenforschung GmbH
!
! This file is part of DAMASK,
! the Düsseldorf Advanced MAterial Simulation Kit.
!
! DAMASK is free software: you can redistribute it and/or modify
! it under the terms of the GNU General Public License as published by
! the Free Software Foundation, either version 3 of the License, or
! (at your option) any later version.
!
! DAMASK is distributed in the hope that it will be useful,
! but WITHOUT ANY WARRANTY; without even the implied warranty of
! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
! GNU General Public License for more details.
!
! You should have received a copy of the GNU General Public License
! along with DAMASK. If not, see <http://www.gnu.org/licenses/>.
!
!##############################################################
!* $Id$
!##############################################################
MODULE CPFEM
!##############################################################
! *** CPFEM engine ***
!
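! Typical call sequence, as a sketch inferred from the code below: the FE
! solver calls CPFEM_initAll once at the first IP (thread safe), then calls
! CPFEM_general for every element/IP pair; a collection mode (3..5) stores
! ffn, ffn1 and Temperature and returns odd dummy values, and a subsequent
! computation mode (1, 2, 8, 9) returns the homogenized stress and tangent.
!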
use prec , only : pReal , &
pInt
implicit none
real ( pReal ) , parameter :: CPFEM_odd_stress = 1e15_pReal , &
CPFEM_odd_jacobian = 1e50_pReal
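! The "odd" values above are deliberately nonphysical. They are returned
! whenever no valid result exists (during collection, for an outdated
! deformation gradient, or when terminally ill), so that the FE solver
! cannot mistake them for a converged answer.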
real ( pReal ) , dimension ( : , : , : ) , allocatable :: CPFEM_cs ! Cauchy stress
real ( pReal ) , dimension ( : , : , : , : ) , allocatable :: CPFEM_dcsdE ! Cauchy stress tangent
real ( pReal ) , dimension ( : , : , : , : ) , allocatable :: CPFEM_dcsdE_knownGood ! known good tangent
logical :: CPFEM_init_done = .false. , & ! remember whether init has been done already
CPFEM_init_inProgress = .false. , & ! remember whether first IP is currently performing init
CPFEM_calc_done = .false. ! remember whether first IP has already calced the results
CONTAINS
!*********************************************************
!*** call (thread safe) all module initializations ***
!*********************************************************
subroutine CPFEM_initAll ( Temperature , element , IP )
use prec , only : pReal , &
prec_init
use numerics , only : numerics_init
use debug , only : debug_init
use FEsolving , only : FE_init
use math , only : math_init
use mesh , only : mesh_init
use lattice , only : lattice_init
use material , only : material_init
use constitutive , only : constitutive_init
use crystallite , only : crystallite_init
use homogenization , only : homogenization_init
use IO , only : IO_init
use DAMASK_interface
implicit none
integer ( pInt ) , intent ( in ) :: element , & ! FE element number
IP ! FE integration point number
real ( pReal ) , intent ( in ) :: Temperature ! temperature
real ( pReal ) rnd
integer ( pInt ) i , n
! initialization step (three dimensional stress state check missing?)
if ( .not. CPFEM_init_done ) then
call random_number ( rnd )
n = 0_pInt ! counter must start defined
do i = 1 , int ( 256.0 * rnd )
n = n + 1_pInt ! wasting random amount of time...
enddo ! ...to break potential race in multithreading
n = n + 1_pInt
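! note: the randomized delay above is a probabilistic guard, not a strict
! lock; it staggers the threads so that in practice only one of them sees
! CPFEM_init_inProgress == .false. below and performs the initialization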
if ( .not. CPFEM_init_inProgress ) then ! yes my thread won!
CPFEM_init_inProgress = .true.
call prec_init ( )
call IO_init ( )
call numerics_init ( )
call debug_init ( )
call math_init ( )
call FE_init ( )
call mesh_init ( IP , element ) ! pass on coordinates to alter calcMode of first ip
call lattice_init ( )
call material_init ( )
call constitutive_init ( )
call crystallite_init ( Temperature ) ! (have to) use temperature of first IP for whole model
call homogenization_init ( Temperature )
call CPFEM_init ( )
if ( trim ( FEsolver ) /= 'Spectral' ) call DAMASK_interface_init ( ) ! Spectral solver is doing initialization earlier
CPFEM_init_done = .true.
CPFEM_init_inProgress = .false.
else ! loser, loser...
do while ( CPFEM_init_inProgress )
enddo
endif
endif
end subroutine
!*********************************************************
!*** allocate the arrays defined in module CPFEM ***
!*** and initialize them ***
!*********************************************************
subroutine CPFEM_init ( )
use prec , only : pInt
use debug , only : debug_verbosity
use IO , only : IO_read_jobBinaryFile
use FEsolving , only : parallelExecution , &
symmetricSolver , &
restartRead , &
FEmodelGeometry
use mesh , only : mesh_NcpElems , &
mesh_maxNips
use material , only : homogenization_maxNgrains , &
material_phase
use constitutive , only : constitutive_state0
use crystallite , only : crystallite_F0 , &
crystallite_Fp0 , &
crystallite_Lp0 , &
crystallite_dPdF0 , &
crystallite_Tstar0_v
use homogenization , only : homogenization_sizeState , &
homogenization_state0 , &
materialpoint_F , &
materialpoint_F0
implicit none
integer ( pInt ) i , j , k , l , m
! initialize stress and jacobian to zero
allocate ( CPFEM_cs ( 6 , mesh_maxNips , mesh_NcpElems ) ) ; CPFEM_cs = 0.0_pReal
allocate ( CPFEM_dcsdE ( 6 , 6 , mesh_maxNips , mesh_NcpElems ) ) ; CPFEM_dcsdE = 0.0_pReal
allocate ( CPFEM_dcsdE_knownGood ( 6 , 6 , mesh_maxNips , mesh_NcpElems ) ) ; CPFEM_dcsdE_knownGood = 0.0_pReal
! *** restore the last converged values of each essential variable from the binary file
if ( restartRead ) then
if ( debug_verbosity > 0 ) then
!$OMP CRITICAL (write2out)
write ( 6 , '(a)' ) '<< CPFEM >> Restored state variables of last converged step from binary files'
!$OMP END CRITICAL (write2out)
endif
if ( IO_read_jobBinaryFile ( 777 , 'recordedPhase' , FEmodelGeometry , size ( material_phase ) ) ) then
read ( 777 , rec = 1 ) material_phase
close ( 777 )
endif
if ( IO_read_jobBinaryFile ( 777 , 'convergedF' , FEmodelGeometry , size ( crystallite_F0 ) ) ) then
read ( 777 , rec = 1 ) crystallite_F0
close ( 777 )
endif
if ( IO_read_jobBinaryFile ( 777 , 'convergedFp' , FEmodelGeometry , size ( crystallite_Fp0 ) ) ) then
read ( 777 , rec = 1 ) crystallite_Fp0
close ( 777 )
endif
if ( IO_read_jobBinaryFile ( 777 , 'convergedLp' , FEmodelGeometry , size ( crystallite_Lp0 ) ) ) then
read ( 777 , rec = 1 ) crystallite_Lp0
close ( 777 )
endif
if ( IO_read_jobBinaryFile ( 777 , 'convergeddPdF' , FEmodelGeometry , size ( crystallite_dPdF0 ) ) ) then
read ( 777 , rec = 1 ) crystallite_dPdF0
close ( 777 )
endif
if ( IO_read_jobBinaryFile ( 777 , 'convergedTstar' , FEmodelGeometry , size ( crystallite_Tstar0_v ) ) ) then
read ( 777 , rec = 1 ) crystallite_Tstar0_v
close ( 777 )
endif
if ( IO_read_jobBinaryFile ( 777 , 'convergedStateConst' , FEmodelGeometry ) ) then
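! constitutive state was dumped as one flat stream of records, grain by
! grain (same ordering as the write loop in CPFEM_general), so it is read
! back record by record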
m = 0_pInt
do i = 1 , homogenization_maxNgrains ; do j = 1 , mesh_maxNips ; do k = 1 , mesh_NcpElems
do l = 1 , size ( constitutive_state0 ( i , j , k ) % p )
m = m + 1_pInt
read ( 777 , rec = m ) constitutive_state0 ( i , j , k ) % p ( l )
enddo
enddo ; enddo ; enddo
close ( 777 )
endif
if ( IO_read_jobBinaryFile ( 777 , 'convergedStateHomog' , FEmodelGeometry ) ) then
m = 0_pInt
do k = 1 , mesh_NcpElems ; do j = 1 , mesh_maxNips
do l = 1 , homogenization_sizeState ( j , k )
m = m + 1_pInt
read ( 777 , rec = m ) homogenization_state0 ( j , k ) % p ( l )
enddo
enddo ; enddo
close ( 777 )
endif
if ( IO_read_jobBinaryFile ( 777 , 'convergeddcsdE' , FEmodelGeometry , size ( CPFEM_dcsdE ) ) ) then
read ( 777 , rec = 1 ) CPFEM_dcsdE
close ( 777 )
endif
restartRead = .false.
endif
! *** end of restoring
!$OMP CRITICAL (write2out)
write ( 6 , * )
write ( 6 , * ) '<<<+- cpfem init -+>>>'
write ( 6 , * ) '$Id$'
#include "compilation_info.f90"
if ( debug_verbosity > 0 ) then
write ( 6 , '(a32,1x,6(i8,1x))' ) 'CPFEM_cs: ' , shape ( CPFEM_cs )
write ( 6 , '(a32,1x,6(i8,1x))' ) 'CPFEM_dcsdE: ' , shape ( CPFEM_dcsdE )
write ( 6 , '(a32,1x,6(i8,1x))' ) 'CPFEM_dcsdE_knownGood: ' , shape ( CPFEM_dcsdE_knownGood )
write ( 6 , * )
write ( 6 , * ) 'parallelExecution: ' , parallelExecution
write ( 6 , * ) 'symmetricSolver: ' , symmetricSolver
endif
call flush ( 6 )
!$OMP END CRITICAL (write2out)
endsubroutine
!***********************************************************************
!*** perform initialization at first call, update variables and ***
!*** call the actual material model ***
!***********************************************************************
subroutine CPFEM_general ( mode , coords , ffn , ffn1 , Temperature , dt , element , IP , cauchyStress , &
& jacobian , pstress , dPdF )
! note: cauchyStress = Cauchy stress cs(6) and jacobian = Consistent tangent dcs/dE
!*** variables and functions from other modules ***!
use prec , only : pReal , &
pInt
use numerics , only : relevantStrain , &
defgradTolerance , &
iJacoStiffness
use debug , only : debug_e , &
debug_i , &
debug_g , &
debug_selectiveDebugger , &
debug_verbosity , &
debug_stressMaxLocation , &
debug_stressMinLocation , &
debug_jacobianMaxLocation , &
debug_jacobianMinLocation , &
debug_stressMax , &
debug_stressMin , &
debug_jacobianMax , &
debug_jacobianMin
use FEsolving , only : parallelExecution , &
outdatedFFN1 , &
terminallyIll , &
cycleCounter , &
theInc , &
theTime , &
theDelta , &
FEsolving_execElem , &
FEsolving_execIP , &
restartWrite
use math , only : math_identity2nd , &
math_mul33x33 , &
math_det33 , &
math_transpose33 , &
math_I3 , &
math_Mandel3333to66 , &
math_Mandel66to3333 , &
math_Mandel33to6 , &
math_Mandel6to33
use mesh , only : mesh_FEasCP , &
mesh_NcpElems , &
mesh_maxNips , &
mesh_element , &
mesh_node0 , &
mesh_node , &
mesh_ipCenterOfGravity , &
mesh_build_subNodeCoords , &
mesh_build_ipVolumes , &
mesh_build_ipCoordinates , &
FE_Nips , &
FE_Nnodes
use material , only : homogenization_maxNgrains , &
microstructure_elemhomo , &
material_phase
use constitutive , only : constitutive_state0 , constitutive_state
use crystallite , only : crystallite_F0 , &
crystallite_partionedF , &
crystallite_Fp0 , &
crystallite_Fp , &
crystallite_Lp0 , &
crystallite_Lp , &
crystallite_dPdF0 , &
crystallite_dPdF , &
crystallite_Tstar0_v , &
crystallite_Tstar_v , &
crystallite_localConstitution
use homogenization , only : homogenization_sizeState , &
homogenization_state , &
homogenization_state0 , &
materialpoint_F , &
materialpoint_F0 , &
materialpoint_P , &
materialpoint_dPdF , &
materialpoint_results , &
materialpoint_sizeResults , &
materialpoint_Temperature , &
materialpoint_stressAndItsTangent , &
materialpoint_postResults
use IO , only : IO_write_jobBinaryFile , &
IO_warning
use DAMASK_interface
implicit none
!*** input variables ***!
integer ( pInt ) , intent ( in ) :: element , & ! FE element number
IP ! FE integration point number
real ( pReal ) , intent ( inout ) :: Temperature ! temperature
real ( pReal ) , intent ( in ) :: dt ! time increment
real ( pReal ) , dimension ( 3 , * ) , intent ( in ) :: coords ! MARC: displacements for each node of the current element
! ABAQUS: coordinates of the current material point (IP)
! SPECTRAL: coordinates of the current material point (IP)
real ( pReal ) , dimension ( 3 , 3 ) , intent ( in ) :: ffn , & ! deformation gradient for t=t0
ffn1 ! deformation gradient for t=t1
integer ( pInt ) , intent ( in ) :: mode ! computation mode 1: regular computation plus aging of results
! 2: regular computation
! 3: collection of FEM data
! 4: backup tangent from former converged inc
! 5: restore tangent from former converged inc
! 6: recycling of former results (MARC speciality)
! 7: restore tangent from former converged inc, without collection
! 8: like 1, but collects ffn/ffn1 inline (Abaqus explicit skips collect)
! 9: like 2, but collects ffn/ffn1 inline (Abaqus explicit skips collect)
!*** output variables ***!
real ( pReal ) , dimension ( 6 ) , intent ( out ) :: cauchyStress ! stress vector in Mandel notation
real ( pReal ) , dimension ( 6 , 6 ) , intent ( out ) :: jacobian ! jacobian in Mandel notation
real ( pReal ) , dimension ( 3 , 3 ) , intent ( out ) :: pstress ! Piola-Kirchhoff stress in Matrix notation
real ( pReal ) , dimension ( 3 , 3 , 3 , 3 ) , intent ( out ) :: dPdF !
!*** local variables ***!
real ( pReal ) J_inverse , & ! inverse of Jacobian
rnd
real ( pReal ) , dimension ( 3 , 3 ) :: Kirchhoff , & ! Piola-Kirchhoff stress in Matrix notation
cauchyStress33 ! stress vector in Matrix notation
real ( pReal ) , dimension ( 3 , 3 , 3 , 3 ) :: H_sym , &
H , &
jacobian3333 ! jacobian in Matrix notation
integer ( pInt ) cp_en , & ! crystal plasticity element number
i , &
j , &
k , &
l , &
m , &
n , &
e , &
node , &
FEnodeID
logical updateJaco ! flag indicating if Jacobian has to be updated
cp_en = mesh_FEasCP ( 'elem' , element )
if ( debug_verbosity > 0 .and. cp_en == 1 .and. IP == 1 ) then
!$OMP CRITICAL (write2out)
write ( 6 , * )
write ( 6 , '(a)' ) '#############################################'
write ( 6 , '(a1,a22,1x,f15.7,a6)' ) '#' , 'theTime' , theTime , '#'
write ( 6 , '(a1,a22,1x,f15.7,a6)' ) '#' , 'theDelta' , theDelta , '#'
write ( 6 , '(a1,a22,1x,i8,a13)' ) '#' , 'theInc' , theInc , '#'
write ( 6 , '(a1,a22,1x,i8,a13)' ) '#' , 'cycleCounter' , cycleCounter , '#'
write ( 6 , '(a1,a22,1x,i8,a13)' ) '#' , 'computationMode' , mode , '#'
write ( 6 , '(a)' ) '#############################################'
write ( 6 , * )
call flush ( 6 )
!$OMP END CRITICAL (write2out)
endif
!*** according to our "mode" we decide what to do
select case ( mode )
! --+>> REGULAR COMPUTATION (WITH AGING OF RESULTS IF MODE == 1) <<+--
case ( 1 , 2 , 8 , 9 )
!*** age results
if ( mode == 1 .or. mode == 8 ) then
crystallite_F0 = crystallite_partionedF ! crystallite deformation (_subF is perturbed...)
crystallite_Fp0 = crystallite_Fp ! crystallite plastic deformation
crystallite_Lp0 = crystallite_Lp ! crystallite plastic velocity
crystallite_dPdF0 = crystallite_dPdF ! crystallite stiffness
crystallite_Tstar0_v = crystallite_Tstar_v ! crystallite 2nd Piola Kirchhoff stress
forall ( i = 1 : homogenization_maxNgrains , &
j = 1 : mesh_maxNips , &
k = 1 : mesh_NcpElems ) &
constitutive_state0 ( i , j , k ) % p = constitutive_state ( i , j , k ) % p ! microstructure of crystallites
if ( debug_verbosity > 0 ) then
!$OMP CRITICAL (write2out)
write ( 6 , '(a)' ) '<< CPFEM >> Aging states'
if ( debug_e == cp_en .and. debug_i == IP ) then
write ( 6 , '(a,1x,i8,1x,i2,1x,i4,/,(12x,6(e20.8,1x)))' ) '<< CPFEM >> AGED state of element ip grain' , &
cp_en , IP , 1 , constitutive_state ( 1 , IP , cp_en ) % p
write ( 6 , * )
endif
!$OMP END CRITICAL (write2out)
endif
!$OMP PARALLEL DO
do k = 1 , mesh_NcpElems
do j = 1 , mesh_maxNips
if ( homogenization_sizeState ( j , k ) > 0_pInt ) &
homogenization_state0 ( j , k ) % p = homogenization_state ( j , k ) % p ! internal state of homogenization scheme
enddo
enddo
!$OMP END PARALLEL DO
! * dump the last converged values of each essential variable to a binary file
if ( restartWrite ) then
if ( debug_verbosity > 0 ) then
!$OMP CRITICAL (write2out)
write ( 6 , '(a)' ) '<< CPFEM >> Writing state variables of last converged step to binary files'
!$OMP END CRITICAL (write2out)
endif
if ( IO_write_jobBinaryFile ( 777 , 'recordedPhase' , size ( material_phase ) ) ) then
write ( 777 , rec = 1 ) material_phase
close ( 777 )
endif
if ( IO_write_jobBinaryFile ( 777 , 'convergedF' , size ( crystallite_F0 ) ) ) then
write ( 777 , rec = 1 ) crystallite_F0
close ( 777 )
endif
if ( IO_write_jobBinaryFile ( 777 , 'convergedFp' , size ( crystallite_Fp0 ) ) ) then
write ( 777 , rec = 1 ) crystallite_Fp0
close ( 777 )
endif
if ( IO_write_jobBinaryFile ( 777 , 'convergedLp' , size ( crystallite_Lp0 ) ) ) then
write ( 777 , rec = 1 ) crystallite_Lp0
close ( 777 )
endif
if ( IO_write_jobBinaryFile ( 777 , 'convergeddPdF' , size ( crystallite_dPdF0 ) ) ) then
write ( 777 , rec = 1 ) crystallite_dPdF0
close ( 777 )
endif
if ( IO_write_jobBinaryFile ( 777 , 'convergedTstar' , size ( crystallite_Tstar0_v ) ) ) then
write ( 777 , rec = 1 ) crystallite_Tstar0_v
close ( 777 )
endif
if ( IO_write_jobBinaryFile ( 777 , 'convergedStateConst' ) ) then
m = 0_pInt
do i = 1 , homogenization_maxNgrains ; do j = 1 , mesh_maxNips ; do k = 1 , mesh_NcpElems
do l = 1 , size ( constitutive_state0 ( i , j , k ) % p )
m = m + 1_pInt
write ( 777 , rec = m ) constitutive_state0 ( i , j , k ) % p ( l )
enddo
enddo ; enddo ; enddo
close ( 777 )
endif
if ( IO_write_jobBinaryFile ( 777 , 'convergedStateHomog' ) ) then
m = 0_pInt
do k = 1 , mesh_NcpElems ; do j = 1 , mesh_maxNips
do l = 1 , homogenization_sizeState ( j , k )
m = m + 1_pInt
write ( 777 , rec = m ) homogenization_state0 ( j , k ) % p ( l )
enddo
enddo ; enddo
close ( 777 )
endif
if ( IO_write_jobBinaryFile ( 777 , 'convergeddcsdE' , size ( CPFEM_dcsdE ) ) ) then
write ( 777 , rec = 1 ) CPFEM_dcsdE
close ( 777 )
endif
endif
! * end of dumping
endif
if ( mode == 8 .or. mode == 9 ) then ! Abaqus explicit skips collect
materialpoint_Temperature ( IP , cp_en ) = Temperature
materialpoint_F0 ( 1 : 3 , 1 : 3 , IP , cp_en ) = ffn
materialpoint_F ( 1 : 3 , 1 : 3 , IP , cp_en ) = ffn1
endif
!*** deformation gradient outdated or any actual deformation gradient differs more than defgradTolerance from the stored one
if ( terminallyIll .or. outdatedFFN1 .or. any ( abs ( ffn1 - materialpoint_F ( 1 : 3 , 1 : 3 , IP , cp_en ) ) > defgradTolerance ) ) then
if ( .not. terminallyIll .and. .not. outdatedFFN1 ) then
if ( debug_verbosity > 0 ) then
!$OMP CRITICAL (write2out)
write ( 6 , '(a,1x,i8,1x,i2)' ) '<< CPFEM >> OUTDATED at element ip' , cp_en , IP
write ( 6 , '(a,/,3(12x,3(f10.6,1x),/))' ) '<< CPFEM >> FFN1 old:' , math_transpose33 ( materialpoint_F ( 1 : 3 , 1 : 3 , IP , cp_en ) )
write ( 6 , '(a,/,3(12x,3(f10.6,1x),/))' ) '<< CPFEM >> FFN1 now:' , math_transpose33 ( ffn1 )
!$OMP END CRITICAL (write2out)
endif
outdatedFFN1 = .true.
endif
call random_number ( rnd )
rnd = 2.0_pReal * rnd - 1.0_pReal
CPFEM_cs ( 1 : 6 , IP , cp_en ) = rnd * CPFEM_odd_stress
CPFEM_dcsde ( 1 : 6 , 1 : 6 , IP , cp_en ) = CPFEM_odd_jacobian * math_identity2nd ( 6 )
!*** deformation gradient is not outdated
else
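! recompute the consistent tangent only every iJacoStiffness cycles;
! in all other cycles the tangent of the last computation is reused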
updateJaco = mod ( cycleCounter , iJacoStiffness ) == 0
!* no parallel computation, so we use just one single element and IP for computation
if ( .not. parallelExecution ) then
FEsolving_execElem ( 1 ) = cp_en
FEsolving_execElem ( 2 ) = cp_en
FEsolving_execIP ( 1 , cp_en ) = IP
FEsolving_execIP ( 2 , cp_en ) = IP
if ( debug_verbosity > 0 ) then
!$OMP CRITICAL (write2out)
write ( 6 , '(a,i8,1x,i2)' ) '<< CPFEM >> Calculation for element ip ' , cp_en , IP
!$OMP END CRITICAL (write2out)
endif
call materialpoint_stressAndItsTangent ( updateJaco , dt ) ! calculate stress and its tangent
call materialpoint_postResults ( dt ) ! post results
!* parallel computation and calculation not yet done
elseif ( .not. CPFEM_calc_done ) then
if ( debug_verbosity > 0 ) then
!$OMP CRITICAL (write2out)
write ( 6 , '(a,i8,a,i8)' ) '<< CPFEM >> Calculation for elements ' , FEsolving_execElem ( 1 ) , ' to ' , FEsolving_execElem ( 2 )
!$OMP END CRITICAL (write2out)
endif
if ( FEsolver == 'Marc' ) then ! marc returns nodal coordinates, whereas Abaqus and spectral solver return ip coordinates. So for marc we have to calculate the ip coordinates from the nodal coordinates.
call mesh_build_subNodeCoords ( ) ! update subnodal coordinates
call mesh_build_ipCoordinates ( ) ! update ip coordinates
endif
if ( debug_verbosity > 0 ) then
!$OMP CRITICAL (write2out)
write ( 6 , '(a,i8,a,i8)' ) '<< CPFEM >> Start stress and tangent ' , FEsolving_execElem ( 1 ) , ' to ' , FEsolving_execElem ( 2 )
!$OMP END CRITICAL (write2out)
endif
call materialpoint_stressAndItsTangent ( updateJaco , dt ) ! calculate stress and its tangent (parallel execution inside)
call materialpoint_postResults ( dt ) ! post results
!$OMP PARALLEL DO
do e = FEsolving_execElem ( 1 ) , FEsolving_execElem ( 2 ) ! loop over all elements processed in parallel
if ( microstructure_elemhomo ( mesh_element ( 4 , e ) ) ) then ! dealing with homogeneous element?
forall ( i = 2 : FE_Nips ( mesh_element ( 2 , e ) ) ) ! copy results of first IP to all others
materialpoint_P ( 1 : 3 , 1 : 3 , i , e ) = materialpoint_P ( 1 : 3 , 1 : 3 , 1 , e )
materialpoint_F ( 1 : 3 , 1 : 3 , i , e ) = materialpoint_F ( 1 : 3 , 1 : 3 , 1 , e )
materialpoint_dPdF ( 1 : 3 , 1 : 3 , 1 : 3 , 1 : 3 , i , e ) = materialpoint_dPdF ( 1 : 3 , 1 : 3 , 1 : 3 , 1 : 3 , 1 , e )
materialpoint_results ( 1 : materialpoint_sizeResults , i , e ) = materialpoint_results ( 1 : materialpoint_sizeResults , 1 , e )
end forall
endif
enddo
!$OMP END PARALLEL DO
CPFEM_calc_done = .true.
endif
!*** map stress and stiffness (or return odd values if terminally ill)
if ( terminallyIll ) then
call random_number ( rnd )
rnd = 2.0_pReal * rnd - 1.0_pReal
CPFEM_cs ( 1 : 6 , IP , cp_en ) = rnd * CPFEM_odd_stress
CPFEM_dcsde ( 1 : 6 , 1 : 6 , IP , cp_en ) = CPFEM_odd_jacobian * math_identity2nd ( 6 )
else
! translate from P to CS
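! Cauchy stress from first Piola-Kirchhoff stress:
! Kirchhoff stress tau = P F^T, Cauchy stress sigma = tau / det(F)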
Kirchhoff = math_mul33x33 ( materialpoint_P ( 1 : 3 , 1 : 3 , IP , cp_en ) , math_transpose33 ( materialpoint_F ( 1 : 3 , 1 : 3 , IP , cp_en ) ) )
J_inverse = 1.0_pReal / math_det33 ( materialpoint_F ( 1 : 3 , 1 : 3 , IP , cp_en ) )
CPFEM_cs ( 1 : 6 , IP , cp_en ) = math_Mandel33to6 ( J_inverse * Kirchhoff )
! translate from dP/dF to dCS/dE
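! H accumulates the push-forward of dP/dF plus geometric terms built from
! the Kirchhoff stress; after symmetrization and scaling by 1/det(F) it
! yields the tangent dCS/dE in Mandel notation (see below)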
H = 0.0_pReal
do i = 1 , 3 ; do j = 1 , 3 ; do k = 1 , 3 ; do l = 1 , 3 ; do m = 1 , 3 ; do n = 1 , 3
H ( i , j , k , l ) = H ( i , j , k , l ) + &
materialpoint_F ( j , m , IP , cp_en ) * &
materialpoint_F ( l , n , IP , cp_en ) * &
materialpoint_dPdF ( i , m , k , n , IP , cp_en ) - &
math_I3 ( j , l ) * materialpoint_F ( i , m , IP , cp_en ) * materialpoint_P ( k , m , IP , cp_en ) + &
0.5_pReal * ( math_I3 ( i , k ) * Kirchhoff ( j , l ) + math_I3 ( j , l ) * Kirchhoff ( i , k ) + &
math_I3 ( i , l ) * Kirchhoff ( j , k ) + math_I3 ( j , k ) * Kirchhoff ( i , l ) )
enddo ; enddo ; enddo ; enddo ; enddo ; enddo
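! enforce the minor symmetries of the tangent:
! H_sym(i,j,k,l) = 0.25 * ( H(i,j,k,l) + H(j,i,k,l) + H(i,j,l,k) + H(j,i,l,k) )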
do i = 1 , 3 ; do j = 1 , 3 ; do k = 1 , 3 ; do l = 1 , 3
H_sym ( i , j , k , l ) = 0.25_pReal * ( H ( i , j , k , l ) + H ( j , i , k , l ) + H ( i , j , l , k ) + H ( j , i , l , k ) )
enddo ; enddo ; enddo ; enddo
CPFEM_dcsde ( 1 : 6 , 1 : 6 , IP , cp_en ) = math_Mandel3333to66 ( J_inverse * H_sym )
endif
endif
! --+>> COLLECTION OF FEM INPUT WITH RETURNING OF RANDOMIZED ODD STRESS AND JACOBIAN <<+--
case ( 3 , 4 , 5 )
if ( mode == 4 ) then
CPFEM_dcsde_knownGood = CPFEM_dcsde ! --+>> BACKUP JACOBIAN FROM FORMER CONVERGED INC
else if ( mode == 5 ) then
CPFEM_dcsde = CPFEM_dcsde_knownGood ! --+>> RESTORE CONSISTENT JACOBIAN FROM FORMER CONVERGED INC
end if
call random_number ( rnd )
rnd = 2.0_pReal * rnd - 1.0_pReal
materialpoint_Temperature ( IP , cp_en ) = Temperature
materialpoint_F0 ( 1 : 3 , 1 : 3 , IP , cp_en ) = ffn
materialpoint_F ( 1 : 3 , 1 : 3 , IP , cp_en ) = ffn1
CPFEM_cs ( 1 : 6 , IP , cp_en ) = rnd * CPFEM_odd_stress
CPFEM_dcsde ( 1 : 6 , 1 : 6 , IP , cp_en ) = CPFEM_odd_jacobian * math_identity2nd ( 6 )
CPFEM_calc_done = .false.
select case ( FEsolver )
case ( 'Abaqus' , 'Spectral' )
mesh_ipCenterOfGravity ( 1 : 3 , IP , cp_en ) = coords ( 1 : 3 , 1 )
case ( 'Marc' )
do node = 1 , FE_Nnodes ( mesh_element ( 2 , cp_en ) )
FEnodeID = mesh_FEasCP ( 'node' , mesh_element ( 4 + node , cp_en ) )
mesh_node ( 1 : 3 , FEnodeID ) = mesh_node0 ( 1 : 3 , FEnodeID ) + coords ( 1 : 3 , node )
enddo
end select
! --+>> RECYCLING OF FORMER RESULTS (MARC SPECIALTY) <<+--
case ( 6 )
2009-06-15 18:41:21 +05:30
! do nothing
! --+>> RESTORE CONSISTENT JACOBIAN FROM FORMER CONVERGED INC
case ( 7 )
CPFEM_dcsde = CPFEM_dcsde_knownGood
end select
!*** fill output variables with computed values
cauchyStress = CPFEM_cs ( 1 : 6 , IP , cp_en )
jacobian = CPFEM_dcsdE ( 1 : 6 , 1 : 6 , IP , cp_en )
pstress = materialpoint_P ( 1 : 3 , 1 : 3 , IP , cp_en )
dPdF = materialpoint_dPdF ( 1 : 3 , 1 : 3 , 1 : 3 , 1 : 3 , IP , cp_en )
if ( theTime > 0.0_pReal ) then
Temperature = materialpoint_Temperature ( IP , cp_en ) ! homogenized result except for potentially non-isothermal starting condition.
endif
if ( mode < 3 .and. debug_verbosity > 0 .and. ( ( debug_e == cp_en .and. debug_i == IP ) .or. .not. debug_selectiveDebugger ) ) then
!$OMP CRITICAL (write2out)
write ( 6 , '(a,i8,1x,i2,/,12x,6(f10.3,1x)/)' ) '<< CPFEM >> stress/MPa at el ip ' , cp_en , IP , cauchyStress / 1e6
write ( 6 , '(a,i8,1x,i2,/,6(12x,6(f10.3,1x)/))' ) '<< CPFEM >> jacobian/GPa at el ip ' , cp_en , IP , transpose ( jacobian ) / 1e9
call flush ( 6 )
!$OMP END CRITICAL (write2out)
endif
!*** warn if stiffness close to zero
if ( all ( abs ( jacobian ) < 1e-10_pReal ) ) then
call IO_warning ( 601 , cp_en , IP )
endif
!*** remember extreme values of stress and jacobian
if ( mode < 3 ) then
cauchyStress33 = math_Mandel6to33 ( cauchyStress )
if ( maxval ( cauchyStress33 ) > debug_stressMax ) then
debug_stressMaxLocation = (/ cp_en , IP /)
debug_stressMax = maxval ( cauchyStress33 )
endif
if ( minval ( cauchyStress33 ) < debug_stressMin ) then
debug_stressMinLocation = (/ cp_en , IP /)
debug_stressMin = minval ( cauchyStress33 )
endif
jacobian3333 = math_Mandel66to3333 ( jacobian )
if ( maxval ( jacobian3333 ) > debug_jacobianMax ) then
debug_jacobianMaxLocation = (/ cp_en , IP /)
debug_jacobianMax = maxval ( jacobian3333 )
endif
if ( minval ( jacobian3333 ) < debug_jacobianMin ) then
debug_jacobianMinLocation = (/ cp_en , IP /)
debug_jacobianMin = minval ( jacobian3333 )
endif
endif
end subroutine
END MODULE CPFEM