!* $Id$
!***************************************
!*      Module: HOMOGENIZATION         *
!***************************************
!* contains:                           *
!* - _init                             *
!* - materialpoint_stressAndItsTangent *
!* - _partitionDeformation             *
!* - _updateState                      *
!* - _averageStressAndItsTangent       *
!* - _postResults                      *
!***************************************
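
!***************************************
!* Typical call sequence (sketch only; the actual driver is the solver
!* interface, and the bracketed values below are illustrative placeholders):
!*
!*   call homogenization_init(Temperature)                   ! once, after mesh/material/crystallite init
!*   materialpoint_F0(1:3,1:3,:,:) = <def grad at start of FE increment>
!*   materialpoint_F(1:3,1:3,:,:)  = <def grad to be reached>
!*   call materialpoint_stressAndItsTangent(updateJaco, dt)  ! fills materialpoint_P and materialpoint_dPdF
!*   call materialpoint_postResults(dt)                      ! fills materialpoint_results
!***************************************
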
MODULE homogenization

!*** Include other modules ***
 use prec, only: pInt,pReal,p_vec
 implicit none

! ****************************************************************
! *** General variables for the homogenization at a            ***
! *** material point                                           ***
! ****************************************************************
 type(p_vec), dimension(:,:), allocatable :: homogenization_state0, &           ! pointer array to homogenization state at start of FE increment
                                             homogenization_subState0, &        ! pointer array to homogenization state at start of homogenization increment
                                             homogenization_state               ! pointer array to current homogenization state (end of converged time step)
 integer(pInt), dimension(:,:), allocatable :: homogenization_sizeState, &      ! size of homogenization state array per material point
                                               homogenization_sizePostResults   ! size of postResults array per material point

 real(pReal), dimension(:,:,:,:,:,:), allocatable :: materialpoint_dPdF         ! tangent of first P--K stress at IP
 real(pReal), dimension(:,:,:,:), allocatable :: materialpoint_F0, &            ! def grad of IP at start of FE increment
                                                 materialpoint_F, &             ! def grad of IP to be reached at end of FE increment
                                                 materialpoint_subF0, &         ! def grad of IP at beginning of homogenization increment
                                                 materialpoint_subF, &          ! def grad of IP to be reached at end of homogenization increment
                                                 materialpoint_P                ! first P--K stress of IP
 real(pReal), dimension(:,:), allocatable :: materialpoint_Temperature, &       ! temperature at IP
                                             materialpoint_subFrac, &           ! already completed fraction of the FE increment
                                             materialpoint_subStep, &           ! size of next homogenization sub-increment (fraction of FE increment)
                                             materialpoint_subdt                ! time increment of current homogenization sub-increment

 real(pReal), dimension(:,:,:), allocatable :: materialpoint_results            ! results array of material point

 logical, dimension(:,:), allocatable :: materialpoint_requested, &             ! flag whether an IP still requires stress/tangent calculation
                                         materialpoint_converged                ! flag whether homogenization at an IP has converged
 logical, dimension(:,:,:), allocatable :: materialpoint_doneAndHappy           ! (1) "done" and (2) "happy" flag per IP
 integer(pInt) homogenization_maxSizeState, &
               homogenization_maxSizePostResults, &
               materialpoint_sizeResults
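
! Note on the time-stepping scheme implemented below (summary, not an external
! specification): the FE increment materialpoint_F0 -> materialpoint_F is
! traversed in homogenization sub-increments of relative size
! materialpoint_subStep, starting at materialpoint_subFrac = 0. A converged
! sub-increment winds the sub* quantities forward (and may grow the next step
! by stepIncreaseHomog); a failed one restores the restoration points and
! shrinks the step by subStepSizeHomog until subStepMinHomog is undercut.
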
CONTAINS


!**************************************
!*      Module initialization         *
!**************************************
subroutine homogenization_init(Temperature)

 use prec, only: pReal,pInt
 use math, only: math_I3
 use IO, only: IO_error, IO_open_file, IO_open_jobFile
 use mesh, only: mesh_maxNips,mesh_NcpElems,mesh_element,FE_Nips
 use material
 use constitutive, only: constitutive_maxSizePostResults
 use crystallite, only: crystallite_maxSizePostResults
 use homogenization_isostrain
 use homogenization_RGC

 real(pReal) Temperature
 integer(pInt), parameter :: fileunit = 200
 integer(pInt) e,i,g,p,myInstance,j
 integer(pInt), dimension(:,:), pointer :: thisSize
 character(len=64), dimension(:,:), pointer :: thisOutput
 logical knownHomogenization

 if(.not. IO_open_file(fileunit,material_configFile)) call IO_error (100)       ! corrupt config file

 call homogenization_isostrain_init(fileunit)                                   ! parse all homogenizations of this type
 call homogenization_RGC_init(fileunit)

 close(fileunit)

! write description file for homogenization output
 if(.not. IO_open_jobFile(fileunit,'outputHomogenization')) call IO_error (50)  ! problems in writing file

 do p = 1,material_Nhomogenization
   i = homogenization_typeInstance(p)                                           ! which instance of this homogenization type
   knownHomogenization = .true.                                                 ! assume valid
   select case(homogenization_type(p))                                          ! split per homogenization type
     case (homogenization_isostrain_label)
       thisOutput => homogenization_isostrain_output
       thisSize   => homogenization_isostrain_sizePostResult
     case (homogenization_RGC_label)
       thisOutput => homogenization_RGC_output
       thisSize   => homogenization_RGC_sizePostResult
     case default
       knownHomogenization = .false.
   end select

   write(fileunit,*)
   write(fileunit,'(a)') '['//trim(homogenization_name(p))//']'
   write(fileunit,*)
   if (knownHomogenization) then
     write(fileunit,'(a)') '(type)'//char(9)//trim(homogenization_type(p))
     write(fileunit,'(a,i4)') '(ngrains)'//char(9),homogenization_Ngrains(p)
     do e = 1,homogenization_Noutput(p)
       write(fileunit,'(a,i4)') trim(thisOutput(e,i))//char(9),thisSize(e,i)
     enddo
   endif
 enddo

 close(fileunit)

 allocate(homogenization_state0(mesh_maxNips,mesh_NcpElems))
 allocate(homogenization_subState0(mesh_maxNips,mesh_NcpElems))
 allocate(homogenization_state(mesh_maxNips,mesh_NcpElems))
 allocate(homogenization_sizeState(mesh_maxNips,mesh_NcpElems));       homogenization_sizeState = 0_pInt
 allocate(homogenization_sizePostResults(mesh_maxNips,mesh_NcpElems)); homogenization_sizePostResults = 0_pInt

 allocate(materialpoint_dPdF(3,3,3,3,mesh_maxNips,mesh_NcpElems));     materialpoint_dPdF = 0.0_pReal
 allocate(materialpoint_F0(3,3,mesh_maxNips,mesh_NcpElems))                     ! initialized to identity below
 allocate(materialpoint_F(3,3,mesh_maxNips,mesh_NcpElems));            materialpoint_F = 0.0_pReal
 allocate(materialpoint_subF0(3,3,mesh_maxNips,mesh_NcpElems));        materialpoint_subF0 = 0.0_pReal
 allocate(materialpoint_subF(3,3,mesh_maxNips,mesh_NcpElems));         materialpoint_subF = 0.0_pReal
 allocate(materialpoint_P(3,3,mesh_maxNips,mesh_NcpElems));            materialpoint_P = 0.0_pReal
 allocate(materialpoint_Temperature(mesh_maxNips,mesh_NcpElems));      materialpoint_Temperature = Temperature
 allocate(materialpoint_subFrac(mesh_maxNips,mesh_NcpElems));          materialpoint_subFrac = 0.0_pReal
 allocate(materialpoint_subStep(mesh_maxNips,mesh_NcpElems));          materialpoint_subStep = 0.0_pReal
 allocate(materialpoint_subdt(mesh_maxNips,mesh_NcpElems));            materialpoint_subdt = 0.0_pReal
 allocate(materialpoint_requested(mesh_maxNips,mesh_NcpElems));        materialpoint_requested = .false.
 allocate(materialpoint_converged(mesh_maxNips,mesh_NcpElems));        materialpoint_converged = .true.
 allocate(materialpoint_doneAndHappy(2,mesh_maxNips,mesh_NcpElems));   materialpoint_doneAndHappy = .true.

 forall (i = 1:mesh_maxNips,e = 1:mesh_NcpElems)
   materialpoint_F0(1:3,1:3,i,e) = math_I3
   materialpoint_F(1:3,1:3,i,e)  = math_I3
 end forall

 do e = 1,mesh_NcpElems                                                         ! loop over elements
   myInstance = homogenization_typeInstance(mesh_element(3,e))
   do i = 1,FE_Nips(mesh_element(2,e))                                          ! loop over IPs
     select case(homogenization_type(mesh_element(3,e)))

!* isostrain
       case (homogenization_isostrain_label)
         if (homogenization_isostrain_sizeState(myInstance) > 0_pInt) then
           allocate(homogenization_state0(i,e)%p(homogenization_isostrain_sizeState(myInstance)))
           allocate(homogenization_subState0(i,e)%p(homogenization_isostrain_sizeState(myInstance)))
           allocate(homogenization_state(i,e)%p(homogenization_isostrain_sizeState(myInstance)))
           homogenization_state0(i,e)%p = homogenization_isostrain_stateInit(myInstance)
           homogenization_sizeState(i,e) = homogenization_isostrain_sizeState(myInstance)
         endif
         homogenization_sizePostResults(i,e) = homogenization_isostrain_sizePostResults(myInstance)

!* RGC homogenization
       case (homogenization_RGC_label)
         if (homogenization_RGC_sizeState(myInstance) > 0_pInt) then
           allocate(homogenization_state0(i,e)%p(homogenization_RGC_sizeState(myInstance)))
           allocate(homogenization_subState0(i,e)%p(homogenization_RGC_sizeState(myInstance)))
           allocate(homogenization_state(i,e)%p(homogenization_RGC_sizeState(myInstance)))
           homogenization_state0(i,e)%p = homogenization_RGC_stateInit(myInstance)
           homogenization_sizeState(i,e) = homogenization_RGC_sizeState(myInstance)
         endif
         homogenization_sizePostResults(i,e) = homogenization_RGC_sizePostResults(myInstance)

       case default
         call IO_error(201,ext_msg=homogenization_type(mesh_element(3,e)))      ! unknown type 201 is homogenization!
     end select
   enddo
 enddo

 homogenization_maxSizeState       = maxval(homogenization_sizeState)
 homogenization_maxSizePostResults = maxval(homogenization_sizePostResults)

 materialpoint_sizeResults = 1+ 1+homogenization_maxSizePostResults + &                       ! grain count, homogSize, homogResult
                             homogenization_maxNgrains*(1+crystallite_maxSizePostResults+ &   ! results count, cryst results
                                                        1+constitutive_maxSizePostResults)    ! results count, constitutive results
 allocate(materialpoint_results(materialpoint_sizeResults, mesh_maxNips,mesh_NcpElems))
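
! Illustrative sizing (the numbers are assumptions for the example only, not
! values taken from this code): with homogenization_maxNgrains = 2,
! homogenization_maxSizePostResults = 3, crystallite_maxSizePostResults = 5
! and constitutive_maxSizePostResults = 10, each IP record holds
!   1 + 1 + 3 + 2*((1+5) + (1+10)) = 39
! real values per integration point and element.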

! *** Output to MARC output file ***
!$OMP CRITICAL (write2out)
 write(6,*)
 write(6,*) '<<<+- homogenization init -+>>>'
 write(6,*) '$Id$'
 write(6,*)
 write(6,'(a32,x,7(i5,x))') 'homogenization_state0: ',          shape(homogenization_state0)
 write(6,'(a32,x,7(i5,x))') 'homogenization_subState0: ',       shape(homogenization_subState0)
 write(6,'(a32,x,7(i5,x))') 'homogenization_state: ',           shape(homogenization_state)
 write(6,'(a32,x,7(i5,x))') 'homogenization_sizeState: ',       shape(homogenization_sizeState)
 write(6,'(a32,x,7(i5,x))') 'homogenization_sizePostResults: ', shape(homogenization_sizePostResults)
 write(6,*)
 write(6,'(a32,x,7(i5,x))') 'materialpoint_dPdF: ',             shape(materialpoint_dPdF)
 write(6,'(a32,x,7(i5,x))') 'materialpoint_F0: ',               shape(materialpoint_F0)
 write(6,'(a32,x,7(i5,x))') 'materialpoint_F: ',                shape(materialpoint_F)
 write(6,'(a32,x,7(i5,x))') 'materialpoint_subF0: ',            shape(materialpoint_subF0)
 write(6,'(a32,x,7(i5,x))') 'materialpoint_subF: ',             shape(materialpoint_subF)
 write(6,'(a32,x,7(i5,x))') 'materialpoint_P: ',                shape(materialpoint_P)
 write(6,'(a32,x,7(i5,x))') 'materialpoint_Temperature: ',      shape(materialpoint_Temperature)
 write(6,'(a32,x,7(i5,x))') 'materialpoint_subFrac: ',          shape(materialpoint_subFrac)
 write(6,'(a32,x,7(i5,x))') 'materialpoint_subStep: ',          shape(materialpoint_subStep)
 write(6,'(a32,x,7(i5,x))') 'materialpoint_subdt: ',            shape(materialpoint_subdt)
 write(6,'(a32,x,7(i5,x))') 'materialpoint_requested: ',        shape(materialpoint_requested)
 write(6,'(a32,x,7(i5,x))') 'materialpoint_converged: ',        shape(materialpoint_converged)
 write(6,'(a32,x,7(i5,x))') 'materialpoint_doneAndHappy: ',     shape(materialpoint_doneAndHappy)
 write(6,*)
 write(6,'(a32,x,7(i5,x))') 'materialpoint_results: ',          shape(materialpoint_results)
 write(6,*)
 write(6,'(a32,x,7(i5,x))') 'maxSizeState: ',       homogenization_maxSizeState
 write(6,'(a32,x,7(i5,x))') 'maxSizePostResults: ', homogenization_maxSizePostResults
 call flush(6)
!$OMP END CRITICAL (write2out)

 return

endsubroutine


!********************************************************************
!*  parallelized calculation of
!*  stress and corresponding tangent
!*  at material points
!********************************************************************
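! Outline (descriptive summary of what this routine does):
!  1. store restoration points (state, def grads, stresses) per grain and IP
!  2. cutback loop: wind forward after convergence, cut back after failure
!  3. per sub-increment, iterate deformation partitioning, crystallite stress
!     integration and homogenization state update until done and happy
!     (or nMPstate iterations are exceeded)
!  4. when not terminally ill, average stress, tangent and temperature
!     over the constituent grains of each IP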
subroutine materialpoint_stressAndItsTangent(&
     updateJaco, &                                                              ! flag to initiate Jacobian updating
     dt &                                                                       ! time increment
    )

 use prec, only: pInt, &
                 pReal
 use numerics, only: subStepMinHomog, &
                     subStepSizeHomog, &
                     stepIncreaseHomog, &
                     nHomog, &
                     nMPstate
 use math, only: math_det3x3, &
                 math_transpose3x3
 use FEsolving, only: FEsolving_execElem, &
                      FEsolving_execIP, &
                      terminallyIll
 use mesh, only: mesh_element
 use material, only: homogenization_Ngrains
 use constitutive, only: constitutive_state0, &
                         constitutive_partionedState0, &
                         constitutive_state
 use crystallite, only: crystallite_Temperature, &
                        crystallite_F0, &
                        crystallite_Fp0, &
                        crystallite_Fp, &
                        crystallite_Lp0, &
                        crystallite_Lp, &
                        crystallite_dPdF, &
                        crystallite_dPdF0, &
                        crystallite_Tstar0_v, &
                        crystallite_Tstar_v, &
                        crystallite_partionedTemperature0, &
                        crystallite_partionedF0, &
                        crystallite_partionedF, &
                        crystallite_partionedFp0, &
                        crystallite_partionedLp0, &
                        crystallite_partioneddPdF0, &
                        crystallite_partionedTstar0_v, &
                        crystallite_dt, &
                        crystallite_requested, &
                        crystallite_converged, &
                        crystallite_stressAndItsTangent, &
                        crystallite_orientations
 use debug, only: debugger, &
                  verboseDebugger, &
                  debug_e, &
                  debug_i, &
                  debug_MaterialpointLoopDistribution, &
                  debug_MaterialpointStateLoopDistribution
 use math, only: math_pDecomposition

 implicit none

 real(pReal), intent(in) :: dt
 logical, intent(in) :: updateJaco
 integer(pInt) NiterationHomog,NiterationMPstate
 integer(pInt) g,i,e,myNgrains
 logical error

! ------ initialize to starting condition ------

 if (debugger) then
   write (6,*)
   write (6,*) 'Material Point start'
   write (6,'(a,/,(f14.9,x))')      'Temp0 of 1 1',  materialpoint_Temperature(1,1)
   write (6,'(a,/,3(3(f14.9,x)/))') 'F0 of 1 1',     math_transpose3x3(materialpoint_F0(1:3,1:3,1,1))
   write (6,'(a,/,3(3(f14.9,x)/))') 'F of 1 1',      math_transpose3x3(materialpoint_F(1:3,1:3,1,1))
   write (6,'(a,/,3(3(f14.9,x)/))') 'Fp0 of 1 1 1',  math_transpose3x3(crystallite_Fp0(1:3,1:3,1,1,1))
   write (6,'(a,/,3(3(f14.9,x)/))') 'Lp0 of 1 1 1',  math_transpose3x3(crystallite_Lp0(1:3,1:3,1,1,1))
 endif

 !$OMP PARALLEL DO PRIVATE(myNgrains)
 do e = FEsolving_execElem(1),FEsolving_execElem(2)                             ! iterate over elements to be processed
   myNgrains = homogenization_Ngrains(mesh_element(3,e))
   do i = FEsolving_execIP(1,e),FEsolving_execIP(2,e)                           ! iterate over IPs of this element to be processed

     ! initialize restoration points of grain...
     forall (g = 1:myNgrains) constitutive_partionedState0(g,i,e)%p = constitutive_state0(g,i,e)%p          ! ...microstructures
     crystallite_partionedTemperature0(1:myNgrains,i,e) = materialpoint_Temperature(i,e)                    ! ...temperatures
     crystallite_partionedFp0(1:3,1:3,1:myNgrains,i,e)  = crystallite_Fp0(1:3,1:3,1:myNgrains,i,e)          ! ...plastic def grads
     crystallite_partionedLp0(1:3,1:3,1:myNgrains,i,e)  = crystallite_Lp0(1:3,1:3,1:myNgrains,i,e)          ! ...plastic velocity grads
     crystallite_partioneddPdF0(1:3,1:3,1:3,1:3,1:myNgrains,i,e) = crystallite_dPdF0(1:3,1:3,1:3,1:3,1:myNgrains,i,e)  ! ...stiffness
     crystallite_partionedF0(1:3,1:3,1:myNgrains,i,e)   = crystallite_F0(1:3,1:3,1:myNgrains,i,e)           ! ...def grads
     crystallite_partionedTstar0_v(1:6,1:myNgrains,i,e) = crystallite_Tstar0_v(1:6,1:myNgrains,i,e)         ! ...2nd PK stress

     ! initialize restoration points of ...
     if (homogenization_sizeState(i,e) > 0_pInt) &
       homogenization_subState0(i,e)%p = homogenization_state0(i,e)%p           ! ...internal homogenization state
     materialpoint_subF0(1:3,1:3,i,e) = materialpoint_F0(1:3,1:3,i,e)           ! ...def grad

     materialpoint_subFrac(i,e) = 0.0_pReal
     materialpoint_subStep(i,e) = 1.0_pReal/subStepSizeHomog                    ! <<added to adopt flexibility in cutback size>>
     materialpoint_converged(i,e) = .false.                                     ! pretend failed step of twice the required size
     materialpoint_requested(i,e) = .true.                                      ! everybody requires calculation
   enddo
 enddo
 !$OMP END PARALLEL DO

 NiterationHomog = 0_pInt

! ------ cutback loop ------
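
! Sub-stepping summary (describing the loop below): each IP advances through
! the FE increment in sub-increments of relative size materialpoint_subStep.
! After success the window [subFrac, subFrac+subStep] is wound forward and the
! next step may grow by stepIncreaseHomog; after failure the step is multiplied
! by the cutback factor subStepSizeHomog from the numerics module. The loop
! ends once every IP has either reached the end of the increment or fallen
! below subStepMinHomog, or the analysis is flagged terminally ill.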

 do while (.not. terminallyIll .and. &
           any(materialpoint_subStep(:,FEsolving_execElem(1):FEsolving_execElem(2)) > subStepMinHomog))  ! cutback loop for material points

   !$OMP PARALLEL DO PRIVATE(myNgrains)
   do e = FEsolving_execElem(1),FEsolving_execElem(2)                           ! iterate over elements to be processed
     myNgrains = homogenization_Ngrains(mesh_element(3,e))
     do i = FEsolving_execIP(1,e),FEsolving_execIP(2,e)                         ! iterate over IPs of this element to be processed

       if ( materialpoint_converged(i,e) ) then
         if (verboseDebugger .and. (e == debug_e .and. i == debug_i)) then
           !$OMP CRITICAL (write2out)
           write(6,'(a,x,f10.8,x,a,x,f10.8,x,a,/)') '°°° winding forward from', &
             materialpoint_subFrac(i,e), 'to current materialpoint_subFrac', &
             materialpoint_subFrac(i,e)+materialpoint_subStep(i,e),'in materialpoint_stressAndItsTangent'
           !$OMP END CRITICAL (write2out)
         endif

         ! calculate new subStep and new subFrac
         materialpoint_subFrac(i,e) = materialpoint_subFrac(i,e) + materialpoint_subStep(i,e)
         !$OMP FLUSH(materialpoint_subFrac)
         materialpoint_subStep(i,e) = min(1.0_pReal-materialpoint_subFrac(i,e), &
                                          stepIncreaseHomog*materialpoint_subStep(i,e))     ! <<introduce flexibility for step increase/acceleration>>
         !$OMP FLUSH(materialpoint_subStep)

         ! still stepping needed
         if (materialpoint_subStep(i,e) > subStepMinHomog) then

           ! wind forward grain starting point of...
           crystallite_partionedTemperature0(1:myNgrains,i,e) = crystallite_Temperature(1:myNgrains,i,e)    ! ...temperatures
           crystallite_partionedF0(1:3,1:3,1:myNgrains,i,e)   = crystallite_partionedF(1:3,1:3,1:myNgrains,i,e)  ! ...def grads
           crystallite_partionedFp0(1:3,1:3,1:myNgrains,i,e)  = crystallite_Fp(1:3,1:3,1:myNgrains,i,e)     ! ...plastic def grads
           crystallite_partionedLp0(1:3,1:3,1:myNgrains,i,e)  = crystallite_Lp(1:3,1:3,1:myNgrains,i,e)     ! ...plastic velocity grads
           crystallite_partioneddPdF0(1:3,1:3,1:3,1:3,1:myNgrains,i,e) = crystallite_dPdF(1:3,1:3,1:3,1:3,1:myNgrains,i,e)  ! ...stiffness
           crystallite_partionedTstar0_v(1:6,1:myNgrains,i,e) = crystallite_Tstar_v(1:6,1:myNgrains,i,e)    ! ...2nd PK stress
           forall (g = 1:myNgrains) constitutive_partionedState0(g,i,e)%p = constitutive_state(g,i,e)%p     ! ...microstructures
           if (homogenization_sizeState(i,e) > 0_pInt) &
             homogenization_subState0(i,e)%p = homogenization_state(i,e)%p      ! ...internal state of homog scheme
           materialpoint_subF0(1:3,1:3,i,e) = materialpoint_subF(1:3,1:3,i,e)   ! ...def grad
           !$OMP FLUSH(materialpoint_subF0)
         elseif (materialpoint_requested(i,e)) then                             ! this materialpoint just converged  ! already at final time (??)
           !$OMP CRITICAL (distributionHomog)
           debug_MaterialpointLoopDistribution(min(nHomog+1,NiterationHomog)) = &
             debug_MaterialpointLoopDistribution(min(nHomog+1,NiterationHomog)) + 1
           !$OMP END CRITICAL (distributionHomog)
         endif

       ! materialpoint didn't converge, so we need a cutback here
       else
         if ( (myNgrains == 1_pInt .and. materialpoint_subStep(i,e) <= 1.0 ) .or. &      ! single grain already tried internal subStepping in crystallite
              subStepSizeHomog * materialpoint_subStep(i,e) <= subStepMinHomog ) then    ! would require too small subStep
           ! cutback makes no sense and...
           !$OMP CRITICAL (setTerminallyIll)
           terminallyIll = .true.                                               ! ...one kills all
           !$OMP END CRITICAL (setTerminallyIll)
         else                                                                   ! cutback makes sense
           materialpoint_subStep(i,e) = subStepSizeHomog * materialpoint_subStep(i,e)    ! crystallite had severe trouble, so do a significant cutback
           !$OMP FLUSH(materialpoint_subStep)

           if (verboseDebugger .and. (e == debug_e .and. i == debug_i)) then
             !$OMP CRITICAL (write2out)
             write(6,'(a,x,f10.8,/)') '°°° cutback step in materialpoint_stressAndItsTangent with new materialpoint_subStep:',&
               materialpoint_subStep(i,e)
             !$OMP END CRITICAL (write2out)
           endif

           ! restore...
           crystallite_Temperature(1:myNgrains,i,e) = crystallite_partionedTemperature0(1:myNgrains,i,e)    ! ...temperatures
                                                                                ! ...initial def grad unchanged
           crystallite_Fp(1:3,1:3,1:myNgrains,i,e)  = crystallite_partionedFp0(1:3,1:3,1:myNgrains,i,e)     ! ...plastic def grads
           crystallite_Lp(1:3,1:3,1:myNgrains,i,e)  = crystallite_partionedLp0(1:3,1:3,1:myNgrains,i,e)     ! ...plastic velocity grads
           crystallite_dPdF(1:3,1:3,1:3,1:3,1:myNgrains,i,e) = crystallite_partioneddPdF0(1:3,1:3,1:3,1:3,1:myNgrains,i,e)  ! ...stiffness
           crystallite_Tstar_v(1:6,1:myNgrains,i,e) = crystallite_partionedTstar0_v(1:6,1:myNgrains,i,e)    ! ...2nd PK stress
           forall (g = 1:myNgrains) constitutive_state(g,i,e)%p = constitutive_partionedState0(g,i,e)%p     ! ...microstructures
           if (homogenization_sizeState(i,e) > 0_pInt) &
             homogenization_state(i,e)%p = homogenization_subState0(i,e)%p      ! ...internal state of homog scheme
         endif
       endif

       materialpoint_requested(i,e) = materialpoint_subStep(i,e) > subStepMinHomog
       if (materialpoint_requested(i,e)) then
         materialpoint_subF(1:3,1:3,i,e) = materialpoint_subF0(1:3,1:3,i,e) + &
                                           materialpoint_subStep(i,e) * (materialpoint_F(1:3,1:3,i,e) - materialpoint_F0(1:3,1:3,i,e))  ! target def grad: linear interpolation between F0 and F
         materialpoint_subdt(i,e) = materialpoint_subStep(i,e) * dt
         materialpoint_doneAndHappy(1:2,i,e) = (/.false.,.true./)
       endif
     enddo                                                                      ! loop IPs
   enddo                                                                        ! loop elements
   !$OMP END PARALLEL DO

!* Checks for cutback/substepping loops
!   write (6,'(a,/,8(L,x))')    'MP exceeds substep min',materialpoint_subStep(:,FEsolving_execELem(1):FEsolving_execElem(2)) > subStepMinHomog
!   write (6,'(a,/,8(L,x))')    'MP requested',materialpoint_requested(:,FEsolving_execELem(1):FEsolving_execElem(2))
!   write (6,'(a,/,8(f6.4,x))') 'MP subFrac',materialpoint_subFrac(:,FEsolving_execELem(1):FEsolving_execElem(2))
!   write (6,'(a,/,8(f6.4,x))') 'MP subStep',materialpoint_subStep(:,FEsolving_execELem(1):FEsolving_execElem(2))

! ------ convergence loop material point homogenization ------
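
! Convention used below (as encoded in the flags themselves):
! materialpoint_doneAndHappy(1,:,:) = "done"  -> no further state iteration needed
! materialpoint_doneAndHappy(2,:,:) = "happy" -> the homogenization scheme accepts the result
! An IP counts as converged only when both flags are true; a non-converged
! crystallite forces (done=.true., happy=.false.) so that the cutback loop
! reduces the sub-increment instead.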

   NiterationMPstate = 0_pInt

   do while (.not. terminallyIll .and. &
             any(      materialpoint_requested(:,FEsolving_execElem(1):FEsolving_execElem(2)) &
                 .and. .not. materialpoint_doneAndHappy(1,:,FEsolving_execElem(1):FEsolving_execElem(2)) &
                ) .and. &
             NiterationMPstate < nMPstate)                                      ! convergence loop for materialpoint
     NiterationMPstate = NiterationMPstate + 1

!     write(6,'(a,/,125(8(l,x),/))') 'material point request and not done', &
!                                    materialpoint_requested .and. .not. materialpoint_doneAndHappy(1,:,:)

! --+>> deformation partitioning <<+--
!
! based on materialpoint_subF0,.._subF,
! crystallite_partionedF0,
! homogenization_state
! results in crystallite_partionedF

     !$OMP PARALLEL DO PRIVATE(myNgrains)
     do e = FEsolving_execElem(1),FEsolving_execElem(2)                         ! iterate over elements to be processed
       myNgrains = homogenization_Ngrains(mesh_element(3,e))
       do i = FEsolving_execIP(1,e),FEsolving_execIP(2,e)                       ! iterate over IPs of this element to be processed
         if (      materialpoint_requested(i,e) .and. &                         ! process requested but...
             .not. materialpoint_doneAndHappy(1,i,e)) then                      ! ...not yet done material points
           call homogenization_partitionDeformation(i,e)                        ! partition deformation onto constituents
           crystallite_dt(1:myNgrains,i,e) = materialpoint_subdt(i,e)           ! propagate materialpoint dt to grains
           crystallite_requested(1:myNgrains,i,e) = .true.                      ! request calculation for constituents
         else
           crystallite_requested(1:myNgrains,i,e) = .false.                     ! calculation for constituents not required anymore
         endif
       enddo
     enddo
     !$OMP END PARALLEL DO
!     write(6,'(a,/,125(8(8(l,x),2x),/))') 'crystallite request with updated partitioning', crystallite_requested

! --+>> crystallite integration <<+--
!
! based on crystallite_partionedF0,.._partionedF
! incrementing by crystallite_dt

     call crystallite_stressAndItsTangent(updateJaco)                           ! request stress and tangent calculation for constituent grains

!     write(6,'(a,/,125(8(8(l,x),2x),/))') 'crystallite converged', crystallite_converged

! --+>> state update <<+--

     !$OMP PARALLEL DO
     do e = FEsolving_execElem(1),FEsolving_execElem(2)                         ! iterate over elements to be processed
       do i = FEsolving_execIP(1,e),FEsolving_execIP(2,e)                       ! iterate over IPs of this element to be processed
         if (      materialpoint_requested(i,e) .and. &
             .not. materialpoint_doneAndHappy(1,i,e)) then
           if (.not. all(crystallite_converged(:,i,e))) then
             materialpoint_doneAndHappy(1:2,i,e) = (/.true.,.false./)
             materialpoint_converged(i,e) = .false.
           else
             materialpoint_doneAndHappy(1:2,i,e) = homogenization_updateState(i,e)
             materialpoint_converged(i,e) = all(materialpoint_doneAndHappy(1:2,i,e))      ! converged if done and happy
           endif
           !$OMP FLUSH(materialpoint_converged)
           if (materialpoint_converged(i,e)) then
             !$OMP CRITICAL (distributionMPState)
             debug_MaterialpointStateLoopDistribution(NiterationMPstate) = &
               debug_MaterialpointStateLoopDistribution(NiterationMPstate) + 1
             !$OMP END CRITICAL (distributionMPState)
           endif
         endif
       enddo
     enddo
     !$OMP END PARALLEL DO
!     write(6,'(a,/,125(8(l,x),/))') 'material point done', materialpoint_doneAndHappy(1,:,:)
!     write(6,'(a,/,125(8(l,x),/))') 'material point converged', materialpoint_converged

   enddo                                                                        ! homogenization convergence loop

   NiterationHomog = NiterationHomog + 1_pInt

 enddo                                                                          ! cutback loop


 if (.not. terminallyIll ) then
   call crystallite_orientations()                                              ! calculate crystal orientations
   !$OMP PARALLEL DO
   do e = FEsolving_execElem(1),FEsolving_execElem(2)                           ! iterate over elements to be processed
     do i = FEsolving_execIP(1,e),FEsolving_execIP(2,e)                         ! iterate over IPs of this element to be processed
       call homogenization_averageStressAndItsTangent(i,e)
       call homogenization_averageTemperature(i,e)
   enddo; enddo
   !$OMP END PARALLEL DO

   if (debugger) then
     write (6,*)
     write (6,'(a)') '°°° Material Point end'
     write (6,*)
   endif
 else
   write (6,*)
   write (6,'(a)') '°°° Material Point terminally ill'
   write (6,*)
 endif

 return

endsubroutine


!********************************************************************
!*  parallelized calculation of
!*  result array at material points
!********************************************************************
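!
! Layout of one IP record in materialpoint_results(:,i,e), as filled below:
!   1 value : number of grains at this materialpoint
!   1 value : size of the homogenization results
!   <homogenization results>, if any
!   per grain: a block of 1+crystallite_sizePostResults + 1+constitutive_sizePostResults
!              values returned by crystallite_postResults (constitutive results embedded)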
subroutine materialpoint_postResults(dt)

 use FEsolving, only: FEsolving_execElem, FEsolving_execIP
 use mesh, only: mesh_element
 use material, only: homogenization_Ngrains, microstructure_crystallite
 use constitutive, only: constitutive_sizePostResults, constitutive_postResults
 use crystallite, only: crystallite_sizePostResults, crystallite_postResults

 implicit none

 real(pReal), intent(in) :: dt
 integer(pInt) g,i,e,c,d,myNgrains,myCrystallite

 !$OMP PARALLEL DO PRIVATE(myNgrains,myCrystallite,c,d)
 do e = FEsolving_execElem(1),FEsolving_execElem(2)                             ! iterate over elements to be processed
   myNgrains = homogenization_Ngrains(mesh_element(3,e))
   myCrystallite = microstructure_crystallite(mesh_element(4,e))
   do i = FEsolving_execIP(1,e),FEsolving_execIP(2,e)                           ! iterate over IPs of this element to be processed
     c = 0_pInt
     materialpoint_results(c+1,i,e) = myNgrains; c = c+1_pInt                   ! tell number of grains at materialpoint
     d = homogenization_sizePostResults(i,e)
     materialpoint_results(c+1,i,e) = d; c = c+1_pInt                           ! tell size of homogenization results
     if (d > 0_pInt) then                                                       ! any homogenization results to mention?
       materialpoint_results(c+1:c+d,i,e) = homogenization_postResults(i,e); c = c+d     ! tell homogenization results
     endif
     do g = 1,myNgrains                                                         ! loop over all grains
       d = 1+crystallite_sizePostResults(myCrystallite) + 1+constitutive_sizePostResults(g,i,e)
       materialpoint_results(c+1:c+d,i,e) = crystallite_postResults(dt,g,i,e); c = c+d   ! tell crystallite results
     enddo
   enddo
 enddo
 !$OMP END PARALLEL DO

endsubroutine
|
2009-05-07 21:57:36 +05:30
|
|
|
|
|
|
|
|
|
|
|
!********************************************************************
|
|
|
|
! partition material point def grad onto constituents
|
|
|
|
!********************************************************************
|
|
|
|
subroutine homogenization_partitionDeformation(&
|
|
|
|
ip, & ! integration point
|
|
|
|
el & ! element
|
|
|
|
)
|
|
|
|
|
|
|
|
use prec, only: pReal,pInt
|
|
|
|
use mesh, only: mesh_element
|
|
|
|
use material, only: homogenization_type, homogenization_maxNgrains
|
|
|
|
use crystallite, only: crystallite_partionedF0,crystallite_partionedF
|
|
|
|
use homogenization_isostrain
|
openmp parallelization working again (at least for j2 and nonlocal constitutive model).
In order to keep it like that, please follow these simple rules:
DON'T use implicit array subscripts:
example: real, dimension(3,3) :: A,B
A(:,2) = B(:,1) <--- DON'T USE
A(1:3,2) = B(1:3,1) <--- BETTER USE
In many cases the use of explicit array subscripts is inevitable for parallelization. Additionally, it is an easy means to prevent memory leaks.
Enclose all write statements with the following:
!$OMP CRITICAL (write2out)
<your write statement>
!$OMP END CRITICAL (write2out)
Whenever you change something in the code and are not sure if it affects parallelization and leads to nonconforming behavior, please ask me and/or Franz to check this.
2011-03-17 16:16:17 +05:30
|
|
|
use homogenization_RGC
|
2009-05-07 21:57:36 +05:30
|
|
|
|
|
|
|
implicit none
|
|
|
|
|
|
|
|
integer(pInt), intent(in) :: ip,el
|
|
|
|
|
|
|
|
select case(homogenization_type(mesh_element(3,el)))
|
|
|
|
case (homogenization_isostrain_label)
|
2009-07-31 17:32:20 +05:30
|
|
|
!* isostrain
|
openmp parallelization working again (at least for j2 and nonlocal constitutive model).
In order to keep it like that, please follow these simple rules:
DON'T use implicit array subscripts:
example: real, dimension(3,3) :: A,B
A(:,2) = B(:,1) <--- DON'T USE
A(1:3,2) = B(1:3,1) <--- BETTER USE
In many cases the use of explicit array subscripts is inevitable for parallelization. Additionally, it is an easy means to prevent memory leaks.
Enclose all write statements with the following:
!$OMP CRITICAL (write2out)
<your write statement>
!$OMP END CRITICAL (write2out)
Whenever you change something in the code and are not sure if it affects parallelization and leads to nonconforming behavior, please ask me and/or Franz to check this.
2011-03-17 16:16:17 +05:30
|
|
|
call homogenization_isostrain_partitionDeformation(crystallite_partionedF(1:3,1:3,1:homogenization_maxNgrains,ip,el), &
|
|
|
|
crystallite_partionedF0(1:3,1:3,1:homogenization_maxNgrains,ip,el),&
|
|
|
|
materialpoint_subF(1:3,1:3,ip,el),&
|
2009-06-16 14:33:30 +05:30
|
|
|
homogenization_state(ip,el), &
|
|
|
|
ip, &
|
|
|
|
el)

!* RGC homogenization
   case (homogenization_RGC_label)
     call homogenization_RGC_partitionDeformation(crystallite_partionedF(1:3,1:3,1:homogenization_maxNgrains,ip,el), &
                                                  crystallite_partionedF0(1:3,1:3,1:homogenization_maxNgrains,ip,el), &
                                                  materialpoint_subF(1:3,1:3,ip,el), &
                                                  homogenization_state(ip,el), &
                                                  ip, &
                                                  el)

 end select

 endsubroutine

!********************************************************************
! update the internal state of the homogenization scheme
! and tell whether "done" and "happy" with result
!********************************************************************
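!* Note on the return value (an assumption spelled out here for clarity; it mirrors the
!* materialpoint_doneAndHappy bookkeeping at the material-point level):
!*   element 1 ("done")  : the state update iteration has finished
!*   element 2 ("happy") : the result is considered acceptable/converged
!* Hypothetical caller sketch (variable names i and e are illustrative, not from this module):
!*
!*   logical, dimension(2) :: doneAndHappy
!*   doneAndHappy = homogenization_updateState(i,e)
!*   if (doneAndHappy(1) .and. .not. doneAndHappy(2)) then
!*     ! finished iterating but not converged, so the caller would cut back the time step
!*   endif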

 function homogenization_updateState(&
   ip, &                    ! integration point
   el  &                    ! element
  )

 use prec, only: pReal,pInt
 use mesh, only: mesh_element
 use material, only: homogenization_type, homogenization_maxNgrains
 use crystallite, only: crystallite_P,crystallite_dPdF,crystallite_partionedF,crystallite_partionedF0 ! modified <<<updated 31.07.2009>>>
 use homogenization_isostrain
 use homogenization_RGC

 implicit none

 integer(pInt), intent(in) :: ip,el
 logical, dimension(2) :: homogenization_updateState

 select case(homogenization_type(mesh_element(3,el)))

!* isostrain
   case (homogenization_isostrain_label)
     homogenization_updateState = &
       homogenization_isostrain_updateState( homogenization_state(ip,el), &
                                             crystallite_P(1:3,1:3,1:homogenization_maxNgrains,ip,el), &
                                             crystallite_dPdF(1:3,1:3,1:3,1:3,1:homogenization_maxNgrains,ip,el), &
                                             ip, &
                                             el)

!* RGC homogenization
   case (homogenization_RGC_label)
     homogenization_updateState = &
       homogenization_RGC_updateState( homogenization_state(ip,el), &
                                       homogenization_subState0(ip,el), &
                                       crystallite_P(1:3,1:3,1:homogenization_maxNgrains,ip,el), &
                                       crystallite_partionedF(1:3,1:3,1:homogenization_maxNgrains,ip,el), &
                                       crystallite_partionedF0(1:3,1:3,1:homogenization_maxNgrains,ip,el), &
                                       materialpoint_subF(1:3,1:3,ip,el), &
                                       materialpoint_subdt(ip,el), &
                                       crystallite_dPdF(1:3,1:3,1:3,1:3,1:homogenization_maxNgrains,ip,el), &
                                       ip, &
                                       el)

 end select

 return

 endfunction

!********************************************************************
! derive average stress and stiffness from constituent quantities
!********************************************************************
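!* Note (illustrative only, not the scheme-specific implementation): for equal volume
!* fractions of the Ngrains constituents (Ngrains is a hypothetical local name here),
!* the average first Piola-Kirchhoff stress reduces to a simple arithmetic mean, e.g.
!*
!*   materialpoint_P(1:3,1:3,ip,el) = sum(crystallite_P(1:3,1:3,1:Ngrains,ip,el),3) / real(Ngrains,pReal)
!*
!* with the analogous mean over the grain index for materialpoint_dPdF.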

 subroutine homogenization_averageStressAndItsTangent(&
   ip, &                    ! integration point
   el  &                    ! element
  )

 use prec, only: pReal,pInt
 use mesh, only: mesh_element
 use material, only: homogenization_type, homogenization_maxNgrains
 use crystallite, only: crystallite_P,crystallite_dPdF
 use homogenization_RGC
 use homogenization_isostrain

 implicit none

 integer(pInt), intent(in) :: ip,el

 select case(homogenization_type(mesh_element(3,el)))

!* isostrain
   case (homogenization_isostrain_label)
     call homogenization_isostrain_averageStressAndItsTangent(materialpoint_P(1:3,1:3,ip,el), &
                                                              materialpoint_dPdF(1:3,1:3,1:3,1:3,ip,el), &
                                                              crystallite_P(1:3,1:3,1:homogenization_maxNgrains,ip,el), &
                                                              crystallite_dPdF(1:3,1:3,1:3,1:3,1:homogenization_maxNgrains,ip,el), &
                                                              ip, &
                                                              el)

!* RGC homogenization
   case (homogenization_RGC_label)
     call homogenization_RGC_averageStressAndItsTangent( materialpoint_P(1:3,1:3,ip,el), &
                                                         materialpoint_dPdF(1:3,1:3,1:3,1:3,ip,el), &
                                                         crystallite_P(1:3,1:3,1:homogenization_maxNgrains,ip,el), &
                                                         crystallite_dPdF(1:3,1:3,1:3,1:3,1:homogenization_maxNgrains,ip,el), &
                                                         ip, &
                                                         el)

 end select

 return

 endsubroutine

!********************************************************************
! derive average temperature from constituent quantities
!********************************************************************
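!* Note (illustrative assumption): with equal weighting of the constituents the average
!* would simply be the arithmetic mean of the grain temperatures, e.g.
!*   materialpoint_Temperature(ip,el) = sum(crystallite_Temperature(1:Ngrains,ip,el)) / real(Ngrains,pReal)
!* where Ngrains again stands for a hypothetical grain count of this homogenization.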

 subroutine homogenization_averageTemperature(&
   ip, &                    ! integration point
   el  &                    ! element
  )

 use prec, only: pReal,pInt
 use mesh, only: mesh_element
 use material, only: homogenization_type, homogenization_maxNgrains
 use crystallite, only: crystallite_Temperature
 use homogenization_isostrain
 use homogenization_RGC

 implicit none

 integer(pInt), intent(in) :: ip,el

 select case(homogenization_type(mesh_element(3,el)))

!* isostrain
   case (homogenization_isostrain_label)
     materialpoint_Temperature(ip,el) = &
       homogenization_isostrain_averageTemperature(crystallite_Temperature(1:homogenization_maxNgrains,ip,el), ip, el)

!* RGC homogenization
   case (homogenization_RGC_label)
     materialpoint_Temperature(ip,el) = &
       homogenization_RGC_averageTemperature(crystallite_Temperature(1:homogenization_maxNgrains,ip,el), ip, el)

 end select

 return

 endsubroutine

!********************************************************************
! return array of homogenization results for post file inclusion
! call only if homogenization_sizePostResults(ip,el) > 0 !!
!********************************************************************
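!* Usage note (an illustrative assumption that mirrors the precondition above): the caller
!* is expected to test the size first and copy the returned values into its results buffer,
!* for instance along the lines of
!*
!*   if (homogenization_sizePostResults(i,e) > 0_pInt) &
!*     materialpoint_results(1:homogenization_sizePostResults(i,e),i,e) = homogenization_postResults(i,e)
!*
!* (the exact packing into materialpoint_results is presumably handled elsewhere; i and e
!* are illustrative loop indices, not names from this module).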

 function homogenization_postResults(&
   ip, &                    ! integration point
   el  &                    ! element
  )

 use prec, only: pReal,pInt
 use mesh, only: mesh_element
 use material, only: homogenization_type
 use homogenization_isostrain
 use homogenization_RGC

 implicit none

!* Definition of variables
 integer(pInt), intent(in) :: ip,el
 real(pReal), dimension(homogenization_sizePostResults(ip,el)) :: homogenization_postResults

 homogenization_postResults = 0.0_pReal

 select case (homogenization_type(mesh_element(3,el)))

!* isostrain
   case (homogenization_isostrain_label)
     homogenization_postResults = homogenization_isostrain_postResults(homogenization_state(ip,el),ip,el)

!* RGC homogenization
   case (homogenization_RGC_label)
     homogenization_postResults = homogenization_RGC_postResults(homogenization_state(ip,el),ip,el)

 end select

 return

 endfunction
END MODULE