!--------------------------------------------------------------------------------------------------
! $Id$
!--------------------------------------------------------------------------------------------------
!> @author Franz Roters, Max-Planck-Institut für Eisenforschung GmbH
!> @author Philip Eisenlohr, Max-Planck-Institut für Eisenforschung GmbH
!> @author Denny Tjahjanto, Max-Planck-Institut für Eisenforschung GmbH
!> @brief homogenization manager, organizing deformation partitioning and stress homogenization
!--------------------------------------------------------------------------------------------------
module homogenization
 use prec, only: &
   pInt, &
   pReal

!--------------------------------------------------------------------------------------------------
! General variables for the homogenization at a material point
 implicit none
 private
 real(pReal), dimension(:,:,:,:), allocatable, public :: &
   materialpoint_F0, &                       !< def grad of IP at start of FE increment
   materialpoint_F, &                        !< def grad of IP to be reached at end of FE increment
   materialpoint_P                           !< first P--K stress of IP
 real(pReal), dimension(:,:,:,:,:,:), allocatable, public :: &
   materialpoint_dPdF                        !< tangent of first P--K stress at IP
 real(pReal), dimension(:,:,:), allocatable, public :: &
   materialpoint_results                     !< results array of material point
 integer(pInt), public, protected :: &
   materialpoint_sizeResults, &
   homogenization_maxSizePostResults, &
   field_maxSizePostResults

 real(pReal), dimension(:,:,:,:), allocatable, private :: &
   materialpoint_subF0, &                    !< def grad of IP at beginning of homogenization increment
   materialpoint_subF                        !< def grad of IP to be reached at end of homog inc
 real(pReal), dimension(:,:), allocatable, private :: &
   materialpoint_subFrac, &
   materialpoint_subStep, &
   materialpoint_subdt
 integer(pInt), private :: &
   homogenization_maxSizeState
 logical, dimension(:,:), allocatable, private :: &
   materialpoint_requested, &
   materialpoint_converged
 logical, dimension(:,:,:), allocatable, private :: &
   materialpoint_doneAndHappy

 enum, bind(c)
   enumerator :: undefined_ID, &
                 temperature_ID, &
                 damage_ID, &
                 vacancy_concentration_ID
 end enum

 integer(pInt), dimension(:), allocatable, private, protected :: &
   field_sizePostResults
 integer(pInt), dimension(:,:), allocatable, private :: &
   field_sizePostResult

 character(len=64), dimension(:,:), allocatable, private :: &
   field_output                              !< name of each post result output
 integer(pInt), dimension(:), allocatable, private :: &
   field_Noutput                             !< number of outputs per homog instance
 integer(kind(undefined_ID)), dimension(:,:), allocatable, private :: &
   field_outputID                            !< ID of each post result output

 public :: &
   homogenization_init, &
   materialpoint_stressAndItsTangent, &
   field_getLocalDamage, &
   field_putFieldDamage, &
   field_getLocalTemperature, &
   field_putFieldTemperature, &
   field_getLocalVacancyConcentration, &
   field_putFieldVacancyConcentration, &
   field_getDamageMobility, &
   field_getDamageDiffusion33, &
   field_getThermalConductivity33, &
   field_getMassDensity, &
   field_getSpecificHeat, &
   field_getVacancyMobility, &
   field_getVacancyDiffusion33, &
   materialpoint_postResults, &
   field_postResults
 private :: &
   homogenization_partitionDeformation, &
   homogenization_updateState, &
   homogenization_averageStressAndItsTangent, &
   homogenization_postResults

contains

!--------------------------------------------------------------------------------------------------
!> @brief module initialization
!--------------------------------------------------------------------------------------------------
subroutine homogenization_init()
#ifdef HDF
 use hdf5, only: &
   HID_T
 use IO, only : &
   HDF5_mappingHomogenization
#endif
 use, intrinsic :: iso_fortran_env                 ! to get compiler_version and compiler_options (at least for gfortran 4.6 at the moment)
 use math, only: &
   math_I3
 use debug, only: &
   debug_level, &
   debug_homogenization, &
   debug_levelBasic, &
   debug_e, &
   debug_g
 use mesh, only: &
   mesh_maxNips, &
   mesh_NcpElems, &
   mesh_element, &
   FE_Nips, &
   FE_geomtype
 use lattice, only: &
   lattice_referenceTemperature
 use constitutive, only: &
   constitutive_maxSizePostResults, &
   constitutive_damage_maxSizePostResults, &
   constitutive_thermal_maxSizePostResults, &
   constitutive_vacancy_maxSizePostResults
 use crystallite, only: &
   crystallite_maxSizePostResults
 use material
 use homogenization_none
 use homogenization_isostrain
 use homogenization_RGC
 use IO
 use numerics, only: &
   worldrank

 implicit none
 integer(pInt), parameter :: FILEUNIT = 200_pInt
 integer(pInt) :: e,i,p,myInstance
 integer(pInt), dimension(:,:), pointer :: thisSize
 integer(pInt), dimension(:) , pointer :: thisNoutput
 character(len=64), dimension(:,:), pointer :: thisOutput
 character(len=32) :: outputName                   !< name of output, intermediate fix until HDF5 output is ready
 logical :: knownHomogenization
#ifdef HDF
 integer(pInt), dimension(:,:), allocatable :: mapping
 integer(pInt), dimension(:),   allocatable :: InstancePosition
#endif
 integer(pInt), parameter :: MAXNCHUNKS = 2_pInt
 integer(pInt), dimension(1_pInt+2_pInt*MAXNCHUNKS) :: positions
 integer(pInt) :: section = 0_pInt
 character(len=65536) :: &
   tag  = '', &
   line = ''

#ifdef HDF
 allocate(mapping(mesh_ncpelems,4),source=0_pInt)
 allocate(InstancePosition(material_Nhomogenization),source=0_pInt)
#endif

!--------------------------------------------------------------------------------------------------
! parse homogenization from config file
 if (.not. IO_open_jobFile_stat(FILEUNIT,material_localFileExt)) &                                  ! no local material configuration present...
   call IO_open_file(FILEUNIT,material_configFile)                                                  ! ... open material.config file
 if (any(homogenization_type == HOMOGENIZATION_NONE_ID)) &
   call homogenization_none_init()
 if (any(homogenization_type == HOMOGENIZATION_ISOSTRAIN_ID)) &
   call homogenization_isostrain_init(FILEUNIT)
 if (any(homogenization_type == HOMOGENIZATION_RGC_ID)) &
   call homogenization_RGC_init(FILEUNIT)
 close(FILEUNIT)

!--------------------------------------------------------------------------------------------------
! parse field from config file
 allocate(field_sizePostResults(material_Nhomogenization), source=0_pInt)
 allocate(field_sizePostResult(maxval(homogenization_Noutput),material_Nhomogenization), &
          source=0_pInt)
 allocate(field_Noutput(material_Nhomogenization), source=0_pInt)
 allocate(field_outputID(maxval(homogenization_Noutput),material_Nhomogenization), &
          source=undefined_ID)
 allocate(field_output(maxval(homogenization_Noutput),material_Nhomogenization))
 field_output = ''

 if (.not. IO_open_jobFile_stat(FILEUNIT,material_localFileExt)) &                                  ! no local material configuration present...
   call IO_open_file(FILEUNIT,material_configFile)                                                  ! ... open material.config file
 rewind(FILEUNIT)
 do while (trim(line) /= IO_EOF .and. IO_lc(IO_getTag(line,'<','>')) /= material_partHomogenization)! wind forward to <homogenization>
   line = IO_read(FILEUNIT)
 enddo

 parsingFile: do while (trim(line) /= IO_EOF)                                                       ! read through sections of homogenization part
   line = IO_read(FILEUNIT)
   if (IO_isBlank(line)) cycle                                                                      ! skip empty lines
   if (IO_getTag(line,'<','>') /= '') then                                                          ! stop at next part
     line = IO_read(FILEUNIT, .true.)                                                               ! reset IO_read
     exit
   endif
   if (IO_getTag(line,'[',']') /= '') then                                                          ! next section
     section = section + 1_pInt
     cycle
   endif
   if (section > 0_pInt ) then                                                                      ! do not short-circuit here (.and. with next if-statement). It's not safe in Fortran
     positions = IO_stringPos(line,MAXNCHUNKS)
     tag = IO_lc(IO_stringValue(line,positions,1_pInt))                                             ! extract key
     select case(tag)
       case ('(output)')
         select case(IO_lc(IO_stringValue(line,positions,2_pInt)))
           case('temperature')
             field_Noutput(section) = field_Noutput(section) + 1_pInt
             field_outputID(field_Noutput(section),section) = temperature_ID
             field_sizePostResult(field_Noutput(section),section) = 1_pInt
             field_sizePostResults(section) = field_sizePostResults(section) + 1_pInt
             field_output(field_Noutput(section),section) = IO_lc(IO_stringValue(line,positions,2_pInt))
           case('damage')
             field_Noutput(section) = field_Noutput(section) + 1_pInt
             field_outputID(field_Noutput(section),section) = damage_ID
             field_sizePostResult(field_Noutput(section),section) = 1_pInt
             field_sizePostResults(section) = field_sizePostResults(section) + 1_pInt
             field_output(field_Noutput(section),section) = IO_lc(IO_stringValue(line,positions,2_pInt))
           case('vacancy_concentration')
             field_Noutput(section) = field_Noutput(section) + 1_pInt
             field_outputID(field_Noutput(section),section) = vacancy_concentration_ID
             field_sizePostResult(field_Noutput(section),section) = 1_pInt
             field_sizePostResults(section) = field_sizePostResults(section) + 1_pInt
             field_output(field_Noutput(section),section) = IO_lc(IO_stringValue(line,positions,2_pInt))

         end select

     end select
   endif
 enddo parsingFile
 close(FILEUNIT)
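
!--------------------------------------------------------------------------------------------------
! Illustrative only (not compiled): the parsing loop above reacts to "(output)" keys inside the
! sections of the <homogenization> part of material.config; each of the three recognized keywords
! maps to its *_ID enumerator and contributes one result value per IP. A minimal section exercising
! them might look as follows; the section name "[SX]" is a placeholder, and the type/Ngrains keys
! are handled elsewhere (material.f90 and the scheme-specific init routines), not by this loop.
!
!   <homogenization>
!   [SX]
!   type        isostrain
!   Ngrains     1
!   (output)    temperature
!   (output)    damage
!   (output)    vacancy_concentration
!--------------------------------------------------------------------------------------------------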

!--------------------------------------------------------------------------------------------------
! write description file for homogenization output
 call IO_write_jobFile(FILEUNIT,'outputHomogenization')
 do p = 1,material_Nhomogenization
   i = homogenization_typeInstance(p)                                                               ! which instance of this homogenization type
   knownHomogenization = .true.                                                                     ! assume valid
   select case(homogenization_type(p))                                                              ! split per homogenization type
     case (HOMOGENIZATION_NONE_ID)
       outputName = HOMOGENIZATION_NONE_label
       thisNoutput => null()
       thisOutput  => null()
       thisSize    => null()
     case (HOMOGENIZATION_ISOSTRAIN_ID)
       outputName = HOMOGENIZATION_ISOSTRAIN_label
       thisNoutput => homogenization_isostrain_Noutput
       thisOutput  => homogenization_isostrain_output
       thisSize    => homogenization_isostrain_sizePostResult
     case (HOMOGENIZATION_RGC_ID)
       outputName = HOMOGENIZATION_RGC_label
       thisNoutput => homogenization_RGC_Noutput
       thisOutput  => homogenization_RGC_output
       thisSize    => homogenization_RGC_sizePostResult
     case default
       knownHomogenization = .false.
   end select
   write(FILEUNIT,'(/,a,/)') '['//trim(homogenization_name(p))//']'
   if (knownHomogenization) then
     write(FILEUNIT,'(a)') '(type)'//char(9)//trim(outputName)
     write(FILEUNIT,'(a,i4)') '(ngrains)'//char(9),homogenization_Ngrains(p)
     if (homogenization_type(p) /= HOMOGENIZATION_NONE_ID) then
       do e = 1,thisNoutput(i)
         write(FILEUNIT,'(a,i4)') trim(thisOutput(e,i))//char(9),thisSize(e,i)
       enddo
     endif
   endif
#ifdef multiphysicsOut
   write(FILEUNIT,'(a)') '(field)'
   do e = 1_pInt,field_Noutput(p)
     write(FILEUNIT,'(a,i4)') trim(field_output(e,p))//char(9),field_sizePostResult(e,p)
   enddo
#endif
 enddo
 close(FILEUNIT)

!--------------------------------------------------------------------------------------------------
! allocate and initialize global variables
 allocate(materialpoint_dPdF(3,3,3,3,mesh_maxNips,mesh_NcpElems),   source=0.0_pReal)
 allocate(materialpoint_F0(3,3,mesh_maxNips,mesh_NcpElems),         source=0.0_pReal)
 materialpoint_F0 = spread(spread(math_I3,3,mesh_maxNips),4,mesh_NcpElems)                          ! initialize to identity
 allocate(materialpoint_F(3,3,mesh_maxNips,mesh_NcpElems),          source=0.0_pReal)
 materialpoint_F = materialpoint_F0                                                                 ! initialize to identity
 allocate(materialpoint_subF0(3,3,mesh_maxNips,mesh_NcpElems),      source=0.0_pReal)
 allocate(materialpoint_subF(3,3,mesh_maxNips,mesh_NcpElems),       source=0.0_pReal)
 allocate(materialpoint_P(3,3,mesh_maxNips,mesh_NcpElems),          source=0.0_pReal)
 allocate(materialpoint_subFrac(mesh_maxNips,mesh_NcpElems),        source=0.0_pReal)
 allocate(materialpoint_subStep(mesh_maxNips,mesh_NcpElems),        source=0.0_pReal)
 allocate(materialpoint_subdt(mesh_maxNips,mesh_NcpElems),          source=0.0_pReal)
 allocate(materialpoint_requested(mesh_maxNips,mesh_NcpElems),      source=.false.)
 allocate(materialpoint_converged(mesh_maxNips,mesh_NcpElems),      source=.true.)
 allocate(materialpoint_doneAndHappy(2,mesh_maxNips,mesh_NcpElems), source=.true.)

!--------------------------------------------------------------------------------------------------
! allocate and initialize global state and postResults variables
 elementLooping: do e = 1,mesh_NcpElems
   myInstance = homogenization_typeInstance(mesh_element(3,e))
   IpLooping: do i = 1,FE_Nips(FE_geomtype(mesh_element(2,e)))
#ifdef HDF
     InstancePosition(myInstance) = InstancePosition(myInstance)+1_pInt
     mapping(e,1:4) = [instancePosition(myinstance),myinstance,e,i]
#endif
   enddo IpLooping
 enddo elementLooping
#ifdef HDF
 call HDF5_mappingHomogenization(mapping)
#endif

 homogenization_maxSizePostResults = 0_pInt
 field_maxSizePostResults          = 0_pInt
 do p = 1,material_Nhomogenization
   homogenization_maxSizePostResults = max(homogenization_maxSizePostResults,homogState(p)%sizePostResults)
   field_maxSizePostResults          = max(field_maxSizePostResults,field_sizePostResults(p))
 enddo
 materialpoint_sizeResults = 1 &                                                                    ! grain count
                           + 1 + homogenization_maxSizePostResults &                                ! homogSize & homogResult
#ifdef multiphysicsOut
                           + field_maxSizePostResults &                                             ! field size & field result
#endif
                           + homogenization_maxNgrains * (1 + crystallite_maxSizePostResults &      ! crystallite size & crystallite results
#ifdef multiphysicsOut
                           + constitutive_damage_maxSizePostResults &
                           + constitutive_thermal_maxSizePostResults &
                           + constitutive_vacancy_maxSizePostResults &
#endif
                           + 1 + constitutive_maxSizePostResults)                                   ! constitutive size & constitutive results
 allocate(materialpoint_results(materialpoint_sizeResults,mesh_maxNips,mesh_NcpElems))

 mainProcess: if (worldrank == 0) then
   write(6,'(/,a)')   ' <<<+- homogenization init -+>>>'
   write(6,'(a)')     ' $Id$'
   write(6,'(a15,a)') ' Current time: ',IO_timeStamp()
#include "compilation_info.f90"
 endif mainProcess

 if (iand(debug_level(debug_homogenization), debug_levelBasic) /= 0_pInt) then
#ifdef TODO
   write(6,'(a32,1x,7(i8,1x))')   'homogenization_state0: ',      shape(homogenization_state0)
   write(6,'(a32,1x,7(i8,1x))')   'homogenization_subState0: ',   shape(homogenization_subState0)
   write(6,'(a32,1x,7(i8,1x))')   'homogenization_state: ',       shape(homogenization_state)
#endif
   write(6,'(a32,1x,7(i8,1x))')   'materialpoint_dPdF: ',         shape(materialpoint_dPdF)
   write(6,'(a32,1x,7(i8,1x))')   'materialpoint_F0: ',           shape(materialpoint_F0)
   write(6,'(a32,1x,7(i8,1x))')   'materialpoint_F: ',            shape(materialpoint_F)
   write(6,'(a32,1x,7(i8,1x))')   'materialpoint_subF0: ',        shape(materialpoint_subF0)
   write(6,'(a32,1x,7(i8,1x))')   'materialpoint_subF: ',         shape(materialpoint_subF)
   write(6,'(a32,1x,7(i8,1x))')   'materialpoint_P: ',            shape(materialpoint_P)
   write(6,'(a32,1x,7(i8,1x))')   'materialpoint_subFrac: ',      shape(materialpoint_subFrac)
   write(6,'(a32,1x,7(i8,1x))')   'materialpoint_subStep: ',      shape(materialpoint_subStep)
   write(6,'(a32,1x,7(i8,1x))')   'materialpoint_subdt: ',        shape(materialpoint_subdt)
   write(6,'(a32,1x,7(i8,1x))')   'materialpoint_requested: ',    shape(materialpoint_requested)
   write(6,'(a32,1x,7(i8,1x))')   'materialpoint_converged: ',    shape(materialpoint_converged)
   write(6,'(a32,1x,7(i8,1x),/)') 'materialpoint_doneAndHappy: ', shape(materialpoint_doneAndHappy)
   write(6,'(a32,1x,7(i8,1x),/)') 'materialpoint_results: ',      shape(materialpoint_results)
   write(6,'(a32,1x,7(i8,1x))')   'maxSizePostResults: ',         homogenization_maxSizePostResults
 endif
 flush(6)

 if (debug_g < 1 .or. debug_g > homogenization_Ngrains(mesh_element(3,debug_e))) &
   call IO_error(602_pInt,ext_msg='component (grain)')

end subroutine homogenization_init
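
!--------------------------------------------------------------------------------------------------
! Sketch of the <job>.outputHomogenization file written above (one block per homogenization
! section, tab-separated keyword/value pairs). The section name and the per-scheme output lines
! below are placeholders; the real names and sizes come from homogenization_name, thisOutput and
! thisSize, and the "(field)" block only appears when compiled with -DmultiphysicsOut:
!
!   [SX]
!   (type)         isostrain
!   (ngrains)         1
!   <outputName>   <size>        (one line per scheme output, skipped for type 'none')
!   (field)
!   temperature       1          (one line per requested field output)
!--------------------------------------------------------------------------------------------------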


!--------------------------------------------------------------------------------------------------
!> @brief parallelized calculation of stress and corresponding tangent at material points
!--------------------------------------------------------------------------------------------------
subroutine materialpoint_stressAndItsTangent(updateJaco,dt)
 use numerics, only: &
   subStepMinHomog, &
   subStepSizeHomog, &
   stepIncreaseHomog, &
   nHomog, &
   nMPstate
 use math, only: &
   math_transpose33
 use FEsolving, only: &
   FEsolving_execElem, &
   FEsolving_execIP, &
   terminallyIll
 use mesh, only: &
   mesh_element, &
   mesh_NcpElems, &
   mesh_maxNips
 use material, only: &
   plasticState, &
   damageState, &
   thermalState, &
   homogState, &
   mappingHomogenization, &
   mappingConstitutive, &
   homogenization_Ngrains
 use crystallite, only: &
   crystallite_F0, &
   crystallite_Fp0, &
   crystallite_Fp, &
   crystallite_Lp0, &
   crystallite_Lp, &
   crystallite_dPdF, &
   crystallite_dPdF0, &
   crystallite_Tstar0_v, &
   crystallite_Tstar_v, &
   crystallite_partionedF0, &
   crystallite_partionedF, &
   crystallite_partionedFp0, &
   crystallite_partionedLp0, &
   crystallite_partioneddPdF0, &
   crystallite_partionedTstar0_v, &
   crystallite_dt, &
   crystallite_requested, &
   crystallite_converged, &
   crystallite_stressAndItsTangent, &
   crystallite_orientations
 use debug, only: &
   debug_level, &
   debug_homogenization, &
   debug_levelBasic, &
   debug_levelSelective, &
   debug_e, &
   debug_i, &
   debug_MaterialpointLoopDistribution, &
   debug_MaterialpointStateLoopDistribution
 use math, only: &
   math_pDecomposition

 implicit none
 real(pReal), intent(in) :: dt                     !< time increment
 logical,     intent(in) :: updateJaco             !< initiating Jacobian update
 integer(pInt) :: &
   NiterationHomog, &
   NiterationMPstate, &
   g, &                                            !< grain number
   i, &                                            !< integration point number
   e, &                                            !< element number
   myNgrains

!--------------------------------------------------------------------------------------------------
! initialize to starting condition
 if (iand(debug_level(debug_homogenization), debug_levelBasic) /= 0_pInt) then
   !$OMP CRITICAL (write2out)
     write(6,'(/a,i5,1x,i2)') '<< HOMOG >> Material Point start at el ip ', debug_e, debug_i

     write(6,'(a,/,3(12x,3(f14.9,1x)/))') '<< HOMOG >> F0', &
                                          math_transpose33(materialpoint_F0(1:3,1:3,debug_i,debug_e))
     write(6,'(a,/,3(12x,3(f14.9,1x)/))') '<< HOMOG >> F', &
                                          math_transpose33(materialpoint_F(1:3,1:3,debug_i,debug_e))
   !$OMP END CRITICAL (write2out)
 endif

!--------------------------------------------------------------------------------------------------
! initialize restoration points of ...
 do e = FEsolving_execElem(1),FEsolving_execElem(2)
   myNgrains = homogenization_Ngrains(mesh_element(3,e))
   forall(i = FEsolving_execIP(1,e):FEsolving_execIP(2,e), g = 1:myNgrains)

     plasticState(mappingConstitutive(2,g,i,e))%partionedState0(:,mappingConstitutive(1,g,i,e)) = &
     plasticState(mappingConstitutive(2,g,i,e))%state0(         :,mappingConstitutive(1,g,i,e))
     damageState( mappingConstitutive(2,g,i,e))%partionedState0(:,mappingConstitutive(1,g,i,e)) = &
     damageState( mappingConstitutive(2,g,i,e))%state0(         :,mappingConstitutive(1,g,i,e))
     thermalState(mappingConstitutive(2,g,i,e))%partionedState0(:,mappingConstitutive(1,g,i,e)) = &
     thermalState(mappingConstitutive(2,g,i,e))%state0(         :,mappingConstitutive(1,g,i,e))

     crystallite_partionedFp0(1:3,1:3,g,i,e) = crystallite_Fp0(1:3,1:3,g,i,e)                       ! ...plastic def grads
     crystallite_partionedLp0(1:3,1:3,g,i,e) = crystallite_Lp0(1:3,1:3,g,i,e)                       ! ...plastic velocity grads
     crystallite_partioneddPdF0(1:3,1:3,1:3,1:3,g,i,e) = crystallite_dPdF0(1:3,1:3,1:3,1:3,g,i,e)   ! ...stiffness
     crystallite_partionedF0(1:3,1:3,g,i,e) = crystallite_F0(1:3,1:3,g,i,e)                         ! ...def grads
     crystallite_partionedTstar0_v(1:6,g,i,e) = crystallite_Tstar0_v(1:6,g,i,e)                     ! ...2nd PK stress

   endforall
   forall(i = FEsolving_execIP(1,e):FEsolving_execIP(2,e))
     materialpoint_subF0(1:3,1:3,i,e) = materialpoint_F0(1:3,1:3,i,e)                               ! ...def grad
     materialpoint_subFrac(i,e) = 0.0_pReal
     materialpoint_subStep(i,e) = 1.0_pReal/subStepSizeHomog                                        ! <<added to adopt flexibility in cutback size>>
     materialpoint_converged(i,e) = .false.                                                         ! pretend failed step of twice the required size
     materialpoint_requested(i,e) = .true.                                                          ! everybody requires calculation
   endforall
   forall(i = FEsolving_execIP(1,e):FEsolving_execIP(2,e), &
          homogState(mappingHomogenization(2,i,e))%sizeState > 0_pInt) &
     homogState(mappingHomogenization(2,i,e))%subState0(:,mappingHomogenization(1,i,e)) = &
     homogState(mappingHomogenization(2,i,e))%State0(   :,mappingHomogenization(1,i,e))             ! ...internal homogenization state
 enddo
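
!--------------------------------------------------------------------------------------------------
! Explanatory note on the sub-stepping scheme driven below (comment only): every IP carries the
! fraction of the FE increment already converged (materialpoint_subFrac) and the trial fraction of
! the next attempt (materialpoint_subStep). The target of a sub-step is the linear interpolation
!   materialpoint_subF = materialpoint_subF0 + materialpoint_subStep * (materialpoint_F - materialpoint_F0)
! with time step materialpoint_subdt = materialpoint_subStep * dt. Converged sub-steps wind the
! restoration points forward and may grow subStep by stepIncreaseHomog; failed ones shrink it by
! subStepSizeHomog until subStepMinHomog terminates the cutback.
!--------------------------------------------------------------------------------------------------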

 NiterationHomog = 0_pInt

 cutBackLooping: do while (.not. terminallyIll .and. &
      any(materialpoint_subStep(:,FEsolving_execELem(1):FEsolving_execElem(2)) > subStepMinHomog))

   !$OMP PARALLEL DO PRIVATE(myNgrains)
   elementLooping1: do e = FEsolving_execElem(1),FEsolving_execElem(2)
     myNgrains = homogenization_Ngrains(mesh_element(3,e))
     IpLooping1: do i = FEsolving_execIP(1,e),FEsolving_execIP(2,e)

       converged: if ( materialpoint_converged(i,e) ) then
#ifndef _OPENMP
         if (iand(debug_level(debug_homogenization), debug_levelBasic) /= 0_pInt &
             .and. ((e == debug_e .and. i == debug_i) &
                    .or. .not. iand(debug_level(debug_homogenization),debug_levelSelective) /= 0_pInt)) then
           write(6,'(a,1x,f12.8,1x,a,1x,f12.8,1x,a,i8,1x,i2/)') '<< HOMOG >> winding forward from', &
             materialpoint_subFrac(i,e), 'to current materialpoint_subFrac', &
             materialpoint_subFrac(i,e)+materialpoint_subStep(i,e),'in materialpoint_stressAndItsTangent at el ip',e,i
         endif
#endif

!--------------------------------------------------------------------------------------------------
! calculate new subStep and new subFrac
         materialpoint_subFrac(i,e) = materialpoint_subFrac(i,e) + materialpoint_subStep(i,e)
         !$OMP FLUSH(materialpoint_subFrac)
         materialpoint_subStep(i,e) = min(1.0_pReal-materialpoint_subFrac(i,e), &
                                          stepIncreaseHomog*materialpoint_subStep(i,e))             ! introduce flexibility for step increase/acceleration
         !$OMP FLUSH(materialpoint_subStep)

         steppingNeeded: if (materialpoint_subStep(i,e) > subStepMinHomog) then

           ! wind forward grain starting point of...
           crystallite_partionedF0(1:3,1:3,1:myNgrains,i,e) = crystallite_partionedF(1:3,1:3,1:myNgrains,i,e)              ! ...def grads
           crystallite_partionedFp0(1:3,1:3,1:myNgrains,i,e) = crystallite_Fp(1:3,1:3,1:myNgrains,i,e)                     ! ...plastic def grads
           crystallite_partionedLp0(1:3,1:3,1:myNgrains,i,e) = crystallite_Lp(1:3,1:3,1:myNgrains,i,e)                     ! ...plastic velocity grads
           crystallite_partioneddPdF0(1:3,1:3,1:3,1:3,1:myNgrains,i,e) = crystallite_dPdF(1:3,1:3,1:3,1:3,1:myNgrains,i,e) ! ...stiffness
           crystallite_partionedTstar0_v(1:6,1:myNgrains,i,e) = crystallite_Tstar_v(1:6,1:myNgrains,i,e)                   ! ...2nd PK stress
           forall (g = 1:myNgrains)
             plasticState(mappingConstitutive(2,g,i,e))%partionedState0(:,mappingConstitutive(1,g,i,e)) = &
             plasticState(mappingConstitutive(2,g,i,e))%state(          :,mappingConstitutive(1,g,i,e))
             damageState( mappingConstitutive(2,g,i,e))%partionedState0(:,mappingConstitutive(1,g,i,e)) = &
             damageState( mappingConstitutive(2,g,i,e))%state(          :,mappingConstitutive(1,g,i,e))
             thermalState(mappingConstitutive(2,g,i,e))%partionedState0(:,mappingConstitutive(1,g,i,e)) = &
             thermalState(mappingConstitutive(2,g,i,e))%state(          :,mappingConstitutive(1,g,i,e))
           end forall
           if (homogState(mappingHomogenization(2,i,e))%sizeState > 0_pInt) &
             homogState(mappingHomogenization(2,i,e))%subState0(:,mappingHomogenization(1,i,e)) = &
             homogState(mappingHomogenization(2,i,e))%state(    :,mappingHomogenization(1,i,e))
           materialpoint_subF0(1:3,1:3,i,e) = materialpoint_subF(1:3,1:3,i,e)                       ! ...def grad
           !$OMP FLUSH(materialpoint_subF0)
         elseif (materialpoint_requested(i,e)) then steppingNeeded                                  ! already at final time (??)
           if (iand(debug_level(debug_homogenization), debug_levelBasic) /= 0_pInt) then
             !$OMP CRITICAL (distributionHomog)
               debug_MaterialpointLoopDistribution(min(nHomog+1,NiterationHomog)) = &
                 debug_MaterialpointLoopDistribution(min(nHomog+1,NiterationHomog)) + 1
             !$OMP END CRITICAL (distributionHomog)
           endif
         endif steppingNeeded

       else converged
         if ( (myNgrains == 1_pInt .and. materialpoint_subStep(i,e) <= 1.0 ) .or. &                 ! single grain already tried internal subStepping in crystallite
              subStepSizeHomog * materialpoint_subStep(i,e) <= subStepMinHomog ) then               ! would require too small subStep
                                                                                                    ! cutback makes no sense
           !$OMP FLUSH(terminallyIll)
           if (.not. terminallyIll) then                                                            ! so first signals terminally ill...
             !$OMP CRITICAL (write2out)
               write(6,*) 'Integration point ', i,' at element ', e, ' terminally ill'
             !$OMP END CRITICAL (write2out)
           endif
           !$OMP CRITICAL (setTerminallyIll)
             terminallyIll = .true.                                                                 ! ...and kills all others
           !$OMP END CRITICAL (setTerminallyIll)
         else                                                                                       ! cutback makes sense
           materialpoint_subStep(i,e) = subStepSizeHomog * materialpoint_subStep(i,e)               ! crystallite had severe trouble, so do a significant cutback
           !$OMP FLUSH(materialpoint_subStep)

#ifndef _OPENMP
           if (iand(debug_level(debug_homogenization), debug_levelBasic) /= 0_pInt &
               .and. ((e == debug_e .and. i == debug_i) &
                      .or. .not. iand(debug_level(debug_homogenization), debug_levelSelective) /= 0_pInt)) then
             write(6,'(a,1x,f12.8,a,i8,1x,i2/)') &
               '<< HOMOG >> cutback step in materialpoint_stressAndItsTangent with new materialpoint_subStep:',&
               materialpoint_subStep(i,e),' at el ip',e,i
           endif
#endif

!--------------------------------------------------------------------------------------------------
! restore...
           crystallite_Fp(1:3,1:3,1:myNgrains,i,e) = crystallite_partionedFp0(1:3,1:3,1:myNgrains,i,e)                     ! ...plastic def grads
           crystallite_Lp(1:3,1:3,1:myNgrains,i,e) = crystallite_partionedLp0(1:3,1:3,1:myNgrains,i,e)                     ! ...plastic velocity grads
           crystallite_dPdF(1:3,1:3,1:3,1:3,1:myNgrains,i,e) = crystallite_partioneddPdF0(1:3,1:3,1:3,1:3,1:myNgrains,i,e) ! ...stiffness
           crystallite_Tstar_v(1:6,1:myNgrains,i,e) = crystallite_partionedTstar0_v(1:6,1:myNgrains,i,e)                   ! ...2nd PK stress
           forall (g = 1:myNgrains)
             plasticState(mappingConstitutive(2,g,i,e))%state(          :,mappingConstitutive(1,g,i,e)) = &
             plasticState(mappingConstitutive(2,g,i,e))%partionedState0(:,mappingConstitutive(1,g,i,e))
             damageState( mappingConstitutive(2,g,i,e))%state(          :,mappingConstitutive(1,g,i,e)) = &
             damageState( mappingConstitutive(2,g,i,e))%partionedState0(:,mappingConstitutive(1,g,i,e))
             thermalState(mappingConstitutive(2,g,i,e))%state(          :,mappingConstitutive(1,g,i,e)) = &
             thermalState(mappingConstitutive(2,g,i,e))%partionedState0(:,mappingConstitutive(1,g,i,e))
           end forall
           if (homogState(mappingHomogenization(2,i,e))%sizeState > 0_pInt) &
             homogState(mappingHomogenization(2,i,e))%state(    :,mappingHomogenization(1,i,e)) = &
             homogState(mappingHomogenization(2,i,e))%subState0(:,mappingHomogenization(1,i,e))
         endif
       endif converged

       if (materialpoint_subStep(i,e) > subStepMinHomog) then
         materialpoint_requested(i,e) = .true.
         materialpoint_subF(1:3,1:3,i,e) = materialpoint_subF0(1:3,1:3,i,e) + &
                                           materialpoint_subStep(i,e) * (materialpoint_F(1:3,1:3,i,e) - materialpoint_F0(1:3,1:3,i,e))
         materialpoint_subdt(i,e) = materialpoint_subStep(i,e) * dt
         materialpoint_doneAndHappy(1:2,i,e) = [.false.,.true.]
       endif
     enddo IpLooping1
   enddo elementLooping1
   !$OMP END PARALLEL DO

   NiterationMPstate = 0_pInt

   convergenceLooping: do while (.not. terminallyIll .and. &
             any(            materialpoint_requested(:,FEsolving_execELem(1):FEsolving_execElem(2)) &
                 .and. .not. materialpoint_doneAndHappy(1,:,FEsolving_execELem(1):FEsolving_execElem(2)) &
                ) .and. &
             NiterationMPstate < nMPstate)
     NiterationMPstate = NiterationMPstate + 1

!--------------------------------------------------------------------------------------------------
! deformation partitioning
! based on materialpoint_subF0,.._subF,crystallite_partionedF0, and homogenization_state,
! results in crystallite_partionedF
     !$OMP PARALLEL DO PRIVATE(myNgrains)
     elementLooping2: do e = FEsolving_execElem(1),FEsolving_execElem(2)
       myNgrains = homogenization_Ngrains(mesh_element(3,e))
       IpLooping2: do i = FEsolving_execIP(1,e),FEsolving_execIP(2,e)
         if (      materialpoint_requested(i,e) .and. &                                             ! process requested but...
             .not. materialpoint_doneAndHappy(1,i,e)) then                                          ! ...not yet done material points
           call homogenization_partitionDeformation(i,e)                                            ! partition deformation onto constituents
           crystallite_dt(1:myNgrains,i,e) = materialpoint_subdt(i,e)                               ! propagate materialpoint dt to grains
           crystallite_requested(1:myNgrains,i,e) = .true.                                          ! request calculation for constituents
         else
           crystallite_requested(1:myNgrains,i,e) = .false.                                         ! calculation for constituents not required anymore
         endif
       enddo IpLooping2
     enddo elementLooping2
     !$OMP END PARALLEL DO

!--------------------------------------------------------------------------------------------------
! crystallite integration
! based on crystallite_partionedF0,.._partionedF
! incrementing by crystallite_dt
     call crystallite_stressAndItsTangent(updateJaco)                                               ! request stress and tangent calculation for constituent grains

!--------------------------------------------------------------------------------------------------
! state update
     !$OMP PARALLEL DO
     elementLooping3: do e = FEsolving_execElem(1),FEsolving_execElem(2)
       IpLooping3: do i = FEsolving_execIP(1,e),FEsolving_execIP(2,e)
         if (      materialpoint_requested(i,e) .and. &
             .not. materialpoint_doneAndHappy(1,i,e)) then
           if (.not. all(crystallite_converged(:,i,e))) then
             materialpoint_doneAndHappy(1:2,i,e) = [.true.,.false.]
             materialpoint_converged(i,e) = .false.
           else
             materialpoint_doneAndHappy(1:2,i,e) = homogenization_updateState(i,e)
             materialpoint_converged(i,e) = all(materialpoint_doneAndHappy(1:2,i,e))                ! converged if done and happy
           endif
           !$OMP FLUSH(materialpoint_converged)
           if (materialpoint_converged(i,e)) then
             if (iand(debug_level(debug_homogenization), debug_levelBasic) /= 0_pInt) then
               !$OMP CRITICAL (distributionMPState)
                 debug_MaterialpointStateLoopdistribution(NiterationMPstate) = &
                   debug_MaterialpointStateLoopdistribution(NiterationMPstate) + 1_pInt
               !$OMP END CRITICAL (distributionMPState)
             endif
           endif
         endif
       enddo IpLooping3
     enddo elementLooping3
     !$OMP END PARALLEL DO

   enddo convergenceLooping

   NiterationHomog = NiterationHomog + 1_pInt

 enddo cutBackLooping

 if (.not. terminallyIll ) then
   call crystallite_orientations()                                                                  ! calculate crystal orientations
   !$OMP PARALLEL DO
   elementLooping4: do e = FEsolving_execElem(1),FEsolving_execElem(2)
     IpLooping4: do i = FEsolving_execIP(1,e),FEsolving_execIP(2,e)
       call homogenization_averageStressAndItsTangent(i,e)
     enddo IpLooping4
   enddo elementLooping4
   !$OMP END PARALLEL DO
 else
   !$OMP CRITICAL (write2out)
     write(6,'(/,a,/)') '<< HOMOG >> Material Point terminally ill'
   !$OMP END CRITICAL (write2out)
 endif

end subroutine materialpoint_stressAndItsTangent
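
!--------------------------------------------------------------------------------------------------
! Usage sketch (comment only, assuming a CPFEM-like driver; the names F_lastInc, F_current, P and
! dPdF are placeholders, not part of this module):
!
!   materialpoint_F0(1:3,1:3,ip,el) = F_lastInc                ! def grad at start of FE increment
!   materialpoint_F (1:3,1:3,ip,el) = F_current                ! target def grad of the increment
!   call materialpoint_stressAndItsTangent(.true., dt)         ! sub-stepping/cutback handled inside
!   P    = materialpoint_P   (1:3,1:3,ip,el)                   ! first Piola--Kirchhoff stress
!   dPdF = materialpoint_dPdF(1:3,1:3,1:3,1:3,ip,el)           ! consistent tangent
!--------------------------------------------------------------------------------------------------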


!--------------------------------------------------------------------------------------------------
!> @brief parallelized calculation of result array at material points
!--------------------------------------------------------------------------------------------------
subroutine materialpoint_postResults
 use FEsolving, only: &
   FEsolving_execElem, &
   FEsolving_execIP
 use mesh, only: &
   mesh_element
 use material, only: &
   mappingHomogenization, &
   homogState, &
   plasticState, &
   damageState, &
   thermalState, &
   material_phase, &
   homogenization_Ngrains, &
   microstructure_crystallite
 use constitutive, only: &
   constitutive_postResults
 use crystallite, only: &
   crystallite_sizePostResults, &
   crystallite_postResults

 implicit none
 integer(pInt) :: &
   thePos, &
   theSize, &
   myNgrains, &
   myCrystallite, &
   g, &                                             !< grain number
   i, &                                             !< integration point number
   e                                                !< element number

 !$OMP PARALLEL DO PRIVATE(myNgrains,myCrystallite,thePos,theSize)
 elementLooping: do e = FEsolving_execElem(1),FEsolving_execElem(2)
   myNgrains     = homogenization_Ngrains(mesh_element(3,e))
   myCrystallite = microstructure_crystallite(mesh_element(4,e))
   IpLooping: do i = FEsolving_execIP(1,e),FEsolving_execIP(2,e)
     thePos = 0_pInt

     theSize = homogState(mappingHomogenization(2,i,e))%sizePostResults
     materialpoint_results(thePos+1,i,e) = real(theSize,pReal)                                      ! tell size of homogenization results
     thePos = thePos + 1_pInt

     if (theSize > 0_pInt) then                                                                     ! any homogenization results to mention?
       materialpoint_results(thePos+1:thePos+theSize,i,e) = homogenization_postResults(i,e)         ! tell homogenization results
       thePos = thePos + theSize
     endif

#ifdef multiphysicsOut
     theSize = field_sizePostResults(mappingHomogenization(2,i,e))
     if (theSize > 0_pInt) then                                                                     ! any field results to mention?
       materialpoint_results(thePos+1:thePos+theSize,i,e) = field_postResults(i,e)                  ! tell field results
       thePos = thePos + theSize
     endif
#endif

     materialpoint_results(thePos+1,i,e) = real(myNgrains,pReal)                                    ! tell number of grains at materialpoint
     thePos = thePos + 1_pInt

     grainLooping: do g = 1,myNgrains
#ifdef multiphysicsOut
       theSize = 1 + crystallite_sizePostResults(myCrystallite) + &
                 1 + plasticState(material_phase(g,i,e))%sizePostResults + &                        !ToDo
                     damageState( material_phase(g,i,e))%sizePostResults + &
                     thermalState(material_phase(g,i,e))%sizePostResults
#else
       theSize = (1 + crystallite_sizePostResults(myCrystallite)) + &
                 (1 + plasticState(material_phase(g,i,e))%sizePostResults)
#endif
       materialpoint_results(thePos+1:thePos+theSize,i,e) = crystallite_postResults(g,i,e)          ! tell crystallite results
       thePos = thePos + theSize
     enddo grainLooping
   enddo IpLooping
 enddo elementLooping
 !$OMP END PARALLEL DO

end subroutine materialpoint_postResults
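
!--------------------------------------------------------------------------------------------------
! Layout sketch of materialpoint_results(:,i,e) as assembled above (actual sizes vary per element
! and are only bounded by materialpoint_sizeResults computed in homogenization_init):
!
!   entry 1                      : size of the homogenization result block
!   next homogSize entries       : homogenization_postResults(i,e)
!   next fieldSize entries       : field_postResults(i,e)          (only with -DmultiphysicsOut)
!   next entry                   : number of grains (myNgrains)
!   per grain g = 1..myNgrains   : crystallite_postResults(g,i,e)  (crystallite + constitutive block)
!--------------------------------------------------------------------------------------------------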


!--------------------------------------------------------------------------------------------------
!> @brief partition material point def grad onto constituents
!--------------------------------------------------------------------------------------------------
subroutine homogenization_partitionDeformation(ip,el)
 use mesh, only: &
   mesh_element
 use material, only: &
   homogenization_type, &
   homogenization_maxNgrains, &
   HOMOGENIZATION_NONE_ID, &
   HOMOGENIZATION_ISOSTRAIN_ID, &
   HOMOGENIZATION_RGC_ID
 use crystallite, only: &
   crystallite_partionedF0, &
   crystallite_partionedF
 use homogenization_isostrain, only: &
   homogenization_isostrain_partitionDeformation
 use homogenization_RGC, only: &
   homogenization_RGC_partitionDeformation

 implicit none
 integer(pInt), intent(in) :: &
   ip, &                                                                                   !< integration point
   el                                                                                      !< element number

 chosenHomogenization: select case(homogenization_type(mesh_element(3,el)))

   case (HOMOGENIZATION_NONE_ID) chosenHomogenization
     crystallite_partionedF(1:3,1:3,1:homogenization_maxNgrains,ip,el) = 0.0_pReal
     crystallite_partionedF(1:3,1:3,1:1,ip,el) = &
       spread(materialpoint_subF(1:3,1:3,ip,el),3,1)

   case (HOMOGENIZATION_ISOSTRAIN_ID) chosenHomogenization
     call homogenization_isostrain_partitionDeformation(&
                          crystallite_partionedF(1:3,1:3,1:homogenization_maxNgrains,ip,el), &
                          materialpoint_subF(1:3,1:3,ip,el),&
                          el)

   case (HOMOGENIZATION_RGC_ID) chosenHomogenization
     call homogenization_RGC_partitionDeformation(&
                          crystallite_partionedF(1:3,1:3,1:homogenization_maxNgrains,ip,el), &
                          materialpoint_subF(1:3,1:3,ip,el),&
                          ip, &
                          el)

 end select chosenHomogenization

end subroutine homogenization_partitionDeformation
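
!--------------------------------------------------------------------------------------------------
! Minimal sketch of the isostrain (Taylor-type) partitioning delegated above, assuming Ngrains
! constituents at the material point (the actual work is done in homogenization_isostrain):
!   do g = 1, Ngrains
!     F(1:3,1:3,g) = materialpoint_subF(1:3,1:3,ip,el)               ! every grain sees the same def grad
!   enddo
!--------------------------------------------------------------------------------------------------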


!--------------------------------------------------------------------------------------------------
!> @brief update the internal state of the homogenization scheme and tell whether "done" and
!> "happy" with result
!--------------------------------------------------------------------------------------------------
function homogenization_updateState(ip,el)
 use mesh, only: &
   mesh_element
 use material, only: &
   homogenization_type, &
   homogenization_maxNgrains, &
   HOMOGENIZATION_RGC_ID
 use crystallite, only: &
   crystallite_P, &
   crystallite_dPdF, &
   crystallite_partionedF,&
   crystallite_partionedF0
 use homogenization_RGC, only: &
   homogenization_RGC_updateState

 implicit none
 integer(pInt), intent(in) :: &
   ip, &                                                                                   !< integration point
   el                                                                                      !< element number
 logical, dimension(2) :: homogenization_updateState

 chosenHomogenization: select case(homogenization_type(mesh_element(3,el)))

   case (HOMOGENIZATION_RGC_ID) chosenHomogenization
     homogenization_updateState = &
        homogenization_RGC_updateState(crystallite_P(1:3,1:3,1:homogenization_maxNgrains,ip,el), &
                                       crystallite_partionedF(1:3,1:3,1:homogenization_maxNgrains,ip,el), &
                                       crystallite_partionedF0(1:3,1:3,1:homogenization_maxNgrains,ip,el),&
                                       materialpoint_subF(1:3,1:3,ip,el),&
                                       materialpoint_subdt(ip,el), &
                                       crystallite_dPdF(1:3,1:3,1:3,1:3,1:homogenization_maxNgrains,ip,el), &
                                       ip, &
                                       el)

   case default chosenHomogenization
     homogenization_updateState = .true.

 end select chosenHomogenization

end function homogenization_updateState
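
!--------------------------------------------------------------------------------------------------
! Usage sketch (illustrative only; "converged" is a hypothetical local variable):
!   logical, dimension(2) :: converged
!   converged = homogenization_updateState(i,e)                      ! converged(1) = done, converged(2) = happy
!--------------------------------------------------------------------------------------------------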


!--------------------------------------------------------------------------------------------------
!> @brief derive average stress and stiffness from constituent quantities
!--------------------------------------------------------------------------------------------------
subroutine homogenization_averageStressAndItsTangent(ip,el)
 use mesh, only: &
   mesh_element
 use material, only: &
   homogenization_type, &
   homogenization_maxNgrains, &
   HOMOGENIZATION_NONE_ID, &
   HOMOGENIZATION_ISOSTRAIN_ID, &
   HOMOGENIZATION_RGC_ID
 use crystallite, only: &
   crystallite_P,crystallite_dPdF
 use homogenization_isostrain, only: &
   homogenization_isostrain_averageStressAndItsTangent
 use homogenization_RGC, only: &
   homogenization_RGC_averageStressAndItsTangent

 implicit none
 integer(pInt), intent(in) :: &
   ip, &                                                                                   !< integration point
   el                                                                                      !< element number

 chosenHomogenization: select case(homogenization_type(mesh_element(3,el)))

   case (HOMOGENIZATION_NONE_ID) chosenHomogenization
     materialpoint_P(1:3,1:3,ip,el) = sum(crystallite_P(1:3,1:3,1:1,ip,el),3)
     materialpoint_dPdF(1:3,1:3,1:3,1:3,ip,el) &
       = sum(crystallite_dPdF(1:3,1:3,1:3,1:3,1:1,ip,el),5)

   case (HOMOGENIZATION_ISOSTRAIN_ID) chosenHomogenization
     call homogenization_isostrain_averageStressAndItsTangent(&
       materialpoint_P(1:3,1:3,ip,el), &
       materialpoint_dPdF(1:3,1:3,1:3,1:3,ip,el),&
       crystallite_P(1:3,1:3,1:homogenization_maxNgrains,ip,el), &
       crystallite_dPdF(1:3,1:3,1:3,1:3,1:homogenization_maxNgrains,ip,el), &
       el)

   case (HOMOGENIZATION_RGC_ID) chosenHomogenization
     call homogenization_RGC_averageStressAndItsTangent(&
       materialpoint_P(1:3,1:3,ip,el), &
       materialpoint_dPdF(1:3,1:3,1:3,1:3,ip,el),&
       crystallite_P(1:3,1:3,1:homogenization_maxNgrains,ip,el), &
       crystallite_dPdF(1:3,1:3,1:3,1:3,1:homogenization_maxNgrains,ip,el), &
       el)

 end select chosenHomogenization

end subroutine homogenization_averageStressAndItsTangent
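
!--------------------------------------------------------------------------------------------------
! For equally weighted constituents the averaging delegated above reduces to the arithmetic mean,
! e.g.
!   P(ip,el)    = 1/Ngrains * sum over g of P_g
!   dPdF(ip,el) = 1/Ngrains * sum over g of dPdF_g
! (the scheme-specific routines in homogenization_isostrain/_RGC may weight differently)
!--------------------------------------------------------------------------------------------------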


!--------------------------------------------------------------------------------------------------
!> @brief Returns average specific heat at each integration point
!--------------------------------------------------------------------------------------------------
function field_getSpecificHeat(ip,el)
 use mesh, only: &
   mesh_element
 use lattice, only: &
   lattice_specificHeat
 use material, only: &
   material_phase, &
   material_homog, &
   field_thermal_type, &
   FIELD_THERMAL_local_ID, &
   FIELD_THERMAL_nonlocal_ID, &
   homogenization_Ngrains

 implicit none
 real(pReal) :: field_getSpecificHeat
 integer(pInt), intent(in) :: &
   ip, &                                                                                   !< integration point number
   el                                                                                      !< element number
 integer(pInt) :: &
   ipc

 field_getSpecificHeat = 0.0_pReal

 select case(field_thermal_type(material_homog(ip,el)))

   case (FIELD_THERMAL_local_ID)
     field_getSpecificHeat = 0.0_pReal

   case (FIELD_THERMAL_nonlocal_ID)
     do ipc = 1, homogenization_Ngrains(mesh_element(3,el))
       field_getSpecificHeat = field_getSpecificHeat + lattice_specificHeat(material_phase(ipc,ip,el))
     enddo

 end select

 field_getSpecificHeat = field_getSpecificHeat/homogenization_Ngrains(mesh_element(3,el))

end function field_getSpecificHeat
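
!--------------------------------------------------------------------------------------------------
! The grain average computed above is a plain arithmetic mean over the phases present at the IP,
! e.g. for two constituents:
!   c_p(ip,el) = ( lattice_specificHeat(phase_1) + lattice_specificHeat(phase_2) ) / 2
! The same averaging pattern is used by field_getMassDensity, field_getDamageMobility and
! field_getVacancyMobility below.
!--------------------------------------------------------------------------------------------------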


!--------------------------------------------------------------------------------------------------
!> @brief Returns average mass density at each integration point
!--------------------------------------------------------------------------------------------------
function field_getMassDensity(ip,el)
 use mesh, only: &
   mesh_element
 use lattice, only: &
   lattice_massDensity
 use material, only: &
   material_phase, &
   material_homog, &
   field_thermal_type, &
   FIELD_THERMAL_local_ID, &
   FIELD_THERMAL_nonlocal_ID, &
   homogenization_Ngrains

 implicit none
 real(pReal) :: field_getMassDensity
 integer(pInt), intent(in) :: &
   ip, &                                                                                   !< integration point number
   el                                                                                      !< element number
 integer(pInt) :: &
   ipc

 field_getMassDensity = 0.0_pReal

 select case(field_thermal_type(material_homog(ip,el)))

   case (FIELD_THERMAL_local_ID)
     field_getMassDensity = 0.0_pReal

   case (FIELD_THERMAL_nonlocal_ID)
     do ipc = 1, homogenization_Ngrains(mesh_element(3,el))
       field_getMassDensity = field_getMassDensity + lattice_massDensity(material_phase(ipc,ip,el))
     enddo

 end select

 field_getMassDensity = field_getMassDensity/homogenization_Ngrains(mesh_element(3,el))

end function field_getMassDensity

!--------------------------------------------------------------------------------------------------
!> @brief Returns average conductivity tensor for thermal field at each integration point
!--------------------------------------------------------------------------------------------------
function field_getThermalConductivity33(ip,el)
 use mesh, only: &
   mesh_element
 use lattice, only: &
   lattice_thermalConductivity33
 use material, only: &
   material_phase, &
   material_homog, &
   field_thermal_type, &
   FIELD_THERMAL_nonlocal_ID, &
   homogenization_Ngrains
 use crystallite, only: &
   crystallite_push33ToRef

 implicit none
 real(pReal), dimension(3,3) :: field_getThermalConductivity33
 integer(pInt), intent(in) :: &
   ip, &                                                                                   !< integration point number
   el                                                                                      !< element number
 integer(pInt) :: &
   ipc

 field_getThermalConductivity33 = 0.0_pReal

 select case(field_thermal_type(material_homog(ip,el)))

   case (FIELD_THERMAL_nonlocal_ID)
     do ipc = 1, homogenization_Ngrains(mesh_element(3,el))
       field_getThermalConductivity33 = field_getThermalConductivity33 + &
         crystallite_push33ToRef(ipc,ip,el,lattice_thermalConductivity33(1:3,1:3,material_phase(ipc,ip,el)))
     enddo

 end select

 field_getThermalConductivity33 = field_getThermalConductivity33/homogenization_Ngrains(mesh_element(3,el))

end function field_getThermalConductivity33
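
!--------------------------------------------------------------------------------------------------
! Note: unlike the scalar properties, the conductivity tensor is pushed to the reference
! configuration via crystallite_push33ToRef before the grain average is taken, so the value handed
! to the field solver is expressed in the reference frame.
!--------------------------------------------------------------------------------------------------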

!--------------------------------------------------------------------------------------------------
!> @brief Returns average diffusion tensor for damage field at each integration point
!--------------------------------------------------------------------------------------------------
function field_getDamageDiffusion33(ip,el)
 use mesh, only: &
   mesh_element
 use material, only: &
   material_homog, &
   field_damage_type, &
   FIELD_DAMAGE_NONLOCAL_ID, &
   homogenization_Ngrains
 use crystallite, only: &
   crystallite_push33ToRef
 use constitutive, only: &
   constitutive_getDamageDiffusion33

 implicit none
 real(pReal), dimension(3,3) :: field_getDamageDiffusion33
 integer(pInt), intent(in) :: &
   ip, &                                                                                   !< integration point number
   el                                                                                      !< element number
 integer(pInt) :: &
   ipc

 field_getDamageDiffusion33 = 0.0_pReal

 select case(field_damage_type(material_homog(ip,el)))

   case (FIELD_DAMAGE_NONLOCAL_ID)
     do ipc = 1, homogenization_Ngrains(mesh_element(3,el))
       field_getDamageDiffusion33 = field_getDamageDiffusion33 + &
         crystallite_push33ToRef(ipc,ip,el,constitutive_getDamageDiffusion33(ipc,ip,el))
     enddo

 end select

 field_getDamageDiffusion33 = field_getDamageDiffusion33/homogenization_Ngrains(mesh_element(3,el))

end function field_getDamageDiffusion33
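
!--------------------------------------------------------------------------------------------------
! In contrast to the thermal conductivity, which is taken directly from the lattice data, the
! damage (and vacancy) diffusion tensors are queried from the constitutive layer
! (constitutive_getDamageDiffusion33 / constitutive_getVacancyDiffusion33) and may therefore depend
! on the current state of each grain, not only on its phase.
!--------------------------------------------------------------------------------------------------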


!--------------------------------------------------------------------------------------------------
!> @brief Returns average mobility for damage field at each integration point
!--------------------------------------------------------------------------------------------------
real(pReal) function field_getDamageMobility(ip,el)
 use mesh, only: &
   mesh_element
 use lattice, only: &
   lattice_damageMobility
 use material, only: &
   material_phase, &
   material_homog, &
   field_damage_type, &
   FIELD_DAMAGE_NONLOCAL_ID, &
   homogenization_Ngrains

 implicit none
 integer(pInt), intent(in) :: &
   ip, &                                                                                   !< integration point number
   el                                                                                      !< element number
 integer(pInt) :: &
   ipc

 field_getDamageMobility = 0.0_pReal

 select case(field_damage_type(material_homog(ip,el)))

   case (FIELD_DAMAGE_NONLOCAL_ID)
     do ipc = 1, homogenization_Ngrains(mesh_element(3,el))
       field_getDamageMobility = field_getDamageMobility + lattice_damageMobility(material_phase(ipc,ip,el))
     enddo

 end select

 field_getDamageMobility = field_getDamageMobility/homogenization_Ngrains(mesh_element(3,el))

end function field_getDamageMobility


!--------------------------------------------------------------------------------------------------
!> @brief Returns average diffusion tensor for vacancy field at each integration point
!--------------------------------------------------------------------------------------------------
function field_getVacancyDiffusion33(ip,el)
 use mesh, only: &
   mesh_element
 use lattice, only: &
   lattice_vacancyDiffusion33
 use material, only: &
   material_homog, &
   field_vacancy_type, &
   FIELD_VACANCY_NONLOCAL_ID, &
   homogenization_Ngrains
 use crystallite, only: &
   crystallite_push33ToRef
 use constitutive, only: &
   constitutive_getVacancyDiffusion33

 implicit none
 real(pReal), dimension(3,3) :: field_getVacancyDiffusion33
 integer(pInt), intent(in) :: &
   ip, &                                                                                   !< integration point number
   el                                                                                      !< element number
 integer(pInt) :: &
   ipc

 field_getVacancyDiffusion33 = 0.0_pReal

 select case(field_vacancy_type(material_homog(ip,el)))

   case (FIELD_VACANCY_NONLOCAL_ID)
     do ipc = 1, homogenization_Ngrains(mesh_element(3,el))
       field_getVacancyDiffusion33 = field_getVacancyDiffusion33 + &
         crystallite_push33ToRef(ipc,ip,el, &
                                 constitutive_getVacancyDiffusion33(ipc,ip,el))
     enddo

 end select

 field_getVacancyDiffusion33 = field_getVacancyDiffusion33/ &
                               homogenization_Ngrains(mesh_element(3,el))

end function field_getVacancyDiffusion33

!--------------------------------------------------------------------------------------------------
!> @brief Returns average mobility for vacancy field at each integration point
!--------------------------------------------------------------------------------------------------
real(pReal) function field_getVacancyMobility(ip,el)
 use mesh, only: &
   mesh_element
 use lattice, only: &
   lattice_vacancyMobility
 use material, only: &
   material_phase, &
   material_homog, &
   field_vacancy_type, &
   FIELD_VACANCY_NONLOCAL_ID, &
   homogenization_Ngrains

 implicit none
 integer(pInt), intent(in) :: &
   ip, &                                                                                   !< integration point number
   el                                                                                      !< element number
 integer(pInt) :: &
   ipc

 field_getVacancyMobility = 0.0_pReal

 select case(field_vacancy_type(material_homog(ip,el)))

   case (FIELD_VACANCY_NONLOCAL_ID)
     do ipc = 1, homogenization_Ngrains(mesh_element(3,el))
       field_getVacancyMobility = field_getVacancyMobility + lattice_vacancyMobility(material_phase(ipc,ip,el))
     enddo

 end select

 field_getVacancyMobility = field_getVacancyMobility/ &
                            homogenization_Ngrains(mesh_element(3,el))

end function field_getVacancyMobility


!--------------------------------------------------------------------------------------------------
!> @brief Returns average local damage at each integration point
!--------------------------------------------------------------------------------------------------
real(pReal) function field_getLocalDamage(ip,el)
 use mesh, only: &
   mesh_element
 use material, only: &
   homogenization_Ngrains
 use constitutive, only: &
   constitutive_getLocalDamage

 implicit none
 integer(pInt), intent(in) :: &
   ip, &                                                                                   !< integration point number
   el                                                                                      !< element number
 integer(pInt) :: &
   ipc

!--------------------------------------------------------------------------------------------------
! compute the damage value to be passed to the field solver
 field_getLocalDamage = 0.0_pReal

 do ipc = 1, homogenization_Ngrains(mesh_element(3,el))
   field_getLocalDamage = field_getLocalDamage + constitutive_getLocalDamage(ipc,ip,el)
 enddo

 field_getLocalDamage = field_getLocalDamage/homogenization_Ngrains(mesh_element(3,el))

end function field_getLocalDamage
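
!--------------------------------------------------------------------------------------------------
! "Local" here refers to the grain-level (constitutive) damage; its grain average is handed to the
! nonlocal field solver, and the regularised solution is written back through field_putFieldDamage
! below.
!--------------------------------------------------------------------------------------------------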


!--------------------------------------------------------------------------------------------------
!> @brief Sets the regularised damage value in field state
!--------------------------------------------------------------------------------------------------
subroutine field_putFieldDamage(ip,el,fieldDamageValue)
 use material, only: &
   fieldDamage, &
   material_homog, &
   mappingHomogenization, &
   field_damage_type, &
   FIELD_DAMAGE_NONLOCAL_ID

 implicit none
 integer(pInt), intent(in) :: &
   ip, &                                                                                   !< integration point number
   el                                                                                      !< element number
 real(pReal), intent(in) :: &
   fieldDamageValue

 select case(field_damage_type(material_homog(ip,el)))

   case (FIELD_DAMAGE_NONLOCAL_ID)
     fieldDamage(material_homog(ip,el))% &
       field(1,mappingHomogenization(1,ip,el)) = fieldDamageValue

 end select

end subroutine field_putFieldDamage
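
!--------------------------------------------------------------------------------------------------
! The regularised value is stored in the homogenization-level field state: entry 1 of
! fieldDamage(homog)%field at the column given by mappingHomogenization(1,ip,el), i.e. one scalar
! per integration point.
!--------------------------------------------------------------------------------------------------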


!--------------------------------------------------------------------------------------------------
!> @brief Returns average local (adiabatic) temperature at each integration point
!--------------------------------------------------------------------------------------------------
real(pReal) function field_getLocalTemperature(ip,el)
 use mesh, only: &
   mesh_element
 use material, only: &
   homogenization_Ngrains
 use constitutive, only: &
   constitutive_getAdiabaticTemperature

 implicit none
 integer(pInt), intent(in) :: &
   ip, &                                                                                   !< integration point number
   el                                                                                      !< element number
 integer(pInt) :: &
   ipc

 field_getLocalTemperature = 0.0_pReal
 do ipc = 1, homogenization_Ngrains(mesh_element(3,el))
   field_getLocalTemperature = field_getLocalTemperature + &
                               constitutive_getAdiabaticTemperature(ipc,ip,el)             ! ToDo: array/function/subroutine, whichever is faster
 enddo
 field_getLocalTemperature = field_getLocalTemperature/homogenization_Ngrains(mesh_element(3,el))

end function field_getLocalTemperature


!--------------------------------------------------------------------------------------------------
!> @brief Sets the regularised temperature value in field state
!--------------------------------------------------------------------------------------------------
subroutine field_putFieldTemperature(ip,el,fieldThermalValue)
 use material, only: &
   material_homog, &
   fieldThermal, &
   mappingHomogenization, &
   field_thermal_type, &
   FIELD_THERMAL_nonlocal_ID

 implicit none
 integer(pInt), intent(in) :: &
   ip, &                                                                                   !< integration point number
   el                                                                                      !< element number
 real(pReal), intent(in) :: &
   fieldThermalValue

 select case(field_thermal_type(material_homog(ip,el)))

   case (FIELD_THERMAL_nonlocal_ID)
     fieldThermal(material_homog(ip,el))% &
       field(1,mappingHomogenization(1,ip,el)) = fieldThermalValue

 end select

end subroutine field_putFieldTemperature
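
!--------------------------------------------------------------------------------------------------
! Usage sketch from a staggered solution scheme (illustrative only; "T_solution" is a hypothetical
! array holding the solved nonlocal temperature field):
!   do e = FEsolving_execElem(1),FEsolving_execElem(2)
!     do i = FEsolving_execIP(1,e),FEsolving_execIP(2,e)
!       call field_putFieldTemperature(i,e,T_solution(i,e))
!     enddo
!   enddo
!--------------------------------------------------------------------------------------------------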


!--------------------------------------------------------------------------------------------------
!> @brief Returns average local vacancy concentration at each integration point
!--------------------------------------------------------------------------------------------------
real(pReal) function field_getLocalVacancyConcentration(ip,el)
 use mesh, only: &
   mesh_element
 use material, only: &
   homogenization_Ngrains
 use constitutive, only: &
   constitutive_getLocalVacancyConcentration

 implicit none
 integer(pInt), intent(in) :: &
   ip, &                                                                                   !< integration point number
   el                                                                                      !< element number
 integer(pInt) :: &
   ipc

 field_getLocalVacancyConcentration = 0.0_pReal
 do ipc = 1, homogenization_Ngrains(mesh_element(3,el))
   field_getLocalVacancyConcentration = field_getLocalVacancyConcentration + &
                                        constitutive_getLocalVacancyConcentration(ipc,ip,el) ! ToDo: array/function/subroutine, whichever is faster
 enddo
 field_getLocalVacancyConcentration = field_getLocalVacancyConcentration/ &
                                      homogenization_Ngrains(mesh_element(3,el))

end function field_getLocalVacancyConcentration


!--------------------------------------------------------------------------------------------------
!> @brief Sets the diffused vacancy concentration in field state
!--------------------------------------------------------------------------------------------------
subroutine field_putFieldVacancyConcentration(ip,el,fieldVacancyConcentration)
 use material, only: &
   material_homog, &
   fieldVacancy, &
   mappingHomogenization, &
   field_vacancy_type, &
   FIELD_VACANCY_nonlocal_ID

 implicit none
 integer(pInt), intent(in) :: &
   ip, &                                                                                   !< integration point number
   el                                                                                      !< element number
 real(pReal), intent(in) :: &
   fieldVacancyConcentration

 select case(field_vacancy_type(material_homog(ip,el)))

   case (FIELD_VACANCY_nonlocal_ID)
     fieldVacancy(material_homog(ip,el))% &
       field(1,mappingHomogenization(1,ip,el)) = fieldVacancyConcentration

 end select

end subroutine field_putFieldVacancyConcentration


!--------------------------------------------------------------------------------------------------
!> @brief return array of homogenization results for post file inclusion; call only
!> if homogenization_sizePostResults(i,e) > 0
!--------------------------------------------------------------------------------------------------
function homogenization_postResults(ip,el)
 use mesh, only: &
   mesh_element
 use material, only: &
   mappingHomogenization, &
   homogState, &
   homogenization_type, &
   HOMOGENIZATION_NONE_ID, &
   HOMOGENIZATION_ISOSTRAIN_ID, &
   HOMOGENIZATION_RGC_ID
 use homogenization_isostrain, only: &
   homogenization_isostrain_postResults
 use homogenization_RGC, only: &
   homogenization_RGC_postResults

 implicit none
 integer(pInt), intent(in) :: &
   ip, &                                                                                   !< integration point
   el                                                                                      !< element number
 real(pReal), dimension(homogState(mappingHomogenization(2,ip,el))%sizePostResults) :: &
   homogenization_postResults

 homogenization_postResults = 0.0_pReal
 chosenHomogenization: select case (homogenization_type(mesh_element(3,el)))

   case (HOMOGENIZATION_NONE_ID) chosenHomogenization

   case (HOMOGENIZATION_ISOSTRAIN_ID) chosenHomogenization
     homogenization_postResults = homogenization_isostrain_postResults(&
                                    ip, &
                                    el, &
                                    materialpoint_P(1:3,1:3,ip,el), &
                                    materialpoint_F(1:3,1:3,ip,el))

   case (HOMOGENIZATION_RGC_ID) chosenHomogenization
     homogenization_postResults = homogenization_RGC_postResults(&
                                    ip, &
                                    el, &
                                    materialpoint_P(1:3,1:3,ip,el), &
                                    materialpoint_F(1:3,1:3,ip,el))

 end select chosenHomogenization

end function homogenization_postResults
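
!--------------------------------------------------------------------------------------------------
! The result is an automatic array sized by homogState(mappingHomogenization(2,ip,el))%sizePostResults;
! the caller (materialpoint_postResults above) queries the same size before copying the slice, which
! is why the brief asks for a positive sizePostResults.
!--------------------------------------------------------------------------------------------------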


!--------------------------------------------------------------------------------------------------
!> @brief return array of field (thermal/damage/vacancy) results for post file inclusion; call only
!> if field_sizePostResults(mappingHomogenization(2,ip,el)) > 0
!--------------------------------------------------------------------------------------------------
function field_postResults(ip,el)
 use material, only: &
   mappingHomogenization, &
   fieldThermal, &
   fieldDamage, &
   fieldVacancy

 implicit none
 integer(pInt), intent(in) :: &
   ip, &                                                                                   !< integration point
   el                                                                                      !< element number
 real(pReal), dimension(field_sizePostResults(mappingHomogenization(2,ip,el))) :: &
   field_postResults
 integer(pInt) :: &
   c, homog, pos, o

 field_postResults = 0.0_pReal
 homog = mappingHomogenization(2,ip,el)
 pos   = mappingHomogenization(1,ip,el)
 c = 0_pInt
 do o = 1_pInt,field_Noutput(homog)
   select case(field_outputID(o,homog))

     case (temperature_ID)
       field_postResults(c+1_pInt) = fieldThermal(homog)%field(1,pos)
       c = c + 1_pInt

     case (damage_ID)
       field_postResults(c+1_pInt) = fieldDamage(homog)%field(1,pos)
       c = c + 1_pInt

     case (vacancy_concentration_ID)
       field_postResults(c+1_pInt) = fieldVacancy(homog)%field(1,pos)
       c = c + 1_pInt

   end select
 enddo

end function field_postResults
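
!--------------------------------------------------------------------------------------------------
! The output order follows the user-requested list per homogenization instance: field_Noutput(homog)
! entries, identified by field_outputID(o,homog), each contributing one scalar taken from the
! corresponding field state at IP column pos.
!--------------------------------------------------------------------------------------------------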


end module homogenization