! Copyright 2011 Max-Planck-Institut für Eisenforschung GmbH
!
! This file is part of DAMASK,
! the Düsseldorf Advanced MAterial Simulation Kit.
!
! DAMASK is free software: you can redistribute it and/or modify
! it under the terms of the GNU General Public License as published by
! the Free Software Foundation, either version 3 of the License, or
! (at your option) any later version.
!
! DAMASK is distributed in the hope that it will be useful,
! but WITHOUT ANY WARRANTY; without even the implied warranty of
! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
! GNU General Public License for more details.
!
! You should have received a copy of the GNU General Public License
! along with DAMASK. If not, see <http://www.gnu.org/licenses/>.
!
!##############################################################
!* $Id$
!*****************************************************
!* Module: HOMOGENIZATION_ISOSTRAIN *
!*****************************************************
!* contains: *
!*****************************************************

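! example material.config section for this homogenization scheme: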
! [isostrain]
! type isostrain
! Ngrains 6
! (output) Ngrains

MODULE homogenization_isostrain

!*** Include other modules ***
 use prec, only: pReal,pInt

 implicit none

 character (len=*), parameter :: homogenization_isostrain_label = 'isostrain'

 integer(pInt),     dimension(:),   allocatable         :: homogenization_isostrain_sizeState, &
                                                            homogenization_isostrain_Ngrains
 integer(pInt),     dimension(:),   allocatable         :: homogenization_isostrain_sizePostResults
 integer(pInt),     dimension(:,:), allocatable, target :: homogenization_isostrain_sizePostResult
 character(len=64), dimension(:,:), allocatable, target :: homogenization_isostrain_output     ! name of each post result output

CONTAINS

!****************************************
!* - homogenization_isostrain_init
!* - homogenization_isostrain_stateInit
!* - homogenization_isostrain_partitionDeformation
!* - homogenization_isostrain_updateState
!* - homogenization_isostrain_averageStressAndItsTangent
!* - homogenization_isostrain_averageTemperature
!* - homogenization_isostrain_postResults
!****************************************


!**************************************
!* Module initialization *
!**************************************
subroutine homogenization_isostrain_init(&
   myFile &                                             ! file pointer to material configuration
  )

 use, intrinsic :: iso_fortran_env                      ! to get compiler_version and compiler_options (at least for gfortran 4.6 at the moment)
 use prec, only: pInt
 use math, only: math_Mandel3333to66, math_Voigt66to3333
 use IO
 use material

 integer(pInt), intent(in) :: myFile
 integer(pInt), parameter  :: maxNchunks = 2_pInt
 integer(pInt), dimension(1_pInt+2_pInt*maxNchunks) :: positions
 integer(pInt) section, i, j, output, mySize
 integer :: maxNinstance, k                             ! no pInt (stores a system dependent value from 'count')
 character(len=64)   tag
 character(len=1024) line

 !$OMP CRITICAL (write2out)
   write(6,*)
   write(6,*) '<<<+- homogenization_',trim(homogenization_isostrain_label),' init -+>>>'
   write(6,*) '$Id$'
#include "compilation_info.f90"
 !$OMP END CRITICAL (write2out)

 maxNinstance = count(homogenization_type == homogenization_isostrain_label)
 if (maxNinstance == 0) return

 allocate(homogenization_isostrain_sizeState(maxNinstance));        homogenization_isostrain_sizeState = 0_pInt
 allocate(homogenization_isostrain_sizePostResults(maxNinstance));  homogenization_isostrain_sizePostResults = 0_pInt
 allocate(homogenization_isostrain_sizePostResult(maxval(homogenization_Noutput), &
                                                  maxNinstance));   homogenization_isostrain_sizePostResult = 0_pInt
 allocate(homogenization_isostrain_Ngrains(maxNinstance));          homogenization_isostrain_Ngrains = 0_pInt
 allocate(homogenization_isostrain_output(maxval(homogenization_Noutput), &
                                          maxNinstance));           homogenization_isostrain_output = ''

 rewind(myFile)
 line    = ''
 section = 0_pInt

 do while (IO_lc(IO_getTag(line,'<','>')) /= material_partHomogenization)     ! wind forward to <homogenization>
   read(myFile,'(a1024)',END=100) line
 enddo

 do                                                                           ! read through sections of homogenization part
   read(myFile,'(a1024)',END=100) line
   if (IO_isBlank(line)) cycle                                                ! skip empty lines
   if (IO_getTag(line,'<','>') /= '') exit                                    ! stop at next part
   if (IO_getTag(line,'[',']') /= '') then                                    ! next section
     section = section + 1_pInt
     output = 0_pInt                                                          ! reset output counter
   endif
   if (section > 0 .and. homogenization_type(section) == homogenization_isostrain_label) then  ! one of my sections
     i = homogenization_typeInstance(section)                                 ! which instance of my type the current section belongs to
     positions = IO_stringPos(line,maxNchunks)
     tag = IO_lc(IO_stringValue(line,positions,1_pInt))                       ! extract key
     select case(tag)
       case ('(output)')
         output = output + 1_pInt
         homogenization_isostrain_output(output,i) = IO_lc(IO_stringValue(line,positions,2_pInt))
       case ('ngrains')
         homogenization_isostrain_Ngrains(i) = IO_intValue(line,positions,2_pInt)
     end select
   endif
 enddo

100 do k = 1,maxNinstance                                                     ! sanity checks
 enddo

 do k = 1,maxNinstance
   homogenization_isostrain_sizeState(k) = 0_pInt

   do j = 1_pInt,maxval(homogenization_Noutput)
     select case(homogenization_isostrain_output(j,k))
       case('ngrains')
         mySize = 1_pInt
       case default
         mySize = 0_pInt
     end select

     if (mySize > 0_pInt) then                                                ! any meaningful output found
       homogenization_isostrain_sizePostResult(j,k) = mySize
       homogenization_isostrain_sizePostResults(k) = &
       homogenization_isostrain_sizePostResults(k) + mySize
     endif
   enddo
 enddo

 return

endsubroutine


!*********************************************************************
!* initial homogenization state *
!*********************************************************************
function homogenization_isostrain_stateInit(myInstance)
 use prec, only: pReal,pInt
 implicit none

!* Definition of variables
 integer(pInt), intent(in) :: myInstance
 real(pReal), dimension(homogenization_isostrain_sizeState(myInstance)) :: &
   homogenization_isostrain_stateInit

 homogenization_isostrain_stateInit = 0.0_pReal
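 ! note: isostrain keeps no internal state (sizeState is 0_pInt), so this result is a zero-sized array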

 return

endfunction


!********************************************************************
! partition material point def grad onto constituents
!********************************************************************
subroutine homogenization_isostrain_partitionDeformation(&
   F, &                                                 ! partitioned def grad per grain
!
   F0, &                                                ! initial partitioned def grad per grain
   avgF, &                                              ! my average def grad
   state, &                                             ! my state
   ip, &                                                ! my integration point
   el &                                                 ! my element
  )

 use prec, only: pReal,pInt,p_vec
 use mesh, only: mesh_element
 use material, only: homogenization_maxNgrains,homogenization_Ngrains
 implicit none

!* Definition of variables
 real(pReal), dimension (3,3,homogenization_maxNgrains), intent(out) :: F
 real(pReal), dimension (3,3,homogenization_maxNgrains), intent(in)  :: F0
 real(pReal), dimension (3,3), intent(in) :: avgF
 type(p_vec), intent(in) :: state
 integer(pInt), intent(in) :: ip,el
 integer(pInt) i

! homID = homogenization_typeInstance(mesh_element(3,el))
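 ! isostrain assumption: every grain of this material point is assigned the same average deformation gradient avgF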
forall (i = 1_pInt:homogenization_Ngrains(mesh_element(3,el))) &
F(1:3,1:3,i) = avgF

 return

endsubroutine


!********************************************************************
! update the internal state of the homogenization scheme
! and tell whether "done" and "happy" with result
!********************************************************************
function homogenization_isostrain_updateState(&
   state, &                                             ! my state
!
   P, &                                                 ! array of current grain stresses
   dPdF, &                                              ! array of current grain stiffnesses
   ip, &                                                ! my integration point
   el &                                                 ! my element
  )

 use prec, only: pReal,pInt,p_vec
 use material, only: homogenization_maxNgrains
 implicit none

!* Definition of variables
 type(p_vec), intent(inout) :: state
 real(pReal), dimension (3,3,homogenization_maxNgrains), intent(in) :: P
 real(pReal), dimension (3,3,3,3,homogenization_maxNgrains), intent(in) :: dPdF
 integer(pInt), intent(in) :: ip,el
! integer(pInt) homID
 logical, dimension(2) :: homogenization_isostrain_updateState

! homID = homogenization_typeInstance(mesh_element(3,el))
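 ! isostrain has no internal state to evolve, so the update trivially succeeds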
homogenization_isostrain_updateState = .true. ! homogenization at material point converged (done and happy)

 return

endfunction


!********************************************************************
! derive average stress and stiffness from constituent quantities
!********************************************************************
subroutine homogenization_isostrain_averageStressAndItsTangent(&
   avgP, &                                              ! average stress at material point
   dAvgPdAvgF, &                                        ! average stiffness at material point
!
   P, &                                                 ! array of current grain stresses
   dPdF, &                                              ! array of current grain stiffnesses
   ip, &                                                ! my integration point
   el &                                                 ! my element
  )

 use prec, only: pReal,pInt,p_vec
 use mesh, only: mesh_element
 use material, only: homogenization_maxNgrains, homogenization_Ngrains
 implicit none

!* Definition of variables
 real(pReal), dimension (3,3), intent(out) :: avgP
 real(pReal), dimension (3,3,3,3), intent(out) :: dAvgPdAvgF
 real(pReal), dimension (3,3,homogenization_maxNgrains), intent(in) :: P
 real(pReal), dimension (3,3,3,3,homogenization_maxNgrains), intent(in) :: dPdF
 integer(pInt), intent(in) :: ip,el
 integer(pInt) Ngrains

! homID = homogenization_typeInstance(mesh_element(3,el))
 Ngrains = homogenization_Ngrains(mesh_element(3,el))
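 ! unweighted arithmetic mean over all grains: each constituent contributes equally to the average stress and tangent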
avgP = sum(P,3)/real(Ngrains,pReal)
dAvgPdAvgF = sum(dPdF,5)/real(Ngrains,pReal)

 return

endsubroutine


!********************************************************************
! derive average temperature from constituent temperatures
!********************************************************************
function homogenization_isostrain_averageTemperature(&
   Temperature, &                                       ! temperature
   ip, &                                                ! my integration point
   el &                                                 ! my element
  )

 use prec, only: pReal,pInt,p_vec
 use mesh, only: mesh_element
 use material, only: homogenization_maxNgrains, homogenization_Ngrains
 implicit none

!* Definition of variables
 real(pReal), dimension (homogenization_maxNgrains), intent(in) :: Temperature
 integer(pInt), intent(in) :: ip,el
 real(pReal) homogenization_isostrain_averageTemperature
 integer(pInt) Ngrains

! homID = homogenization_typeInstance(mesh_element(3,el))
 Ngrains = homogenization_Ngrains(mesh_element(3,el))
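 ! equal-weight average of the grain temperatures at this material point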
homogenization_isostrain_averageTemperature = sum(Temperature(1:Ngrains))/real(Ngrains,pReal)

 return

endfunction


!********************************************************************
! return array of homogenization results for post file inclusion
!********************************************************************
pure function homogenization_isostrain_postResults(&
   state, &                                             ! my state
   ip, &                                                ! my integration point
   el &                                                 ! my element
  )

 use prec, only: pReal,pInt,p_vec
 use mesh, only: mesh_element
 use material, only: homogenization_typeInstance,homogenization_Noutput
 implicit none

!* Definition of variables
 type(p_vec), intent(in) :: state
 integer(pInt), intent(in) :: ip,el
 integer(pInt) homID,o,c
 real(pReal), dimension(homogenization_isostrain_sizePostResults(homogenization_typeInstance(mesh_element(3,el)))) :: &
   homogenization_isostrain_postResults

 homID = homogenization_typeInstance(mesh_element(3,el))
 c = 0_pInt
 homogenization_isostrain_postResults = 0.0_pReal

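 ! loop over the outputs requested for this instance; 'ngrains' reports the grain count as a real value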
 do o = 1_pInt,homogenization_Noutput(mesh_element(3,el))
   select case(homogenization_isostrain_output(o,homID))
     case ('ngrains')
       homogenization_isostrain_postResults(c+1_pInt) = real(homogenization_isostrain_Ngrains(homID),pReal)
       c = c + 1_pInt
   end select
 enddo

 return

endfunction

END MODULE