merged and added correction to FreeSurface.config example
Commit: cfb2770b93
@@ -7,9 +7,9 @@ stages:
   - compilePETScGNU
   - prepareSpectral
   - spectral
-  - compileMarc2018_1
+  - compileMarc
   - marc
-  - compileAbaqus2017
+  - compileAbaqus
   - example
   - performance
   - createPackage
@@ -51,39 +51,37 @@ variables:
   # Names of module files to load
   # ===============================================================================================
   # ++++++++++++ Compiler ++++++++++++++++++++++++++++++++++++++++++++++
-  IntelCompiler16_0: "Compiler/Intel/16.0 Libraries/IMKL/2016"
-  IntelCompiler16_4: "Compiler/Intel/16.4 Libraries/IMKL/2016-4"
-  IntelCompiler17_0: "Compiler/Intel/17.0 Libraries/IMKL/2017"
-  IntelCompiler18_1: "Compiler/Intel/18.1 Libraries/IMKL/2018"
-  GNUCompiler7_3: "Compiler/GNU/7.3"
+  IntelCompiler16_4: "Compiler/Intel/16.4 Libraries/IMKL/2016"
+  IntelCompiler17_8: "Compiler/Intel/17.8 Libraries/IMKL/2017"
+  IntelCompiler18_4: "Compiler/Intel/18.4 Libraries/IMKL/2018"
+  GNUCompiler8_2: "Compiler/GNU/8.2"
   # ------------ Defaults ----------------------------------------------
-  IntelCompiler: "$IntelCompiler18_1"
-  GNUCompiler: "$GNUCompiler7_3"
+  IntelCompiler: "$IntelCompiler18_4"
+  GNUCompiler: "$GNUCompiler8_2"
   # ++++++++++++ MPI +++++++++++++++++++++++++++++++++++++++++++++++++++
-  MPICH3_2Intel18_1: "MPI/Intel/18.1/MPICH/3.2.1"
-  MPICH3_2GNU7_3: "MPI/GNU/7.3/MPICH/3.2.1"
+  IMPI2018Intel18_4: "MPI/Intel/18.4/IntelMPI/2018"
+  MPICH3_3GNU8_2: "MPI/GNU/8.2/MPICH/3.3"
   # ------------ Defaults ----------------------------------------------
-  MPICH_Intel: "$MPICH3_2Intel18_1"
-  MPICH_GNU: "$MPICH3_2GNU7_3"
+  MPICH_Intel: "$IMPI2018Intel18_4"
+  MPICH_GNU: "$MPICH3_3GNU8_2"
   # ++++++++++++ PETSc +++++++++++++++++++++++++++++++++++++++++++++++++
-  PETSc3_10_0MPICH3_2Intel18_1: "Libraries/PETSc/3.10.0/Intel-18.1-MPICH-3.2.1"
-  PETSc3_10_0MPICH3_2GNU7_3: "Libraries/PETSc/3.10.0/GNU-7.3-MPICH-3.2.1"
+  PETSc3_10_3IMPI2018Intel18_4: "Libraries/PETSc/3.10.3/Intel-18.4-IntelMPI-2018"
+  PETSc3_10_3MPICH3_3GNU8_2: "Libraries/PETSc/3.10.3/GNU-8.2-MPICH-3.3"
   # ------------ Defaults ----------------------------------------------
-  PETSc_MPICH_Intel: "$PETSc3_10_0MPICH3_2Intel18_1"
-  PETSc_MPICH_GNU: "$PETSc3_10_0MPICH3_2GNU7_3"
+  PETSc_MPICH_Intel: "$PETSc3_10_3IMPI2018Intel18_4"
+  PETSc_MPICH_GNU: "$PETSc3_10_3MPICH3_3GNU8_2"
   # ++++++++++++ FEM +++++++++++++++++++++++++++++++++++++++++++++++++++
-  Abaqus2017: "FEM/Abaqus/2017"
+  Abaqus2019: "FEM/Abaqus/2019"
   MSC2018_1: "FEM/MSC/2018.1"
-  MSC2017: "FEM/MSC/2017"
   # ------------ Defaults ----------------------------------------------
-  Abaqus: "$Abaqus2017"
+  Abaqus: "$Abaqus2019"
   MSC: "$MSC2018_1"
-  IntelMarc: "$IntelCompiler17_0"
+  IntelMarc: "$IntelCompiler17_8"
   IntelAbaqus: "$IntelCompiler16_4"
   # ++++++++++++ Documentation +++++++++++++++++++++++++++++++++++++++++
-  Doxygen1_8_13: "Documentation/Doxygen/1.8.13"
+  Doxygen1_8_15: "Documentation/Doxygen/1.8.15"
   # ------------ Defaults ----------------------------------------------
-  Doxygen: "$Doxygen1_8_13"
+  Doxygen: "$Doxygen1_8_15"

 ###################################################################################################
@@ -158,6 +156,13 @@ Post_AverageDown:
     - master
     - release

+Post_ASCIItable:
+  stage: postprocessing
+  script: ASCIItable/test.py
+  except:
+    - master
+    - release
+
 Post_General:
   stage: postprocessing
   script: PostProcessing/test.py
@@ -202,7 +207,9 @@ Post_ParaviewRelated:

 Post_OrientationConversion:
   stage: postprocessing
-  script: OrientationConversion/test.py
+  script:
+    - OrientationConversion/test.py
+    - OrientationConversion/test2.py
   except:
     - master
     - release
@@ -383,9 +390,9 @@ TextureComponents:

 ###################################################################################################
 Marc_compileIfort2018_1:
-  stage: compileMarc2018_1
+  stage: compileMarc
   script:
-    - module load $IntelCompiler17_0 $MSC2018_1
+    - module load $IntelMarc $MSC
     - Marc_compileIfort/test.py -m 2018.1
   except:
     - master
@@ -430,11 +437,11 @@ J2_plasticBehavior:
     - release

 ###################################################################################################
-Abaqus_compile2017:
-  stage: compileAbaqus2017
+Abaqus_compile:
+  stage: compileAbaqus
   script:
-    - module load $IntelCompiler16_4 $Abaqus2017
-    - Abaqus_compileIfort/test.py -a 2017
+    - module load $IntelAbaqus $Abaqus
+    - Abaqus_compileIfort/test.py
   except:
     - master
     - release
@@ -477,24 +484,40 @@ AbaqusStd:
   script:
     - module load $IntelCompiler $MPICH_Intel $PETSc_MPICH_Intel $Doxygen
    - $DAMASKROOT/PRIVATE/documenting/runDoxygen.sh $DAMASKROOT abaqus
-  only:
-    - development
+  except:
+    - master
+    - release

 Marc:
   stage: createDocumentation
   script:
     - module load $IntelCompiler $MPICH_Intel $PETSc_MPICH_Intel $Doxygen
     - $DAMASKROOT/PRIVATE/documenting/runDoxygen.sh $DAMASKROOT marc
-  only:
-    - development
+  except:
+    - master
+    - release

 Spectral:
   stage: createDocumentation
   script:
     - module load $IntelCompiler $MPICH_Intel $PETSc_MPICH_Intel $Doxygen
     - $DAMASKROOT/PRIVATE/documenting/runDoxygen.sh $DAMASKROOT spectral
-  only:
-    - development
+  except:
+    - master
+    - release
+
+Processing:
+  stage: createDocumentation
+  script:
+    - cd $DAMASKROOT/processing/pre
+    - rm abq_addUserOutput.py marc_addUserOutput.py
+    - $DAMASKROOT/PRIVATE/documenting/scriptHelpToWiki.py --debug *.py
+    - cd $DAMASKROOT/processing/post
+    - rm marc_to_vtk.py vtk2ang.py
+    - $DAMASKROOT/PRIVATE/documenting/scriptHelpToWiki.py --debug *.py
+  except:
+    - master
+    - release

 ##################################################################################################
 backupData:
@@ -503,11 +526,10 @@ backupData:
     - cd $TESTROOT/performance  # location of new runtime results
     - git commit -am"${CI_PIPELINE_ID}_${CI_COMMIT_SHA}"
     - mkdir $BACKUP/${CI_PIPELINE_ID}_${CI_COMMIT_SHA}
-    - cp $TESTROOT/performance/time.txt $BACKUP/${CI_PIPELINE_ID}_${CI_COMMIT_SHA}/
     - mv $TESTROOT/performance/time.png $BACKUP/${CI_PIPELINE_ID}_${CI_COMMIT_SHA}/
-    - cp $TESTROOT/performance/memory.txt $BACKUP/${CI_PIPELINE_ID}_${CI_COMMIT_SHA}/
     - mv $TESTROOT/performance/memory.png $BACKUP/${CI_PIPELINE_ID}_${CI_COMMIT_SHA}/
     - mv $DAMASKROOT/PRIVATE/documenting/DAMASK_* $BACKUP/${CI_PIPELINE_ID}_${CI_COMMIT_SHA}/
+    - mv $DAMASKROOT/processing $BACKUP/${CI_PIPELINE_ID}_${CI_COMMIT_SHA}/
   only:
     - development
@@ -445,6 +445,33 @@ elseif(CMAKE_Fortran_COMPILER_ID STREQUAL "GNU")

   # Additional options
   # -fdefault-integer-8: Use it to set precision to 8 bytes for integer, don't use it for the standard case of pInt=4 (there is no -fdefault-integer-4)

+###################################################################################################
+# PGI Compiler
+###################################################################################################
+elseif(CMAKE_Fortran_COMPILER_ID STREQUAL "PGI")
+
+  if (OPTIMIZATION STREQUAL "OFF")
+    set (OPTIMIZATION_FLAGS "-O0" )
+  elseif (OPTIMIZATION STREQUAL "DEFENSIVE")
+    set (OPTIMIZATION_FLAGS "-O2")
+  elseif (OPTIMIZATION STREQUAL "AGGRESSIVE")
+    set (OPTIMIZATION_FLAGS "-O3")
+  endif ()
+
+  #------------------------------------------------------------------------------------------------
+  # Fine tuning compilation options
+  set (COMPILE_FLAGS "${COMPILE_FLAGS} -Mpreprocess")
+  # preprocessor
+
+  set (STANDARD_CHECK "-Mallocatable=03")
+
+  #------------------------------------------------------------------------------------------------
+  # Runtime debugging
+  set (DEBUG_FLAGS "${DEBUG_FLAGS} -g")
+  # Includes debugging information in the object module; sets the optimization level to zero unless a -O option is present on the command line
+
 else ()
   message (FATAL_ERROR "Compiler type (CMAKE_Fortran_COMPILER_ID) not recognized")
 endif ()
CONFIG
@@ -8,6 +8,6 @@ set DAMASK_NUM_THREADS = 4
 set MSC_ROOT = /opt/msc
 set MARC_VERSION = 2018.1

-set ABAQUS_VERSION = 2017
+set ABAQUS_VERSION = 2019

 set DAMASK_HDF5 = OFF
PRIVATE
@@ -1 +1 @@
-Subproject commit beb9682fff7d4d6c65aba12ffd04c7441dc6ba6b
+Subproject commit def4081e837539dba7c4760abbb340553be66d3c
README
@@ -10,3 +10,4 @@ Germany

 Email: DAMASK@mpie.de
 https://damask.mpie.de
+https://magit1.mpie.de
@@ -3,7 +3,6 @@
 (output) texture
 (output) volume
 (output) orientation    # quaternion
-(output) eulerangles    # orientation as Bunge triple in degree
 (output) grainrotation  # deviation from initial orientation as axis (1-3) and angle in degree (4) in crystal reference coordinates
 (output) f              # deformation gradient tensor
 (output) fe             # elastic deformation gradient tensor
@@ -13,13 +13,13 @@ plasticity isotropic
 (output) strainrate

 lattice_structure isotropic
-c11 110.9e9
+c11 10e9
 c12 0.0
-taylorfactor 3
-tau0 31e6
 gdot0 0.001
-n 20
-h0 75e6
-tausat 63e6
-w0 2.25
+tau0 0.3e6
+tausat 0.6e6
+h0 1e6
+n 5
+m 3
+a 2
 atol_resistance 1
@@ -13,7 +13,6 @@ mech none
 (output) texture
 (output) volume
 (output) orientation    # quaternion
-(output) eulerangles    # orientation as Bunge triple
 (output) grainrotation  # deviation from initial orientation as axis (1-3) and angle in degree (4)
 (output) f              # deformation gradient tensor; synonyms: "defgrad"
 (output) fe             # elastic deformation gradient tensor
@@ -1,84 +0,0 @@
-#!/usr/bin/env python2.7
-# -*- coding: UTF-8 no BOM -*-
-
-import os,h5py
-import numpy as np
-from optparse import OptionParser
-import damask
-
-scriptName = os.path.splitext(os.path.basename(__file__))[0]
-scriptID   = ' '.join([scriptName,damask.version])
-
-
-#--------------------------------------------------------------------------------------------------
-# MAIN
-#--------------------------------------------------------------------------------------------------
-
-parser = OptionParser(option_class=damask.extendableOption, usage='%prog [dream3dfile[s]]', description = """
-Convert DREAM3D file to ASCIItable. Works for 3D datasets, but, hey, its not called DREAM2D ;)
-
-""", version = scriptID)
-
-parser.add_option('-d','--data',
-                  dest = 'data',
-                  action = 'extend', metavar = '<string LIST>',
-                  help = 'data to extract from DREAM3D file')
-parser.add_option('-c','--container',
-                  dest = 'container', metavar = 'string',
-                  help = 'root container(group) in which data is stored [%default]')
-
-parser.set_defaults(container="ImageDataContainer",
-                   )
-
-(options, filenames) = parser.parse_args()
-
-if options.data is None:
-  parser.error('No data selected')
-
-rootDir ='DataContainers/'+options.container
-
-# --- loop over input files -------------------------------------------------------------------------
-
-if filenames == []: parser.error('no input file specified.')
-
-for name in filenames:
-  try:
-    table = damask.ASCIItable(outname = os.path.splitext(name)[0]+'.txt',
-                              buffered = False
-                              )
-  except: continue
-  damask.util.report(scriptName,name)
-
-  inFile = h5py.File(name, 'r')
-  try:
-    grid = inFile[rootDir+'/_SIMPL_GEOMETRY/DIMENSIONS'][...]
-  except:
-    damask.util.croak('Group {} not found'.format(options.container))
-    table.close(dismiss = True)
-    continue
-
-# --- read comments --------------------------------------------------------------------------------
-
-  coords = (np.mgrid[0:grid[2], 0:grid[1], 0: grid[0]]).reshape(3, -1).T
-  table.data = (np.fliplr(coords)*inFile[rootDir+'/_SIMPL_GEOMETRY/SPACING'][...] \
-                + inFile[rootDir+'/_SIMPL_GEOMETRY/ORIGIN'][...] \
-                + inFile[rootDir+'/_SIMPL_GEOMETRY/SPACING'][...]*0.5)
-  labels = ['1_pos','2_pos','3_pos']
-  for data in options.data:
-    try:
-      l = np.prod(inFile[rootDir+'/CellData/'+data].shape[3:])
-      labels+=['{}_{}'.format(i+1,data.replace(' ','')) for i in range(l)] if l >1 else [data.replace(' ','')]
-    except KeyError:
-      damask.util.croak('Data {} not found'.format(data))
-      pass
-    table.data = np.hstack((table.data,
-                            inFile[rootDir+'/CellData/'+data][...].reshape(grid.prod(),l)))
-
-# ------------------------------------------ assemble header ---------------------------------------
-  table.labels_clear()
-  table.labels_append(labels,reset = True)
-  table.head_write()
-
-# ------------------------------------------ finalize output ---------------------------------------
-  table.data_writeArray()
-  table.close()
@@ -1,48 +0,0 @@
-#!/usr/bin/env python2.7
-# -*- coding: UTF-8 no BOM -*-
-
-import os,sys
-from optparse import OptionParser
-import damask
-
-scriptName = os.path.splitext(os.path.basename(__file__))[0]
-scriptID   = ' '.join([scriptName,damask.version])
-
-#--------------------------------------------------------------------------------------------------
-# MAIN
-#--------------------------------------------------------------------------------------------------
-
-parser = OptionParser(option_class=damask.extendableOption, usage='%prog [file[s]]', description = """
-Adds header to OIM grain file type 1 to make it accesible as ASCII table
-
-""", version = scriptID)
-
-
-(options, filenames) = parser.parse_args()
-
-# --- loop over input files -------------------------------------------------------------------------
-
-if filenames == []: filenames = [None]
-
-for name in filenames:
-  try:
-    table = damask.ASCIItable(name = name,
-                              buffered = False,
-                              labeled = False)
-  except: continue
-  damask.util.report(scriptName,name)
-  table.head_read()
-  data = []
-  while table.data_read():
-    data.append(table.data[0:9])
-
-  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
-  table.labels_append(['1_euler','2_euler','3_euler','1_pos','2_pos','IQ','CI','Fit','GrainID'])
-  table.head_write()
-  for i in data:
-    table.data = i
-    table.data_write()
-
-# --- output finalization --------------------------------------------------------------------------
-
-  table.close()  # close ASCII table
@@ -14,20 +14,15 @@ scriptID = ' '.join([scriptName,damask.version])
 # MAIN
 # --------------------------------------------------------------------

-parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
+parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
 Transform X,Y,Z,F APS BeamLine 34 coordinates to x,y,z APS strain coordinates.

 """, version = scriptID)

-parser.add_option('-f',
-                  '--frame',
-                  dest='frame',
-                  metavar='string',
-                  help='APS X,Y,Z coords')
-parser.add_option('--depth',
-                  dest='depth',
-                  metavar='string',
-                  help='depth')
+parser.add_option('-f','--frame',dest='frame', metavar='string',
+                  help='label of APS X,Y,Z coords')
+parser.add_option('--depth', dest='depth', metavar='string',
+                  help='depth')

 (options,filenames) = parser.parse_args()
@@ -18,7 +18,7 @@ def listify(x):
 # MAIN
 # --------------------------------------------------------------------

-parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
+parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
 Add or alter column(s) with derived values according to user-defined arithmetic operation between column(s).
 Column labels are tagged by '#label#' in formulas. Use ';' for ',' in functions.
 Numpy is available as 'np'.
@@ -41,10 +41,7 @@ parser.add_option('-f','--formula',

 parser.add_option('-c','--condition',
                   dest = 'condition', metavar='string',
-                  help = 'condition to alter existing column data')
-
-parser.set_defaults(condition = None,
-                   )
+                  help = 'condition to alter existing column data (optional)')

 (options,filenames) = parser.parse_args()
@@ -80,7 +77,7 @@ for name in filenames:
   condition = options.condition  # copy per file, since might be altered inline
   breaker = False

   for position,(all,marker,column) in enumerate(set(re.findall(r'#(([s]#)?(.+?))#',condition))):  # find three groups
     idx = table.label_index(column)
     dim = table.label_dimension(column)
     if idx < 0 and column not in specials:
@@ -89,15 +86,15 @@ for name in filenames:
     else:
       if column in specials:
         replacement = 'specials["{}"]'.format(column)
       elif dim == 1:  # scalar input
         replacement = '{}(table.data[{}])'.format({ '':'float',
                                                    's#':'str'}[marker],idx)  # take float or string value of data column
       elif dim > 1:  # multidimensional input (vector, tensor, etc.)
         replacement = 'np.array(table.data[{}:{}],dtype=float)'.format(idx,idx+dim)  # use (flat) array representation

     condition = condition.replace('#'+all+'#',replacement)

   if breaker: continue  # found mistake in condition evaluation --> next file

 # ------------------------------------------ build formulas ----------------------------------------
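Note on the hunk above: the script substitutes '#label#' and '#s#label#' markers in the user-supplied condition with expressions over the table data; the three-group regular expression itself is untouched by this commit. A minimal standalone sketch of that extraction (the condition string is made up for illustration):

import re

condition = '#s#texture# != "background" and #1_pos# > 0.5'  # hypothetical condition string

for position,(all,marker,column) in enumerate(set(re.findall(r'#(([s]#)?(.+?))#',condition))):
    # 'all' is the full marker content, 'marker' is 's#' for string-valued columns, 'column' is the label
    print(position, repr(all), repr(marker), repr(column))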
@@ -13,8 +13,8 @@ scriptID = ' '.join([scriptName,damask.version])
 # MAIN
 # --------------------------------------------------------------------

-parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
-Add column(s) containing Cauchy stress based on given column(s) of deformation gradient and first Piola--Kirchhoff stress.
+parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
+Add column containing Cauchy stress based on deformation gradient and first Piola--Kirchhoff stress.

 """, version = scriptID)
@@ -209,7 +209,7 @@ def shapeMismatch(size,F,nodes,centres):
 # MAIN
 # --------------------------------------------------------------------

-parser = OptionParser(option_class=damask.extendableOption, usage='%prog options file[s]', description = """
+parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
 Add column(s) containing the shape and volume mismatch resulting from given deformation gradient.
 Operates on periodic three-dimensional x,y,z-ordered data sets.
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.7
+#!/usr/bin/env python3
 # -*- coding: UTF-8 no BOM -*-

 import os,sys
@@ -13,7 +13,7 @@ scriptID = ' '.join([scriptName,damask.version])
 # MAIN
 # --------------------------------------------------------------------

-parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
+parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
 Add cumulative (sum of first to current row) values for given label(s).
 """, version = scriptID)
@@ -22,12 +22,9 @@ parser.add_option('-l','--label',
                   action = 'extend', metavar = '<string LIST>',
                   help = 'columns to cumulate')

-parser.set_defaults(label = [],
-                   )
-
 (options,filenames) = parser.parse_args()

-if len(options.label) == 0:
+if options.label is None:
   parser.error('no data column(s) specified.')

 # --- loop over input files -------------------------------------------------------------------------
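Without the set_defaults call, an option declared with no default comes back as None rather than [] when -l/--label is not given, which is why the guard switches to 'is None'. A minimal sketch of the same behaviour with plain optparse, using the standard 'append' action in place of DAMASK's custom 'extend' action (an assumption for illustration only):

from optparse import OptionParser

parser = OptionParser()
parser.add_option('-l','--label', dest='label', action='append')

(options, args) = parser.parse_args([])                    # no -l given on the command line
print(options.label)                                       # None, not [] -> test with 'is None'

(options, args) = parser.parse_args(['-l','f','-l','p'])
print(options.label)                                       # ['f', 'p']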
@@ -49,14 +49,14 @@ def curlFFT(geomdim,field):

   curl_fourier = np.einsum(einsums[n],e,k_s,field_fourier)*TWOPIIMG

-  return np.fft.irfftn(curl_fourier,s=shapeFFT,axes=(0,1,2)).reshape([N,n])
+  return np.fft.irfftn(curl_fourier,axes=(0,1,2),s=shapeFFT).reshape([N,n])


 # --------------------------------------------------------------------
 # MAIN
 # --------------------------------------------------------------------

-parser = OptionParser(option_class=damask.extendableOption, usage='%prog option(s) [ASCIItable(s)]', description = """
+parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
 Add column(s) containing curl of requested column(s).
 Operates on periodic ordered three-dimensional data sets
 of vector and tensor fields.
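The functional content of curlFFT is untouched; only the keyword order in the np.fft.irfftn call changes, and keyword arguments are position-independent. A minimal sketch (not part of the commit) confirming the two spellings agree:

import numpy as np

field         = np.random.rand(4,4,4,3)                    # small test field on a 4x4x4 grid
field_fourier = np.fft.rfftn(field,axes=(0,1,2))           # forward transform over the grid axes
shapeFFT      = field.shape[:3]                            # target shape of the transformed axes

a = np.fft.irfftn(field_fourier,s=shapeFFT,axes=(0,1,2))   # old argument order
b = np.fft.irfftn(field_fourier,axes=(0,1,2),s=shapeFFT)   # new argument order
assert np.allclose(a,b)                                    # identical results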
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.7
+#!/usr/bin/env python3
 # -*- coding: UTF-8 no BOM -*-

 import os,sys
@@ -34,7 +34,7 @@ def derivative(coordinates,what):
 # MAIN
 # --------------------------------------------------------------------

-parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
+parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
 Add column(s) containing numerical derivative of requested column(s) with respect to given coordinates.

 """, version = scriptID)
@@ -20,7 +20,7 @@ def determinant(m):
 # MAIN
 # --------------------------------------------------------------------

-parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
+parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
 Add column(s) containing determinant of requested tensor column(s).

 """, version = scriptID)
@@ -23,7 +23,7 @@ def deviator(m,spherical = False):
 # MAIN
 # --------------------------------------------------------------------

-parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
+parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(2)]', description = """
 Add column(s) containing deviator of requested tensor column(s).

 """, version = scriptID)
@@ -87,7 +87,7 @@ def displacementFluctFFT(F,grid,size,nodal=False,transformed=False):
 # MAIN
 # --------------------------------------------------------------------

-parser = OptionParser(option_class=damask.extendableOption, usage='%prog [options] [ASCIItable(s)]', description = """
+parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
 Add displacments resulting from deformation gradient field.
 Operates on periodic three-dimensional x,y,z-ordered data sets.
 Outputs at cell centers or cell nodes (into separate file).
@@ -111,7 +111,6 @@ parser.add_option('--nodal',

 parser.set_defaults(defgrad = 'f',
                     pos     = 'pos',
-                    nodal   = False,
                    )

 (options,filenames) = parser.parse_args()
@@ -45,7 +45,7 @@ def divFFT(geomdim,field):

   div_fourier = np.einsum(einsums[n],k_s,field_fourier)*TWOPIIMG

-  return np.fft.irfftn(div_fourier,s=shapeFFT,axes=(0,1,2)).reshape([N,n//3])
+  return np.fft.irfftn(div_fourier,axes=(0,1,2),s=shapeFFT).reshape([N,n//3])


 # --------------------------------------------------------------------
@@ -30,7 +30,7 @@ def E_hkl(stiffness,vec): # stiffness = (c11,c12,c44)
 # MAIN
 # --------------------------------------------------------------------

-parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
+parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
 Add column(s) containing directional stiffness based on given cubic stiffness values C11, C12, and C44 in consecutive columns.

 """, version = scriptID)
@@ -83,7 +83,7 @@ neighborhoods = {
                   ])
                 }

-parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
+parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
 Add column(s) containing Euclidean distance to grain structural features: boundaries, triple lines, and quadruple points.

 """, version = scriptID)
@@ -15,7 +15,7 @@ scriptID = ' '.join([scriptName,damask.version])
 # MAIN
 # --------------------------------------------------------------------

-parser = OptionParser(option_class=damask.extendableOption, usage='%prog option(s) [ASCIItable(s)]', description = """
+parser = OptionParser(option_class=damask.extendableOption, usage='%prog option [ASCIItable(s)]', description = """
 Add column(s) containing Gaussian filtered values of requested column(s).
 Operates on periodic and non-periodic ordered three-dimensional data sets.
 For details see scipy.ndimage documentation.
@@ -34,12 +34,12 @@ parser.add_option('-o','--order',
                   dest = 'order',
                   type = int,
                   metavar = 'int',
-                  help = 'order of the filter')
+                  help = 'order of the filter [%default]')
 parser.add_option('--sigma',
                   dest = 'sigma',
                   type = float,
                   metavar = 'float',
-                  help = 'standard deviation')
+                  help = 'standard deviation [%default]')
 parser.add_option('--periodic',
                   dest = 'periodic',
                   action = 'store_true',
@@ -50,7 +50,6 @@ parser.add_option('--periodic',
 parser.set_defaults(pos      = 'pos',
                     order    = 0,
                     sigma    = 1,
-                    periodic = False,
                    )

 (options,filenames) = parser.parse_args()
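Removing the explicit periodic = False default is safe here because optparse leaves a store_true option at None when the flag is absent, which is just as falsy. For orientation, a minimal sketch of the scipy.ndimage call that the --sigma/--order/--periodic options presumably map onto (an assumption, not code from this commit):

import numpy as np
from scipy import ndimage

data     = np.random.rand(16,16,16)                        # scalar field on a 16^3 grid
periodic = False                                           # would come from options.periodic

smoothed = ndimage.gaussian_filter(data,
                                   sigma = 1.0,            # standard deviation (--sigma)
                                   order = 0,              # derivative order of the Gaussian (-o/--order)
                                   mode  = 'wrap' if periodic else 'reflect')  # periodic vs. non-periodic boundaries
print(smoothed.shape)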
@@ -45,14 +45,14 @@ def gradFFT(geomdim,field):
   k_s = np.concatenate((ki[:,:,:,None],kj[:,:,:,None],kk[:,:,:,None]),axis = 3).astype('c16')
   grad_fourier = np.einsum(einsums[n],field_fourier,k_s)*TWOPIIMG

-  return np.fft.irfftn(grad_fourier,s=shapeFFT,axes=(0,1,2)).reshape([N,3*n])
+  return np.fft.irfftn(grad_fourier,axes=(0,1,2),s=shapeFFT).reshape([N,3*n])


 # --------------------------------------------------------------------
 # MAIN
 # --------------------------------------------------------------------

-parser = OptionParser(option_class=damask.extendableOption, usage='%prog option(s) [ASCIItable(s)]', description = """
+parser = OptionParser(option_class=damask.extendableOption, usage='%prog option [ASCIItable(s)]', description = """
 Add column(s) containing gradient of requested column(s).
 Operates on periodic ordered three-dimensional data sets
 of vector and scalar fields.
@@ -1,176 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: UTF-8 no BOM -*-
-
-import os,sys,copy
-import numpy as np
-import damask
-from optparse import OptionParser
-from scipy import spatial
-
-scriptName = os.path.splitext(os.path.basename(__file__))[0]
-scriptID   = ' '.join([scriptName,damask.version])
-
-
-parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
-Add grain index based on similiarity of crystal lattice orientation.
-
-""", version = scriptID)
-
-parser.add_option('-r',
-                  '--radius',
-                  dest = 'radius',
-                  type = 'float', metavar = 'float',
-                  help = 'search radius')
-parser.add_option('-d',
-                  '--disorientation',
-                  dest = 'disorientation',
-                  type = 'float', metavar = 'float',
-                  help = 'disorientation threshold in degrees [%default]')
-parser.add_option('-s',
-                  '--symmetry',
-                  dest = 'symmetry',
-                  metavar = 'string',
-                  help = 'crystal symmetry [%default]')
-parser.add_option('-o',
-                  '--orientation',
-                  dest = 'quaternion',
-                  metavar = 'string',
-                  help = 'label of crystal orientation given as unit quaternion [%default]')
-parser.add_option('-p',
-                  '--pos', '--position',
-                  dest = 'pos',
-                  metavar = 'string',
-                  help = 'label of coordinates [%default]')
-parser.add_option('--quiet',
-                  dest='verbose',
-                  action = 'store_false',
-                  help = 'hide status bar (useful when piping to file)')
-
-parser.set_defaults(disorientation = 5,
-                    verbose = True,
-                    quaternion = 'orientation',
-                    symmetry = 'cubic',
-                    pos = 'pos',
-                   )
-
-(options, filenames) = parser.parse_args()
-
-if options.radius is None:
-  parser.error('no radius specified.')
-
-cos_disorientation = np.cos(np.radians(options.disorientation/2.))  # cos of half the disorientation angle
-
-# --- loop over input files -------------------------------------------------------------------------
-
-if filenames == []: filenames = [None]
-
-for name in filenames:
-  try: table = damask.ASCIItable(name = name,
-                                 buffered = False)
-  except: continue
-  damask.util.report(scriptName,name)
-
-# ------------------------------------------ read header -------------------------------------------
-
-  table.head_read()
-
-# ------------------------------------------ sanity checks -----------------------------------------
-
-  errors  = []
-  remarks = []
-
-  if not 3 >= table.label_dimension(options.pos) >= 1:
-    errors.append('coordinates "{}" need to have one, two, or three dimensions.'.format(options.pos))
-  if not np.all(table.label_dimension(options.quaternion) == 4):
-    errors.append('input "{}" does not have dimension 4.'.format(options.quaternion))
-  else: column = table.label_index(options.quaternion)
-
-  if remarks != []: damask.util.croak(remarks)
-  if errors  != []:
-    damask.util.croak(errors)
-    table.close(dismiss = True)
-    continue
-
-# ------------------------------------------ assemble header ---------------------------------------
-
-  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
-  table.labels_append('grainID_{}@{:g}'.format(options.quaternion,options.disorientation))  # report orientation source and disorientation
-  table.head_write()
-
-# ------------------------------------------ build KD tree -----------------------------------------
-
-  table.data_readArray(options.pos)  # read position vectors
-  grainID = -np.ones(len(table.data),dtype=int)
-
-  Npoints = table.data.shape[0]
-  kdtree = spatial.KDTree(copy.deepcopy(table.data))
-
-# ------------------------------------------ assign grain IDs --------------------------------------
-
-  orientations = []  # quaternions found for grain
-  memberCounts = []  # number of voxels in grain
-  p = 0              # point counter
-  g = 0              # grain counter
-  matchedID = -1
-  lastDistance = np.dot(kdtree.data[-1]-kdtree.data[0],kdtree.data[-1]-kdtree.data[0])  # (arbitrarily) use diagonal of cloud
-
-  table.data_rewind()
-  while table.data_read():  # read next data line of ASCII table
-
-    if options.verbose and Npoints > 100 and p%(Npoints//100) == 0:  # report in 1% steps if possible and avoid modulo by zero
-      damask.util.progressBar(iteration=p,total=Npoints)
-
-    o = damask.Orientation(quaternion = np.array(list(map(float,table.data[column:column+4]))),
-                           symmetry = options.symmetry).reduced()
-
-    matched        = False
-    alreadyChecked = {}
-    candidates     = []
-    bestDisorientation = damask.Quaternion([0,0,0,1])  # initialize to 180 deg rotation as worst case
-
-    for i in kdtree.query_ball_point(kdtree.data[p],options.radius):  # check all neighboring points
-      gID = grainID[i]
-      if gID != -1 and gID not in alreadyChecked:  # indexed point belonging to a grain not yet tested?
-        alreadyChecked[gID] = True  # remember not to check again
-        disorientation = o.disorientation(orientations[gID],SST = False)[0]  # compare against other orientation
-        if disorientation.quaternion.q > cos_disorientation:  # within threshold ...
-          candidates.append(gID)  # remember as potential candidate
-          if disorientation.quaternion.q >= bestDisorientation.q:  # ... and better than current best?
-            matched = True
-            matchedID = gID  # remember that grain
-            bestDisorientation = disorientation.quaternion
-
-    if matched:  # did match existing grain
-      memberCounts[matchedID] += 1
-      if len(candidates) > 1:  # ambiguity in grain identification?
-        largestGrain = sorted(candidates,key=lambda x:memberCounts[x])[-1]  # find largest among potential candidate grains
-        matchedID    = largestGrain
-        for c in [c for c in candidates if c != largestGrain]:  # loop over smaller candidates
-          memberCounts[largestGrain] += memberCounts[c]  # reassign member count of smaller to largest
-          memberCounts[c] = 0
-        grainID = np.where(np.in1d(grainID,candidates), largestGrain, grainID)  # relabel grid points of smaller candidates as largest one
-
-    else:  # no match -> new grain found
-      orientations += [o]  # initialize with current orientation
-      memberCounts += [1]  # start new membership counter
-      matchedID = g
-      g += 1  # increment grain counter
-
-    grainID[p] = matchedID  # remember grain index assigned to point
-    p += 1  # increment point
-
-  grainIDs = np.where(np.array(memberCounts) > 0)[0]  # identify "live" grain identifiers
-  packingMap = dict(zip(list(grainIDs),range(len(grainIDs))))  # map to condense into consecutive IDs
-
-  table.data_rewind()
-
-  outputAlive = True
-  p = 0
-  damask.util.progressBar(iteration=1,total=1)
-  while outputAlive and table.data_read():  # read next data line of ASCII table
-    table.data_append(1+packingMap[grainID[p]])  # add (condensed) grain ID
-    outputAlive = table.data_write()  # output processed line
-    p += 1
-
-# ------------------------------------------ output finalization -----------------------------------
-
-  table.close()  # close ASCII tables
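The deleted grain-identification script was built around a scipy.spatial.KDTree neighborhood search. For reference, a minimal standalone sketch of the query pattern it used (random points, not data from the repository):

import numpy as np
from scipy import spatial

points = np.random.rand(1000,3)                            # one row per (x,y,z) position
kdtree = spatial.KDTree(points)

radius = 0.05
for i in kdtree.query_ball_point(kdtree.data[0],radius):   # indices of all points within 'radius' of point 0
    print(i, points[i])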
@@ -13,7 +13,7 @@ scriptID = ' '.join([scriptName,damask.version])
 # MAIN
 # --------------------------------------------------------------------

-parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
+parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
 Add RGB color value corresponding to TSL-OIM scheme for inverse pole figures.

 """, version = scriptID)
@@ -41,6 +41,10 @@ parser.set_defaults(pole = (0.0,0.0,1.0),

 (options, filenames) = parser.parse_args()

+# damask.Orientation requires Bravais lattice, but we are only interested in symmetry
+symmetry2lattice={'cubic':'bcc','hexagonal':'hex','tetragonal':'bct'}
+lattice = symmetry2lattice[options.symmetry]
+
 pole = np.array(options.pole)
 pole /= np.linalg.norm(pole)
@@ -78,8 +82,8 @@ for name in filenames:

   outputAlive = True
   while outputAlive and table.data_read():  # read next data line of ASCII table
-    o = damask.Orientation(quaternion = np.array(list(map(float,table.data[column:column+4]))),
-                           symmetry = options.symmetry).reduced()
+    o = damask.Orientation(np.array(list(map(float,table.data[column:column+4]))),
+                           lattice = lattice).reduced()

     table.data_append(o.IPFcolor(pole))
     outputAlive = table.data_write()  # output processed line
@@ -13,7 +13,7 @@ scriptID = ' '.join([scriptName,damask.version])
 # MAIN
 # --------------------------------------------------------------------

-parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
+parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
 Add data in column(s) of mapped ASCIItable selected from the row indexed by the value in a mapping column.
 Row numbers start at 1.
@@ -13,7 +13,7 @@ scriptID = ' '.join([scriptName,damask.version])
 # MAIN
 # --------------------------------------------------------------------

-parser = OptionParser(option_class=damask.extendableOption, usage='%prog options file[s]', description = """
+parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
 Add info lines to ASCIItable header.

 """, version = scriptID)
@@ -23,11 +23,12 @@ parser.add_option('-i',
                   dest = 'info', action = 'extend', metavar = '<string LIST>',
                   help = 'items to add')

-parser.set_defaults(info = [],
-                   )
-
 (options,filenames) = parser.parse_args()

+if options.info is None:
+  parser.error('no info specified.')
+
 # --- loop over input files ------------------------------------------------------------------------

 if filenames == []: filenames = [None]
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.7
+#!/usr/bin/env python3
 # -*- coding: UTF-8 no BOM -*-

 import os,sys
@@ -13,7 +13,7 @@ scriptID = ' '.join([scriptName,damask.version])
 # MAIN
 # --------------------------------------------------------------------

-parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
+parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
 Add data of selected column(s) from (first) row of linked ASCIItable that shares the linking column value.

 """, version = scriptID)
@@ -21,7 +21,7 @@ Add data of selected column(s) from (first) row of linked ASCIItable that shares
 parser.add_option('--link',
                   dest = 'link', nargs = 2,
                   type = 'string', metavar = 'string string',
-                  help = 'column labels containing linked values')
+                  help = 'column labels of table and linked table containing linking values')
 parser.add_option('-l','--label',
                   dest = 'label',
                   action = 'extend', metavar = '<string LIST>',
@@ -105,7 +105,8 @@ for name in filenames:
   outputAlive = True
   while outputAlive and table.data_read():  # read next data line of ASCII table
     try:
-      table.data_append(data[np.argwhere(np.all((map(float,table.data[myLink:myLink+myLinkDim]) - index)==0,axis=1))[0]])  # add data of first matching line
+      table.data_append(data[np.argwhere(np.all((list(map(float,table.data[myLink:myLink+myLinkDim])) - index)==0,
+                                                axis=1))[0]])  # add data of first matching line
     except IndexError:
       table.data_append(np.nan*np.ones_like(data[0]))  # or add NaNs
     outputAlive = table.data_write()  # output processed line
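The extra list(...) is required because Python 3's map() returns a lazy iterator, which NumPy cannot subtract an array from; materializing it restores the Python 2 behaviour. A minimal sketch with made-up numbers:

import numpy as np

row   = ['1.0','2.0','3.0']                                # string fields as read from an ASCII table
index = np.array([1.0,2.0,3.0])

# Python 2: map() returned a list and broadcast against the array.
# Python 3: map() returns an iterator, so it must be wrapped in list() first.
diff = list(map(float,row)) - index
print(np.all(diff == 0))                                   # True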
@@ -24,7 +24,7 @@ def Mises(what,tensor):
 # MAIN
 # --------------------------------------------------------------------

-parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
+parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
 Add vonMises equivalent values for symmetric part of requested strains and/or stresses.

 """, version = scriptID)
@@ -41,10 +41,9 @@ parser.add_option('-s','--stress',
 parser.set_defaults(strain = [],
                     stress = [],
                    )

 (options,filenames) = parser.parse_args()

-if len(options.stress+options.strain) == 0:
+if options.stress is [] and options.strain is []:
   parser.error('no data column specified...')

 # --- loop over input files -------------------------------------------------------------------------
@@ -9,6 +9,7 @@ scriptName = os.path.splitext(os.path.basename(__file__))[0]
 scriptID   = ' '.join([scriptName,damask.version])

 # definition of element-wise p-norms for matrices
+# ToDo: better use numpy.linalg.norm

 def norm(which,object):
@ -18,12 +19,14 @@ def norm(which,object):
|
||||||
return math.sqrt(sum([x*x for x in object]))
|
return math.sqrt(sum([x*x for x in object]))
|
||||||
elif which == 'Max': # p = inf
|
elif which == 'Max': # p = inf
|
||||||
return max(map(abs, object))
|
return max(map(abs, object))
|
||||||
|
else:
|
||||||
|
return -1
|
||||||
|
|
||||||
# --------------------------------------------------------------------
|
# --------------------------------------------------------------------
|
||||||
# MAIN
|
# MAIN
|
||||||
# --------------------------------------------------------------------
|
# --------------------------------------------------------------------
|
||||||
|
|
||||||
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
|
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
|
||||||
Add column(s) containing norm of requested column(s) being either vectors or tensors.
|
Add column(s) containing norm of requested column(s) being either vectors or tensors.
|
||||||
|
|
||||||
""", version = scriptID)
|
""", version = scriptID)
|
||||||
|
@ -43,6 +46,8 @@ parser.set_defaults(norm = 'frobenius',
|
||||||
|
|
||||||
(options,filenames) = parser.parse_args()
|
(options,filenames) = parser.parse_args()
|
||||||
|
|
||||||
|
if options.norm.lower() not in normChoices:
|
||||||
|
parser.error('invalid norm ({}) specified.'.format(options.norm))
|
||||||
if options.label is None:
|
if options.label is None:
|
||||||
parser.error('no data column specified.')
|
parser.error('no data column specified.')
|
||||||
|
|
||||||
|
@ -74,7 +79,7 @@ for name in filenames:
|
||||||
else:
|
else:
|
||||||
dims.append(dim)
|
dims.append(dim)
|
||||||
columns.append(table.label_index(what))
|
columns.append(table.label_index(what))
|
||||||
table.labels_append('norm{}({})'.format(options.norm.capitalize(),what)) # extend ASCII header with new labels
|
table.labels_append('norm{}({})'.format(options.norm.capitalize(),what)) # extend ASCII header with new labels
|
||||||
|
|
||||||
if remarks != []: damask.util.croak(remarks)
|
if remarks != []: damask.util.croak(remarks)
|
||||||
if errors != []:
|
if errors != []:
|
||||||
|
|
|
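
The new `# ToDo` comment points at `numpy.linalg.norm`, which already covers the three hand-rolled cases (p = 1, 2, inf) for flat arrays. A brief sketch of the equivalence, using plain NumPy rather than the helper above:

    import numpy as np

    v = np.array([3.0, -4.0, 12.0])

    print(np.linalg.norm(v, ord=1))       # 19.0  -> 'Abs'       (sum of absolute values)
    print(np.linalg.norm(v, ord=2))       # 13.0  -> 'Frobenius' (square root of sum of squares)
    print(np.linalg.norm(v, ord=np.inf))  # 12.0  -> 'Max'       (largest absolute value)
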
@@ -9,36 +9,11 @@ import damask
 scriptName = os.path.splitext(os.path.basename(__file__))[0]
 scriptID   = ' '.join([scriptName,damask.version])
 
-# --------------------------------------------------------------------
-# convention conformity checks
-# --------------------------------------------------------------------
-
-def check_Eulers(eulers):
-  if np.any(eulers < 0.0) or np.any(eulers > 2.0*np.pi) or eulers[1] > np.pi:      # Euler angles within valid range?
-    raise ValueError('Euler angles outside of [0..2π],[0..π],[0..2π].\n{} {} {}.'.format(*eulers))
-  return eulers
-
-def check_quaternion(q):
-  if q[0] < 0.0:                                                                   # positive first quaternion component?
-    raise ValueError('quaternion has negative first component.\n{}'.format(q[0]))
-  if not np.isclose(np.linalg.norm(q), 1.0):                                       # unit quaternion?
-    raise ValueError('quaternion is not of unit length.\n{} {} {} {}'.format(*q))
-  return q
-
-def check_matrix(M):
-  if not np.isclose(np.linalg.det(M),1.0):                                         # proper rotation?
-    raise ValueError('matrix is not a proper rotation.\n{}'.format(M))
-  if not np.isclose(np.dot(M[0],M[1]), 0.0) \
-  or not np.isclose(np.dot(M[1],M[2]), 0.0) \
-  or not np.isclose(np.dot(M[2],M[0]), 0.0):                                       # all orthogonal?
-    raise ValueError('matrix is not orthogonal.\n{}'.format(M))
-  return M
-
 # --------------------------------------------------------------------
 # MAIN
 # --------------------------------------------------------------------
 
-parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
+parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
 Add quaternion and/or Bunge Euler angle representation of crystal lattice orientation.
 Orientation is given by quaternion, Euler angles, rotation matrix, or crystal frame coordinates
 (i.e. component vectors of rotation matrix).
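
The deleted `check_*` helpers (unit quaternion, proper and orthogonal rotation matrix, Euler angle ranges) disappear because the script now builds `damask.Rotation` objects, which presumably validate their input on construction. For reference, the same sanity checks can be written compactly in plain NumPy; a sketch reproducing what the removed code verified:

    import numpy as np

    def is_unit_quaternion(q, tol=1e-8):
        """First component non-negative and unit length, as check_quaternion required."""
        q = np.asarray(q, dtype=float)
        return q[0] >= 0.0 and np.isclose(np.linalg.norm(q), 1.0, atol=tol)

    def is_proper_rotation(M, tol=1e-8):
        """Orthogonal with determinant +1, as check_matrix required."""
        M = np.asarray(M, dtype=float).reshape(3, 3)
        return (np.allclose(M @ M.T, np.eye(3), atol=tol)
                and np.isclose(np.linalg.det(M), 1.0, atol=tol))

    print(is_unit_quaternion([1.0, 0.0, 0.0, 0.0]))   # True
    print(is_proper_rotation(np.eye(3)))              # True
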
@@ -46,19 +21,19 @@ Additional (globally fixed) rotations of the lab frame and/or crystal frame can
 
 """, version = scriptID)
 
-outputChoices = {
-                  'quaternion': ['quat',4],
-                  'rodrigues':  ['rodr',3],
+representations = {
+                  'quaternion': ['quat',4], #ToDo: Use here Rowenhorst names (qu/ro/om/ax?)
+                  'rodrigues':  ['rodr',4],
                   'eulers':     ['eulr',3],
                   'matrix':     ['mtrx',9],
                   'angleaxis':  ['aaxs',4],
                 }
 
 parser.add_option('-o',
                   '--output',
                   dest = 'output',
                   action = 'extend', metavar = '<string LIST>',
-                  help = 'output orientation formats {{{}}}'.format(', '.join(outputChoices)))
+                  help = 'output orientation formats {{{}}}'.format(', '.join(representations)))
 parser.add_option('-d',
                   '--degrees',
                   dest = 'degrees',
@@ -68,12 +43,12 @@ parser.add_option('-R',
                   '--labrotation',
                   dest='labrotation',
                   type = 'float', nargs = 4, metavar = ' '.join(['float']*4),
-                  help = 'angle and axis of additional lab frame rotation')
+                  help = 'angle and axis of additional lab frame rotation [%default]')
 parser.add_option('-r',
                   '--crystalrotation',
                   dest='crystalrotation',
                   type = 'float', nargs = 4, metavar = ' '.join(['float']*4),
-                  help = 'angle and axis of additional crystal frame rotation')
+                  help = 'angle and axis of additional crystal frame rotation [%default]')
 parser.add_option('--eulers',
                   dest = 'eulers',
                   metavar = 'string',
@@ -104,16 +79,15 @@ parser.add_option('-z',
                   help = 'label of lab z vector (expressed in crystal coords)')
 
 parser.set_defaults(output = [],
-                    labrotation     = (0.,1.,1.,1.),                               # no rotation about 1,1,1
-                    crystalrotation = (0.,1.,1.,1.),                               # no rotation about 1,1,1
-                    degrees = False,
+                    labrotation     = (0.,1.,0.,0.),                               # no rotation about 1,0,0
+                    crystalrotation = (0.,1.,0.,0.),                               # no rotation about 1,0,0
                    )
 
 (options, filenames) = parser.parse_args()
 
 options.output = list(map(lambda x: x.lower(), options.output))
-if options.output == [] or (not set(options.output).issubset(set(outputChoices))):
-  parser.error('output must be chosen from {}.'.format(', '.join(outputChoices)))
+if options.output == [] or (not set(options.output).issubset(set(representations))):
+  parser.error('output must be chosen from {}.'.format(', '.join(representations)))
 
 input = [options.eulers   is not None,
          options.rodrigues is not None,
@@ -126,16 +100,18 @@ input = [options.eulers   is not None,
 
 if np.sum(input) != 1: parser.error('needs exactly one input format.')
 
-(label,dim,inputtype) = [(options.eulers,3,'eulers'),
-                         (options.rodrigues,3,'rodrigues'),
+(label,dim,inputtype) = [(options.eulers,representations['eulers'][1],'eulers'),
+                         (options.rodrigues,representations['rodrigues'][1],'rodrigues'),
                          ([options.x,options.y,options.z],[3,3,3],'frame'),
-                         (options.matrix,9,'matrix'),
-                         (options.quaternion,4,'quaternion'),
+                         (options.matrix,representations['matrix'][1],'matrix'),
+                         (options.quaternion,representations['quaternion'][1],'quaternion'),
                         ][np.where(input)[0][0]]                                   # select input label that was requested
 
-toRadians = np.pi/180.0 if options.degrees else 1.0                                # rescale degrees to radians
-r = damask.Quaternion.fromAngleAxis(toRadians*options.crystalrotation[0],options.crystalrotation[1:])  # crystal frame rotation
-R = damask.Quaternion.fromAngleAxis(toRadians*options.    labrotation[0],options.    labrotation[1:])  # lab frame rotation
+crystalrotation = np.array(options.crystalrotation[1:4] + (options.crystalrotation[0],))  # Compatibility hack
+labrotation     = np.array(options.labrotation[1:4]     + (options.labrotation[0],))      # Compatibility hack
+r = damask.Rotation.fromAxisAngle(crystalrotation,options.degrees)                 # crystal frame rotation
+R = damask.Rotation.fromAxisAngle(labrotation,options.degrees)                     # lab frame rotation
 
 
 # --- loop over input files ------------------------------------------------------------------------
 
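
The two `# Compatibility hack` lines bridge a difference in argument conventions: the command-line options give the rotation as (angle, x, y, z), while `Rotation.fromAxisAngle` as used here expects the axis first and the angle last, (x, y, z, angle). A small sketch of just that reordering (pure Python/NumPy, no DAMASK import needed):

    import numpy as np

    crystalrotation_option = (10.0, 1.0, 0.0, 0.0)      # as parsed: angle followed by axis

    # reorder to axis-first, angle-last before handing it to Rotation.fromAxisAngle
    axis_angle = np.array(crystalrotation_option[1:4] + (crystalrotation_option[0],))
    print(axis_angle)                                   # [ 1.  0.  0. 10.]
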
@@ -169,9 +145,9 @@ for name in filenames:
 
   table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
   for output in options.output:
-    if output in outputChoices:
-      table.labels_append(['{}_{}({})'.format(i+1,outputChoices[output][0],label) \
-                           for i in range(outputChoices[output][1])])
+    if output in representations:
+      table.labels_append(['{}_{}({})'.format(i+1,representations[output][0],label) \
+                           for i in range(representations[output][1])])
   table.head_write()
 
 # ------------------------------------------ process data ------------------------------------------
@@ -179,30 +155,35 @@ for name in filenames:
   outputAlive = True
   while outputAlive and table.data_read():                                         # read next data line of ASCII table
     if inputtype == 'eulers':
+      l = representations['eulers'][1]
+      o = damask.Rotation.fromEulers(list(map(float,table.data[column:column+l])),options.degrees)
 
-      o = damask.Orientation(Eulers   = check_Eulers(np.array(list(map(float,table.data[column:column+3])))*toRadians))
     elif inputtype == 'rodrigues':
-      o = damask.Orientation(Rodrigues = np.array(list(map(float,table.data[column:column+3]))))
-    elif inputtype == 'matrix':
+      l = representations['rodrigues'][1]
+      o = damask.Rotation.fromRodrigues(list(map(float,table.data[column:column+l])))
 
-      o = damask.Orientation(matrix = check_matrix(np.array(list(map(float,table.data[column:column+9]))).reshape(3,3)))
+    elif inputtype == 'matrix':
+      l = representations['matrix'][1]
+      o = damask.Rotation.fromMatrix(list(map(float,table.data[column:column+l])))
 
     elif inputtype == 'frame':
       M = np.array(list(map(float,table.data[column[0]:column[0]+3] + \
                                   table.data[column[1]:column[1]+3] + \
                                   table.data[column[2]:column[2]+3]))).reshape(3,3).T
-      o = damask.Orientation(matrix = check_matrix(M/np.linalg.norm(M,axis=0)))
-    elif inputtype == 'quaternion':
+      o = damask.Rotation.fromMatrix(M/np.linalg.norm(M,axis=0))
 
-      o = damask.Orientation(quaternion = check_quaternion(np.array(list(map(float,table.data[column:column+4])))))
+    elif inputtype == 'quaternion':
+      l = representations['quaternion'][1]
+      o = damask.Rotation.fromQuaternion(list(map(float,table.data[column:column+l])))
 
-    o.quaternion = r*o.quaternion*R                                                # apply additional lab and crystal frame rotations
+    o = r*o*R                                                                      # apply additional lab and crystal frame rotations
 
     for output in options.output:
       if output == 'quaternion': table.data_append(o.asQuaternion())
       elif output == 'rodrigues': table.data_append(o.asRodrigues())
       elif output == 'eulers': table.data_append(o.asEulers(degrees=options.degrees))
       elif output == 'matrix': table.data_append(o.asMatrix())
-      elif output == 'angleaxis': table.data_append(o.asAngleAxis(degrees=options.degrees,flat=True))
+      elif output == 'angleaxis': table.data_append(o.asAxisAngle(degrees=options.degrees))
     outputAlive = table.data_write()                                               # output processed line
 
 # ------------------------------------------ output finalization -----------------------------------
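
The pattern of the rewrite above is the same for every input type: look up how many columns the representation occupies, slice that many values out of the table row, hand them to the matching `damask.Rotation.from*` constructor, and apply the extra lab and crystal rotations by composition (`r*o*R`). A hedged sketch of that flow, using only the constructor and accessor names that appear in the diff (it assumes a DAMASK Python module providing them with these signatures):

    import numpy as np
    import damask

    representations = {'quaternion': ['quat',4], 'eulers': ['eulr',3]}    # name -> [prefix, #columns]

    row = ['10.0', '20.0', '30.0']                                        # one table row: Bunge Euler angles in degrees
    l   = representations['eulers'][1]                                    # columns consumed by this representation
    o   = damask.Rotation.fromEulers(list(map(float, row[:l])), True)     # True -> values are given in degrees

    r = damask.Rotation.fromAxisAngle(np.array([1.0,0.0,0.0,0.0]), True)  # zero-angle crystal frame rotation (axis x, angle 0)
    R = damask.Rotation.fromAxisAngle(np.array([1.0,0.0,0.0,0.0]), True)  # zero-angle lab frame rotation

    o = r*o*R                                                             # compose with the additional frame rotations
    print(o.asQuaternion())                                               # same orientation, quaternion representation
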
@@ -13,7 +13,7 @@ scriptID = ' '.join([scriptName,damask.version])
 # MAIN
 # --------------------------------------------------------------------
 
-parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
+parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
 Add column(s) containing Second Piola--Kirchhoff stress based on given column(s) of deformation
 gradient and first Piola--Kirchhoff stress.
 
@@ -13,7 +13,7 @@ scriptID = ' '.join([scriptName,damask.version])
 # MAIN
 # --------------------------------------------------------------------
 
-parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
+parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
 Add coordinates of stereographic projection of given direction (pole) in crystal frame.
 
 """, version = scriptID)

@@ -35,7 +35,6 @@ parser.add_option('-o',
 
 parser.set_defaults(pole = (1.0,0.0,0.0),
                     quaternion = 'orientation',
-                    polar = False,
                    )
 
 (options, filenames) = parser.parse_args()
@@ -76,9 +75,9 @@ for name in filenames:
 # ------------------------------------------ process data ------------------------------------------
   outputAlive = True
   while outputAlive and table.data_read():                                         # read next data line of ASCII table
-    o = damask.Orientation(quaternion = np.array(list(map(float,table.data[column:column+4]))))
+    o = damask.Rotation(np.array(list(map(float,table.data[column:column+4]))))
 
-    rotatedPole = o.quaternion*pole                                                # rotate pole according to crystal orientation
+    rotatedPole = o*pole                                                           # rotate pole according to crystal orientation
     (x,y) = rotatedPole[0:2]/(1.+abs(pole[2]))                                     # stereographic projection
 
     table.data_append([np.sqrt(x*x+y*y),np.arctan2(y,x)] if options.polar else [x,y])  # cartesian coordinates
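
Independent of the Orientation-to-Rotation switch, the projection step stays the same: the in-plane components of the rotated pole are divided by one plus the absolute value of the axis component, and optionally converted to polar coordinates. For reference, the textbook form of that mapping on two easy cases (plain NumPy, the pole taken as already rotated):

    import numpy as np

    for rotatedPole in (np.array([0.0, 0.0, 1.0]),       # pole along the projection axis
                        np.array([1.0, 0.0, 0.0])):      # pole in the projection plane
        x, y = rotatedPole[0:2]/(1. + abs(rotatedPole[2]))
        r, phi = np.sqrt(x*x + y*y), np.arctan2(y, x)    # the optional polar form
        print((x, y), (r, phi))                          # origin for the first case, unit circle for the second
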
@@ -103,7 +103,7 @@ slipSystems = {
 # MAIN
 # --------------------------------------------------------------------
 
-parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
+parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
 Add columns listing Schmid factors (and optional trace vector of selected system) for given Euler angles.
 
 """, version = scriptID)

@@ -115,7 +115,7 @@ parser.add_option('-l',
                   help = 'type of lattice structure [%default] {}'.format(latticeChoices))
 parser.add_option('--covera',
                   dest = 'CoverA', type = 'float', metavar = 'float',
-                  help = 'C over A ratio for hexagonal systems')
+                  help = 'C over A ratio for hexagonal systems [%default]')
 parser.add_option('-f',
                   '--force',
                   dest = 'force',
@@ -212,10 +212,10 @@ for name in filenames:
 
   outputAlive = True
   while outputAlive and table.data_read():                                         # read next data line of ASCII table
-    o = damask.Orientation(quaternion = np.array(list(map(float,table.data[column:column+4]))))
+    o = damask.Rotation(list(map(float,table.data[column:column+4])))
 
-    table.data_append( np.abs( np.sum(slip_direction * (o.quaternion * force) ,axis=1) \
-                             * np.sum(slip_normal    * (o.quaternion * normal),axis=1)))
+    table.data_append( np.abs( np.sum(slip_direction * (o * force) ,axis=1) \
+                             * np.sum(slip_normal    * (o * normal),axis=1)))
     outputAlive = table.data_write()                                               # output processed line
 
 # ------------------------------------------ output finalization -----------------------------------
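
What that expression computes is the classical Schmid factor m = |cos φ · cos λ|: the loading direction is rotated into the crystal frame and dotted with each slip direction and slip-plane normal. A self-contained numerical check for a single fcc-like slip system under uniaxial loading (plain NumPy, all vectors unit length; the orientation is taken as identity so crystal and lab frames coincide):

    import numpy as np

    force          = np.array([0.0, 0.0, 1.0])                    # uniaxial loading direction
    slip_normal    = np.array([1.0, 1.0, 1.0]) / np.sqrt(3.0)     # (111) plane
    slip_direction = np.array([1.0, 0.0, -1.0]) / np.sqrt(2.0)    # [10-1] direction, lies in (111)

    # with an identity orientation the crystal and lab frames coincide
    m = np.abs(np.dot(slip_direction, force) * np.dot(slip_normal, force))
    print(round(m, 3))                                            # 0.408, the familiar fcc maximum
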
@@ -13,7 +13,7 @@ scriptID = ' '.join([scriptName,damask.version])
 # MAIN
 # --------------------------------------------------------------------
 
-parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
+parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
 Add column(s) containing eigenvalues and eigenvectors of requested symmetric tensor column(s).
 
 """, version = scriptID)
@@ -25,7 +25,7 @@ def operator(stretch,strain,eigenvalues):
 # MAIN
 # --------------------------------------------------------------------
 
-parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
+parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
 Add column(s) containing given strains based on given stretches of requested deformation gradient column(s).
 
 """, version = scriptID)

@@ -56,16 +56,15 @@ parser.add_option('-f','--defgrad',
                   metavar = '<string LIST>',
                   help = 'heading(s) of columns containing deformation tensor values [%default]')
 
-parser.set_defaults(right       = False,
-                    left        = False,
-                    logarithmic = False,
-                    biot        = False,
-                    green       = False,
+parser.set_defaults(
                     defgrad     = ['f'],
                    )
 
 (options,filenames) = parser.parse_args()
 
+if len(options.defgrad) > 1:
+  options.defgrad = options.defgrad[1:]
+
 stretches = []
 strains = []
 
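
The two lines added after `parse_args()` appear to deal with an awkward property of extendable list options: the default `['f']` stays in place and user-supplied labels get appended to it, so whenever more than one entry is present the leading default is dropped. A hedged sketch of that behaviour with a plain list, independent of optparse:

    defgrad = ['f']                    # default label

    # user passed two labels on the command line; they were appended to the default
    defgrad += ['defgrad1', 'defgrad2']

    if len(defgrad) > 1:               # same guard as in the diff
        defgrad = defgrad[1:]          # discard the default, keep the user's choice
    print(defgrad)                     # ['defgrad1', 'defgrad2']
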
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.7
+#!/usr/bin/env python3
 # -*- coding: UTF-8 no BOM -*-
 
 import os,sys

@@ -12,7 +12,7 @@ scriptID = ' '.join([scriptName,damask.version])
 # MAIN
 # --------------------------------------------------------------------
 
-parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
+parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
 Append data of ASCIItable(s).
 
 """, version = scriptID)

@@ -24,6 +24,10 @@ parser.add_option('-a', '--add','--table',
 
 (options,filenames) = parser.parse_args()
 
+if options.table is None:
+  parser.error('no table specified.')
+
+
 # --- loop over input files -------------------------------------------------------------------------
 
 if filenames == []: filenames = [None]
@@ -14,7 +14,7 @@ scriptID = ' '.join([scriptName,damask.version])
 # MAIN
 # --------------------------------------------------------------------
 
-parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
+parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
 Average each data block of size 'packing' into single values thus reducing the former grid to grid/packing.
 
 """, version = scriptID)

@@ -34,16 +34,14 @@ parser.add_option('--shift',
 parser.add_option('-g', '--grid',
                   dest = 'grid',
                   type = 'int', nargs = 3, metavar = 'int int int',
-                  help = 'grid in x,y,z [autodetect]')
+                  help = 'grid in x,y,z (optional)')
 parser.add_option('-s', '--size',
                   dest = 'size',
                   type = 'float', nargs = 3, metavar = 'float float float',
-                  help = 'size in x,y,z [autodetect]')
+                  help = 'size in x,y,z (optional)')
 parser.set_defaults(pos     = 'pos',
                     packing = (2,2,2),
                     shift   = (0,0,0),
-                    grid    = (0,0,0),
-                    size    = (0.0,0.0,0.0),
                    )
 
 (options,filenames) = parser.parse_args()

@@ -92,7 +90,7 @@ for name in filenames:
 
   table.data_readArray()
 
-  if (any(options.grid) == 0 or any(options.size) == 0.0):
+  if (options.grid is None or options.size is None):
     grid,size = damask.util.coordGridAndSize(table.data[:,table.label_indexrange(options.pos)])
   else:
     grid = np.array(options.grid,'i')
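
Dropping the `grid`/`size` defaults means an unset option is now `None` instead of an all-zero sentinel tuple, so the autodetect test becomes a direct "was the option given?" check rather than a comparison against a magic value. A minimal sketch of the new style, with a hypothetical `autodetect()` standing in for `damask.util.coordGridAndSize`:

    def autodetect():
        # hypothetical stand-in for damask.util.coordGridAndSize(...)
        return (16, 16, 16), (1.0, 1.0, 1.0)

    grid = None                        # optparse default when -g/--grid is not given
    size = None                        # same for -s/--size

    if grid is None or size is None:   # 'option missing' is now explicit,
        grid, size = autodetect()      # not encoded as an all-zero sentinel tuple
    print(grid, size)
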
@@ -13,7 +13,7 @@ scriptID = ' '.join([scriptName,damask.version])
 # MAIN
 # --------------------------------------------------------------------
 
-parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
+parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
 Produces a binned grid of two columns from an ASCIItable, i.e. a two-dimensional probability density map.
 
 """, version = scriptID)

@@ -37,15 +37,15 @@ parser.add_option('-t','--type',
 parser.add_option('-x','--xrange',
                   dest = 'xrange',
                   type = 'float', nargs = 2, metavar = 'float float',
-                  help = 'min max value in x direction [autodetect]')
+                  help = 'min max limits in x direction (optional)')
 parser.add_option('-y','--yrange',
                   dest = 'yrange',
                   type = 'float', nargs = 2, metavar = 'float float',
-                  help = 'min max value in y direction [autodetect]')
+                  help = 'min max limits in y direction (optional)')
 parser.add_option('-z','--zrange',
                   dest = 'zrange',
                   type = 'float', nargs = 2, metavar = 'float float',
-                  help = 'min max value in z direction [autodetect]')
+                  help = 'min max limits in z direction (optional)')
 parser.add_option('-i','--invert',
                   dest = 'invert',
                   action = 'store_true',

@@ -64,9 +64,6 @@ parser.set_defaults(bins = (10,10),
                     xrange  = (0.0,0.0),
                     yrange  = (0.0,0.0),
                     zrange  = (0.0,0.0),
-                    invert  = False,
-                    normRow = False,
-                    normCol = False,
                    )
 
 (options,filenames) = parser.parse_args()
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.7
+#!/usr/bin/env python3
 # -*- coding: UTF-8 no BOM -*-
 
 import os,sys

@@ -13,7 +13,7 @@ scriptID = ' '.join([scriptName,damask.version])
 # MAIN
 # --------------------------------------------------------------------
 
-parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
+parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
 Blows up each value to a surrounding data block of size 'packing' thus increasing the former resolution
 to resolution*packing.
 

@@ -27,10 +27,10 @@ parser.add_option('-p','--packing',
                   help = 'dimension of packed group [%default]')
 parser.add_option('-g','--grid',
                   dest = 'resolution', type = 'int', nargs = 3, metavar = 'int int int',
-                  help = 'resolution in x,y,z [autodetect]')
+                  help = 'grid in x,y,z (optional)')
 parser.add_option('-s','--size',
                   dest = 'dimension', type = 'float', nargs = 3, metavar = 'int int int',
-                  help = 'dimension in x,y,z [autodetect]')
+                  help = 'size in x,y,z (optional)')
 parser.set_defaults(pos     = 'pos',
                     packing = (2,2,2),
                     grid    = (0,0,0),
@@ -30,7 +30,7 @@ def sortingList(labels,whitelistitems):
 # MAIN
 # --------------------------------------------------------------------
 
-parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
+parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
 Filter rows according to condition and columns by either white or black listing.
 
 Examples:
@@ -20,7 +20,7 @@ scriptID = ' '.join([scriptName,damask.version])
 # MAIN
 # --------------------------------------------------------------------
 
-parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
+parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
 Apply a user-specified function to condense into a single row all those rows for which columns 'label' have identical values.
 Output table will contain as many rows as there are different (unique) values in the grouping column(s).
 Periodic domain averaging of coordinate values is supported.
@@ -13,7 +13,7 @@ scriptID = ' '.join([scriptName,damask.version])
 # MAIN
 # --------------------------------------------------------------------
 
-parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
+parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
 Permute all values in given column(s).
 
 """, version = scriptID)
@@ -12,7 +12,7 @@ scriptID = ' '.join([scriptName,damask.version])
 # MAIN
 # --------------------------------------------------------------------
 
-parser = OptionParser(option_class=damask.extendableOption, usage='%prog [options] dfile[s]', description = """
+parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
 Rename scalar, vectorial, and/or tensorial data header labels.
 
 """, version = scriptID)
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 # -*- coding: UTF-8 no BOM -*-
 
-import os,sys,math
+import os,sys
 import numpy as np
 from optparse import OptionParser
 import damask

@@ -13,7 +13,7 @@ scriptID = ' '.join([scriptName,damask.version])
 # MAIN
 # --------------------------------------------------------------------
 
-parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
+parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
 Rotate vector and/or tensor column data by given angle around given axis.
 
 """, version = scriptID)

@@ -29,7 +29,7 @@ parser.add_option('-r', '--rotation',
 parser.add_option('--degrees',
                   dest = 'degrees',
                   action = 'store_true',
-                  help = 'angles are given in degrees [%default]')
+                  help = 'angles are given in degrees')
 
 parser.set_defaults(rotation = (0.,1.,1.,1.),                                      # no rotation about 1,1,1
                     degrees  = False,

@@ -40,9 +40,8 @@ parser.set_defaults(rotation = (0.,1.,1.,1.),
 if options.data is None:
   parser.error('no data column specified.')
 
-toRadians = math.pi/180.0 if options.degrees else 1.0                              # rescale degrees to radians
-q = damask.Quaternion().fromAngleAxis(toRadians*options.rotation[0],options.rotation[1:])
-R = q.asMatrix()
+rotation = np.array(options.rotation[1:4]+(options.rotation[0],))                  # Compatibility hack
+r = damask.Rotation.fromAxisAngle(rotation,options.degrees,normalise=True)
 
 # --- loop over input files -------------------------------------------------------------------------
 

@@ -90,12 +89,11 @@ for name in filenames:
   while outputAlive and table.data_read():                                         # read next data line of ASCII table
     for v in active['vector']:
       column = table.label_index(v)
-      table.data[column:column+3] = q * np.array(list(map(float,table.data[column:column+3])))
+      table.data[column:column+3] = r * np.array(list(map(float,table.data[column:column+3])))
     for t in active['tensor']:
       column = table.label_index(t)
-      table.data[column:column+9] = \
-        np.dot(R,np.dot(np.array(list(map(float,table.data[column:column+9]))).reshape((3,3)),
-                        R.transpose())).reshape((9))
+      table.data[column:column+9] = (r * np.array(list(map(float,table.data[column:column+9]))).reshape((3,3))).reshape(9)
     outputAlive = table.data_write()                                               # output processed line
 
 # ------------------------------------------ output finalization -----------------------------------
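
The tensor branch is the instructive part of this rewrite: the old code rotated a 3x3 tensor explicitly as R·M·Rᵀ with nested `np.dot` calls, while the new code hands the reshaped 3x3 array to the Rotation object and lets `*` apply the appropriate rule. A small NumPy-only sketch of the explicit form the removed lines implemented, which the operator is expected to reproduce:

    import numpy as np

    theta = np.radians(90.0)                      # 90 degrees about the z axis
    R = np.array([[np.cos(theta), -np.sin(theta), 0.0],
                  [np.sin(theta),  np.cos(theta), 0.0],
                  [0.0,            0.0,           1.0]])

    M = np.diag([1.0, 2.0, 3.0])                  # some symmetric tensor in the old frame

    M_rot = R @ M @ R.T                           # the R.M.R^T rule spelled out by the old np.dot lines
    print(np.round(M_rot, 12))                    # diag(2, 1, 3): the x and y eigendirections swap
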
@@ -13,7 +13,7 @@ scriptID = ' '.join([scriptName,damask.version])
 # MAIN
 # --------------------------------------------------------------------
 
-parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
+parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
 Uniformly scale column values by given factor.
 
 """, version = scriptID)
@@ -13,7 +13,7 @@ scriptID = ' '.join([scriptName,damask.version])
 # MAIN
 # --------------------------------------------------------------------
 
-parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
+parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
 Uniformly shift column values by given offset.
 
 """, version = scriptID)
@@ -13,7 +13,7 @@ scriptID = ' '.join([scriptName,damask.version])
 # MAIN
 # --------------------------------------------------------------------
 
-parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
+parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
 Sort rows by given (or all) column label(s).
 
 Examples:
@@ -12,7 +12,7 @@ scriptID = ' '.join([scriptName,damask.version])
 # MAIN
 # --------------------------------------------------------------------
 
-parser = OptionParser(usage='%prog [options] [file[s]]', description = """
+parser = OptionParser(usage='%prog options [ASCIItable(s)]', description = """
 Show components of given ASCIItable(s).
 
 """, version = scriptID)
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.7
+#!/usr/bin/env python3
 # -*- coding: UTF-8 no BOM -*-
 
 import os,vtk

@@ -17,7 +17,7 @@ scriptID = ' '.join([scriptName,damask.version])
 msg = "Add scalars, vectors, and/or an RGB tuple from"
 msg += "an ASCIItable to existing VTK grid (.vtr/.vtk/.vtu)."
 parser = OptionParser(option_class=damask.extendableOption,
-                      usage='%prog options [file[s]]',
+                      usage='%prog options [ASCIItable(s)]',
                       description = msg,
                       version = scriptID)
 

@@ -25,10 +25,6 @@ parser.add_option(      '--vtk',
                   dest = 'vtk',
                   type = 'string', metavar = 'string',
                   help = 'VTK file name')
-parser.add_option(      '--inplace',
-                  dest = 'inplace',
-                  action = 'store_true',
-                  help = 'modify VTK file in-place')
 parser.add_option('-r', '--render',
                   dest = 'render',
                   action = 'store_true',

@@ -49,7 +45,6 @@ parser.add_option('-c', '--color',
 parser.set_defaults(data    = [],
                     tensor  = [],
                     color   = [],
-                    inplace = False,
                     render  = False,
                    )
 

@@ -58,30 +53,32 @@ parser.set_defaults(data = [],
 if not options.vtk:                 parser.error('No VTK file specified.')
 if not os.path.exists(options.vtk): parser.error('VTK file does not exist.')
 
-if os.path.splitext(options.vtk)[1] == '.vtr':
+vtk_file,vtk_ext = os.path.splitext(options.vtk)
+
+if vtk_ext == '.vtr':
   reader = vtk.vtkXMLRectilinearGridReader()
   reader.SetFileName(options.vtk)
   reader.Update()
   rGrid = reader.GetOutput()
   writer = vtk.vtkXMLRectilinearGridWriter()
-  writer.SetFileName(os.path.splitext(options.vtk)[0]+('.vtr' if options.inplace else '_added.vtr'))
-elif os.path.splitext(options.vtk)[1] == '.vtk':
+elif vtk_ext == '.vtk':
   reader = vtk.vtkGenericDataObjectReader()
   reader.SetFileName(options.vtk)
   reader.Update()
   rGrid = reader.GetRectilinearGridOutput()
   writer = vtk.vtkXMLRectilinearGridWriter()
-  writer.SetFileName(os.path.splitext(options.vtk)[0]+('.vtr' if options.inplace else '_added.vtr'))
-elif os.path.splitext(options.vtk)[1] == '.vtu':
+  vtk_ext = '.vtr'
+elif vtk_ext == '.vtu':
   reader = vtk.vtkXMLUnstructuredGridReader()
   reader.SetFileName(options.vtk)
   reader.Update()
   rGrid = reader.GetOutput()
   writer = vtk.vtkXMLUnstructuredGridWriter()
-  writer.SetFileName(os.path.splitext(options.vtk)[0]+('.vtu' if options.inplace else '_added.vtu'))
 else:
   parser.error('Unsupported VTK file type extension.')
 
+writer.SetFileName(vtk_file+vtk_ext)
+
 Npoints = rGrid.GetNumberOfPoints()
 Ncells  = rGrid.GetNumberOfCells()
 

@@ -172,8 +169,7 @@ for name in filenames:
 
   writer.SetDataModeToBinary()
   writer.SetCompressorTypeToZLib()
-  if vtk.VTK_MAJOR_VERSION <= 5: writer.SetInput(rGrid)
-  else:                          writer.SetInputData(rGrid)
+  writer.SetInputData(rGrid)
   writer.Write()
 
 # ------------------------------------------ render result ---------------------------------------
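
The `if vtk.VTK_MAJOR_VERSION <= 5` branches disappear throughout these scripts because, from VTK 6 on, data objects are connected to filters and writers with `SetInputData` (the old `SetInput` belonged to the pre-6 pipeline). A minimal sketch of the modern call sequence for a writer, assuming a recent VTK Python package is installed:

    import vtk

    # build a trivial poly data object so the writer has something to consume
    points = vtk.vtkPoints()
    points.InsertNextPoint(0.0, 0.0, 0.0)
    polydata = vtk.vtkPolyData()
    polydata.SetPoints(points)

    writer = vtk.vtkXMLPolyDataWriter()
    writer.SetFileName('example.vtp')      # hypothetical output name
    writer.SetDataModeToBinary()
    writer.SetCompressorTypeToZLib()
    writer.SetInputData(polydata)          # VTK >= 6 style; no version check needed
    writer.Write()
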
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.7
+#!/usr/bin/env python3
 # -*- coding: UTF-8 no BOM -*-
 
 import os,vtk

@@ -15,7 +15,7 @@ scriptID = ' '.join([scriptName,damask.version])
 # --------------------------------------------------------------------
 
 parser = OptionParser(option_class=damask.extendableOption,
-                      usage='%prog options [file[s]]',
+                      usage='%prog options [ASCIItable(s)]',
                       description = """Add scalar and RGB tuples from ASCIItable to existing VTK point cloud (.vtp).""",
                       version = scriptID)
 

@@ -23,10 +23,6 @@ parser.add_option(      '--vtk',
                   dest = 'vtk',
                   type = 'string', metavar = 'string',
                   help = 'VTK file name')
-parser.add_option(      '--inplace',
-                  dest = 'inplace',
-                  action = 'store_true',
-                  help = 'modify VTK file in-place')
 parser.add_option('-r', '--render',
                   dest = 'render',
                   action = 'store_true',

@@ -46,8 +42,6 @@ parser.add_option('-c', '--color', dest='color', action='extend',
 parser.set_defaults(data    = [],
                     tensor  = [],
                     color   = [],
-                    inplace = False,
-                    render  = False,
                    )
 
 (options, filenames) = parser.parse_args()

@@ -55,16 +49,19 @@ parser.set_defaults(data = [],
 if not options.vtk:                 parser.error('no VTK file specified.')
 if not os.path.exists(options.vtk): parser.error('VTK file does not exist.')
 
-if os.path.splitext(options.vtk)[1] == '.vtp':
+vtk_file,vtk_ext = os.path.splitext(options.vtk)
+
+if vtk_ext == '.vtp':
   reader = vtk.vtkXMLPolyDataReader()
   reader.SetFileName(options.vtk)
   reader.Update()
   Polydata = reader.GetOutput()
-elif os.path.splitext(options.vtk)[1] == '.vtk':
+elif vtk_ext == '.vtk':
  reader = vtk.vtkGenericDataObjectReader()
  reader.SetFileName(options.vtk)
  reader.Update()
  Polydata = reader.GetPolyDataOutput()
+ vtk_ext = '.vtp'
 else:
   parser.error('unsupported VTK file type extension.')
 

@@ -151,14 +148,12 @@ for name in filenames:
 # ------------------------------------------ output result ---------------------------------------
 
   Polydata.Modified()
-  if vtk.VTK_MAJOR_VERSION <= 5: Polydata.Update()
 
   writer = vtk.vtkXMLPolyDataWriter()
   writer.SetDataModeToBinary()
   writer.SetCompressorTypeToZLib()
-  writer.SetFileName(os.path.splitext(options.vtk)[0]+('.vtp' if options.inplace else '_added.vtp'))
-  if vtk.VTK_MAJOR_VERSION <= 5: writer.SetInput(Polydata)
-  else:                          writer.SetInputData(Polydata)
+  writer.SetFileName(vtk_file+vtk_ext)
+  writer.SetInputData(Polydata)
   writer.Write()
 
 # ------------------------------------------ render result ---------------------------------------
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.7
+#!/usr/bin/env python3
 # -*- coding: UTF-8 no BOM -*-
 
 import os,vtk

@@ -25,10 +25,6 @@ parser.add_option(      '--vtk',
                   dest = 'vtk',
                   type = 'string', metavar = 'string',
                   help = 'VTK file name')
-parser.add_option(      '--inplace',
-                  dest = 'inplace',
-                  action = 'store_true',
-                  help = 'modify VTK file in-place')
 parser.add_option('-r', '--render',
                   dest = 'render',
                   action = 'store_true',

@@ -49,7 +45,6 @@ parser.add_option('-c', '--color',
 parser.set_defaults(data    = [],
                     tensor  = [],
                     color   = [],
-                    inplace = False,
                     render  = False,
                    )
 

@@ -58,16 +53,18 @@ parser.set_defaults(data = [],
 if not options.vtk:                 parser.error('no VTK file specified.')
 if not os.path.exists(options.vtk): parser.error('VTK file does not exist.')
 
-if os.path.splitext(options.vtk)[1] == '.vtr':
+vtk_file,vtk_ext = os.path.splitext(options.vtk)
+if vtk_ext == '.vtr':
   reader = vtk.vtkXMLRectilinearGridReader()
   reader.SetFileName(options.vtk)
   reader.Update()
   rGrid = reader.GetOutput()
-elif os.path.splitext(options.vtk)[1] == '.vtk':
+elif vtk_ext == '.vtk':
   reader = vtk.vtkGenericDataObjectReader()
   reader.SetFileName(options.vtk)
   reader.Update()
   rGrid = reader.GetRectilinearGridOutput()
+  vtk_ext = '.vtr'
 else:
   parser.error('unsupported VTK file type extension.')
 

@@ -158,16 +155,14 @@ for name in filenames:
     elif mode == 'point': rGrid.GetPointData().AddArray(VTKarray[me])
 
   rGrid.Modified()
-  if vtk.VTK_MAJOR_VERSION <= 5: rGrid.Update()
 
 # ------------------------------------------ output result ---------------------------------------
 
   writer = vtk.vtkXMLRectilinearGridWriter()
   writer.SetDataModeToBinary()
   writer.SetCompressorTypeToZLib()
-  writer.SetFileName(os.path.splitext(options.vtk)[0]+('.vtr' if options.inplace else '_added.vtr'))
-  if vtk.VTK_MAJOR_VERSION <= 5: writer.SetInput(rGrid)
-  else:                          writer.SetInputData(rGrid)
+  writer.SetFileName(vtk_file+vtk_ext)
+  writer.SetInputData(rGrid)
   writer.Write()
 
 # ------------------------------------------ render result ---------------------------------------
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.7
+#!/usr/bin/env python3
 # -*- coding: UTF-8 no BOM -*-
 
 import os,sys,vtk

@@ -13,7 +13,7 @@ scriptID = ' '.join([scriptName,damask.version])
 # MAIN
 # --------------------------------------------------------------------
 
-parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
+parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
 Produce a VTK point cloud dataset based on coordinates given in an ASCIItable.
 
 """, version = scriptID)

@@ -78,7 +78,6 @@ for name in filenames:
   Polydata.SetPoints(Points)
   Polydata.SetVerts(Vertices)
   Polydata.Modified()
-  if vtk.VTK_MAJOR_VERSION <= 5: Polydata.Update()
 
 # ------------------------------------------ output result ---------------------------------------
 

@@ -94,8 +93,8 @@ for name in filenames:
     writer.SetHeader('# powered by '+scriptID)
     writer.WriteToOutputStringOn()
 
-    if vtk.VTK_MAJOR_VERSION <= 5: writer.SetInput(Polydata)
-    else:                          writer.SetInputData(Polydata)
+    writer.SetInputData(Polydata)
 
     writer.Write()
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.7
+#!/usr/bin/env python3
 # -*- coding: UTF-8 no BOM -*-
 
 import os,sys,vtk

@@ -13,7 +13,7 @@ scriptID = ' '.join([scriptName,damask.version])
 # MAIN
 # --------------------------------------------------------------------
 
-parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
+parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
 Create regular voxel grid from points in an ASCIItable.
 
 """, version = scriptID)

@@ -125,8 +125,7 @@ for name in filenames:
     writer.SetHeader('# powered by '+scriptID)
    writer.WriteToOutputStringOn()
 
-    if vtk.VTK_MAJOR_VERSION <= 5: writer.SetInput(rGrid)
-    else:                          writer.SetInputData(rGrid)
+    writer.SetInputData(rGrid)
 
    writer.Write()
 
@ -1,119 +0,0 @@
|
||||||
#!/usr/bin/env python2.7
|
|
||||||
# -*- coding: UTF-8 no BOM -*-
|
|
||||||
|
|
||||||
import os,sys,math
|
|
||||||
from optparse import OptionParser
|
|
||||||
import damask
|
|
||||||
import pipes
|
|
||||||
|
|
||||||
scriptName = os.path.splitext(os.path.basename(__file__))[0]
|
|
||||||
scriptID = ' '.join([scriptName,damask.version])
|
|
||||||
|
|
||||||
# --------------------------------------------------------------------
|
|
||||||
# MAIN
|
|
||||||
# --------------------------------------------------------------------
|
|
||||||
|
|
||||||
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]',
|
|
||||||
description ='generate 3D RVE from .ang files of EBSD slices .',
|
|
||||||
version = scriptID)
|
|
||||||
|
|
||||||
parser.add_option('--offset',
|
|
||||||
dest='offset',
|
|
||||||
type='float',
|
|
||||||
help='offset of EBSD slices [%default]',
|
|
||||||
metavar='float')
|
|
||||||
parser.add_option('--outname',
|
|
||||||
dest='outName',
|
|
||||||
type='string',
|
|
||||||
help='output file name [%default]', metavar='string')
|
|
||||||
parser.add_option('--vtr',
|
|
||||||
action="store_true",
|
|
||||||
dest='vtr')
|
|
||||||
parser.add_option('--geom',
|
|
||||||
action="store_true",
|
|
||||||
dest='geom')
|
|
||||||
parser.set_defaults(offset = 1.0,
|
|
||||||
outName = 'RVE3D')
|
|
||||||
|
|
||||||
(options,filenames) = parser.parse_args()
|
|
||||||
|
|
||||||
numFiles = len(filenames)
|
|
||||||
formatwidth = 1+int(math.log10(numFiles))
|
|
||||||
|
|
||||||
# copy original files to tmp files to not alter originals
|
|
||||||
for i in range(numFiles):
|
|
||||||
sliceID = 'slice' + str(i).zfill(formatwidth) + '.tmp'
|
|
||||||
strCommand = 'cp ' + pipes.quote(filenames[i]) + ' ' + sliceID
|
|
||||||
os.system(strCommand)
|
|
||||||
|
|
||||||
# modify tmp files
|
|
||||||
print('Add z-coordinates')
|
|
||||||
for i in range(numFiles):
|
|
||||||
sliceID = 'slice' + str(i).zfill(formatwidth) + '.tmp'
|
|
||||||
strCommand = 'OIMgrainFile_toTable ' + sliceID
|
|
||||||
os.system(strCommand)
|
|
||||||
strCommand = 'addCalculation --label 3Dpos --formula "np.array(#pos#.tolist()+[' + str(i*options.offset) + '])" ' + sliceID
|
|
||||||
os.system(strCommand)
|
|
||||||
|
|
||||||
# join temp files into one
|
|
||||||
|
|
||||||
print('\n Colocate files')
|
|
||||||
fileOut = open(options.outName + '.ang','w')
|
|
||||||
|
|
||||||
# take header information from 1st file
|
|
||||||
sliceID = 'slice' + str(0).zfill(formatwidth) + '.tmp'
|
|
||||||
fileRead = open(sliceID)
|
|
||||||
data = fileRead.readlines()
|
|
||||||
fileRead.close()
|
|
||||||
headerLines = int(data[0].split()[0])
|
|
||||||
fileOut.write(str(headerLines+1) + '\t header\n')
|
|
||||||
for line in data[1:headerLines]:
|
|
||||||
fileOut.write(line)
|
|
||||||
fileOut.write(scriptID + '\t' + ' '.join(sys.argv[1:]) + '\n')
|
|
||||||
for line in data[headerLines:]:
|
|
||||||
fileOut.write(line)
|
|
||||||
|
|
||||||
# append other files content without header
|
|
||||||
for i in range(numFiles-1):
|
|
||||||
sliceID = 'slice' + str(i+1).zfill(formatwidth) + '.tmp'
|
|
||||||
fileRead = open(sliceID)
|
|
||||||
data = fileRead.readlines()
|
|
||||||
fileRead.close()
|
|
||||||
headerLines = int(data[0].split()[0])
|
|
||||||
for line in data[headerLines+1:]:
|
|
||||||
fileOut.write(line)
|
|
||||||
fileOut.close()
|
|
||||||
|
|
||||||
# tidy up and add phase column
|
|
||||||
print('\n Remove temp data and add phase info')
|
|
||||||
strCommand = 'filterTable --black pos ' + options.outName + '.ang'
|
|
||||||
os.system(strCommand)
|
|
||||||
strCommand = 'reLabel --label 3Dpos --substitute pos ' + options.outName + '.ang'
|
|
||||||
os.system(strCommand)
|
|
||||||
strCommand = 'addCalculation -l phase -f 1 ' + options.outName + '.ang'
|
|
||||||
os.system(strCommand)
|
|
||||||
|
|
||||||
|
|
||||||
# create geom file when asked for
|
|
||||||
if options.geom:
|
|
||||||
print('\n Build geometry file')
|
|
||||||
strCommand = 'geom_fromTable --phase phase --eulers euler --coordinates pos ' + pipes.quote(options.outName) + '.ang'
|
|
||||||
os.system(strCommand)
|
|
||||||
|
|
||||||
# create paraview file when asked for
|
|
||||||
|
|
||||||
if options.vtr:
|
|
||||||
print('\n Build Paraview file')
|
|
||||||
strCommand = 'addIPFcolor --eulers euler --pole 0.0 0.0 1.0 ' + options.outName + '.ang'
|
|
||||||
os.system(strCommand)
|
|
||||||
strCommand = 'vtk_rectilinearGrid ' + pipes.quote(options.outName) + '.ang'
|
|
||||||
os.system(strCommand)
|
|
||||||
os.rename(pipes.quote(options.outName) + '_pos(cell)'+'.vtr', pipes.quote(options.outName) + '.vtr')
|
|
||||||
strCommand = 'vtk_addRectilinearGridData --vtk '+ pipes.quote(options.outName) + '.vtr --color IPF_001_cubic '\
|
|
||||||
+ pipes.quote(options.outName) + '.ang'
|
|
||||||
os.system(strCommand)
|
|
||||||
|
|
||||||
# delete tmp files
|
|
||||||
for i in range(numFiles):
|
|
||||||
sliceID = 'slice' + str(i).zfill(formatwidth) + '.tmp'
|
|
||||||
os.remove(sliceID)
|
|
|
@ -25,7 +25,7 @@ mappings = {
|
||||||
'microstructures': lambda x: int(x),
|
'microstructures': lambda x: int(x),
|
||||||
}
|
}
|
||||||
|
|
||||||
parser = OptionParser(option_class=damask.extendableOption, usage='%prog option(s) [geomfile(s)]', description = """
|
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [geomfile(s)]', description = """
|
||||||
Positions a geometric object within the (three-dimensional) canvas of a spectral geometry description.
|
Positions a geometric object within the (three-dimensional) canvas of a spectral geometry description.
|
||||||
Depending on the sign of the dimension parameters, these objects can be boxes, cylinders, or ellipsoids.
|
Depending on the sign of the dimension parameters, these objects can be boxes, cylinders, or ellipsoids.
|
||||||
|
|
||||||
|
@ -43,7 +43,7 @@ parser.add_option('-f', '--fill', dest='fill', type='int', metavar = 'int'
|
||||||
help='grain index to fill primitive. "0" selects maximum microstructure index + 1 [%default]')
|
help='grain index to fill primitive. "0" selects maximum microstructure index + 1 [%default]')
|
||||||
parser.add_option('-q', '--quaternion', dest='quaternion', type='float', nargs = 4, metavar=' '.join(['float']*4),
|
parser.add_option('-q', '--quaternion', dest='quaternion', type='float', nargs = 4, metavar=' '.join(['float']*4),
|
||||||
help = 'rotation of primitive as quaternion')
|
help = 'rotation of primitive as quaternion')
|
||||||
parser.add_option('-a', '--angleaxis', dest='angleaxis', nargs = 4, metavar=' '.join(['float']*4),
|
parser.add_option('-a', '--angleaxis', dest='angleaxis', nargs = 4, metavar=' '.join(['float']*4), type=float,
|
||||||
help = 'angle,x,y,z clockwise rotation of primitive about axis by angle')
|
help = 'angle,x,y,z clockwise rotation of primitive about axis by angle')
|
||||||
parser.add_option( '--degrees', dest='degrees', action='store_true',
|
parser.add_option( '--degrees', dest='degrees', action='store_true',
|
||||||
help = 'angle is given in degrees [%default]')
|
help = 'angle is given in degrees [%default]')
|
||||||
|
@ -63,14 +63,12 @@ parser.set_defaults(center = (.0,.0,.0),
|
||||||
if options.dimension is None:
|
if options.dimension is None:
|
||||||
parser.error('no dimension specified.')
|
parser.error('no dimension specified.')
|
||||||
if options.angleaxis is not None:
|
if options.angleaxis is not None:
|
||||||
options.angleaxis = list(map(float,options.angleaxis))
|
ax = np.array(options.angleaxis[1:4] + (options.angleaxis[0],)) # Compatibility hack
|
||||||
rotation = damask.Quaternion.fromAngleAxis(np.radians(options.angleaxis[0]) if options.degrees else options.angleaxis[0],
|
rotation = damask.Rotation.fromAxisAngle(ax,options.degrees,normalise=True)
|
||||||
options.angleaxis[1:4])
|
|
||||||
elif options.quaternion is not None:
|
elif options.quaternion is not None:
|
||||||
options.quaternion = list(map(float,options.quaternion))
|
rotation = damask.Rotation.fromQuaternion(options.quaternion)
|
||||||
rotation = damask.Quaternion(quat=options.quaternion)
|
|
||||||
else:
|
else:
|
||||||
rotation = damask.Quaternion()
|
rotation = damask.Rotation()
|
||||||
|
|
||||||
options.center = np.array(options.center)
|
options.center = np.array(options.center)
|
||||||
options.dimension = np.array(options.dimension)
|
options.dimension = np.array(options.dimension)
|
||||||
|
@ -159,8 +157,7 @@ for name in filenames:
|
||||||
X -= options.center[0] - 0.5
|
X -= options.center[0] - 0.5
|
||||||
Y -= options.center[1] - 0.5
|
Y -= options.center[1] - 0.5
|
||||||
Z -= options.center[2] - 0.5
|
Z -= options.center[2] - 0.5
|
||||||
# and then by applying the quaternion
|
# and then by applying the rotation
|
||||||
# this should be rotation.conjugate() * (X,Y,Z), but it is this way for backwards compatibility with the older version of this script
|
|
||||||
(X, Y, Z) = rotation * (X, Y, Z)
|
(X, Y, Z) = rotation * (X, Y, Z)
|
||||||
# and finally by scaling (we don't worry about options.dimension being negative, np.abs occurs on the microstructure = np.where... line)
|
# and finally by scaling (we don't worry about options.dimension being negative, np.abs occurs on the microstructure = np.where... line)
|
||||||
X /= options.dimension[0] * 0.5
|
X /= options.dimension[0] * 0.5
|
||||||
|
|
|
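The new rotation handling above keeps the '-a/--angleaxis' command-line order (angle first, then the axis), while damask.Rotation.fromAxisAngle, as used in the hunk, takes the axis components first and the angle last; that reordering is all the line labelled "Compatibility hack" does. A minimal sketch of the same conversion, assuming the damask package from this repository is importable and using made-up numbers:

    import numpy as np
    import damask

    angleaxis = (90.0, 0.0, 0.0, 1.0)                    # CLI order: angle, x, y, z (made-up values)
    ax = np.array(angleaxis[1:4] + (angleaxis[0],))      # reorder to x, y, z, angle for fromAxisAngle
    rotation = damask.Rotation.fromAxisAngle(ax, True, normalise=True)  # True: angle given in degrees
    print(rotation.asEulers(degrees=True))               # inspect the equivalent Euler angles
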
@ -18,8 +18,8 @@ do
|
||||||
< $geom \
|
< $geom \
|
||||||
| \
|
| \
|
||||||
vtk_addRectilinearGridData \
|
vtk_addRectilinearGridData \
|
||||||
|
--vtk ${geom%.*}.vtk \
|
||||||
--data microstructure \
|
--data microstructure \
|
||||||
--inplace \
|
|
||||||
--vtk ${geom%.*}.vtk
|
|
||||||
rm ${geom%.*}.vtk
|
rm ${geom%.*}.vtk
|
||||||
done
|
done
|
||||||
|
|
|
@ -18,7 +18,7 @@ def mostFrequent(arr):
|
||||||
# MAIN
|
# MAIN
|
||||||
#--------------------------------------------------------------------------------------------------
|
#--------------------------------------------------------------------------------------------------
|
||||||
|
|
||||||
parser = OptionParser(option_class=damask.extendableOption, usage='%prog option(s) [geomfile(s)]', description = """
|
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [geomfile(s)]', description = """
|
||||||
Smooth geometry by selecting most frequent microstructure index within given stencil at each location.
|
Smooth geometry by selecting most frequent microstructure index within given stencil at each location.
|
||||||
|
|
||||||
""", version=scriptID)
|
""", version=scriptID)
|
||||||
|
|
|
@ -0,0 +1,189 @@
|
||||||
|
#!/usr/bin/env python3
|
||||||
|
# -*- coding: UTF-8 no BOM -*-
|
||||||
|
|
||||||
|
import os,sys,h5py
|
||||||
|
import numpy as np
|
||||||
|
from optparse import OptionParser
|
||||||
|
import damask
|
||||||
|
|
||||||
|
scriptName = os.path.splitext(os.path.basename(__file__))[0]
|
||||||
|
scriptID = ' '.join([scriptName,damask.version])
|
||||||
|
|
||||||
|
|
||||||
|
#--------------------------------------------------------------------------------------------------
|
||||||
|
# MAIN
|
||||||
|
#--------------------------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
parser = OptionParser(option_class=damask.extendableOption, usage='%prog [dream3dfile[s]]', description = """
|
||||||
|
Convert a DREAM3D file into a geometry file. This can be done from cell data (direct pointwise takeover) or
|
||||||
|
from grain data (individual grains are segmented). Requires orientation data as quaternion.
|
||||||
|
|
||||||
|
""", version = scriptID)
|
||||||
|
|
||||||
|
parser.add_option('-b','--basegroup',
|
||||||
|
dest = 'basegroup', metavar = 'string',
|
||||||
|
help = 'name of the group in "DataContainers" that contains all the data')
|
||||||
|
parser.add_option('-p','--pointwise',
|
||||||
|
dest = 'pointwise', metavar = 'string',
|
||||||
|
help = 'name of the group in "DataContainers/<basegroup>" that contains pointwise data [%default]')
|
||||||
|
parser.add_option('-a','--average',
|
||||||
|
dest = 'average', metavar = 'string',
|
||||||
|
help = 'name of the group in "DataContainers</basegroup>" that contains grain average data. '\
|
||||||
|
+ 'Leave empty for pointwise data')
|
||||||
|
parser.add_option('--phase',
|
||||||
|
dest = 'phase',
|
||||||
|
type = 'string', metavar = 'string',
|
||||||
|
help = 'name of the dataset containing pointwise/average phase IDs [%default]')
|
||||||
|
parser.add_option('--microstructure',
|
||||||
|
dest = 'microstructure',
|
||||||
|
type = 'string', metavar = 'string',
|
||||||
|
help = 'name of the dataset connecting pointwise and average data [%default]')
|
||||||
|
parser.add_option('-q', '--quaternion',
|
||||||
|
dest = 'quaternion',
|
||||||
|
type = 'string', metavar='string',
|
||||||
|
help = 'name of the dataset containing pointwise/average orientation as quaternion [%default]')
|
||||||
|
|
||||||
|
parser.set_defaults(pointwise = 'CellData',
|
||||||
|
quaternion = 'Quats',
|
||||||
|
phase = 'Phases',
|
||||||
|
microstructure = 'FeatureIds',
|
||||||
|
crystallite = 1,
|
||||||
|
)
|
||||||
|
|
||||||
|
(options, filenames) = parser.parse_args()
|
||||||
|
|
||||||
|
if options.basegroup is None:
|
||||||
|
parser.error('No base group selected')
|
||||||
|
|
||||||
|
rootDir ='DataContainers'
|
||||||
|
|
||||||
|
# --- loop over input files -------------------------------------------------------------------------
|
||||||
|
|
||||||
|
if filenames == []: parser.error('no input file specified.')
|
||||||
|
|
||||||
|
for name in filenames:
|
||||||
|
try:
|
||||||
|
table = damask.ASCIItable(outname = os.path.splitext(name)[0]+'.geom',
|
||||||
|
buffered = False, labeled=False,
|
||||||
|
)
|
||||||
|
except: continue
|
||||||
|
damask.util.report(scriptName,name)
|
||||||
|
|
||||||
|
errors = []
|
||||||
|
|
||||||
|
info = {}
|
||||||
|
ori = []
|
||||||
|
inFile = h5py.File(name, 'r')
|
||||||
|
group_geom = os.path.join(rootDir,options.basegroup,'_SIMPL_GEOMETRY')
|
||||||
|
try:
|
||||||
|
info['size'] = inFile[os.path.join(group_geom,'DIMENSIONS')][...] \
|
||||||
|
* inFile[os.path.join(group_geom,'SPACING')][...]
|
||||||
|
info['grid'] = inFile[os.path.join(group_geom,'DIMENSIONS')][...]
|
||||||
|
info['origin'] = inFile[os.path.join(group_geom,'ORIGIN')][...]
|
||||||
|
except:
|
||||||
|
errors.append('Geometry data ({}) not found'.format(group_geom))
|
||||||
|
|
||||||
|
|
||||||
|
group_pointwise = os.path.join(rootDir,options.basegroup,options.pointwise)
|
||||||
|
if options.average is None:
|
||||||
|
label = 'point'
|
||||||
|
N_microstructure = np.product(info['grid'])
|
||||||
|
|
||||||
|
dataset = os.path.join(group_pointwise,options.quaternion)
|
||||||
|
try:
|
||||||
|
quats = np.reshape(inFile[dataset][...],(N_microstructure,4))                                 # quaternions have four components
|
||||||
|
except:
|
||||||
|
errors.append('Pointwise orientation data ({}) not found'.format(dataset))
|
||||||
|
|
||||||
|
texture = [damask.Rotation.fromQuaternion(q,True,P=+1) for q in quats]
|
||||||
|
|
||||||
|
dataset = os.path.join(group_pointwise,options.phase)
|
||||||
|
try:
|
||||||
|
phase = np.reshape(inFile[dataset][...],(N_microstructure))
|
||||||
|
except:
|
||||||
|
errors.append('Pointwise phase data ({}) not found'.format(dataset))
|
||||||
|
|
||||||
|
|
||||||
|
else:
|
||||||
|
label = 'grain'
|
||||||
|
|
||||||
|
dataset = os.path.join(group_pointwise,options.microstructure)
|
||||||
|
try:
|
||||||
|
microstructure = np.reshape(inFile[dataset][...],(np.product(info['grid'])))
|
||||||
|
N_microstructure = np.max(microstructure)
|
||||||
|
except:
|
||||||
|
errors.append('Link between pointwise and grain average data ({}) not found'.format(dataset))
|
||||||
|
|
||||||
|
group_average = os.path.join(rootDir,options.basegroup,options.average)
|
||||||
|
|
||||||
|
dataset = os.path.join(group_average,options.quaternion)
|
||||||
|
try:
|
||||||
|
texture = [damask.Rotation.fromQuaternion(q,True,P=+1) for q in inFile[dataset][...][1:]] # skip first entry (unindexed)
|
||||||
|
except:
|
||||||
|
errors.append('Average orientation data ({}) not found'.format(dataset))
|
||||||
|
|
||||||
|
dataset = os.path.join(group_average,options.phase)
|
||||||
|
try:
|
||||||
|
phase = [i[0] for i in inFile[dataset][...]][1:] # skip first entry (unindexed)
|
||||||
|
except:
|
||||||
|
errors.append('Average phase data ({}) not found'.format(dataset))
|
||||||
|
|
||||||
|
if errors != []:
|
||||||
|
damask.util.croak(errors)
|
||||||
|
table.close(dismiss = True)
|
||||||
|
continue
|
||||||
|
|
||||||
|
|
||||||
|
mat = damask.Material()
|
||||||
|
mat.verbose = False
|
||||||
|
|
||||||
|
# dummy <homogenization>
|
||||||
|
h = damask.config.material.Homogenization()
|
||||||
|
mat.add_section('Homogenization','none',h)
|
||||||
|
info['homogenization'] = 1
|
||||||
|
|
||||||
|
# <crystallite> placeholder (same for all microstructures at the moment)
|
||||||
|
c = damask.config.material.Crystallite()
|
||||||
|
mat.add_section('Crystallite','tbd',c)
|
||||||
|
|
||||||
|
# <phase> placeholders
|
||||||
|
for i in range(np.max(phase)):
|
||||||
|
p = damask.config.material.Phase()
|
||||||
|
mat.add_section('phase','phase{}-tbd'.format(i+1),p)
|
||||||
|
|
||||||
|
# <texture>
|
||||||
|
for i,o in enumerate(texture):
|
||||||
|
t = damask.config.material.Texture()
|
||||||
|
t.add_component('gauss',{'eulers':o.asEulers(degrees=True)})
|
||||||
|
mat.add_section(part='texture', section='{}{}'.format(label,i+1),initialData=t)
|
||||||
|
|
||||||
|
# <microstructure>
|
||||||
|
for i in range(N_microstructure):
|
||||||
|
m = damask.config.material.Microstructure()
|
||||||
|
mat.add_section('microstructure','{}{}'.format(label,i+1),m)
|
||||||
|
mat.add_microstructure('{}{}'.format(label,i+1),
|
||||||
|
{'phase': 'phase{}-tbd'.format(phase[i]),
|
||||||
|
'texture':'{}{}'.format(label,i+1),
|
||||||
|
'crystallite':'tbd',
|
||||||
|
'fraction':1
|
||||||
|
})
|
||||||
|
|
||||||
|
table.info_append([
|
||||||
|
scriptID + ' ' + ' '.join(sys.argv[1:]),
|
||||||
|
"grid\ta {}\tb {}\tc {}".format(*info['grid']),
|
||||||
|
"size\tx {}\ty {}\tz {}".format(*info['size']),
|
||||||
|
"origin\tx {}\ty {}\tz {}".format(*info['origin']),
|
||||||
|
"homogenization\t{}".format(info['homogenization']),
|
||||||
|
str(mat).split('\n')
|
||||||
|
])
|
||||||
|
table.head_write()
|
||||||
|
|
||||||
|
if options.average is None:
|
||||||
|
table.data = [1, 'to', format(N_microstructure)]
|
||||||
|
table.data_write()
|
||||||
|
else:
|
||||||
|
table.data = microstructure.reshape(info['grid'][1]*info['grid'][2],info['grid'][0])
|
||||||
|
table.data_writeArray()
|
||||||
|
|
||||||
|
|
||||||
|
table.close()
|
|
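The DREAM3D import above takes grid, physical size, and origin from the '_SIMPL_GEOMETRY' group of the HDF5 file (datasets DIMENSIONS, SPACING, ORIGIN) and multiplies DIMENSIONS by SPACING to obtain the size. A minimal sketch of that lookup with h5py, assuming a hypothetical file name and base group (both come from the command line in the script itself):

    import os
    import h5py

    # hypothetical file and base group -- in the script these come from the CLI (-b/--basegroup)
    with h5py.File('microstructure.dream3d', 'r') as f:
        geom    = os.path.join('DataContainers', 'ImageDataContainer', '_SIMPL_GEOMETRY')
        grid    = f[os.path.join(geom, 'DIMENSIONS')][...]   # number of cells per direction
        spacing = f[os.path.join(geom, 'SPACING')][...]      # cell edge lengths
        origin  = f[os.path.join(geom, 'ORIGIN')][...]
        size    = grid * spacing                              # physical edge lengths of the box
    print(grid, size, origin)
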
@ -1,8 +1,8 @@
|
||||||
#!/usr/bin/env python3
|
#!/usr/bin/env python3
|
||||||
# -*- coding: UTF-8 no BOM -*-
|
# -*- coding: UTF-8 no BOM -*-
|
||||||
|
|
||||||
import os,sys,math,time
|
import os,sys,math
|
||||||
import scipy.spatial, numpy as np
|
import numpy as np
|
||||||
from optparse import OptionParser
|
from optparse import OptionParser
|
||||||
import damask
|
import damask
|
||||||
|
|
||||||
|
@ -32,34 +32,6 @@ parser.add_option('--microstructure',
|
||||||
dest = 'microstructure',
|
dest = 'microstructure',
|
||||||
type = 'string', metavar = 'string',
|
type = 'string', metavar = 'string',
|
||||||
help = 'microstructure label')
|
help = 'microstructure label')
|
||||||
parser.add_option('-t', '--tolerance',
|
|
||||||
dest = 'tolerance',
|
|
||||||
type = 'float', metavar = 'float',
|
|
||||||
help = 'angular tolerance for orientation squashing [%default]')
|
|
||||||
parser.add_option('-e', '--eulers',
|
|
||||||
dest = 'eulers',
|
|
||||||
type = 'string', metavar = 'string',
|
|
||||||
help = 'Euler angles label')
|
|
||||||
parser.add_option('-d', '--degrees',
|
|
||||||
dest = 'degrees',
|
|
||||||
action = 'store_true',
|
|
||||||
help = 'all angles are in degrees')
|
|
||||||
parser.add_option('-m', '--matrix',
|
|
||||||
dest = 'matrix',
|
|
||||||
type = 'string', metavar = 'string',
|
|
||||||
help = 'orientation matrix label')
|
|
||||||
parser.add_option('-a',
|
|
||||||
dest='a',
|
|
||||||
type = 'string', metavar = 'string',
|
|
||||||
help = 'crystal frame a vector label')
|
|
||||||
parser.add_option('-b',
|
|
||||||
dest='b',
|
|
||||||
type = 'string', metavar = 'string',
|
|
||||||
help = 'crystal frame b vector label')
|
|
||||||
parser.add_option('-c',
|
|
||||||
dest = 'c',
|
|
||||||
type = 'string', metavar='string',
|
|
||||||
help = 'crystal frame c vector label')
|
|
||||||
parser.add_option('-q', '--quaternion',
|
parser.add_option('-q', '--quaternion',
|
||||||
dest = 'quaternion',
|
dest = 'quaternion',
|
||||||
type = 'string', metavar='string',
|
type = 'string', metavar='string',
|
||||||
|
@ -67,11 +39,8 @@ parser.add_option('-q', '--quaternion',
|
||||||
parser.add_option('--axes',
|
parser.add_option('--axes',
|
||||||
dest = 'axes',
|
dest = 'axes',
|
||||||
type = 'string', nargs = 3, metavar = ' '.join(['string']*3),
|
type = 'string', nargs = 3, metavar = ' '.join(['string']*3),
|
||||||
help = 'orientation coordinate frame in terms of position coordinate frame [same]')
|
help = 'orientation coordinate frame in terms of position coordinate frame [+x +y +z]')
|
||||||
parser.add_option('-s', '--symmetry',
|
|
||||||
dest = 'symmetry',
|
|
||||||
action = 'extend', metavar = '<string LIST>',
|
|
||||||
help = 'crystal symmetry of each phase %default {{{}}} '.format(', '.join(damask.Symmetry.lattices[1:])))
|
|
||||||
parser.add_option('--homogenization',
|
parser.add_option('--homogenization',
|
||||||
dest = 'homogenization',
|
dest = 'homogenization',
|
||||||
type = 'int', metavar = 'int',
|
type = 'int', metavar = 'int',
|
||||||
|
@ -80,27 +49,16 @@ parser.add_option('--crystallite',
|
||||||
dest = 'crystallite',
|
dest = 'crystallite',
|
||||||
type = 'int', metavar = 'int',
|
type = 'int', metavar = 'int',
|
||||||
help = 'crystallite index to be used [%default]')
|
help = 'crystallite index to be used [%default]')
|
||||||
parser.add_option('--verbose',
|
|
||||||
dest = 'verbose', action = 'store_true',
|
|
||||||
help = 'output extra info')
|
|
||||||
|
|
||||||
parser.set_defaults(symmetry = [damask.Symmetry.lattices[-1]],
|
|
||||||
tolerance = 0.0,
|
parser.set_defaults(homogenization = 1,
|
||||||
degrees = False,
|
|
||||||
homogenization = 1,
|
|
||||||
crystallite = 1,
|
crystallite = 1,
|
||||||
verbose = False,
|
|
||||||
pos = 'pos',
|
pos = 'pos',
|
||||||
)
|
)
|
||||||
|
|
||||||
(options,filenames) = parser.parse_args()
|
(options,filenames) = parser.parse_args()
|
||||||
|
|
||||||
input = [options.eulers is not None,
|
input = [ options.quaternion is not None,
|
||||||
options.a is not None and \
|
|
||||||
options.b is not None and \
|
|
||||||
options.c is not None,
|
|
||||||
options.matrix is not None,
|
|
||||||
options.quaternion is not None,
|
|
||||||
options.microstructure is not None,
|
options.microstructure is not None,
|
||||||
]
|
]
|
||||||
|
|
||||||
|
@ -109,14 +67,9 @@ if np.sum(input) != 1:
|
||||||
if options.axes is not None and not set(options.axes).issubset(set(['x','+x','-x','y','+y','-y','z','+z','-z'])):
|
if options.axes is not None and not set(options.axes).issubset(set(['x','+x','-x','y','+y','-y','z','+z','-z'])):
|
||||||
parser.error('invalid axes {} {} {}.'.format(*options.axes))
|
parser.error('invalid axes {} {} {}.'.format(*options.axes))
|
||||||
|
|
||||||
(label,dim,inputtype) = [(options.eulers,3,'eulers'),
|
(label,dim,inputtype) = [(options.quaternion,4,'quaternion'),
|
||||||
([options.a,options.b,options.c],[3,3,3],'frame'),
|
|
||||||
(options.matrix,9,'matrix'),
|
|
||||||
(options.quaternion,4,'quaternion'),
|
|
||||||
(options.microstructure,1,'microstructure'),
|
(options.microstructure,1,'microstructure'),
|
||||||
][np.where(input)[0][0]] # select input label that was requested
|
][np.where(input)[0][0]] # select input label that was requested
|
||||||
toRadians = math.pi/180.0 if options.degrees else 1.0 # rescale all angles to radians
|
|
||||||
threshold = np.cos(options.tolerance/2.*toRadians) # cosine of (half of) tolerance angle
|
|
||||||
|
|
||||||
# --- loop over input files -------------------------------------------------------------------------
|
# --- loop over input files -------------------------------------------------------------------------
|
||||||
|
|
||||||
|
@ -146,7 +99,7 @@ for name in filenames:
|
||||||
if options.phase and table.label_dimension(options.phase) != 1:
|
if options.phase and table.label_dimension(options.phase) != 1:
|
||||||
errors.append('phase column "{}" is not scalar.'.format(options.phase))
|
errors.append('phase column "{}" is not scalar.'.format(options.phase))
|
||||||
|
|
||||||
if errors != []:
|
if errors != []:
|
||||||
damask.util.croak(errors)
|
damask.util.croak(errors)
|
||||||
table.close(dismiss = True)
|
table.close(dismiss = True)
|
||||||
continue
|
continue
|
||||||
|
@ -157,10 +110,8 @@ for name in filenames:
|
||||||
|
|
||||||
if coordDim == 2:
|
if coordDim == 2:
|
||||||
table.data = np.insert(table.data,2,np.zeros(len(table.data)),axis=1) # add zero z coordinate for two-dimensional input
|
table.data = np.insert(table.data,2,np.zeros(len(table.data)),axis=1) # add zero z coordinate for two-dimensional input
|
||||||
if options.verbose: damask.util.croak('extending to 3D...')
|
|
||||||
if options.phase is None:
|
if options.phase is None:
|
||||||
table.data = np.column_stack((table.data,np.ones(len(table.data)))) # add single phase if no phase column given
|
table.data = np.column_stack((table.data,np.ones(len(table.data)))) # add single phase if no phase column given
|
||||||
if options.verbose: damask.util.croak('adding dummy phase info...')
|
|
||||||
|
|
||||||
# --------------- figure out size and grid ---------------------------------------------------------
|
# --------------- figure out size and grid ---------------------------------------------------------
|
||||||
|
|
||||||
|
@ -196,17 +147,10 @@ for name in filenames:
|
||||||
grain = table.data[:,colOri]
|
grain = table.data[:,colOri]
|
||||||
nGrains = len(np.unique(grain))
|
nGrains = len(np.unique(grain))
|
||||||
|
|
||||||
else:
|
elif inputtype == 'quaternion':
|
||||||
|
|
||||||
if options.verbose: bg = damask.util.backgroundMessage(); bg.start() # start background messaging
|
|
||||||
|
|
||||||
colPhase = -1 # column of phase data comes last
|
colPhase = -1 # column of phase data comes last
|
||||||
if options.verbose: bg.set_message('sorting positions...')
|
|
||||||
index = np.lexsort((table.data[:,0],table.data[:,1],table.data[:,2])) # index of position when sorting x fast, z slow
|
index = np.lexsort((table.data[:,0],table.data[:,1],table.data[:,2])) # index of position when sorting x fast, z slow
|
||||||
if options.verbose: bg.set_message('building KD tree...')
|
|
||||||
KDTree = scipy.spatial.KDTree((table.data[index,:3]-mincorner) / delta) # build KDTree with dX = dY = dZ = 1 and origin 0,0,0
|
|
||||||
|
|
||||||
statistics = {'global': 0, 'local': 0}
|
|
||||||
grain = -np.ones(N,dtype = 'int32') # initialize empty microstructure
|
grain = -np.ones(N,dtype = 'int32') # initialize empty microstructure
|
||||||
orientations = [] # orientations
|
orientations = [] # orientations
|
||||||
multiplicity = [] # orientation multiplicity (number of group members)
|
multiplicity = [] # orientation multiplicity (number of group members)
|
||||||
|
@ -215,87 +159,26 @@ for name in filenames:
|
||||||
existingGrains = np.arange(nGrains)
|
existingGrains = np.arange(nGrains)
|
||||||
myPos = 0 # position (in list) of current grid point
|
myPos = 0 # position (in list) of current grid point
|
||||||
|
|
||||||
tick = time.clock()
|
|
||||||
if options.verbose: bg.set_message('assigning grain IDs...')
|
|
||||||
|
|
||||||
for z in range(grid[2]):
|
for z in range(grid[2]):
|
||||||
for y in range(grid[1]):
|
for y in range(grid[1]):
|
||||||
for x in range(grid[0]):
|
for x in range(grid[0]):
|
||||||
if (myPos+1)%(N/500.) < 1:
|
|
||||||
time_delta = (time.clock()-tick) * (N - myPos) / myPos
|
|
||||||
if options.verbose: bg.set_message('(%02i:%02i:%02i) processing point %i of %i (grain count %i)...'
|
|
||||||
%(time_delta//3600,time_delta%3600//60,time_delta%60,myPos,N,nGrains))
|
|
||||||
|
|
||||||
myData = table.data[index[myPos]] # read data for current grid point
|
myData = table.data[index[myPos]] # read data for current grid point
|
||||||
myPhase = int(myData[colPhase])
|
myPhase = int(myData[colPhase])
|
||||||
mySym = options.symmetry[min(myPhase,len(options.symmetry))-1] # take last specified option for all with higher index
|
|
||||||
|
o = damask.Rotation(myData[colOri:colOri+4])
|
||||||
if inputtype == 'eulers':
|
|
||||||
o = damask.Orientation(Eulers = myData[colOri:colOri+3]*toRadians,
|
|
||||||
symmetry = mySym)
|
|
||||||
elif inputtype == 'matrix':
|
|
||||||
o = damask.Orientation(matrix = myData[colOri:colOri+9].reshape(3,3),
|
|
||||||
symmetry = mySym)
|
|
||||||
elif inputtype == 'frame':
|
|
||||||
o = damask.Orientation(matrix = np.hstack((myData[colOri[0]:colOri[0]+3],
|
|
||||||
myData[colOri[1]:colOri[1]+3],
|
|
||||||
myData[colOri[2]:colOri[2]+3],
|
|
||||||
)).reshape(3,3),
|
|
||||||
symmetry = mySym)
|
|
||||||
elif inputtype == 'quaternion':
|
|
||||||
o = damask.Orientation(quaternion = myData[colOri:colOri+4],
|
|
||||||
symmetry = mySym)
|
|
||||||
|
|
||||||
cos_disorientations = -np.ones(1,dtype=float) # largest possible disorientation
|
grain[myPos] = nGrains # assign new grain to me ...
|
||||||
closest_grain = -1 # invalid neighbor
|
nGrains += 1 # ... and update counter
|
||||||
|
orientations.append(o) # store new orientation for future comparison
|
||||||
if options.tolerance > 0.0: # only try to compress orientations if asked to
|
multiplicity.append(1) # having single occurrence so far
|
||||||
neighbors = np.array(KDTree.query_ball_point([x,y,z], 3)) # point indices within radius
|
phases.append(myPhase) # store phase info for future reporting
|
||||||
# filter neighbors: skip myself, anyone further ahead (cannot yet have a grain ID), and other phases
|
existingGrains = np.arange(nGrains) # update list of existing grains
|
||||||
neighbors = neighbors[(neighbors < myPos) & \
|
|
||||||
(table.data[index[neighbors],colPhase] == myPhase)]
|
|
||||||
grains = np.unique(grain[neighbors]) # unique grain IDs among valid neighbors
|
|
||||||
|
|
||||||
if len(grains) > 0: # check immediate neighborhood first
|
|
||||||
cos_disorientations = np.array([o.disorientation(orientations[grainID],
|
|
||||||
SST = False)[0].quaternion.q \
|
|
||||||
for grainID in grains]) # store disorientation per grainID
|
|
||||||
closest_grain = np.argmax(cos_disorientations) # grain among grains with closest orientation to myself
|
|
||||||
match = 'local'
|
|
||||||
|
|
||||||
if cos_disorientations[closest_grain] < threshold: # orientation not close enough?
|
|
||||||
grains = existingGrains[np.atleast_1d( (np.array(phases) == myPhase ) & \
|
|
||||||
(np.in1d(existingGrains,grains,invert=True)))] # other already identified grains (of my phase)
|
|
||||||
|
|
||||||
if len(grains) > 0:
|
|
||||||
cos_disorientations = np.array([o.disorientation(orientations[grainID],
|
|
||||||
SST = False)[0].quaternion.q \
|
|
||||||
for grainID in grains]) # store disorientation per grainID
|
|
||||||
closest_grain = np.argmax(cos_disorientations) # grain among grains with closest orientation to myself
|
|
||||||
match = 'global'
|
|
||||||
|
|
||||||
if cos_disorientations[closest_grain] >= threshold: # orientation now close enough?
|
|
||||||
grainID = grains[closest_grain]
|
|
||||||
grain[myPos] = grainID # assign myself to that grain ...
|
|
||||||
orientations[grainID] = damask.Orientation.average([orientations[grainID],o],
|
|
||||||
[multiplicity[grainID],1]) # update average orientation of best matching grain
|
|
||||||
multiplicity[grainID] += 1
|
|
||||||
statistics[match] += 1
|
|
||||||
else:
|
|
||||||
grain[myPos] = nGrains # assign new grain to me ...
|
|
||||||
nGrains += 1 # ... and update counter
|
|
||||||
orientations.append(o) # store new orientation for future comparison
|
|
||||||
multiplicity.append(1) # having single occurrence so far
|
|
||||||
phases.append(myPhase) # store phase info for future reporting
|
|
||||||
existingGrains = np.arange(nGrains) # update list of existing grains
|
|
||||||
|
|
||||||
myPos += 1
|
myPos += 1
|
||||||
|
|
||||||
if options.verbose:
|
|
||||||
bg.stop()
|
|
||||||
bg.join()
|
|
||||||
damask.util.croak("{} seconds total.\n{} local and {} global matches.".\
|
|
||||||
format(time.clock()-tick,statistics['local'],statistics['global']))
|
|
||||||
|
|
||||||
grain += 1 # offset from starting index 0 to 1
|
grain += 1 # offset from starting index 0 to 1
|
||||||
|
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
#!/usr/bin/env python2.7
|
#!/usr/bin/env python3
|
||||||
# -*- coding: UTF-8 no BOM -*-
|
# -*- coding: UTF-8 no BOM -*-
|
||||||
|
|
||||||
import os,sys,math
|
import os,sys,math
|
||||||
|
@ -49,7 +49,7 @@ parser.set_defaults(d = 1,
|
||||||
|
|
||||||
(options, filenames) = parser.parse_args()
|
(options, filenames) = parser.parse_args()
|
||||||
|
|
||||||
options.immutable = map(int,options.immutable)
|
options.immutable = list(map(int,options.immutable))
|
||||||
|
|
||||||
getInterfaceEnergy = lambda A,B: np.float32((A*B != 0)*(A != B)*1.0) # 1.0 if A & B are distinct & nonzero, 0.0 otherwise
|
getInterfaceEnergy = lambda A,B: np.float32((A*B != 0)*(A != B)*1.0) # 1.0 if A & B are distinct & nonzero, 0.0 otherwise
|
||||||
struc = ndimage.generate_binary_structure(3,1) # 3D von Neumann neighborhood
|
struc = ndimage.generate_binary_structure(3,1) # 3D von Neumann neighborhood
|
||||||
|
@ -70,9 +70,9 @@ for name in filenames:
|
||||||
table.head_read()
|
table.head_read()
|
||||||
info,extra_header = table.head_getGeom()
|
info,extra_header = table.head_getGeom()
|
||||||
|
|
||||||
damask.util.croak(['grid a b c: {}'.format(' x '.join(map(str,info['grid']))),
|
damask.util.croak(['grid a b c: {}'.format(' x '.join(list(map(str,info['grid'])))),
|
||||||
'size x y z: {}'.format(' x '.join(map(str,info['size']))),
|
'size x y z: {}'.format(' x '.join(list(map(str,info['size'])))),
|
||||||
'origin x y z: {}'.format(' : '.join(map(str,info['origin']))),
|
'origin x y z: {}'.format(' : '.join(list(map(str,info['origin'])))),
|
||||||
'homogenization: {}'.format(info['homogenization']),
|
'homogenization: {}'.format(info['homogenization']),
|
||||||
'microstructures: {}'.format(info['microstructures']),
|
'microstructures: {}'.format(info['microstructures']),
|
||||||
])
|
])
|
||||||
|
@ -102,9 +102,9 @@ for name in filenames:
|
||||||
gauss = np.exp(-(X*X + Y*Y + Z*Z)/(2.0*options.d*options.d),dtype=np.float32) \
|
gauss = np.exp(-(X*X + Y*Y + Z*Z)/(2.0*options.d*options.d),dtype=np.float32) \
|
||||||
/np.power(2.0*np.pi*options.d*options.d,(3.0 - np.count_nonzero(info['grid'] == 1))/2.,dtype=np.float32)
|
/np.power(2.0*np.pi*options.d*options.d,(3.0 - np.count_nonzero(info['grid'] == 1))/2.,dtype=np.float32)
|
||||||
|
|
||||||
gauss[:,:,:grid[2]/2:-1] = gauss[:,:,1:(grid[2]+1)/2] # trying to cope with uneven (odd) grid size
|
gauss[:,:,:grid[2]//2:-1] = gauss[:,:,1:(grid[2]+1)//2] # trying to cope with uneven (odd) grid size
|
||||||
gauss[:,:grid[1]/2:-1,:] = gauss[:,1:(grid[1]+1)/2,:]
|
gauss[:,:grid[1]//2:-1,:] = gauss[:,1:(grid[1]+1)//2,:]
|
||||||
gauss[:grid[0]/2:-1,:,:] = gauss[1:(grid[0]+1)/2,:,:]
|
gauss[:grid[0]//2:-1,:,:] = gauss[1:(grid[0]+1)//2,:,:]
|
||||||
gauss = np.fft.rfftn(gauss).astype(np.complex64)
|
gauss = np.fft.rfftn(gauss).astype(np.complex64)
|
||||||
|
|
||||||
for smoothIter in range(options.N):
|
for smoothIter in range(options.N):
|
||||||
|
@ -119,9 +119,9 @@ for name in filenames:
|
||||||
microstructure,i,axis=0), j,axis=1), k,axis=2)))
|
microstructure,i,axis=0), j,axis=1), k,axis=2)))
|
||||||
|
|
||||||
# periodically extend interfacial energy array by half a grid size in positive and negative directions
|
# periodically extend interfacial energy array by half a grid size in positive and negative directions
|
||||||
periodic_interfaceEnergy = np.tile(interfaceEnergy,(3,3,3))[grid[0]/2:-grid[0]/2,
|
periodic_interfaceEnergy = np.tile(interfaceEnergy,(3,3,3))[grid[0]//2:-grid[0]//2,
|
||||||
grid[1]/2:-grid[1]/2,
|
grid[1]//2:-grid[1]//2,
|
||||||
grid[2]/2:-grid[2]/2]
|
grid[2]//2:-grid[2]//2]
|
||||||
|
|
||||||
# transform bulk volume (i.e. where interfacial energy remained zero), store index of closest boundary voxel
|
# transform bulk volume (i.e. where interfacial energy remained zero), store index of closest boundary voxel
|
||||||
index = ndimage.morphology.distance_transform_edt(periodic_interfaceEnergy == 0.,
|
index = ndimage.morphology.distance_transform_edt(periodic_interfaceEnergy == 0.,
|
||||||
|
@ -148,15 +148,15 @@ for name in filenames:
|
||||||
ndimage.morphology.binary_dilation(interfaceEnergy > 0.,
|
ndimage.morphology.binary_dilation(interfaceEnergy > 0.,
|
||||||
structure = struc,
|
structure = struc,
|
||||||
iterations = int(round(options.d*2.))-1),# fat boundary
|
iterations = int(round(options.d*2.))-1),# fat boundary
|
||||||
periodic_bulkEnergy[grid[0]/2:-grid[0]/2, # retain filled energy on fat boundary...
|
periodic_bulkEnergy[grid[0]//2:-grid[0]//2, # retain filled energy on fat boundary...
|
||||||
grid[1]/2:-grid[1]/2,
|
grid[1]//2:-grid[1]//2,
|
||||||
grid[2]/2:-grid[2]/2], # ...and zero everywhere else
|
grid[2]//2:-grid[2]//2], # ...and zero everywhere else
|
||||||
0.)).astype(np.complex64) *
|
0.)).astype(np.complex64) *
|
||||||
gauss).astype(np.float32)
|
gauss).astype(np.float32)
|
||||||
|
|
||||||
periodic_diffusedEnergy = np.tile(diffusedEnergy,(3,3,3))[grid[0]/2:-grid[0]/2,
|
periodic_diffusedEnergy = np.tile(diffusedEnergy,(3,3,3))[grid[0]//2:-grid[0]//2,
|
||||||
grid[1]/2:-grid[1]/2,
|
grid[1]//2:-grid[1]//2,
|
||||||
grid[2]/2:-grid[2]/2] # periodically extend the smoothed bulk energy
|
grid[2]//2:-grid[2]//2] # periodically extend the smoothed bulk energy
|
||||||
|
|
||||||
|
|
||||||
# transform voxels close to interface region
|
# transform voxels close to interface region
|
||||||
|
@ -164,15 +164,15 @@ for name in filenames:
|
||||||
return_distances = False,
|
return_distances = False,
|
||||||
return_indices = True) # want index of closest bulk grain
|
return_indices = True) # want index of closest bulk grain
|
||||||
|
|
||||||
periodic_microstructure = np.tile(microstructure,(3,3,3))[grid[0]/2:-grid[0]/2,
|
periodic_microstructure = np.tile(microstructure,(3,3,3))[grid[0]//2:-grid[0]//2,
|
||||||
grid[1]/2:-grid[1]/2,
|
grid[1]//2:-grid[1]//2,
|
||||||
grid[2]/2:-grid[2]/2] # periodically extend the microstructure
|
grid[2]//2:-grid[2]//2] # periodically extend the microstructure
|
||||||
|
|
||||||
microstructure = periodic_microstructure[index[0],
|
microstructure = periodic_microstructure[index[0],
|
||||||
index[1],
|
index[1],
|
||||||
index[2]].reshape(2*grid)[grid[0]/2:-grid[0]/2,
|
index[2]].reshape(2*grid)[grid[0]//2:-grid[0]//2,
|
||||||
grid[1]/2:-grid[1]/2,
|
grid[1]//2:-grid[1]//2,
|
||||||
grid[2]/2:-grid[2]/2] # extent grains into interface region
|
grid[2]//2:-grid[2]//2] # extend grains into interface region
|
||||||
|
|
||||||
# replace immutable microstructures with closest mutable ones
|
# replace immutable microstructures with closest mutable ones
|
||||||
index = ndimage.morphology.distance_transform_edt(np.in1d(microstructure,options.immutable).reshape(grid),
|
index = ndimage.morphology.distance_transform_edt(np.in1d(microstructure,options.immutable).reshape(grid),
|
||||||
|
@ -236,3 +236,4 @@ for name in filenames:
|
||||||
# --- output finalization --------------------------------------------------------------------------
|
# --- output finalization --------------------------------------------------------------------------
|
||||||
|
|
||||||
table.close()
|
table.close()
|
||||||
|
|
|
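The hunks above replace '/' with '//' because these expressions are used as slice bounds and Python 3's '/' always returns a float; they also rely throughout on one periodic-extension idiom: tile the field three times along each axis, then trim half a grid size from both ends, which leaves an array of twice the grid size whose borders wrap around periodically. A small self-contained sketch of that idiom on an arbitrary toy grid:

    import numpy as np

    grid = np.array([4, 4, 4])                          # arbitrary small grid
    a = np.arange(np.prod(grid)).reshape(grid)          # stand-in for the microstructure/energy field

    # '//' keeps the slice bounds integral; tiling 3x and trimming grid//2 from both ends
    # yields shape 2*grid with periodic wrap-around across the original borders
    periodic = np.tile(a, (3, 3, 3))[grid[0]//2:-grid[0]//2,
                                     grid[1]//2:-grid[1]//2,
                                     grid[2]//2:-grid[2]//2]
    assert periodic.shape == tuple(2*grid)
    assert periodic[0, 0, 0] == a[-(grid[0]//2), -(grid[1]//2), -(grid[2]//2)]
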
@ -52,13 +52,14 @@ parser.set_defaults(degrees = False,
|
||||||
if sum(x is not None for x in [options.rotation,options.eulers,options.matrix,options.quaternion]) != 1:
|
if sum(x is not None for x in [options.rotation,options.eulers,options.matrix,options.quaternion]) != 1:
|
||||||
parser.error('not exactly one rotation specified...')
|
parser.error('not exactly one rotation specified...')
|
||||||
|
|
||||||
eulers = np.array(damask.orientation.Orientation(
|
if options.quaternion is not None:
|
||||||
quaternion = np.array(options.quaternion) if options.quaternion else None,
|
eulers = damask.Rotation.fromQuaternion(np.array(options.quaternion)).asEulers(degrees=True)
|
||||||
angleAxis = np.array(options.rotation) if options.rotation else None,
|
if options.rotation is not None:
|
||||||
matrix = np.array(options.matrix) if options.matrix else None,
|
eulers = damask.Rotation.fromAxisAngle(np.array(options.rotation),degrees=True).asEulers(degrees=True)
|
||||||
Eulers = np.array(options.eulers) if options.eulers else None,
|
if options.matrix is not None:
|
||||||
degrees = options.degrees,
|
eulers = damask.Rotation.fromMatrix(np.array(options.matrix)).asEulers(degrees=True)
|
||||||
).asEulers(degrees=True))
|
if options.eulers is not None:
|
||||||
|
eulers = damask.Rotation.fromEulers(np.array(options.eulers),degrees=True).asEulers(degrees=True)
|
||||||
|
|
||||||
# --- loop over input files -------------------------------------------------------------------------
|
# --- loop over input files -------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
#!/usr/bin/env python2.7
|
#!/usr/bin/env python3
|
||||||
# -*- coding: UTF-8 no BOM -*-
|
# -*- coding: UTF-8 no BOM -*-
|
||||||
|
|
||||||
import os,sys
|
import os,sys
|
||||||
|
@ -48,11 +48,11 @@ for name in filenames:
|
||||||
table.head_read()
|
table.head_read()
|
||||||
info,extra_header = table.head_getGeom()
|
info,extra_header = table.head_getGeom()
|
||||||
|
|
||||||
damask.util.croak(['grid a b c: %s'%(' x '.join(map(str,info['grid']))),
|
damask.util.croak(['grid a b c: {}'.format(' x '.join(list(map(str,info['grid'])))),
|
||||||
'size x y z: %s'%(' x '.join(map(str,info['size']))),
|
'size x y z: {}'.format(' x '.join(list(map(str,info['size'])))),
|
||||||
'origin x y z: %s'%(' : '.join(map(str,info['origin']))),
|
'origin x y z: {}'.format(' : '.join(list(map(str,info['origin'])))),
|
||||||
'homogenization: %i'%info['homogenization'],
|
'homogenization: {}'.format(info['homogenization']),
|
||||||
'microstructures: %i'%info['microstructures'],
|
'microstructures: {}'.format(info['microstructures']),
|
||||||
])
|
])
|
||||||
|
|
||||||
errors = []
|
errors = []
|
||||||
|
@ -86,7 +86,7 @@ for name in filenames:
|
||||||
yy = np.tile(np.repeat(y,info['grid'][0] ),info['grid'][2])
|
yy = np.tile(np.repeat(y,info['grid'][0] ),info['grid'][2])
|
||||||
zz = np.repeat(z,info['grid'][0]*info['grid'][1])
|
zz = np.repeat(z,info['grid'][0]*info['grid'][1])
|
||||||
|
|
||||||
table.data = np.squeeze(np.dstack((xx,yy,zz,microstructure)))
|
table.data = np.squeeze(np.dstack((xx,yy,zz,microstructure)),axis=0)
|
||||||
table.data_writeArray()
|
table.data_writeArray()
|
||||||
|
|
||||||
# ------------------------------------------ finalize output ---------------------------------------
|
# ------------------------------------------ finalize output ---------------------------------------
|
||||||
|
|
|
@ -2,9 +2,9 @@
|
||||||
|
|
||||||
for seeds in "$@"
|
for seeds in "$@"
|
||||||
do
|
do
|
||||||
vtk_pointcloud $seeds
|
vtk_pointCloud $seeds
|
||||||
|
|
||||||
vtk_addPointcloudData $seeds \
|
vtk_addPointCloudData $seeds \
|
||||||
--data microstructure,weight \
|
--data microstructure,weight \
|
||||||
--inplace \
|
--inplace \
|
||||||
--vtk ${seeds%.*}.vtp \
|
--vtk ${seeds%.*}.vtp \
|
||||||
|
|
|
@ -1,10 +1,11 @@
|
||||||
#!/usr/bin/env python2.7
|
#!/usr/bin/env python3
|
||||||
# -*- coding: UTF-8 no BOM -*-
|
# -*- coding: UTF-8 no BOM -*-
|
||||||
|
|
||||||
import threading,time,os,sys,random
|
import threading,time,os,sys,random
|
||||||
import numpy as np
|
import numpy as np
|
||||||
from optparse import OptionParser
|
from optparse import OptionParser
|
||||||
from cStringIO import StringIO
|
from io import StringIO
|
||||||
|
import binascii
|
||||||
import damask
|
import damask
|
||||||
|
|
||||||
scriptName = os.path.splitext(os.path.basename(__file__))[0]
|
scriptName = os.path.splitext(os.path.basename(__file__))[0]
|
||||||
|
@ -96,7 +97,7 @@ class myThread (threading.Thread):
|
||||||
perturbedGeomVFile = StringIO()
|
perturbedGeomVFile = StringIO()
|
||||||
perturbedSeedsVFile.reset()
|
perturbedSeedsVFile.reset()
|
||||||
perturbedGeomVFile.write(damask.util.execute('geom_fromVoronoiTessellation '+
|
perturbedGeomVFile.write(damask.util.execute('geom_fromVoronoiTessellation '+
|
||||||
' -g '+' '.join(map(str, options.grid)),streamIn=perturbedSeedsVFile)[0])
|
' -g '+' '.join(list(map(str, options.grid))),streamIn=perturbedSeedsVFile)[0])
|
||||||
perturbedGeomVFile.reset()
|
perturbedGeomVFile.reset()
|
||||||
|
|
||||||
#--- evaluate current seeds file ----------------------------------------------------------------------
|
#--- evaluate current seeds file ----------------------------------------------------------------------
|
||||||
|
@ -214,7 +215,7 @@ options = parser.parse_args()[0]
|
||||||
damask.util.report(scriptName,options.seedFile)
|
damask.util.report(scriptName,options.seedFile)
|
||||||
|
|
||||||
if options.randomSeed is None:
|
if options.randomSeed is None:
|
||||||
options.randomSeed = int(os.urandom(4).encode('hex'), 16)
|
options.randomSeed = int(binascii.hexlify(os.urandom(4)),16)
|
||||||
damask.util.croak(options.randomSeed)
|
damask.util.croak(options.randomSeed)
|
||||||
delta = (options.scale/options.grid[0],options.scale/options.grid[1],options.scale/options.grid[2])
|
delta = (options.scale/options.grid[0],options.scale/options.grid[1],options.scale/options.grid[2])
|
||||||
baseFile=os.path.splitext(os.path.basename(options.seedFile))[0]
|
baseFile=os.path.splitext(os.path.basename(options.seedFile))[0]
|
||||||
|
@ -240,17 +241,17 @@ if os.path.isfile(os.path.splitext(options.seedFile)[0]+'.seeds'):
|
||||||
for line in initialSeedFile: bestSeedsVFile.write(line)
|
for line in initialSeedFile: bestSeedsVFile.write(line)
|
||||||
else:
|
else:
|
||||||
bestSeedsVFile.write(damask.util.execute('seeds_fromRandom'+\
|
bestSeedsVFile.write(damask.util.execute('seeds_fromRandom'+\
|
||||||
' -g '+' '.join(map(str, options.grid))+\
|
' -g '+' '.join(list(map(str, options.grid)))+\
|
||||||
' -r {:d}'.format(options.randomSeed)+\
|
' -r {:d}'.format(options.randomSeed)+\
|
||||||
' -N '+str(nMicrostructures))[0])
|
' -N '+str(nMicrostructures))[0])
|
||||||
bestSeedsUpdate = time.time()
|
bestSeedsUpdate = time.time()
|
||||||
|
|
||||||
# ----------- tessellate initial seed file to get and evaluate geom file
|
# ----------- tessellate initial seed file to get and evaluate geom file
|
||||||
bestSeedsVFile.reset()
|
bestSeedsVFile.seek(0)
|
||||||
initialGeomVFile = StringIO()
|
initialGeomVFile = StringIO()
|
||||||
initialGeomVFile.write(damask.util.execute('geom_fromVoronoiTessellation '+
|
initialGeomVFile.write(damask.util.execute('geom_fromVoronoiTessellation '+
|
||||||
' -g '+' '.join(map(str, options.grid)),bestSeedsVFile)[0])
|
' -g '+' '.join(list(map(str, options.grid))),bestSeedsVFile)[0])
|
||||||
initialGeomVFile.reset()
|
initialGeomVFile.seek(0)
|
||||||
initialGeomTable = damask.ASCIItable(initialGeomVFile,None,labeled=False,readonly=True)
|
initialGeomTable = damask.ASCIItable(initialGeomVFile,None,labeled=False,readonly=True)
|
||||||
initialGeomTable.head_read()
|
initialGeomTable.head_read()
|
||||||
info,devNull = initialGeomTable.head_getGeom()
|
info,devNull = initialGeomTable.head_getGeom()
|
||||||
|
|
|
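The seed initialisation above now builds a 32-bit random seed from four bytes of os.urandom via binascii.hexlify, since bytes objects in Python 3 no longer support .encode('hex'). A minimal sketch of just that step:

    import binascii
    import os

    randomSeed = int(binascii.hexlify(os.urandom(4)), 16)   # unsigned 32-bit integer from 4 random bytes
    assert 0 <= randomSeed < 2**32
    # an equivalent without binascii would be int.from_bytes(os.urandom(4), 'big')
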
@ -28,7 +28,7 @@ def kdtree_search(cloud, queryPoints):
|
||||||
# MAIN
|
# MAIN
|
||||||
# --------------------------------------------------------------------
|
# --------------------------------------------------------------------
|
||||||
|
|
||||||
parser = OptionParser(option_class=damask.extendableOption, usage='%prog [options]', description = """
|
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options', description = """
|
||||||
Distribute given number of points randomly within (a fraction of) the three-dimensional cube [0.0,0.0,0.0]--[1.0,1.0,1.0].
|
Distribute given number of points randomly within (a fraction of) the three-dimensional cube [0.0,0.0,0.0]--[1.0,1.0,1.0].
|
||||||
Reports positions with random crystal orientations in seeds file format to STDOUT.
|
Reports positions with random crystal orientations in seeds file format to STDOUT.
|
||||||
|
|
||||||
|
@ -90,11 +90,7 @@ group.add_option( '-s',
|
||||||
'--selective',
|
'--selective',
|
||||||
action = 'store_true',
|
action = 'store_true',
|
||||||
dest = 'selective',
|
dest = 'selective',
|
||||||
help = 'selective picking of seed points from random seed points [%default]')
|
help = 'selective picking of seed points from random seed points')
|
||||||
group.add_option( '--force',
|
|
||||||
action = 'store_true',
|
|
||||||
dest = 'force',
|
|
||||||
help = 'try selective picking despite large seed point number [%default]')
|
|
||||||
group.add_option( '--distance',
|
group.add_option( '--distance',
|
||||||
dest = 'distance',
|
dest = 'distance',
|
||||||
type = 'float', metavar = 'float',
|
type = 'float', metavar = 'float',
|
||||||
|
@ -115,7 +111,6 @@ parser.set_defaults(randomSeed = None,
|
||||||
sigma = 0.05,
|
sigma = 0.05,
|
||||||
microstructure = 1,
|
microstructure = 1,
|
||||||
selective = False,
|
selective = False,
|
||||||
force = False,
|
|
||||||
distance = 0.2,
|
distance = 0.2,
|
||||||
numCandidates = 10,
|
numCandidates = 10,
|
||||||
format = None,
|
format = None,
|
||||||
|
@ -148,10 +143,11 @@ for name in filenames:
|
||||||
errors = []
|
errors = []
|
||||||
if gridSize == 0:
|
if gridSize == 0:
|
||||||
errors.append('zero grid dimension for {}.'.format(', '.join([['a','b','c'][x] for x in np.where(options.grid == 0)[0]])))
|
errors.append('zero grid dimension for {}.'.format(', '.join([['a','b','c'][x] for x in np.where(options.grid == 0)[0]])))
|
||||||
if options.N > gridSize/10.: errors.append('seed count exceeds 0.1 of grid points.')
|
if options.N > gridSize/10.:
|
||||||
|
remarks.append('seed count exceeds 10% of grid points.')
|
||||||
if options.selective and 4./3.*math.pi*(options.distance/2.)**3*options.N > 0.5:
|
if options.selective and 4./3.*math.pi*(options.distance/2.)**3*options.N > 0.5:
|
||||||
(remarks if options.force else errors).append('maximum recommended seed point count for given distance is {}.{}'.
|
remarks.append('maximum recommended seed point count for given distance is {}.'.
|
||||||
format(int(3./8./math.pi/(options.distance/2.)**3),'..'*options.force))
|
format(int(3./8./math.pi/(options.distance/2.)**3)))
|
||||||
|
|
||||||
if remarks != []: damask.util.croak(remarks)
|
if remarks != []: damask.util.croak(remarks)
|
||||||
if errors != []:
|
if errors != []:
|
||||||
|
|
|
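The selective-seeding check above is a packing estimate: N seed points, each claiming a sphere of diameter 'distance' inside the unit cube, are allowed to fill at most half of it, so 4/3*pi*(distance/2)^3*N must stay below 0.5, and the recommended maximum reported by the script is that bound solved for N, i.e. 3/(8*pi*(distance/2)^3). A short numerical check of this relation (the distance value is arbitrary):

    import math

    distance = 0.2                                             # arbitrary example value
    N_max = 3. / 8. / math.pi / (distance / 2.)**3             # limit as reported by the script
    volume_fraction = 4. / 3. * math.pi * (distance / 2.)**3 * N_max
    assert abs(volume_fraction - 0.5) < 1e-12                  # N_max corresponds to half the unit cube
    print(int(N_max))                                          # about 119 seeds for distance 0.2
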
@ -0,0 +1,125 @@
|
||||||
|
# -*- coding: UTF-8 no BOM -*-
|
||||||
|
|
||||||
|
####################################################################################################
|
||||||
|
# Code below is available according to the following conditions on https://github.com/MarDiehl/3Drotations
|
||||||
|
####################################################################################################
|
||||||
|
# Copyright (c) 2017-2019, Martin Diehl/Max-Planck-Institut für Eisenforschung GmbH
|
||||||
|
# Copyright (c) 2013-2014, Marc De Graef/Carnegie Mellon University
|
||||||
|
# All rights reserved.
|
||||||
|
#
|
||||||
|
# Redistribution and use in source and binary forms, with or without modification, are
|
||||||
|
# permitted provided that the following conditions are met:
|
||||||
|
#
|
||||||
|
# - Redistributions of source code must retain the above copyright notice, this list
|
||||||
|
# of conditions and the following disclaimer.
|
||||||
|
# - Redistributions in binary form must reproduce the above copyright notice, this
|
||||||
|
# list of conditions and the following disclaimer in the documentation and/or
|
||||||
|
# other materials provided with the distribution.
|
||||||
|
# - Neither the names of Marc De Graef, Carnegie Mellon University nor the names
|
||||||
|
# of its contributors may be used to endorse or promote products derived from
|
||||||
|
# this software without specific prior written permission.
|
||||||
|
#
|
||||||
|
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||||
|
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||||
|
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||||
|
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
|
||||||
|
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||||
|
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||||
|
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||||
|
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||||
|
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
|
||||||
|
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
####################################################################################################
|
||||||
|
|
||||||
|
import numpy as np
|
||||||
|
|
||||||
|
sc = np.pi**(1./6.)/6.**(1./6.)
|
||||||
|
beta = np.pi**(5./6.)/6.**(1./6.)/2.
|
||||||
|
R1 = (3.*np.pi/4.)**(1./3.)
|
||||||
|
|
||||||
|
def CubeToBall(cube):
|
||||||
|
|
||||||
|
if np.abs(np.max(cube))>np.pi**(2./3.) * 0.5:
|
||||||
|
raise ValueError
|
||||||
|
|
||||||
|
# transform to the sphere grid via the curved square, and intercept the zero point
|
||||||
|
if np.allclose(cube,0.0,rtol=0.0,atol=1.0e-300):
|
||||||
|
ball = np.zeros(3)
|
||||||
|
else:
|
||||||
|
# get pyramid and scale by grid parameter ratio
|
||||||
|
p = GetPyramidOrder(cube)
|
||||||
|
XYZ = cube[p] * sc
|
||||||
|
|
||||||
|
# intercept all the points along the z-axis
|
||||||
|
if np.allclose(XYZ[0:2],0.0,rtol=0.0,atol=1.0e-300):
|
||||||
|
ball = np.array([0.0, 0.0, np.sqrt(6.0/np.pi) * XYZ[2]])
|
||||||
|
else:
|
||||||
|
order = [1,0] if np.abs(XYZ[1]) <= np.abs(XYZ[0]) else [0,1]
|
||||||
|
q = np.pi/12.0 * XYZ[order[0]]/XYZ[order[1]]
|
||||||
|
c = np.cos(q)
|
||||||
|
s = np.sin(q)
|
||||||
|
q = R1*2.0**0.25/beta * XYZ[order[1]] / np.sqrt(np.sqrt(2.0)-c)
|
||||||
|
T = np.array([ (np.sqrt(2.0)*c - 1.0), np.sqrt(2.0) * s]) * q
|
||||||
|
|
||||||
|
# transform to sphere grid (inverse Lambert)
|
||||||
|
# note that there is no need to worry about dividing by zero, since XYZ[2] can not become zero
|
||||||
|
c = np.sum(T**2)
|
||||||
|
s = c * np.pi/24.0 /XYZ[2]**2
|
||||||
|
c = c * np.sqrt(np.pi/24.0)/XYZ[2]
|
||||||
|
q = np.sqrt( 1.0 - s )
|
||||||
|
ball = np.array([ T[order[1]] * q, T[order[0]] * q, np.sqrt(6.0/np.pi) * XYZ[2] - c ])
|
||||||
|
|
||||||
|
# reverse the coordinates back to the regular order according to the original pyramid number
|
||||||
|
ball = ball[p]
|
||||||
|
|
||||||
|
return ball
|
||||||
|
|
||||||
|
|
||||||
|
def BallToCube(ball):
|
||||||
|
|
||||||
|
rs = np.linalg.norm(ball)
|
||||||
|
if rs > R1:
|
||||||
|
raise ValueError
|
||||||
|
|
||||||
|
if np.allclose(ball,0.0,rtol=0.0,atol=1.0e-300):
|
||||||
|
cube = np.zeros(3)
|
||||||
|
else:
|
||||||
|
p = GetPyramidOrder(ball)
|
||||||
|
xyz3 = ball[p]
|
||||||
|
|
||||||
|
# inverse M_3
|
||||||
|
xyz2 = xyz3[0:2] * np.sqrt( 2.0*rs/(rs+np.abs(xyz3[2])) )
|
||||||
|
|
||||||
|
# inverse M_2
|
||||||
|
qxy = np.sum(xyz2**2)
|
||||||
|
|
||||||
|
if np.isclose(qxy,0.0,rtol=0.0,atol=1.0e-300):
|
||||||
|
Tinv = np.zeros(2)
|
||||||
|
else:
|
||||||
|
q2 = qxy + np.max(np.abs(xyz2))**2
|
||||||
|
sq2 = np.sqrt(q2)
|
||||||
|
q = (beta/np.sqrt(2.0)/R1) * np.sqrt(q2*qxy/(q2-np.max(np.abs(xyz2))*sq2))
|
||||||
|
tt = np.clip((np.min(np.abs(xyz2))**2+np.max(np.abs(xyz2))*sq2)/np.sqrt(2.0)/qxy,-1.0,1.0)
|
||||||
|
Tinv = np.array([1.0,np.arccos(tt)/np.pi*12.0]) if np.abs(xyz2[1]) <= np.abs(xyz2[0]) else \
|
||||||
|
np.array([np.arccos(tt)/np.pi*12.0,1.0])
|
||||||
|
Tinv = q * np.where(xyz2<0.0,-Tinv,Tinv)
|
||||||
|
|
||||||
|
# inverse M_1
|
||||||
|
cube = np.array([ Tinv[0], Tinv[1], (-1.0 if xyz3[2] < 0.0 else 1.0) * rs / np.sqrt(6.0/np.pi) ]) /sc
|
||||||
|
|
||||||
|
# reverse the coordinates back to the regular order according to the original pyramid number
|
||||||
|
cube = cube[p]
|
||||||
|
|
||||||
|
return cube
|
||||||
|
|
||||||
|
def GetPyramidOrder(xyz):
|
||||||
|
|
||||||
|
if (abs(xyz[0])<= xyz[2]) and (abs(xyz[1])<= xyz[2]) or \
|
||||||
|
(abs(xyz[0])<=-xyz[2]) and (abs(xyz[1])<=-xyz[2]):
|
||||||
|
return [0,1,2]
|
||||||
|
elif (abs(xyz[2])<= xyz[0]) and (abs(xyz[1])<= xyz[0]) or \
|
||||||
|
(abs(xyz[2])<=-xyz[0]) and (abs(xyz[1])<=-xyz[0]):
|
||||||
|
return [1,2,0]
|
||||||
|
elif (abs(xyz[0])<= xyz[1]) and (abs(xyz[2])<= xyz[1]) or \
|
||||||
|
(abs(xyz[0])<=-xyz[1]) and (abs(xyz[2])<=-xyz[1]):
|
||||||
|
return [2,0,1]
|
|
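Editorial note: a minimal sketch of how the two mappings above are meant to invert each other. It assumes the functions CubeToBall and BallToCube defined above are importable; the module name lambert used in the import is hypothetical (the file name is not shown in this hunk), and the test point is arbitrary.

import numpy as np
from lambert import CubeToBall, BallToCube   # hypothetical module name for the file above

# any point well inside the cube of half-edge pi**(2/3)/2 is a valid cubochoric coordinate
cube = np.array([0.2, -0.3, 0.1]) * np.pi**(2./3.)

ball = CubeToBall(cube)    # cubochoric --> homochoric
back = BallToCube(ball)    # homochoric --> cubochoric

assert np.linalg.norm(ball) <= (3.*np.pi/4.)**(1./3.)   # image stays inside the homochoric ball of radius R1
assert np.allclose(cube, back)                          # the two maps are mutually inverse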
@@ -13,7 +13,7 @@ from .asciitable import ASCIItable # noqa
 from .config import Material # noqa
 from .colormaps import Colormap, Color # noqa
-from .orientation import Quaternion, Symmetry, Orientation # noqa
+from .orientation import Symmetry, Lattice, Rotation, Orientation # noqa

 #from .block import Block # only one class
 from .result import Result # noqa
@@ -77,18 +77,6 @@ class Texture(Section):
                        )
             )
-
-    if multiKey == 'fiber':
-      self.add_multiKey(multiKey,'alpha1 %g\talpha2 %g\tbeta1 %g\tbeta2 %g\tscatter %g\tfraction %g'%(
-                                  properties['eulers'][0],
-                                  properties['eulers'][1],
-                                  properties['eulers'][2],
-                                  properties['eulers'][3],
-                                  scatter,
-                                  fraction,
-                                 )
-                       )
-

 class Material():
   """Reads, manipulates and writes material.config files"""

@@ -97,10 +85,10 @@ class Material():
     """Generates ordered list of parts"""
     self.parts = [
       'homogenization',
-      'microstructure',
       'crystallite',
       'phase',
       'texture',
+      'microstructure',
     ]
     self.data = {\
       'homogenization': {'__order__': []},

@@ -117,15 +105,12 @@ class Material():
     for part in self.parts:
       if self.verbose: print('processing <{}>'.format(part))
       me += ['',
-             '#-----------------------------#',
+             '#'*100,
              '<{}>'.format(part),
-             '#-----------------------------#',
+             '#'*100,
             ]
       for section in self.data[part]['__order__']:
-        me += ['',
-               '[{}] {}'.format(section,'#'*max(0,27-len(section))),
-               '',
-              ]
+        me += ['[{}] {}'.format(section,'#'+'-'*max(0,96-len(section)))]
         for key in self.data[part][section]['__order__']:
           if key.startswith('(') and key.endswith(')'): # multiple (key)
             me += ['{}\t{}'.format(key,' '.join(values)) for values in self.data[part][section][key]]
File diff suppressed because it is too large
@@ -2,7 +2,7 @@

 from .solver import Solver
 import damask
-import subprocess,re
+import subprocess

 class Abaqus(Solver):

@@ -15,14 +15,13 @@ class Abaqus(Solver):

   def return_run_command(self,model):
     env=damask.Environment()
-    shortVersion = re.sub('[\.,-]', '',self.version)
     try:
-      cmd='abq'+shortVersion
-      subprocess.check_output(['abq'+shortVersion,'information=release'])
+      cmd='abq'+self.version
+      subprocess.check_output([cmd,'information=release'])
     except OSError: # link to abqXXX not existing
       cmd='abaqus'
       process = subprocess.Popen(['abaqus','information=release'],stdout = subprocess.PIPE,stderr = subprocess.PIPE)
-      detectedVersion = process.stdout.readlines()[1].split()[1]
+      detectedVersion = process.stdout.readlines()[1].split()[1].decode('utf-8')
       if self.version != detectedVersion:
-        raise Exception('found Abaqus version %s, but requested %s'%(detectedVersion,self.version))
-    return '%s -job %s -user %s/src/DAMASK_abaqus interactive'%(cmd,model,env.rootDir())
+        raise Exception('found Abaqus version {}, but requested {}'.format(detectedVersion,self.version))
+    return '{} -job {} -user {}/src/DAMASK_abaqus interactive'.format(cmd,model,env.rootDir())
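Editorial note: a hedged sketch of how the command string assembled above might be used from the Python side. The diff only shows return_run_command; the constructor argument version='2019' and launching the string via subprocess are assumptions, not part of this change.

import subprocess
import damask

solver = damask.solver.Abaqus(version='2019')   # assumed constructor signature
cmd    = solver.return_run_command('myModel')   # e.g. 'abq2019 -job myModel -user <root>/src/DAMASK_abaqus interactive'
subprocess.run(cmd.split(), check=True)         # one possible way to execute the assembled command line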
@@ -7,6 +7,7 @@ endif()

 # The dependency detection in CMake is not functioning for Fortran,
 # hence we declare the dependencies from top to bottom in the following

 add_library(C_ROUTINES OBJECT "C_routines.c")
 set(OBJECTFILES $<TARGET_OBJECTS:C_ROUTINES>)

@@ -17,6 +18,10 @@ list(APPEND OBJECTFILES $<TARGET_OBJECTS:SYSTEM_ROUTINES>)
 add_library(PREC OBJECT "prec.f90")
 list(APPEND OBJECTFILES $<TARGET_OBJECTS:PREC>)

+add_library(ELEMENT OBJECT "element.f90")
+add_dependencies(ELEMENT PREC)
+list(APPEND OBJECTFILES $<TARGET_OBJECTS:ELEMENT>)
+
 add_library(QUIT OBJECT "quit.f90")
 add_dependencies(QUIT PREC)
 list(APPEND OBJECTFILES $<TARGET_OBJECTS:QUIT>)

@@ -34,7 +39,7 @@ add_dependencies(NUMERICS IO)
 list(APPEND OBJECTFILES $<TARGET_OBJECTS:NUMERICS>)

 add_library(DEBUG OBJECT "debug.f90")
-add_dependencies(DEBUG NUMERICS)
+add_dependencies(DEBUG IO)
 list(APPEND OBJECTFILES $<TARGET_OBJECTS:DEBUG>)

 add_library(DAMASK_CONFIG OBJECT "config.f90")

@@ -42,7 +47,7 @@ add_dependencies(DAMASK_CONFIG DEBUG)
 list(APPEND OBJECTFILES $<TARGET_OBJECTS:DAMASK_CONFIG>)

 add_library(HDF5_UTILITIES OBJECT "HDF5_utilities.f90")
-add_dependencies(HDF5_UTILITIES DAMASK_CONFIG)
+add_dependencies(HDF5_UTILITIES DAMASK_CONFIG NUMERICS)
 list(APPEND OBJECTFILES $<TARGET_OBJECTS:HDF5_UTILITIES>)

 add_library(RESULTS OBJECT "results.f90")

@@ -50,34 +55,50 @@ add_dependencies(RESULTS HDF5_UTILITIES)
 list(APPEND OBJECTFILES $<TARGET_OBJECTS:RESULTS>)

 add_library(FEsolving OBJECT "FEsolving.f90")
-add_dependencies(FEsolving RESULTS)
+add_dependencies(FEsolving DEBUG)
 list(APPEND OBJECTFILES $<TARGET_OBJECTS:FEsolving>)

-add_library(DAMASK_MATH OBJECT "math.f90")
-add_dependencies(DAMASK_MATH FEsolving)
-list(APPEND OBJECTFILES $<TARGET_OBJECTS:DAMASK_MATH>)
+add_library(MATH OBJECT "math.f90")
+add_dependencies(MATH NUMERICS)
+list(APPEND OBJECTFILES $<TARGET_OBJECTS:MATH>)
+
+add_library(QUATERNIONS OBJECT "quaternions.f90")
+add_dependencies(QUATERNIONS MATH)
+list(APPEND OBJECTFILES $<TARGET_OBJECTS:QUATERNIONS>)
+
+add_library(LAMBERT OBJECT "Lambert.f90")
+add_dependencies(LAMBERT MATH)
+list(APPEND OBJECTFILES $<TARGET_OBJECTS:LAMBERT>)
+
+add_library(ROTATIONS OBJECT "rotations.f90")
+add_dependencies(ROTATIONS LAMBERT QUATERNIONS)
+list(APPEND OBJECTFILES $<TARGET_OBJECTS:ROTATIONS>)
+
+add_library(MESH_BASE OBJECT "mesh_base.f90")
+add_dependencies(MESH_BASE ELEMENT)
+list(APPEND OBJECTFILES $<TARGET_OBJECTS:MESH_BASE>)

 # SPECTRAL solver and FEM solver use different mesh files
 if (PROJECT_NAME STREQUAL "DAMASK_spectral")
-  add_library(MESH OBJECT "mesh.f90")
-  add_dependencies(MESH DAMASK_MATH)
+  add_library(MESH OBJECT "mesh_grid.f90")
+  add_dependencies(MESH MESH_BASE MATH FEsolving)
   list(APPEND OBJECTFILES $<TARGET_OBJECTS:MESH>)
 elseif (PROJECT_NAME STREQUAL "DAMASK_FEM")
   add_library(FEZoo OBJECT "FEM_zoo.f90")
-  add_dependencies(FEZoo DAMASK_MATH)
+  add_dependencies(FEZoo IO)
   list(APPEND OBJECTFILES $<TARGET_OBJECTS:FEZoo>)
-  add_library(MESH OBJECT "meshFEM.f90")
-  add_dependencies(MESH FEZoo)
+  add_library(MESH OBJECT "mesh_FEM.f90")
+  add_dependencies(MESH FEZoo MESH_BASE MATH FEsolving)
   list(APPEND OBJECTFILES $<TARGET_OBJECTS:MESH>)
 endif()

 add_library(MATERIAL OBJECT "material.f90")
-add_dependencies(MATERIAL MESH DAMASK_CONFIG)
+add_dependencies(MATERIAL MESH DAMASK_CONFIG ROTATIONS)
 list(APPEND OBJECTFILES $<TARGET_OBJECTS:MATERIAL>)

-add_library(DAMASK_HELPERS OBJECT "lattice.f90")
-add_dependencies(DAMASK_HELPERS MATERIAL)
-list(APPEND OBJECTFILES $<TARGET_OBJECTS:DAMASK_HELPERS>)
+add_library(LATTICE OBJECT "lattice.f90")
+add_dependencies(LATTICE MATERIAL)
+list(APPEND OBJECTFILES $<TARGET_OBJECTS:LATTICE>)

 # For each modular section
 add_library (PLASTIC OBJECT

@@ -88,14 +109,14 @@ add_library (PLASTIC OBJECT
   "plastic_kinematichardening.f90"
   "plastic_nonlocal.f90"
   "plastic_none.f90")
-add_dependencies(PLASTIC DAMASK_HELPERS)
+add_dependencies(PLASTIC LATTICE RESULTS)
 list(APPEND OBJECTFILES $<TARGET_OBJECTS:PLASTIC>)

 add_library (KINEMATICS OBJECT
   "kinematics_cleavage_opening.f90"
   "kinematics_slipplane_opening.f90"
   "kinematics_thermal_expansion.f90")
-add_dependencies(KINEMATICS DAMASK_HELPERS)
+add_dependencies(KINEMATICS LATTICE RESULTS)
 list(APPEND OBJECTFILES $<TARGET_OBJECTS:KINEMATICS>)

 add_library (SOURCE OBJECT

@@ -105,7 +126,7 @@ add_library (SOURCE OBJECT
   "source_damage_isoDuctile.f90"
   "source_damage_anisoBrittle.f90"
   "source_damage_anisoDuctile.f90")
-add_dependencies(SOURCE DAMASK_HELPERS)
+add_dependencies(SOURCE LATTICE RESULTS)
 list(APPEND OBJECTFILES $<TARGET_OBJECTS:SOURCE>)

 add_library(CONSTITUTIVE OBJECT "constitutive.f90")
@@ -140,8 +140,7 @@ subroutine CPFEM_init
    restartRead, &
    modelName
  use mesh, only: &
-   mesh_NcpElems, &
-   mesh_maxNips
+   theMesh
  use material, only: &
    material_phase, &
    homogState, &

@@ -168,10 +167,9 @@ subroutine CPFEM_init
   flush(6)
 endif mainProcess

-! initialize stress and jacobian to zero
-allocate(CPFEM_cs(6,mesh_maxNips,mesh_NcpElems)) ; CPFEM_cs = 0.0_pReal
-allocate(CPFEM_dcsdE(6,6,mesh_maxNips,mesh_NcpElems)) ; CPFEM_dcsdE = 0.0_pReal
-allocate(CPFEM_dcsdE_knownGood(6,6,mesh_maxNips,mesh_NcpElems)) ; CPFEM_dcsdE_knownGood = 0.0_pReal
+allocate(CPFEM_cs(               6,theMesh%elem%nIPs,theMesh%Nelems), source= 0.0_pReal)
+allocate(CPFEM_dcsdE(          6,6,theMesh%elem%nIPs,theMesh%Nelems), source= 0.0_pReal)
+allocate(CPFEM_dcsdE_knownGood(6,6,theMesh%elem%nIPs,theMesh%Nelems), source= 0.0_pReal)

 ! *** restore the last converged values of each essential variable from the binary file
 if (restartRead) then

@@ -289,8 +287,7 @@ subroutine CPFEM_general(mode, parallelExecution, ffn, ffn1, temperature_inp, dt
    math_6toSym33
  use mesh, only: &
    mesh_FEasCP, &
-   mesh_NcpElems, &
-   mesh_maxNips, &
+   theMesh, &
    mesh_element
  use material, only: &
    microstructure_elemhomo, &

@@ -401,7 +398,7 @@ subroutine CPFEM_general(mode, parallelExecution, ffn, ffn1, temperature_inp, dt
   enddo; enddo
   if (iand(debug_level(debug_CPFEM), debug_levelBasic) /= 0_pInt) then
     write(6,'(a)') '<< CPFEM >> aging states'
-    if (debug_e <= mesh_NcpElems .and. debug_i <= mesh_maxNips) then
+    if (debug_e <= theMesh%Nelems .and. debug_i <= theMesh%elem%nIPs) then
       write(6,'(a,1x,i8,1x,i2,1x,i4,/,(12x,6(e20.8,1x)),/)') &
         '<< CPFEM >> aged state of elFE ip grain',debug_e, debug_i, 1, &
         plasticState(phaseAt(1,debug_i,debug_e))%state(:,phasememberAt(1,debug_i,debug_e))

@@ -95,8 +95,6 @@ subroutine CPFEM_init
  use prec, only: &
    pInt, pReal, pLongInt
  use IO, only: &
-   IO_read_realFile,&
-   IO_read_intFile, &
    IO_timeStamp, &
    IO_error
  use numerics, only: &
@@ -6,9 +6,11 @@
 #include <sys/stat.h>
 #include <stdio.h>
 #include <string.h>
+#include <signal.h>

 /* http://stackoverflow.com/questions/30279228/is-there-an-alternative-to-getcwd-in-fortran-2003-2008 */


 int isdirectory_c(const char *dir){
   struct stat statbuf;
   if(stat(dir, &statbuf) != 0) /* error */

@@ -44,3 +46,11 @@ void gethostname_c(char hostname[], int *stat){
 int chdir_c(const char *dir){
   return chdir(dir);
 }
+
+void signalusr1_c(void (*handler)(int)){
+  signal(SIGUSR1, handler);
+}
+
+void signalusr2_c(void (*handler)(int)){
+  signal(SIGUSR2, handler);
+}
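Editorial note: the new C wrappers simply forward a handler to signal(). A hedged sketch, in Python, of how a user might trigger these handlers in a running solver once the Fortran side has registered them; the PID lookup via pgrep and the executable name DAMASK_spectral are assumptions for illustration only.

import os, signal, subprocess

# find the newest running solver process (executable name is an assumption)
pid = int(subprocess.check_output(['pgrep', '-n', 'DAMASK_spectral']).decode())

os.kill(pid, signal.SIGUSR1)   # delivered to the handler registered via signalusr1_c
os.kill(pid, signal.SIGUSR2)   # delivered to the handler registered via signalusr2_c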
@@ -30,6 +30,11 @@ contains
 !> @brief reports and sets working directory
 !--------------------------------------------------------------------------------------------------
 subroutine DAMASK_interface_init
+#if __INTEL_COMPILER >= 1800
+ use, intrinsic :: iso_fortran_env, only: &
+   compiler_version, &
+   compiler_options
+#endif
 use ifport, only: &
   CHDIR

@@ -40,16 +45,25 @@ subroutine DAMASK_interface_init
 character(len=256) :: wd

 call date_and_time(values = dateAndTime)
-write(6,'(/,a)') ' <<<+- DAMASK_abaqus_std -+>>>'
-write(6,'(/,a)') ' Roters et al., Computational Materials Science, 2018'
-write(6,'(/,a)') ' Version: '//DAMASKVERSION
-write(6,'(a,2(i2.2,a),i4.4)') ' Date: ',dateAndTime(3),'/',&
-                                        dateAndTime(2),'/',&
-                                        dateAndTime(1)
-write(6,'(a,2(i2.2,a),i2.2)') ' Time: ',dateAndTime(5),':',&
-                                        dateAndTime(6),':',&
-                                        dateAndTime(7)
-write(6,'(/,a)') ' <<<+- DAMASK_interface init -+>>>'
+write(6,'(/,a)') ' <<<+- DAMASK_abaqus -+>>>'
+write(6,'(/,a)') ' Roters et al., Computational Materials Science 158, 2018, 420-478'
+write(6,'(a,/)') ' https://doi.org/10.1016/j.commatsci.2018.04.030'
+
+write(6,'(a,/)') ' Version: '//DAMASKVERSION
+
+! https://github.com/jeffhammond/HPCInfo/blob/master/docs/Preprocessor-Macros.md
+#if __INTEL_COMPILER >= 1800
+ write(6,*) 'Compiled with: ', compiler_version()
+ write(6,*) 'Compiler options: ', compiler_options()
+#else
+ write(6,'(a,i4.4,a,i8.8)') ' Compiled with Intel fortran version :', __INTEL_COMPILER,&
+                            ', build date :', __INTEL_COMPILER_BUILD_DATE
+#endif
+
+ write(6,*) 'Compiled on ', __DATE__,' at ',__TIME__
+
+write(6,'(a,2(i2.2,a),i4.4)') ' Date: ',dateAndTime(3),'/',dateAndTime(2),'/', dateAndTime(1)
+write(6,'(a,2(i2.2,a),i2.2)') ' Time: ',dateAndTime(5),':', dateAndTime(6),':', dateAndTime(7)

 call getoutdir(wd, lenOutDir)
 ierr = CHDIR(wd)
@ -12,9 +12,9 @@
|
||||||
module DAMASK_interface
|
module DAMASK_interface
|
||||||
use prec, only: &
|
use prec, only: &
|
||||||
pInt
|
pInt
|
||||||
|
|
||||||
implicit none
|
implicit none
|
||||||
private
|
private
|
||||||
|
logical, public, protected :: SIGUSR1,SIGUSR2
|
||||||
integer(pInt), public, protected :: &
|
integer(pInt), public, protected :: &
|
||||||
interface_restartInc = 0_pInt !< Increment at which calculation starts
|
interface_restartInc = 0_pInt !< Increment at which calculation starts
|
||||||
character(len=1024), public, protected :: &
|
character(len=1024), public, protected :: &
|
||||||
|
@ -42,6 +42,8 @@ contains
|
||||||
subroutine DAMASK_interface_init()
|
subroutine DAMASK_interface_init()
|
||||||
use, intrinsic :: &
|
use, intrinsic :: &
|
||||||
iso_fortran_env
|
iso_fortran_env
|
||||||
|
use :: &
|
||||||
|
iso_c_binding
|
||||||
#include <petsc/finclude/petscsys.h>
|
#include <petsc/finclude/petscsys.h>
|
||||||
#if defined(__GFORTRAN__) && __GNUC__ < 5
|
#if defined(__GFORTRAN__) && __GNUC__ < 5
|
||||||
===================================================================================================
|
===================================================================================================
|
||||||
|
@ -81,6 +83,8 @@ subroutine DAMASK_interface_init()
|
||||||
|
|
||||||
use PETScSys
|
use PETScSys
|
||||||
use system_routines, only: &
|
use system_routines, only: &
|
||||||
|
signalusr1_C, &
|
||||||
|
signalusr2_C, &
|
||||||
getHostName, &
|
getHostName, &
|
||||||
getCWD
|
getCWD
|
||||||
|
|
||||||
|
@ -139,16 +143,27 @@ subroutine DAMASK_interface_init()
|
||||||
|
|
||||||
call date_and_time(values = dateAndTime)
|
call date_and_time(values = dateAndTime)
|
||||||
write(6,'(/,a)') ' <<<+- DAMASK_interface init -+>>>'
|
write(6,'(/,a)') ' <<<+- DAMASK_interface init -+>>>'
|
||||||
write(6,'(a,/)') ' Roters et al., Computational Materials Science, 2018'
|
write(6,'(/,a)') ' Roters et al., Computational Materials Science 158, 2018, 420-478'
|
||||||
write(6,'(/,a)') ' Version: '//DAMASKVERSION
|
write(6,'(a,/)') ' https://doi.org/10.1016/j.commatsci.2018.04.030'
|
||||||
write(6,'(a,2(i2.2,a),i4.4)') ' Date: ',dateAndTime(3),'/',&
|
|
||||||
dateAndTime(2),'/',&
|
write(6,'(a,/)') ' Version: '//DAMASKVERSION
|
||||||
dateAndTime(1)
|
|
||||||
write(6,'(a,2(i2.2,a),i2.2)') ' Time: ',dateAndTime(5),':',&
|
! https://github.com/jeffhammond/HPCInfo/blob/master/docs/Preprocessor-Macros.md
|
||||||
dateAndTime(6),':',&
|
#if defined(__GFORTRAN__) || __INTEL_COMPILER >= 1800
|
||||||
dateAndTime(7)
|
write(6,*) 'Compiled with: ', compiler_version()
|
||||||
write(6,'(/,a,i4.1)') ' MPI processes: ',worldsize
|
write(6,*) 'Compiler options: ', compiler_options()
|
||||||
#include "compilation_info.f90"
|
#elif defined(__INTEL_COMPILER)
|
||||||
|
write(6,'(a,i4.4,a,i8.8)') ' Compiled with Intel fortran version :', __INTEL_COMPILER,&
|
||||||
|
', build date :', __INTEL_COMPILER_BUILD_DATE
|
||||||
|
#elif defined(__PGI)
|
||||||
|
write(6,'(a,i4.4,a,i8.8)') ' Compiled with PGI fortran version :', __PGIC__,&
|
||||||
|
'.', __PGIC_MINOR__
|
||||||
|
#endif
|
||||||
|
|
||||||
|
write(6,*) 'Compiled on ', __DATE__,' at ',__TIME__
|
||||||
|
|
||||||
|
write(6,'(a,2(i2.2,a),i4.4)') ' Date: ',dateAndTime(3),'/',dateAndTime(2),'/', dateAndTime(1)
|
||||||
|
write(6,'(a,2(i2.2,a),i2.2)') ' Time: ',dateAndTime(5),':', dateAndTime(6),':', dateAndTime(7)
|
||||||
|
|
||||||
call get_command(commandLine)
|
call get_command(commandLine)
|
||||||
chunkPos = IIO_stringPos(commandLine)
|
chunkPos = IIO_stringPos(commandLine)
|
||||||
|
@ -215,9 +230,11 @@ subroutine DAMASK_interface_init()
|
||||||
|
|
||||||
call get_environment_variable('USER',userName)
|
call get_environment_variable('USER',userName)
|
||||||
! ToDo: https://stackoverflow.com/questions/8953424/how-to-get-the-username-in-c-c-in-linux
|
! ToDo: https://stackoverflow.com/questions/8953424/how-to-get-the-username-in-c-c-in-linux
|
||||||
write(6,'(a,a)') ' Host name: ', trim(getHostName())
|
write(6,'(/,a,i4.1)') ' MPI processes: ',worldsize
|
||||||
write(6,'(a,a)') ' User name: ', trim(userName)
|
write(6,'(a,a)') ' Host name: ', trim(getHostName())
|
||||||
write(6,'(a,a)') ' Command line call: ', trim(commandLine)
|
write(6,'(a,a)') ' User name: ', trim(userName)
|
||||||
|
|
||||||
|
write(6,'(/a,a)') ' Command line call: ', trim(commandLine)
|
||||||
if (len(trim(workingDirArg)) > 0) &
|
if (len(trim(workingDirArg)) > 0) &
|
||||||
write(6,'(a,a)') ' Working dir argument: ', trim(workingDirArg)
|
write(6,'(a,a)') ' Working dir argument: ', trim(workingDirArg)
|
||||||
write(6,'(a,a)') ' Geometry argument: ', trim(geometryArg)
|
write(6,'(a,a)') ' Geometry argument: ', trim(geometryArg)
|
||||||
|
@ -229,6 +246,12 @@ subroutine DAMASK_interface_init()
|
||||||
if (interface_restartInc > 0_pInt) &
|
if (interface_restartInc > 0_pInt) &
|
||||||
write(6,'(a,i6.6)') ' Restart from increment: ', interface_restartInc
|
write(6,'(a,i6.6)') ' Restart from increment: ', interface_restartInc
|
||||||
|
|
||||||
|
call signalusr1_c(c_funloc(setSIGUSR1))
|
||||||
|
call signalusr2_c(c_funloc(setSIGUSR2))
|
||||||
|
SIGUSR1 = .false.
|
||||||
|
SIGUSR2 = .false.
|
||||||
|
|
||||||
|
|
||||||
end subroutine DAMASK_interface_init
|
end subroutine DAMASK_interface_init
|
||||||
|
|
||||||
|
|
||||||
|
@ -412,6 +435,35 @@ character(len=1024) function makeRelativePath(a,b)
|
||||||
|
|
||||||
end function makeRelativePath
|
end function makeRelativePath
|
||||||
|
|
||||||
|
!--------------------------------------------------------------------------------------------------
|
||||||
|
!> @brief sets global variable SIGUSR1 to .true. if program receives SIGUSR1
|
||||||
|
!--------------------------------------------------------------------------------------------------
|
||||||
|
subroutine setSIGUSR1(signal) bind(C)
|
||||||
|
use :: iso_c_binding
|
||||||
|
|
||||||
|
implicit none
|
||||||
|
integer(C_INT), value :: signal
|
||||||
|
SIGUSR1 = .true.
|
||||||
|
|
||||||
|
write(6,*) 'received signal ',signal, 'set SIGUSR1'
|
||||||
|
|
||||||
|
end subroutine setSIGUSR1
|
||||||
|
|
||||||
|
|
||||||
|
!--------------------------------------------------------------------------------------------------
|
||||||
|
!> @brief sets global variable SIGUSR2 to .true. if program receives SIGUSR2
|
||||||
|
!--------------------------------------------------------------------------------------------------
|
||||||
|
subroutine setSIGUSR2(signal) bind(C)
|
||||||
|
use :: iso_c_binding
|
||||||
|
|
||||||
|
implicit none
|
||||||
|
integer(C_INT), value :: signal
|
||||||
|
SIGUSR2 = .true.
|
||||||
|
|
||||||
|
write(6,*) 'received signal ',signal, 'set SIGUSR2'
|
||||||
|
|
||||||
|
end subroutine setSIGUSR2
|
||||||
|
|
||||||
|
|
||||||
!--------------------------------------------------------------------------------------------------
|
!--------------------------------------------------------------------------------------------------
|
||||||
!> @brief taken from IO, check IO_stringValue for documentation
|
!> @brief taken from IO, check IO_stringValue for documentation
|
||||||
|
@ -469,7 +521,6 @@ pure function IIO_stringPos(string)
|
||||||
do while (verify(string(right+1:),SEP)>0)
|
do while (verify(string(right+1:),SEP)>0)
|
||||||
left = right + verify(string(right+1:),SEP)
|
left = right + verify(string(right+1:),SEP)
|
||||||
right = left + scan(string(left:),SEP) - 2
|
right = left + scan(string(left:),SEP) - 2
|
||||||
if ( string(left:left) == '#' ) exit
|
|
||||||
IIO_stringPos = [IIO_stringPos,int(left, pInt), int(right, pInt)]
|
IIO_stringPos = [IIO_stringPos,int(left, pInt), int(right, pInt)]
|
||||||
IIO_stringPos(1) = IIO_stringPos(1)+1_pInt
|
IIO_stringPos(1) = IIO_stringPos(1)+1_pInt
|
||||||
enddo
|
enddo
|
||||||
|
|
|
@ -43,6 +43,11 @@ contains
|
||||||
!> @brief reports and sets working directory
|
!> @brief reports and sets working directory
|
||||||
!--------------------------------------------------------------------------------------------------
|
!--------------------------------------------------------------------------------------------------
|
||||||
subroutine DAMASK_interface_init
|
subroutine DAMASK_interface_init
|
||||||
|
#if __INTEL_COMPILER >= 1800
|
||||||
|
use, intrinsic :: iso_fortran_env, only: &
|
||||||
|
compiler_version, &
|
||||||
|
compiler_options
|
||||||
|
#endif
|
||||||
use ifport, only: &
|
use ifport, only: &
|
||||||
CHDIR
|
CHDIR
|
||||||
|
|
||||||
|
@ -53,17 +58,26 @@ subroutine DAMASK_interface_init
|
||||||
character(len=1024) :: wd
|
character(len=1024) :: wd
|
||||||
|
|
||||||
call date_and_time(values = dateAndTime)
|
call date_and_time(values = dateAndTime)
|
||||||
write(6,'(/,a)') ' <<<+- DAMASK_Marc -+>>>'
|
write(6,'(/,a)') ' <<<+- DAMASK_abaqus -+>>>'
|
||||||
write(6,'(/,a)') ' Roters et al., Computational Materials Science, 2018'
|
write(6,'(/,a)') ' Roters et al., Computational Materials Science 158, 2018, 420-478'
|
||||||
write(6,'(/,a)') ' Version: '//DAMASKVERSION
|
write(6,'(a,/)') ' https://doi.org/10.1016/j.commatsci.2018.04.030'
|
||||||
write(6,'(a,2(i2.2,a),i4.4)') ' Date: ',dateAndTime(3),'/',&
|
|
||||||
dateAndTime(2),'/',&
|
write(6,'(a,/)') ' Version: '//DAMASKVERSION
|
||||||
dateAndTime(1)
|
|
||||||
write(6,'(a,2(i2.2,a),i2.2)') ' Time: ',dateAndTime(5),':',&
|
! https://github.com/jeffhammond/HPCInfo/blob/master/docs/Preprocessor-Macros.md
|
||||||
dateAndTime(6),':',&
|
#if __INTEL_COMPILER >= 1800
|
||||||
dateAndTime(7)
|
write(6,*) 'Compiled with: ', compiler_version()
|
||||||
write(6,'(/,a)') ' <<<+- DAMASK_interface init -+>>>'
|
write(6,*) 'Compiler options: ', compiler_options()
|
||||||
#include "compilation_info.f90"
|
#else
|
||||||
|
write(6,'(a,i4.4,a,i8.8)') ' Compiled with Intel fortran version :', __INTEL_COMPILER,&
|
||||||
|
', build date :', __INTEL_COMPILER_BUILD_DATE
|
||||||
|
#endif
|
||||||
|
|
||||||
|
write(6,*) 'Compiled on ', __DATE__,' at ',__TIME__
|
||||||
|
|
||||||
|
write(6,'(a,2(i2.2,a),i4.4)') ' Date: ',dateAndTime(3),'/',dateAndTime(2),'/', dateAndTime(1)
|
||||||
|
write(6,'(a,2(i2.2,a),i2.2)') ' Time: ',dateAndTime(5),':', dateAndTime(6),':', dateAndTime(7)
|
||||||
|
|
||||||
inquire(5, name=wd) ! determine inputputfile
|
inquire(5, name=wd) ! determine inputputfile
|
||||||
wd = wd(1:scan(wd,'/',back=.true.))
|
wd = wd(1:scan(wd,'/',back=.true.))
|
||||||
ierr = CHDIR(wd)
|
ierr = CHDIR(wd)
|
||||||
|
@ -134,6 +148,7 @@ subroutine hypela2(d,g,e,de,s,t,dt,ngens,m,nn,kcus,matus,ndi,nshear,disp, &
|
||||||
debug_info, &
|
debug_info, &
|
||||||
debug_reset
|
debug_reset
|
||||||
use mesh, only: &
|
use mesh, only: &
|
||||||
|
theMesh, &
|
||||||
mesh_FEasCP, &
|
mesh_FEasCP, &
|
||||||
mesh_element, &
|
mesh_element, &
|
||||||
mesh_node0, &
|
mesh_node0, &
|
||||||
|
@ -141,8 +156,7 @@ subroutine hypela2(d,g,e,de,s,t,dt,ngens,m,nn,kcus,matus,ndi,nshear,disp, &
|
||||||
mesh_Ncellnodes, &
|
mesh_Ncellnodes, &
|
||||||
mesh_cellnode, &
|
mesh_cellnode, &
|
||||||
mesh_build_cellnodes, &
|
mesh_build_cellnodes, &
|
||||||
mesh_build_ipCoordinates, &
|
mesh_build_ipCoordinates
|
||||||
FE_Nnodes
|
|
||||||
use CPFEM, only: &
|
use CPFEM, only: &
|
||||||
CPFEM_general, &
|
CPFEM_general, &
|
||||||
CPFEM_init_done, &
|
CPFEM_init_done, &
|
||||||
|
@ -314,7 +328,7 @@ subroutine hypela2(d,g,e,de,s,t,dt,ngens,m,nn,kcus,matus,ndi,nshear,disp, &
|
||||||
computationMode = ior(computationMode,CPFEM_BACKUPJACOBIAN) ! collect and backup Jacobian after convergence
|
computationMode = ior(computationMode,CPFEM_BACKUPJACOBIAN) ! collect and backup Jacobian after convergence
|
||||||
lastIncConverged = .false. ! reset flag
|
lastIncConverged = .false. ! reset flag
|
||||||
endif
|
endif
|
||||||
do node = 1,FE_Nnodes(mesh_element(2,cp_en))
|
do node = 1,theMesh%elem%nNodes
|
||||||
CPnodeID = mesh_element(4_pInt+node,cp_en)
|
CPnodeID = mesh_element(4_pInt+node,cp_en)
|
||||||
mesh_node(1:ndeg,CPnodeID) = mesh_node0(1:ndeg,CPnodeID) + numerics_unitlength * dispt(1:ndeg,node)
|
mesh_node(1:ndeg,CPnodeID) = mesh_node0(1:ndeg,CPnodeID) + numerics_unitlength * dispt(1:ndeg,node)
|
||||||
enddo
|
enddo
|
||||||
|
|
|
@@ -162,7 +162,6 @@ subroutine utilities_init()

 character(len=1024) :: petsc_optionsPhysics
 integer(pInt) :: dimPlex
-integer(pInt) :: headerID = 205_pInt
 PetscInt, allocatable :: nEntities(:), nOutputCells(:), nOutputNodes(:)
 PetscInt :: dim
 PetscErrorCode :: ierr

@@ -213,13 +212,6 @@ subroutine utilities_init()
   nOutputCells(worldrank+1) = count(material_homog > 0_pInt)
   call MPI_Allreduce(MPI_IN_PLACE,nOutputNodes,worldsize,MPI_INT,MPI_SUM,PETSC_COMM_WORLD,ierr)
   call MPI_Allreduce(MPI_IN_PLACE,nOutputCells,worldsize,MPI_INT,MPI_SUM,PETSC_COMM_WORLD,ierr)
-  if (worldrank == 0_pInt) then
-    open(unit=headerID, file=trim(getSolverJobName())//'.header', &
-         form='FORMATTED', status='REPLACE')
-    write(headerID, '(a,i0)') 'dimension : ', dimPlex
-    write(headerID, '(a,i0)') 'number of nodes : ', sum(nOutputNodes)
-    write(headerID, '(a,i0)') 'number of cells : ', sum(nOutputCells)
-  endif

 end subroutine utilities_init

@@ -503,7 +495,6 @@ subroutine utilities_indexActiveSet(field,section,x_local,f_local,localIS,global
    CHKERRQ(ierr)
    call ISDestroy(dummyIS,ierr); CHKERRQ(ierr)
  endif
- deallocate(localIndices)

 end subroutine utilities_indexActiveSet
@@ -9,11 +9,11 @@ module FEM_Zoo
 private
 integer(pInt), parameter, public:: &
   maxOrder = 5 !< current max interpolation set at cubic (intended to be arbitrary)
-real(pReal), dimension(2,3), private, protected :: &
+real(pReal), dimension(2,3), private, parameter :: &
   triangle    = reshape([-1.0_pReal, -1.0_pReal, &
                           1.0_pReal, -1.0_pReal, &
                          -1.0_pReal,  1.0_pReal], shape=[2,3])
-real(pReal), dimension(3,4), private, protected :: &
+real(pReal), dimension(3,4), private, parameter :: &
   tetrahedron = reshape([-1.0_pReal, -1.0_pReal, -1.0_pReal, &
                           1.0_pReal, -1.0_pReal, -1.0_pReal, &
                          -1.0_pReal,  1.0_pReal, -1.0_pReal, &
File diff suppressed because it is too large

src/IO.f90 (888): file diff suppressed because it is too large
|
@ -0,0 +1,217 @@
|
||||||
|
! ###################################################################
|
||||||
|
! Copyright (c) 2013-2015, Marc De Graef/Carnegie Mellon University
|
||||||
|
! Modified 2017-2019, Martin Diehl/Max-Planck-Institut für Eisenforschung GmbH
|
||||||
|
! All rights reserved.
|
||||||
|
!
|
||||||
|
! Redistribution and use in source and binary forms, with or without modification, are
|
||||||
|
! permitted provided that the following conditions are met:
|
||||||
|
!
|
||||||
|
! - Redistributions of source code must retain the above copyright notice, this list
|
||||||
|
! of conditions and the following disclaimer.
|
||||||
|
! - Redistributions in binary form must reproduce the above copyright notice, this
|
||||||
|
! list of conditions and the following disclaimer in the documentation and/or
|
||||||
|
! other materials provided with the distribution.
|
||||||
|
! - Neither the names of Marc De Graef, Carnegie Mellon University nor the names
|
||||||
|
! of its contributors may be used to endorse or promote products derived from
|
||||||
|
! this software without specific prior written permission.
|
||||||
|
!
|
||||||
|
! THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||||
|
! AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||||
|
! IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||||
|
! ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
|
||||||
|
! LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||||
|
! DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||||
|
! SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||||
|
! CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||||
|
! OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
|
||||||
|
! USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
! ###################################################################
|
||||||
|
|
||||||
|
!--------------------------------------------------------------------------
|
||||||
|
!> @author Marc De Graef, Carnegie Mellon University
|
||||||
|
!> @author Martin Diehl, Max-Planck-Institut für Eisenforschung GmbH
|
||||||
|
!> @brief Mapping homochoric <-> cubochoric
|
||||||
|
!
|
||||||
|
!> @details
|
||||||
|
!> D. Rosca, A. Morawiec, and M. De Graef. “A new method of constructing a grid
|
||||||
|
!> in the space of 3D rotations and its applications to texture analysis”.
|
||||||
|
!> Modeling and Simulations in Materials Science and Engineering 22, 075013 (2014).
|
||||||
|
!--------------------------------------------------------------------------
|
||||||
|
module Lambert
|
||||||
|
use math
|
||||||
|
use prec, only: &
|
||||||
|
pReal
|
||||||
|
|
||||||
|
implicit none
|
||||||
|
private
|
||||||
|
real(pReal), parameter, private :: &
|
||||||
|
SPI = sqrt(PI), &
|
||||||
|
PREF = sqrt(6.0_pReal/PI), &
|
||||||
|
A = PI**(5.0_pReal/6.0_pReal)/6.0_pReal**(1.0_pReal/6.0_pReal), &
|
||||||
|
AP = PI**(2.0_pReal/3.0_pReal), &
|
||||||
|
SC = A/AP, &
|
||||||
|
BETA = A/2.0_pReal, &
|
||||||
|
R1 = (3.0_pReal*PI/4.0_pReal)**(1.0_pReal/3.0_pReal), &
|
||||||
|
R2 = sqrt(2.0_pReal), &
|
||||||
|
PI12 = PI/12.0_pReal, &
|
||||||
|
PREK = R1 * 2.0_pReal**(1.0_pReal/4.0_pReal)/BETA
|
||||||
|
|
||||||
|
public :: &
|
||||||
|
LambertCubeToBall, &
|
||||||
|
LambertBallToCube
|
||||||
|
private :: &
|
||||||
|
GetPyramidOrder
|
||||||
|
|
||||||
|
contains
|
||||||
|
|
||||||
|
|
||||||
|
!--------------------------------------------------------------------------
|
||||||
|
!> @author Marc De Graef, Carnegie Mellon University
|
||||||
|
!> @author Martin Diehl, Max-Planck-Institut für Eisenforschung GmbH
|
||||||
|
!> @brief map from 3D cubic grid to 3D ball
|
||||||
|
!--------------------------------------------------------------------------
|
||||||
|
function LambertCubeToBall(cube) result(ball)
|
||||||
|
use, intrinsic :: IEEE_ARITHMETIC
|
||||||
|
use prec, only: &
|
||||||
|
pInt, &
|
||||||
|
dEq0
|
||||||
|
|
||||||
|
implicit none
|
||||||
|
real(pReal), intent(in), dimension(3) :: cube
|
||||||
|
real(pReal), dimension(3) :: ball, LamXYZ, XYZ
|
||||||
|
real(pReal) :: T(2), c, s, q
|
||||||
|
real(pReal), parameter :: eps = 1.0e-8_pReal
|
||||||
|
integer(pInt), dimension(3) :: p
|
||||||
|
integer(pInt), dimension(2) :: order
|
||||||
|
|
||||||
|
if (maxval(abs(cube)) > AP/2.0+eps) then
|
||||||
|
ball = IEEE_value(cube,IEEE_positive_inf)
|
||||||
|
return
|
||||||
|
end if
|
||||||
|
|
||||||
|
! transform to the sphere grid via the curved square, and intercept the zero point
|
||||||
|
center: if (all(dEq0(cube))) then
|
||||||
|
ball = 0.0_pReal
|
||||||
|
else center
|
||||||
|
! get pyramide and scale by grid parameter ratio
|
||||||
|
p = GetPyramidOrder(cube)
|
||||||
|
XYZ = cube(p) * sc
|
||||||
|
|
||||||
|
! intercept all the points along the z-axis
|
||||||
|
special: if (all(dEq0(XYZ(1:2)))) then
|
||||||
|
LamXYZ = [ 0.0_pReal, 0.0_pReal, pref * XYZ(3) ]
|
||||||
|
else special
|
||||||
|
order = merge( [2,1], [1,2], abs(XYZ(2)) <= abs(XYZ(1))) ! order of absolute values of XYZ
|
||||||
|
q = PI12 * XYZ(order(1))/XYZ(order(2)) ! smaller by larger
|
||||||
|
c = cos(q)
|
||||||
|
s = sin(q)
|
||||||
|
q = prek * XYZ(order(2))/ sqrt(R2-c)
|
||||||
|
T = [ (R2*c - 1.0), R2 * s] * q
|
||||||
|
|
||||||
|
! transform to sphere grid (inverse Lambert)
|
||||||
|
! [note that there is no need to worry about dividing by zero, since XYZ(3) can not become zero]
|
||||||
|
c = sum(T**2)
|
||||||
|
s = Pi * c/(24.0*XYZ(3)**2)
|
||||||
|
c = sPi * c / sqrt(24.0_pReal) / XYZ(3)
|
||||||
|
q = sqrt( 1.0 - s )
|
||||||
|
LamXYZ = [ T(order(2)) * q, T(order(1)) * q, pref * XYZ(3) - c ]
|
||||||
|
endif special
|
||||||
|
|
||||||
|
! reverse the coordinates back to the regular order according to the original pyramid number
|
||||||
|
ball = LamXYZ(p)
|
||||||
|
|
||||||
|
endif center
|
||||||
|
|
||||||
|
end function LambertCubeToBall
|
||||||
|
|
||||||
|
|
||||||
|
!--------------------------------------------------------------------------
|
||||||
|
!> @author Marc De Graef, Carnegie Mellon University
|
||||||
|
!> @author Martin Diehl, Max-Planck-Institut für Eisenforschung GmbH
|
||||||
|
!> @brief map from 3D ball to 3D cubic grid
|
||||||
|
!--------------------------------------------------------------------------
|
||||||
|
pure function LambertBallToCube(xyz) result(cube)
|
||||||
|
use, intrinsic :: IEEE_ARITHMETIC, only:&
|
||||||
|
IEEE_positive_inf, &
|
||||||
|
IEEE_value
|
||||||
|
use prec, only: &
|
||||||
|
pInt, &
|
||||||
|
dEq0
|
||||||
|
|
||||||
|
implicit none
|
||||||
|
real(pReal), intent(in), dimension(3) :: xyz
|
||||||
|
real(pReal), dimension(3) :: cube, xyz1, xyz3
|
||||||
|
real(pReal), dimension(2) :: Tinv, xyz2
|
||||||
|
real(pReal) :: rs, qxy, q2, sq2, q, tt
|
||||||
|
integer(pInt), dimension(3) :: p
|
||||||
|
|
||||||
|
rs = norm2(xyz)
|
||||||
|
if (rs > R1) then
|
||||||
|
cube = IEEE_value(cube,IEEE_positive_inf)
|
||||||
|
return
|
||||||
|
endif
|
||||||
|
|
||||||
|
center: if (all(dEq0(xyz))) then
|
||||||
|
cube = 0.0_pReal
|
||||||
|
else center
|
||||||
|
p = GetPyramidOrder(xyz)
|
||||||
|
xyz3 = xyz(p)
|
||||||
|
|
||||||
|
! inverse M_3
|
||||||
|
xyz2 = xyz3(1:2) * sqrt( 2.0*rs/(rs+abs(xyz3(3))) )
|
||||||
|
|
||||||
|
! inverse M_2
|
||||||
|
qxy = sum(xyz2**2)
|
||||||
|
|
||||||
|
special: if (dEq0(qxy)) then
|
||||||
|
Tinv = 0.0
|
||||||
|
else special
|
||||||
|
q2 = qxy + maxval(abs(xyz2))**2
|
||||||
|
sq2 = sqrt(q2)
|
||||||
|
q = (beta/R2/R1) * sqrt(q2*qxy/(q2-maxval(abs(xyz2))*sq2))
|
||||||
|
tt = (minval(abs(xyz2))**2+maxval(abs(xyz2))*sq2)/R2/qxy
|
||||||
|
Tinv = q * sign(1.0,xyz2) * merge([ 1.0_pReal, acos(math_clip(tt,-1.0_pReal,1.0_pReal))/PI12], &
|
||||||
|
[ acos(math_clip(tt,-1.0_pReal,1.0_pReal))/PI12, 1.0_pReal], &
|
||||||
|
abs(xyz2(2)) <= abs(xyz2(1)))
|
||||||
|
endif special
|
||||||
|
|
||||||
|
! inverse M_1
|
||||||
|
xyz1 = [ Tinv(1), Tinv(2), sign(1.0,xyz3(3)) * rs / pref ] /sc
|
||||||
|
|
||||||
|
! reverst the coordinates back to the regular order according to the original pyramid number
|
||||||
|
cube = xyz1(p)
|
||||||
|
|
||||||
|
endif center
|
||||||
|
|
||||||
|
end function LambertBallToCube
|
||||||
|
|
||||||
|
|
||||||
|
!--------------------------------------------------------------------------
|
||||||
|
!> @author Marc De Graef, Carnegie Mellon University
|
||||||
|
!> @author Martin Diehl, Max-Planck-Institut für Eisenforschung GmbH
|
||||||
|
!> @brief determine to which pyramid a point in a cubic grid belongs
|
||||||
|
!--------------------------------------------------------------------------
|
||||||
|
pure function GetPyramidOrder(xyz)
|
||||||
|
use prec, only: &
|
||||||
|
pInt
|
||||||
|
|
||||||
|
implicit none
|
||||||
|
real(pReal),intent(in),dimension(3) :: xyz
|
||||||
|
integer(pInt), dimension(3) :: GetPyramidOrder
|
||||||
|
|
||||||
|
if (((abs(xyz(1)) <= xyz(3)).and.(abs(xyz(2)) <= xyz(3))) .or. &
|
||||||
|
((abs(xyz(1)) <= -xyz(3)).and.(abs(xyz(2)) <= -xyz(3)))) then
|
||||||
|
GetPyramidOrder = [1,2,3]
|
||||||
|
else if (((abs(xyz(3)) <= xyz(1)).and.(abs(xyz(2)) <= xyz(1))) .or. &
|
||||||
|
((abs(xyz(3)) <= -xyz(1)).and.(abs(xyz(2)) <= -xyz(1)))) then
|
||||||
|
GetPyramidOrder = [2,3,1]
|
||||||
|
else if (((abs(xyz(1)) <= xyz(2)).and.(abs(xyz(3)) <= xyz(2))) .or. &
|
||||||
|
((abs(xyz(1)) <= -xyz(2)).and.(abs(xyz(3)) <= -xyz(2)))) then
|
||||||
|
GetPyramidOrder = [3,1,2]
|
||||||
|
else
|
||||||
|
GetPyramidOrder = -1 ! should be impossible, but might simplify debugging
|
||||||
|
end if
|
||||||
|
|
||||||
|
end function GetPyramidOrder
|
||||||
|
|
||||||
|
end module Lambert
|
|
@@ -11,8 +11,18 @@
 #include "HDF5_utilities.f90"
 #endif
 #include "math.f90"
+#include "quaternions.f90"
+#include "Lambert.f90"
+#include "rotations.f90"
 #include "FEsolving.f90"
-#include "mesh.f90"
+#include "element.f90"
+#include "mesh_base.f90"
+#ifdef Abaqus
+#include "mesh_abaqus.f90"
+#endif
+#ifdef Marc4DAMASK
+#include "mesh_marc.f90"
+#endif
 #include "material.f90"
 #include "lattice.f90"
 #include "source_thermal_dissipation.f90"
@@ -1,10 +0,0 @@
-#if defined(__GFORTRAN__) || __INTEL_COMPILER >= 1800
-write(6,*) 'Compiled with ', compiler_version()
-write(6,*) 'With options ', compiler_options()
-#else
-write(6,'(a,i4.4,a,i8.8)') ' Compiled with Intel fortran version ', __INTEL_COMPILER,&
-                           ', build date ', __INTEL_COMPILER_BUILD_DATE
-#endif
-write(6,*) 'Compiled on ', __DATE__,' at ',__TIME__
-write(6,*)
-flush(6)
@ -38,11 +38,6 @@ contains
|
||||||
!> @brief allocates arrays pointing to array of the various constitutive modules
|
!> @brief allocates arrays pointing to array of the various constitutive modules
|
||||||
!--------------------------------------------------------------------------------------------------
|
!--------------------------------------------------------------------------------------------------
|
||||||
subroutine constitutive_init()
|
subroutine constitutive_init()
|
||||||
#if defined(__GFORTRAN__) || __INTEL_COMPILER >= 1800
|
|
||||||
use, intrinsic :: iso_fortran_env, only: &
|
|
||||||
compiler_version, &
|
|
||||||
compiler_options
|
|
||||||
#endif
|
|
||||||
use prec, only: &
|
use prec, only: &
|
||||||
pReal
|
pReal
|
||||||
use debug, only: &
|
use debug, only: &
|
||||||
|
@ -53,15 +48,8 @@ subroutine constitutive_init()
|
||||||
use IO, only: &
|
use IO, only: &
|
||||||
IO_error, &
|
IO_error, &
|
||||||
IO_open_file, &
|
IO_open_file, &
|
||||||
IO_checkAndRewind, &
|
|
||||||
IO_open_jobFile_stat, &
|
IO_open_jobFile_stat, &
|
||||||
IO_write_jobFile, &
|
IO_write_jobFile
|
||||||
IO_write_jobIntFile, &
|
|
||||||
IO_timeStamp
|
|
||||||
use config, only: &
|
|
||||||
config_phase
|
|
||||||
use mesh, only: &
|
|
||||||
FE_geomtype
|
|
||||||
use config, only: &
|
use config, only: &
|
||||||
material_Nphase, &
|
material_Nphase, &
|
||||||
material_localFileExt, &
|
material_localFileExt, &
|
||||||
|
@ -141,46 +129,33 @@ subroutine constitutive_init()
|
||||||
nonlocalConstitutionPresent = .false.
|
nonlocalConstitutionPresent = .false.
|
||||||
|
|
||||||
!--------------------------------------------------------------------------------------------------
|
!--------------------------------------------------------------------------------------------------
|
||||||
! open material.config
|
! initialized plasticity
|
||||||
if (.not. IO_open_jobFile_stat(FILEUNIT,material_localFileExt)) & ! no local material configuration present...
|
|
||||||
call IO_open_file(FILEUNIT,material_configFile) ! ... open material.config file
|
|
||||||
|
|
||||||
!--------------------------------------------------------------------------------------------------
|
|
||||||
! parse plasticities from config file
|
|
||||||
if (any(phase_plasticity == PLASTICITY_NONE_ID)) call plastic_none_init
|
if (any(phase_plasticity == PLASTICITY_NONE_ID)) call plastic_none_init
|
||||||
if (any(phase_plasticity == PLASTICITY_ISOTROPIC_ID)) call plastic_isotropic_init
|
if (any(phase_plasticity == PLASTICITY_ISOTROPIC_ID)) call plastic_isotropic_init
|
||||||
if (any(phase_plasticity == PLASTICITY_PHENOPOWERLAW_ID)) call plastic_phenopowerlaw_init
|
if (any(phase_plasticity == PLASTICITY_PHENOPOWERLAW_ID)) call plastic_phenopowerlaw_init
|
||||||
if (any(phase_plasticity == PLASTICITY_KINEHARDENING_ID)) call plastic_kinehardening_init
|
if (any(phase_plasticity == PLASTICITY_KINEHARDENING_ID)) call plastic_kinehardening_init
|
||||||
if (any(phase_plasticity == PLASTICITY_DISLOTWIN_ID)) call plastic_dislotwin_init
|
if (any(phase_plasticity == PLASTICITY_DISLOTWIN_ID)) call plastic_dislotwin_init
|
||||||
if (any(phase_plasticity == PLASTICITY_DISLOUCLA_ID)) call plastic_disloucla_init
|
if (any(phase_plasticity == PLASTICITY_DISLOUCLA_ID)) call plastic_disloucla_init
|
||||||
if (any(phase_plasticity == PLASTICITY_NONLOCAL_ID)) then
|
if (any(phase_plasticity == PLASTICITY_NONLOCAL_ID)) call plastic_nonlocal_init
|
||||||
call plastic_nonlocal_init(FILEUNIT)
|
|
||||||
call plastic_nonlocal_stateInit()
|
|
||||||
endif
|
|
||||||
|
|
||||||
!--------------------------------------------------------------------------------------------------
|
!--------------------------------------------------------------------------------------------------
|
||||||
! parse source mechanisms from config file
|
! initialize source mechanisms
|
||||||
call IO_checkAndRewind(FILEUNIT)
|
if (any(phase_source == SOURCE_thermal_dissipation_ID)) call source_thermal_dissipation_init
|
||||||
if (any(phase_source == SOURCE_thermal_dissipation_ID)) call source_thermal_dissipation_init(FILEUNIT)
|
if (any(phase_source == SOURCE_thermal_externalheat_ID)) call source_thermal_externalheat_init
|
||||||
if (any(phase_source == SOURCE_thermal_externalheat_ID)) call source_thermal_externalheat_init(FILEUNIT)
|
if (any(phase_source == SOURCE_damage_isoBrittle_ID)) call source_damage_isoBrittle_init
|
||||||
if (any(phase_source == SOURCE_damage_isoBrittle_ID)) call source_damage_isoBrittle_init(FILEUNIT)
|
if (any(phase_source == SOURCE_damage_isoDuctile_ID)) call source_damage_isoDuctile_init
|
||||||
if (any(phase_source == SOURCE_damage_isoDuctile_ID)) call source_damage_isoDuctile_init(FILEUNIT)
|
if (any(phase_source == SOURCE_damage_anisoBrittle_ID)) call source_damage_anisoBrittle_init
|
||||||
if (any(phase_source == SOURCE_damage_anisoBrittle_ID)) call source_damage_anisoBrittle_init(FILEUNIT)
|
if (any(phase_source == SOURCE_damage_anisoDuctile_ID)) call source_damage_anisoDuctile_init
|
||||||
if (any(phase_source == SOURCE_damage_anisoDuctile_ID)) call source_damage_anisoDuctile_init(FILEUNIT)
|
|
||||||
|
|
||||||
!--------------------------------------------------------------------------------------------------
|
!--------------------------------------------------------------------------------------------------
|
||||||
! parse kinematic mechanisms from config file
|
! initialize kinematic mechanisms
|
||||||
call IO_checkAndRewind(FILEUNIT)
|
if (any(phase_kinematics == KINEMATICS_cleavage_opening_ID)) call kinematics_cleavage_opening_init
|
||||||
if (any(phase_kinematics == KINEMATICS_cleavage_opening_ID)) call kinematics_cleavage_opening_init(FILEUNIT)
|
if (any(phase_kinematics == KINEMATICS_slipplane_opening_ID)) call kinematics_slipplane_opening_init
|
||||||
if (any(phase_kinematics == KINEMATICS_slipplane_opening_ID)) call kinematics_slipplane_opening_init(FILEUNIT)
|
if (any(phase_kinematics == KINEMATICS_thermal_expansion_ID)) call kinematics_thermal_expansion_init
|
||||||
if (any(phase_kinematics == KINEMATICS_thermal_expansion_ID)) call kinematics_thermal_expansion_init(FILEUNIT)
|
|
||||||
close(FILEUNIT)
|
|
||||||
|
|
||||||
call config_deallocate('material.config/phase')
|
call config_deallocate('material.config/phase')
|
||||||
|
|
||||||
write(6,'(/,a)') ' <<<+- constitutive init -+>>>'
|
write(6,'(/,a)') ' <<<+- constitutive init -+>>>'
|
||||||
write(6,'(a15,a)') ' Current time: ',IO_timeStamp()
|
|
||||||
#include "compilation_info.f90"
|
|
||||||
|
|
||||||
mainProcess: if (worldrank == 0) then
|
mainProcess: if (worldrank == 0) then
|
||||||
!--------------------------------------------------------------------------------------------------
|
!--------------------------------------------------------------------------------------------------
|
||||||
|
@@ -348,7 +323,7 @@ end function constitutive_homogenizedC
 !--------------------------------------------------------------------------------------------------
 !> @brief calls microstructure function of the different constitutive models
 !--------------------------------------------------------------------------------------------------
-subroutine constitutive_microstructure(orientations, Fe, Fp, ipc, ip, el)
+subroutine constitutive_microstructure(Fe, Fp, ipc, ip, el)
 use prec, only: &
 pReal
 use material, only: &
@@ -363,7 +338,7 @@ subroutine constitutive_microstructure(orientations, Fe, Fp, ipc, ip, el)
 PLASTICITY_disloucla_ID, &
 PLASTICITY_nonlocal_ID
 use plastic_nonlocal, only: &
-plastic_nonlocal_microstructure
+plastic_nonlocal_dependentState
 use plastic_dislotwin, only: &
 plastic_dislotwin_dependentState
 use plastic_disloUCLA, only: &
@@ -381,8 +356,6 @@ subroutine constitutive_microstructure(orientations, Fe, Fp, ipc, ip, el)
 ho, & !< homogenization
 tme, & !< thermal member position
 instance, of
-real(pReal), intent(in), dimension(:,:,:,:) :: &
-orientations !< crystal orientations as quaternions

 ho = material_homogenizationAt(el)
 tme = thermalMapping(ho)%p(ip,el)
@@ -397,7 +370,7 @@ subroutine constitutive_microstructure(orientations, Fe, Fp, ipc, ip, el)
 instance = phase_plasticityInstance(material_phase(ipc,ip,el))
 call plastic_disloUCLA_dependentState(instance,of)
 case (PLASTICITY_NONLOCAL_ID) plasticityType
-call plastic_nonlocal_microstructure (Fe,Fp,ip,el)
+call plastic_nonlocal_dependentState (Fe,Fp,ip,el)
 end select plasticityType

 end subroutine constitutive_microstructure
@@ -405,15 +378,15 @@ end subroutine constitutive_microstructure

 !--------------------------------------------------------------------------------------------------
 !> @brief contains the constitutive equation for calculating the velocity gradient
+! ToDo: Discuss whether it makes sense if crystallite handles the configuration conversion, i.e.
+! Mp in, dLp_dMp out
 !--------------------------------------------------------------------------------------------------
-subroutine constitutive_LpAndItsTangents(Lp, dLp_dS, dLp_dFi, S6, Fi, ipc, ip, el)
+subroutine constitutive_LpAndItsTangents(Lp, dLp_dS, dLp_dFi, &
+                                         S, Fi, ipc, ip, el)
 use prec, only: &
 pReal
 use math, only: &
-math_mul33x33, &
-math_6toSym33, &
-math_sym33to6, &
-math_99to3333
+math_mul33x33
 use material, only: &
 phasememberAt, &
 phase_plasticity, &
@@ -429,6 +402,8 @@ subroutine constitutive_LpAndItsTangents(Lp, dLp_dS, dLp_dFi, S6, Fi, ipc, ip, e
 PLASTICITY_DISLOTWIN_ID, &
 PLASTICITY_DISLOUCLA_ID, &
 PLASTICITY_NONLOCAL_ID
+use mesh, only: &
+mesh_ipVolume
 use plastic_isotropic, only: &
 plastic_isotropic_LpAndItsTangent
 use plastic_phenopowerlaw, only: &
@@ -447,9 +422,8 @@ subroutine constitutive_LpAndItsTangents(Lp, dLp_dS, dLp_dFi, S6, Fi, ipc, ip, e
 ipc, & !< component-ID of integration point
 ip, & !< integration point
 el !< element
-real(pReal), intent(in), dimension(6) :: &
-S6 !< 2nd Piola-Kirchhoff stress (vector notation)
 real(pReal), intent(in), dimension(3,3) :: &
+S, & !< 2nd Piola-Kirchhoff stress
 Fi !< intermediate deformation gradient
 real(pReal), intent(out), dimension(3,3) :: &
 Lp !< plastic velocity gradient
@@ -458,11 +432,8 @@ subroutine constitutive_LpAndItsTangents(Lp, dLp_dS, dLp_dFi, S6, Fi, ipc, ip, e
 dLp_dFi !< derivative of Lp with respect to Fi
 real(pReal), dimension(3,3,3,3) :: &
 dLp_dMp !< derivative of Lp with respect to Mandel stress
-real(pReal), dimension(9,9) :: &
-dLp_dMp99 !< derivative of Lp with respect to Mstar (matrix notation)
 real(pReal), dimension(3,3) :: &
-Mp, & !< Mandel stress work conjugate with Lp
-S !< 2nd Piola-Kirchhoff stress
+Mp !< Mandel stress work conjugate with Lp
 integer(pInt) :: &
 ho, & !< homogenization
 tme !< thermal member position
@@ -472,7 +443,6 @@ subroutine constitutive_LpAndItsTangents(Lp, dLp_dS, dLp_dFi, S6, Fi, ipc, ip, e
 ho = material_homogenizationAt(el)
 tme = thermalMapping(ho)%p(ip,el)

-S = math_6toSym33(S6)
 Mp = math_mul33x33(math_mul33x33(transpose(Fi),Fi),S)

 plasticityType: select case (phase_plasticity(material_phase(ipc,ip,el)))
@@ -497,9 +467,8 @@ subroutine constitutive_LpAndItsTangents(Lp, dLp_dS, dLp_dFi, S6, Fi, ipc, ip, e
 call plastic_kinehardening_LpAndItsTangent (Lp,dLp_dMp, Mp,instance,of)

 case (PLASTICITY_NONLOCAL_ID) plasticityType
-call plastic_nonlocal_LpAndItsTangent (Lp,dLp_dMp99, math_sym33to6(Mp), &
-temperature(ho)%p(tme),ip,el)
-dLp_dMp = math_99to3333(dLp_dMp99) ! ToDo: We revert here the last statement in plastic_xx_LpAndItsTanget
+call plastic_nonlocal_LpAndItsTangent (Lp,dLp_dMp,Mp, &
+temperature(ho)%p(tme),mesh_ipVolume(ip,el),ip,el)

 case (PLASTICITY_DISLOTWIN_ID) plasticityType
 of = phasememberAt(ipc,ip,el)
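Added note (illustration, not part of the committed sources): with the 2nd Piola-Kirchhoff stress now passed as a 3x3 tensor S, the expression math_mul33x33(math_mul33x33(transpose(Fi),Fi),S) used in these hunks is the Mandel stress that is work-conjugate to Lp,

$$ M_p = F_i^{\mathsf{T}}\, F_i\, S , $$

which is why the former detour through math_6toSym33(S6) and math_sym33to6(Mp) can be dropped.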
@@ -534,15 +503,15 @@ end subroutine constitutive_LpAndItsTangents
 !> @brief contains the constitutive equation for calculating the velocity gradient
 ! ToDo: MD: S is Mi?
 !--------------------------------------------------------------------------------------------------
-subroutine constitutive_LiAndItsTangents(Li, dLi_dS, dLi_dFi, S6, Fi, ipc, ip, el)
+subroutine constitutive_LiAndItsTangents(Li, dLi_dS, dLi_dFi, &
+                                         S, Fi, ipc, ip, el)
 use prec, only: &
 pReal
 use math, only: &
 math_I3, &
 math_inv33, &
 math_det33, &
-math_mul33x33, &
-math_6toSym33
+math_mul33x33
 use material, only: &
 phasememberAt, &
 phase_plasticity, &
@@ -569,8 +538,8 @@ subroutine constitutive_LiAndItsTangents(Li, dLi_dS, dLi_dFi, S6, Fi, ipc, ip, e
 ipc, & !< component-ID of integration point
 ip, & !< integration point
 el !< element
-real(pReal), intent(in), dimension(6) :: &
-S6 !< 2nd Piola-Kirchhoff stress (vector notation)
+real(pReal), intent(in), dimension(3,3) :: &
+S !< 2nd Piola-Kirchhoff stress
 real(pReal), intent(in), dimension(3,3) :: &
 Fi !< intermediate deformation gradient
 real(pReal), intent(out), dimension(3,3) :: &
@@ -599,7 +568,7 @@ subroutine constitutive_LiAndItsTangents(Li, dLi_dS, dLi_dFi, S6, Fi, ipc, ip, e
 case (PLASTICITY_isotropic_ID) plasticityType
 of = phasememberAt(ipc,ip,el)
 instance = phase_plasticityInstance(material_phase(ipc,ip,el))
-call plastic_isotropic_LiAndItsTangent(my_Li, my_dLi_dS, math_6toSym33(S6),instance,of)
+call plastic_isotropic_LiAndItsTangent(my_Li, my_dLi_dS, S ,instance,of)
 case default plasticityType
 my_Li = 0.0_pReal
 my_dLi_dS = 0.0_pReal
@@ -611,9 +580,9 @@ subroutine constitutive_LiAndItsTangents(Li, dLi_dS, dLi_dFi, S6, Fi, ipc, ip, e
 KinematicsLoop: do k = 1_pInt, phase_Nkinematics(material_phase(ipc,ip,el))
 kinematicsType: select case (phase_kinematics(k,material_phase(ipc,ip,el)))
 case (KINEMATICS_cleavage_opening_ID) kinematicsType
-call kinematics_cleavage_opening_LiAndItsTangent(my_Li, my_dLi_dS, S6, ipc, ip, el)
+call kinematics_cleavage_opening_LiAndItsTangent(my_Li, my_dLi_dS, S, ipc, ip, el)
 case (KINEMATICS_slipplane_opening_ID) kinematicsType
-call kinematics_slipplane_opening_LiAndItsTangent(my_Li, my_dLi_dS, S6, ipc, ip, el)
+call kinematics_slipplane_opening_LiAndItsTangent(my_Li, my_dLi_dS, S, ipc, ip, el)
 case (KINEMATICS_thermal_expansion_ID) kinematicsType
 call kinematics_thermal_expansion_LiAndItsTangent(my_Li, my_dLi_dS, ipc, ip, el)
 case default kinematicsType
|
||||||
use prec, only: &
|
use prec, only: &
|
||||||
pReal
|
pReal
|
||||||
use math, only: &
|
use math, only: &
|
||||||
math_I3, &
|
math_I3
|
||||||
math_inv33, &
|
|
||||||
math_mul33x33
|
|
||||||
use material, only: &
|
use material, only: &
|
||||||
|
material_phase, &
|
||||||
|
material_homog, &
|
||||||
|
thermalMapping, &
|
||||||
phase_kinematics, &
|
phase_kinematics, &
|
||||||
phase_Nkinematics, &
|
phase_Nkinematics, &
|
||||||
material_phase, &
|
material_phase, &
|
||||||
|
@ -665,14 +635,20 @@ pure function constitutive_initialFi(ipc, ip, el)
|
||||||
constitutive_initialFi !< composite initial intermediate deformation gradient
|
constitutive_initialFi !< composite initial intermediate deformation gradient
|
||||||
integer(pInt) :: &
|
integer(pInt) :: &
|
||||||
k !< counter in kinematics loop
|
k !< counter in kinematics loop
|
||||||
|
integer(pInt) :: &
|
||||||
|
phase, &
|
||||||
|
homog, offset
|
||||||
|
|
||||||
constitutive_initialFi = math_I3
|
constitutive_initialFi = math_I3
|
||||||
|
phase = material_phase(ipc,ip,el)
|
||||||
|
|
||||||
KinematicsLoop: do k = 1_pInt, phase_Nkinematics(material_phase(ipc,ip,el)) !< Warning: small initial strain assumption
|
KinematicsLoop: do k = 1_pInt, phase_Nkinematics(phase) !< Warning: small initial strain assumption
|
||||||
kinematicsType: select case (phase_kinematics(k,material_phase(ipc,ip,el)))
|
kinematicsType: select case (phase_kinematics(k,phase))
|
||||||
case (KINEMATICS_thermal_expansion_ID) kinematicsType
|
case (KINEMATICS_thermal_expansion_ID) kinematicsType
|
||||||
|
homog = material_homog(ip,el)
|
||||||
|
offset = thermalMapping(homog)%p(ip,el)
|
||||||
constitutive_initialFi = &
|
constitutive_initialFi = &
|
||||||
constitutive_initialFi + kinematics_thermal_expansion_initialStrain(ipc, ip, el)
|
constitutive_initialFi + kinematics_thermal_expansion_initialStrain(homog,phase,offset)
|
||||||
end select kinematicsType
|
end select kinematicsType
|
||||||
enddo KinematicsLoop
|
enddo KinematicsLoop
|
||||||
|
|
||||||
|
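Added note (my notation, not the code's): under the small-initial-strain assumption flagged in the hunk above, the kinematics loop composes the initial intermediate deformation gradient additively,

$$ F_i^{\,0} \approx I + \sum_k \varepsilon_k^{\,0} , $$

where each \( \varepsilon_k^{\,0} \) denotes the value returned by kinematics_thermal_expansion_initialStrain(homog,phase,offset) for kinematics entry k of the phase.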
@@ -712,7 +688,8 @@ end subroutine constitutive_SandItsTangents
 !> @brief returns the 2nd Piola-Kirchhoff stress tensor and its tangent with respect to
 !> the elastic and intermediate deformation gradients using Hooke's law
 !--------------------------------------------------------------------------------------------------
-subroutine constitutive_hooke_SandItsTangents(S, dS_dFe, dS_dFi, Fe, Fi, ipc, ip, el)
+subroutine constitutive_hooke_SandItsTangents(S, dS_dFe, dS_dFi, &
+                                              Fe, Fi, ipc, ip, el)
 use prec, only: &
 pReal
 use math, only : &
@@ -776,7 +753,7 @@ end subroutine constitutive_hooke_SandItsTangents
 !--------------------------------------------------------------------------------------------------
 !> @brief contains the constitutive equation for calculating the rate of change of microstructure
 !--------------------------------------------------------------------------------------------------
-subroutine constitutive_collectDotState(S6, FeArray, Fi, FpArray, subdt, subfracArray,ipc, ip, el)
+subroutine constitutive_collectDotState(S, FeArray, Fi, FpArray, subdt, ipc, ip, el)
 use prec, only: &
 pReal, &
 pLongInt
@@ -786,12 +763,9 @@ subroutine constitutive_collectDotState(S6, FeArray, Fi, FpArray, subdt, subfrac
 debug_levelBasic
 use math, only: &
 math_mul33x33, &
-math_6toSym33, &
-math_sym33to6, &
 math_mul33x33
 use mesh, only: &
-mesh_NcpElems, &
-mesh_maxNips
+theMesh
 use material, only: &
 phasememberAt, &
 phase_plasticityInstance, &
@@ -842,27 +816,25 @@ subroutine constitutive_collectDotState(S6, FeArray, Fi, FpArray, subdt, subfrac
 el !< element
 real(pReal), intent(in) :: &
 subdt !< timestep
-real(pReal), intent(in), dimension(homogenization_maxNgrains,mesh_maxNips,mesh_NcpElems) :: &
-subfracArray !< subfraction of timestep
-real(pReal), intent(in), dimension(3,3,homogenization_maxNgrains,mesh_maxNips,mesh_NcpElems) :: &
+real(pReal), intent(in), dimension(3,3,homogenization_maxNgrains,theMesh%elem%nIPs,theMesh%Nelems) :: &
 FeArray, & !< elastic deformation gradient
 FpArray !< plastic deformation gradient
 real(pReal), intent(in), dimension(3,3) :: &
 Fi !< intermediate deformation gradient
-real(pReal), intent(in), dimension(6) :: &
-S6 !< 2nd Piola Kirchhoff stress (vector notation)
+real(pReal), intent(in), dimension(3,3) :: &
+S !< 2nd Piola Kirchhoff stress (vector notation)
 real(pReal), dimension(3,3) :: &
 Mp
 integer(pInt) :: &
 ho, & !< homogenization
 tme, & !< thermal member position
-s, & !< counter in source loop
+i, & !< counter in source loop
 instance, of

 ho = material_homogenizationAt(el)
 tme = thermalMapping(ho)%p(ip,el)

-Mp = math_mul33x33(math_mul33x33(transpose(Fi),Fi),math_6toSym33(S6))
+Mp = math_mul33x33(math_mul33x33(transpose(Fi),Fi),S)

 plasticityType: select case (phase_plasticity(material_phase(ipc,ip,el)))

@@ -892,16 +864,16 @@ subroutine constitutive_collectDotState(S6, FeArray, Fi, FpArray, subdt, subfrac
 call plastic_disloucla_dotState (Mp,temperature(ho)%p(tme),instance,of)

 case (PLASTICITY_NONLOCAL_ID) plasticityType
-call plastic_nonlocal_dotState (math_sym33to6(Mp),FeArray,FpArray,temperature(ho)%p(tme), &
-subdt,subfracArray,ip,el)
+call plastic_nonlocal_dotState (Mp,FeArray,FpArray,temperature(ho)%p(tme), &
+subdt,ip,el)
 end select plasticityType

-SourceLoop: do s = 1_pInt, phase_Nsources(material_phase(ipc,ip,el))
+SourceLoop: do i = 1_pInt, phase_Nsources(material_phase(ipc,ip,el))

-sourceType: select case (phase_source(s,material_phase(ipc,ip,el)))
+sourceType: select case (phase_source(i,material_phase(ipc,ip,el)))

 case (SOURCE_damage_anisoBrittle_ID) sourceType
-call source_damage_anisoBrittle_dotState (S6, ipc, ip, el) !< correct stress?
+call source_damage_anisoBrittle_dotState (S, ipc, ip, el) !< correct stress?

 case (SOURCE_damage_isoDuctile_ID) sourceType
 call source_damage_isoDuctile_dotState ( ipc, ip, el)
@@ -931,7 +903,6 @@ subroutine constitutive_collectDeltaState(S, Fe, Fi, ipc, ip, el)
 debug_constitutive, &
 debug_levelBasic
 use math, only: &
-math_sym33to6, &
 math_mul33x33
 use material, only: &
 phasememberAt, &
@@ -975,7 +946,7 @@ subroutine constitutive_collectDeltaState(S, Fe, Fi, ipc, ip, el)
 call plastic_kinehardening_deltaState(Mp,instance,of)

 case (PLASTICITY_NONLOCAL_ID) plasticityType
-call plastic_nonlocal_deltaState(math_sym33to6(Mp),ip,el)
+call plastic_nonlocal_deltaState(Mp,ip,el)

 end select plasticityType

@@ -997,15 +968,11 @@ end subroutine constitutive_collectDeltaState
 !--------------------------------------------------------------------------------------------------
 !> @brief returns array of constitutive results
 !--------------------------------------------------------------------------------------------------
-function constitutive_postResults(S6, Fi, FeArray, ipc, ip, el)
+function constitutive_postResults(S, Fi, ipc, ip, el)
 use prec, only: &
 pReal
 use math, only: &
-math_6toSym33, &
 math_mul33x33
-use mesh, only: &
-mesh_NcpElems, &
-mesh_maxNips
 use material, only: &
 phasememberAt, &
 phase_plasticityInstance, &
@@ -1018,7 +985,6 @@ function constitutive_postResults(S6, Fi, FeArray, ipc, ip, el)
 material_homogenizationAt, &
 temperature, &
 thermalMapping, &
-homogenization_maxNgrains, &
 PLASTICITY_NONE_ID, &
 PLASTICITY_ISOTROPIC_ID, &
 PLASTICITY_PHENOPOWERLAW_ID, &
@@ -1061,10 +1027,8 @@ function constitutive_postResults(S6, Fi, FeArray, ipc, ip, el)
 constitutive_postResults
 real(pReal), intent(in), dimension(3,3) :: &
 Fi !< intermediate deformation gradient
-real(pReal), intent(in), dimension(3,3,homogenization_maxNgrains,mesh_maxNips,mesh_NcpElems) :: &
-FeArray !< elastic deformation gradient
-real(pReal), intent(in), dimension(6) :: &
-S6 !< 2nd Piola Kirchhoff stress (vector notation)
+real(pReal), intent(in), dimension(3,3) :: &
+S !< 2nd Piola Kirchhoff stress
 real(pReal), dimension(3,3) :: &
 Mp !< Mandel stress
 integer(pInt) :: &
@@ -1072,11 +1036,11 @@ function constitutive_postResults(S6, Fi, FeArray, ipc, ip, el)
 integer(pInt) :: &
 ho, & !< homogenization
 tme, & !< thermal member position
-s, of, instance !< counter in source loop
+i, of, instance !< counter in source loop

 constitutive_postResults = 0.0_pReal

-Mp = math_mul33x33(math_mul33x33(transpose(Fi),Fi),math_6toSym33(S6))
+Mp = math_mul33x33(math_mul33x33(transpose(Fi),Fi),S)

 ho = material_homogenizationAt(el)
 tme = thermalMapping(ho)%p(ip,el)
@@ -1117,22 +1081,24 @@ function constitutive_postResults(S6, Fi, FeArray, ipc, ip, el)

 case (PLASTICITY_NONLOCAL_ID) plasticityType
 constitutive_postResults(startPos:endPos) = &
-plastic_nonlocal_postResults (S6,FeArray,ip,el)
+plastic_nonlocal_postResults (Mp,ip,el)
 end select plasticityType

-SourceLoop: do s = 1_pInt, phase_Nsources(material_phase(ipc,ip,el))
+SourceLoop: do i = 1_pInt, phase_Nsources(material_phase(ipc,ip,el))
 startPos = endPos + 1_pInt
-endPos = endPos + sourceState(material_phase(ipc,ip,el))%p(s)%sizePostResults
-sourceType: select case (phase_source(s,material_phase(ipc,ip,el)))
+endPos = endPos + sourceState(material_phase(ipc,ip,el))%p(i)%sizePostResults
+of = phasememberAt(ipc,ip,el)
+sourceType: select case (phase_source(i,material_phase(ipc,ip,el)))
 case (SOURCE_damage_isoBrittle_ID) sourceType
-constitutive_postResults(startPos:endPos) = source_damage_isoBrittle_postResults(ipc, ip, el)
+constitutive_postResults(startPos:endPos) = source_damage_isoBrittle_postResults(material_phase(ipc,ip,el),of)
 case (SOURCE_damage_isoDuctile_ID) sourceType
-constitutive_postResults(startPos:endPos) = source_damage_isoDuctile_postResults(ipc, ip, el)
+constitutive_postResults(startPos:endPos) = source_damage_isoDuctile_postResults(material_phase(ipc,ip,el),of)
 case (SOURCE_damage_anisoBrittle_ID) sourceType
-constitutive_postResults(startPos:endPos) = source_damage_anisoBrittle_postResults(ipc, ip, el)
+constitutive_postResults(startPos:endPos) = source_damage_anisoBrittle_postResults(material_phase(ipc,ip,el),of)
 case (SOURCE_damage_anisoDuctile_ID) sourceType
-constitutive_postResults(startPos:endPos) = source_damage_anisoDuctile_postResults(ipc, ip, el)
+constitutive_postResults(startPos:endPos) = source_damage_anisoDuctile_postResults(material_phase(ipc,ip,el),of)
 end select sourceType

 enddo SourceLoop

 end function constitutive_postResults
src/crystallite.f90: 1012 changed lines (diff suppressed because it is too large)
@@ -225,6 +225,7 @@ subroutine damage_local_getSourceAndItsTangent(phiDot, dPhiDot_dPhi, phi, ip, el
 homogenization_Ngrains, &
 mappingHomogenization, &
 phaseAt, &
+phasememberAt, &
 phase_source, &
 phase_Nsources, &
 SOURCE_damage_isoBrittle_ID, &
@@ -249,7 +250,8 @@ subroutine damage_local_getSourceAndItsTangent(phiDot, dPhiDot_dPhi, phi, ip, el
 integer(pInt) :: &
 phase, &
 grain, &
-source
+source, &
+constituent
 real(pReal) :: &
 phiDot, dPhiDot_dPhi, localphiDot, dLocalphiDot_dPhi

@@ -257,19 +259,20 @@ subroutine damage_local_getSourceAndItsTangent(phiDot, dPhiDot_dPhi, phi, ip, el
 dPhiDot_dPhi = 0.0_pReal
 do grain = 1, homogenization_Ngrains(mappingHomogenization(2,ip,el))
 phase = phaseAt(grain,ip,el)
+constituent = phasememberAt(grain,ip,el)
 do source = 1, phase_Nsources(phase)
 select case(phase_source(source,phase))
 case (SOURCE_damage_isoBrittle_ID)
-call source_damage_isobrittle_getRateAndItsTangent (localphiDot, dLocalphiDot_dPhi, phi, grain, ip, el)
+call source_damage_isobrittle_getRateAndItsTangent (localphiDot, dLocalphiDot_dPhi, phi, phase, constituent)

 case (SOURCE_damage_isoDuctile_ID)
-call source_damage_isoductile_getRateAndItsTangent (localphiDot, dLocalphiDot_dPhi, phi, grain, ip, el)
+call source_damage_isoductile_getRateAndItsTangent (localphiDot, dLocalphiDot_dPhi, phi, phase, constituent)

 case (SOURCE_damage_anisoBrittle_ID)
-call source_damage_anisobrittle_getRateAndItsTangent(localphiDot, dLocalphiDot_dPhi, phi, grain, ip, el)
+call source_damage_anisobrittle_getRateAndItsTangent(localphiDot, dLocalphiDot_dPhi, phi, phase, constituent)

 case (SOURCE_damage_anisoDuctile_ID)
-call source_damage_anisoductile_getRateAndItsTangent(localphiDot, dLocalphiDot_dPhi, phi, grain, ip, el)
+call source_damage_anisoductile_getRateAndItsTangent(localphiDot, dLocalphiDot_dPhi, phi, phase, constituent)

 case default
 localphiDot = 0.0_pReal
@@ -186,6 +186,7 @@ subroutine damage_nonlocal_getSourceAndItsTangent(phiDot, dPhiDot_dPhi, phi, ip,
 homogenization_Ngrains, &
 mappingHomogenization, &
 phaseAt, &
+phasememberAt, &
 phase_source, &
 phase_Nsources, &
 SOURCE_damage_isoBrittle_ID, &
@@ -210,7 +211,8 @@ subroutine damage_nonlocal_getSourceAndItsTangent(phiDot, dPhiDot_dPhi, phi, ip,
 integer(pInt) :: &
 phase, &
 grain, &
-source
+source, &
+constituent
 real(pReal) :: &
 phiDot, dPhiDot_dPhi, localphiDot, dLocalphiDot_dPhi

@@ -218,19 +220,20 @@ subroutine damage_nonlocal_getSourceAndItsTangent(phiDot, dPhiDot_dPhi, phi, ip,
 dPhiDot_dPhi = 0.0_pReal
 do grain = 1, homogenization_Ngrains(mappingHomogenization(2,ip,el))
 phase = phaseAt(grain,ip,el)
+constituent = phasememberAt(grain,ip,el)
 do source = 1_pInt, phase_Nsources(phase)
 select case(phase_source(source,phase))
 case (SOURCE_damage_isoBrittle_ID)
-call source_damage_isobrittle_getRateAndItsTangent (localphiDot, dLocalphiDot_dPhi, phi, grain, ip, el)
+call source_damage_isobrittle_getRateAndItsTangent (localphiDot, dLocalphiDot_dPhi, phi, phase, constituent)

 case (SOURCE_damage_isoDuctile_ID)
-call source_damage_isoductile_getRateAndItsTangent (localphiDot, dLocalphiDot_dPhi, phi, grain, ip, el)
+call source_damage_isoductile_getRateAndItsTangent (localphiDot, dLocalphiDot_dPhi, phi, phase, constituent)

 case (SOURCE_damage_anisoBrittle_ID)
-call source_damage_anisobrittle_getRateAndItsTangent(localphiDot, dLocalphiDot_dPhi, phi, grain, ip, el)
+call source_damage_anisobrittle_getRateAndItsTangent(localphiDot, dLocalphiDot_dPhi, phi, phase, constituent)

 case (SOURCE_damage_anisoDuctile_ID)
-call source_damage_anisoductile_getRateAndItsTangent(localphiDot, dLocalphiDot_dPhi, phi, grain, ip, el)
+call source_damage_anisoductile_getRateAndItsTangent(localphiDot, dLocalphiDot_dPhi, phi, phase, constituent)

 case default
 localphiDot = 0.0_pReal
@ -0,0 +1,921 @@
|
||||||
|
!--------------------------------------------------------------------------------------------------
|
||||||
|
!> @author Martin Diehl, Max-Planck-Institut für Eisenforschung GmbH
|
||||||
|
!> @author Christoph Koords, Max-Planck-Institut für Eisenforschung GmbH
|
||||||
|
!--------------------------------------------------------------------------------------------------
|
||||||
|
module element
|
||||||
|
use prec, only: &
|
||||||
|
pInt, &
|
||||||
|
pReal
|
||||||
|
|
||||||
|
implicit none
|
||||||
|
private
|
||||||
|
|
||||||
|
!---------------------------------------------------------------------------------------------------
|
||||||
|
!> Properties of a single element (the element used in the mesh)
|
||||||
|
!---------------------------------------------------------------------------------------------------
|
||||||
|
type, public :: tElement
|
||||||
|
integer(pInt) :: &
|
||||||
|
elemType, &
|
||||||
|
geomType, & ! geometry type (same for same dimension and same number of integration points)
|
||||||
|
cellType, &
|
||||||
|
Nnodes, &
|
||||||
|
Ncellnodes, &
|
||||||
|
NcellnodesPerCell, &
|
||||||
|
nIPs, &
|
||||||
|
nIPneighbors, & ! ToDo: MD: Do all IPs in one element type have the same number of neighbors?
|
||||||
|
maxNnodeAtIP
|
||||||
|
integer(pInt), dimension(:,:), allocatable :: &
|
||||||
|
Cell, & ! intra-element (cell) nodes that constitute a cell
|
||||||
|
NnodeAtIP, &
|
||||||
|
IPneighbor, &
|
||||||
|
cellFace
|
||||||
|
real(pReal), dimension(:,:), allocatable :: &
|
||||||
|
! center of gravity of the weighted nodes gives the position of the cell node.
|
||||||
|
! example: face-centered cell node with face nodes 1,2,5,6 to be used in,
|
||||||
|
! e.g., an 8 node element, would be encoded:
|
||||||
|
! 1, 1, 0, 0, 1, 1, 0, 0
|
||||||
|
cellNodeParentNodeWeights
|
||||||
|
contains
|
||||||
|
procedure :: init => tElement_init
|
||||||
|
end type
|
||||||
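! --------------------------------------------------------------------------------------------------
! Illustration only, not part of the committed element.f90: how the weight encoding documented in
! tElement above can be evaluated. A minimal sketch, assuming it sits in the contains section of a
! module that uses prec, and that the parent node coordinates of one element are available as a
! hypothetical array nodeCoords(1:3,1:Nnodes):
pure function cellNodePosition(weights,nodeCoords) result(x)
  real(pReal), dimension(:),   intent(in) :: weights     ! one row of cellNodeParentNodeWeights
  real(pReal), dimension(:,:), intent(in) :: nodeCoords  ! (3,Nnodes) parent node positions
  real(pReal), dimension(3)               :: x
  x = matmul(nodeCoords,weights)/sum(weights)             ! center of gravity of the weighted parent nodes
end function cellNodePosition
! For the documented face-centered example (weights 1, 1, 0, 0, 1, 1, 0, 0) this averages parent
! nodes 1, 2, 5, and 6.
! --------------------------------------------------------------------------------------------------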
|
|
||||||
|
integer(pInt), parameter, private :: &
|
||||||
|
NELEMTYPE = 13_pInt
|
||||||
|
|
||||||
|
integer(pInt), dimension(NelemType), parameter, private :: NNODE = &
|
||||||
|
int([ &
|
||||||
|
3, & ! 2D 3node 1ip
|
||||||
|
6, & ! 2D 6node 3ip
|
||||||
|
4, & ! 2D 4node 4ip
|
||||||
|
8, & ! 2D 8node 9ip
|
||||||
|
8, & ! 2D 8node 4ip
|
||||||
|
!--------------------
|
||||||
|
4, & ! 3D 4node 1ip
|
||||||
|
5, & ! 3D 5node 4ip
|
||||||
|
10, & ! 3D 10node 4ip
|
||||||
|
6, & ! 3D 6node 6ip
|
||||||
|
8, & ! 3D 8node 1ip
|
||||||
|
8, & ! 3D 8node 8ip
|
||||||
|
20, & ! 3D 20node 8ip
|
||||||
|
20 & ! 3D 20node 27ip
|
||||||
|
],pInt) !< number of nodes that constitute a specific type of element
|
||||||
|
|
||||||
|
integer(pInt), dimension(NelemType), parameter, public :: GEOMTYPE = &
|
||||||
|
int([ &
|
||||||
|
1, & ! 2D 3node 1ip
|
||||||
|
2, & ! 2D 6node 3ip
|
||||||
|
3, & ! 2D 4node 4ip
|
||||||
|
4, & ! 2D 8node 9ip
|
||||||
|
3, & ! 2D 8node 4ip
|
||||||
|
!--------------------
|
||||||
|
5, & ! 3D 4node 1ip
|
||||||
|
6, & ! 3D 5node 4ip
|
||||||
|
6, & ! 3D 10node 4ip
|
||||||
|
7, & ! 3D 6node 6ip
|
||||||
|
8, & ! 3D 8node 1ip
|
||||||
|
9, & ! 3D 8node 8ip
|
||||||
|
9, & ! 3D 20node 8ip
|
||||||
|
10 & ! 3D 20node 27ip
|
||||||
|
],pInt) !< geometry type of particular element type
|
||||||
|
|
||||||
|
!integer(pInt), dimension(maxval(geomType)), parameter, private :: NCELLNODE = & ! Intel 16.0 complains
|
||||||
|
integer(pInt), dimension(10), parameter, private :: NCELLNODE = &
|
||||||
|
int([ &
|
||||||
|
3, &
|
||||||
|
7, &
|
||||||
|
9, &
|
||||||
|
16, &
|
||||||
|
4, &
|
||||||
|
15, &
|
||||||
|
21, &
|
||||||
|
8, &
|
||||||
|
27, &
|
||||||
|
64 &
|
||||||
|
],pInt) !< number of cell nodes in a specific geometry type
|
||||||
|
|
||||||
|
!integer(pInt), dimension(maxval(geomType)), parameter, private :: NIP = & ! Intel 16.0 complains
|
||||||
|
integer(pInt), dimension(10), parameter, private :: NIP = &
|
||||||
|
int([ &
|
||||||
|
1, &
|
||||||
|
3, &
|
||||||
|
4, &
|
||||||
|
9, &
|
||||||
|
1, &
|
||||||
|
4, &
|
||||||
|
6, &
|
||||||
|
1, &
|
||||||
|
8, &
|
||||||
|
27 &
|
||||||
|
],pInt) !< number of IPs in a specific geometry type
|
||||||
|
|
||||||
|
!integer(pInt), dimension(maxval(geomType)), parameter, private :: CELLTYPE = & ! Intel 16.0 complains
|
||||||
|
integer(pInt), dimension(10), parameter, private :: CELLTYPE = & !< cell type that is used by each geometry type
|
||||||
|
int([ &
|
||||||
|
1, & ! 2D 3node
|
||||||
|
2, & ! 2D 4node
|
||||||
|
2, & ! 2D 4node
|
||||||
|
2, & ! 2D 4node
|
||||||
|
3, & ! 3D 4node
|
||||||
|
4, & ! 3D 8node
|
||||||
|
4, & ! 3D 8node
|
||||||
|
4, & ! 3D 8node
|
||||||
|
4, & ! 3D 8node
|
||||||
|
4 & ! 3D 8node
|
||||||
|
],pInt)
|
||||||
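! --------------------------------------------------------------------------------------------------
! Illustration only, not part of the committed element.f90: chaining the lookup tables above for
! element type 11 ('3D 8node 8ip'):
!   NNODE(11)    =  8   parent nodes
!   GEOMTYPE(11) =  9   geometry type
!   NCELLNODE(9) = 27   cell nodes
!   NIP(9)       =  8   integration points
!   CELLTYPE(9)  =  4   i.e. '3D 8node' cells
! --------------------------------------------------------------------------------------------------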
|
|
||||||
|
!integer(pInt), dimension(maxval(cellType)), parameter, private :: nIPNeighbor = & ! causes problem with Intel 16.0
|
||||||
|
integer(pInt), dimension(4), parameter, private :: NIPNEIGHBOR = & !< number of ip neighbors / cell faces in a specific cell type
|
||||||
|
int([&
|
||||||
|
3, & ! 2D 3node
|
||||||
|
4, & ! 2D 4node
|
||||||
|
4, & ! 3D 4node
|
||||||
|
6 & ! 3D 8node
|
||||||
|
],pInt)
|
||||||
|
|
||||||
|
!integer(pInt), dimension(maxval(cellType)), parameter, private :: NCELLNODESPERCELLFACE = &
|
||||||
|
integer(pInt), dimension(4), parameter, private :: NCELLNODEPERCELLFACE = & !< number of cell nodes per cell face in a specific cell type
|
||||||
|
int([ &
|
||||||
|
2, & ! 2D 3node
|
||||||
|
2, & ! 2D 4node
|
||||||
|
3, & ! 3D 4node
|
||||||
|
4 & ! 3D 8node
|
||||||
|
],pInt)
|
||||||
|
|
||||||
|
!integer(pInt), dimension(maxval(geomType)), parameter, private :: maxNodeAtIP = & ! causes problem with Intel 16.0
|
||||||
|
integer(pInt), dimension(10), parameter, private :: maxNnodeAtIP = & !< maximum number of parent nodes that belong to an IP for a specific geometry type
|
||||||
|
int([ &
|
||||||
|
3, &
|
||||||
|
1, &
|
||||||
|
1, &
|
||||||
|
2, &
|
||||||
|
4, &
|
||||||
|
1, &
|
||||||
|
1, &
|
||||||
|
8, &
|
||||||
|
1, &
|
||||||
|
4 &
|
||||||
|
],pInt)
|
||||||
|
|
||||||
|
|
||||||
|
!integer(pInt), dimension(maxval(CELLTYPE)), parameter, private :: NCELLNODEPERCELL = & ! Intel 16.0 complains
|
||||||
|
integer(pInt), dimension(4), parameter, private :: NCELLNODEPERCELL = & !< number of cell nodes in a specific cell type
|
||||||
|
int([ &
|
||||||
|
3, & ! 2D 3node
|
||||||
|
4, & ! 2D 4node
|
||||||
|
4, & ! 3D 4node
|
||||||
|
8 & ! 3D 8node
|
||||||
|
],pInt)
|
||||||
|
|
||||||
|
integer(pInt), dimension(maxNnodeAtIP(1),nIP(1)), parameter, private :: NnodeAtIP1 = &
|
||||||
|
reshape(int([&
|
||||||
|
1,2,3 &
|
||||||
|
],pInt),[maxNnodeAtIP(1),nIP(1)])
|
||||||
|
|
||||||
|
integer(pInt), dimension(maxNnodeAtIP(2),nIP(2)), parameter, private :: NnodeAtIP2 = &
|
||||||
|
reshape(int([&
|
||||||
|
1, &
|
||||||
|
2, &
|
||||||
|
3 &
|
||||||
|
],pInt),[maxNnodeAtIP(2),nIP(2)])
|
||||||
|
|
||||||
|
integer(pInt), dimension(maxNnodeAtIP(3),nIP(3)), parameter, private :: NnodeAtIP3 = &
|
||||||
|
reshape(int([&
|
||||||
|
1, &
|
||||||
|
2, &
|
||||||
|
4, &
|
||||||
|
3 &
|
||||||
|
],pInt),[maxNnodeAtIP(3),nIP(3)])
|
||||||
|
|
||||||
|
integer(pInt), dimension(maxNnodeAtIP(4),nIP(4)), parameter, private :: NnodeAtIP4 = &
|
||||||
|
reshape(int([&
|
||||||
|
1,0, &
|
||||||
|
1,2, &
|
||||||
|
2,0, &
|
||||||
|
1,4, &
|
||||||
|
0,0, &
|
||||||
|
2,3, &
|
||||||
|
4,0, &
|
||||||
|
3,4, &
|
||||||
|
3,0 &
|
||||||
|
],pInt),[maxNnodeAtIP(4),nIP(4)])
|
||||||
|
|
||||||
|
integer(pInt), dimension(maxNnodeAtIP(5),nIP(5)), parameter, private :: NnodeAtIP5 = &
|
||||||
|
reshape(int([&
|
||||||
|
1,2,3,4 &
|
||||||
|
],pInt),[maxNnodeAtIP(5),nIP(5)])
|
||||||
|
|
||||||
|
integer(pInt), dimension(maxNnodeAtIP(6),nIP(6)), parameter, private :: NnodeAtIP6 = &
|
||||||
|
reshape(int([&
|
||||||
|
1, &
|
||||||
|
2, &
|
||||||
|
3, &
|
||||||
|
4 &
|
||||||
|
],pInt),[maxNnodeAtIP(6),nIP(6)])
|
||||||
|
|
||||||
|
integer(pInt), dimension(maxNnodeAtIP(7),nIP(7)), parameter, private :: NnodeAtIP7 = &
|
||||||
|
reshape(int([&
|
||||||
|
1, &
|
||||||
|
2, &
|
||||||
|
3, &
|
||||||
|
4, &
|
||||||
|
5, &
|
||||||
|
6 &
|
||||||
|
],pInt),[maxNnodeAtIP(7),nIP(7)])
|
||||||
|
|
||||||
|
integer(pInt), dimension(maxNnodeAtIP(8),nIP(8)), parameter, private :: NnodeAtIP8 = &
|
||||||
|
reshape(int([&
|
||||||
|
1,2,3,4,5,6,7,8 &
|
||||||
|
],pInt),[maxNnodeAtIP(8),nIP(8)])
|
||||||
|
|
||||||
|
integer(pInt), dimension(maxNnodeAtIP(9),nIP(9)), parameter, private :: NnodeAtIP9 = &
|
||||||
|
reshape(int([&
|
||||||
|
1, &
|
||||||
|
2, &
|
||||||
|
4, &
|
||||||
|
3, &
|
||||||
|
5, &
|
||||||
|
6, &
|
||||||
|
8, &
|
||||||
|
7 &
|
||||||
|
],pInt),[maxNnodeAtIP(9),nIP(9)])
|
||||||
|
|
||||||
|
integer(pInt), dimension(maxNnodeAtIP(10),nIP(10)), parameter, private :: NnodeAtIP10 = &
|
||||||
|
reshape(int([&
|
||||||
|
1,0, 0,0, &
|
||||||
|
1,2, 0,0, &
|
||||||
|
2,0, 0,0, &
|
||||||
|
1,4, 0,0, &
|
||||||
|
1,3, 2,4, &
|
||||||
|
2,3, 0,0, &
|
||||||
|
4,0, 0,0, &
|
||||||
|
3,4, 0,0, &
|
||||||
|
3,0, 0,0, &
|
||||||
|
1,5, 0,0, &
|
||||||
|
1,6, 2,5, &
|
||||||
|
2,6, 0,0, &
|
||||||
|
1,8, 4,5, &
|
||||||
|
0,0, 0,0, &
|
||||||
|
2,7, 3,6, &
|
||||||
|
4,8, 0,0, &
|
||||||
|
3,8, 4,7, &
|
||||||
|
3,7, 0,0, &
|
||||||
|
5,0, 0,0, &
|
||||||
|
5,6, 0,0, &
|
||||||
|
6,0, 0,0, &
|
||||||
|
5,8, 0,0, &
|
||||||
|
5,7, 6,8, &
|
||||||
|
6,7, 0,0, &
|
||||||
|
8,0, 0,0, &
|
||||||
|
7,8, 0,0, &
|
||||||
|
7,0, 0,0 &
|
||||||
|
],pInt),[maxNnodeAtIP(10),nIP(10)])
|
||||||
|
|
||||||
|
! *** FE_ipNeighbor ***
|
||||||
|
! is a list of the neighborhood of each IP.
|
||||||
|
! It is sorted in (local) +x,-x, +y,-y, +z,-z direction.
|
||||||
|
! Positive integers denote an intra-FE IP identifier.
|
||||||
|
! Negative integers denote the interface behind which the neighboring (extra-FE) IP will be located.
|
||||||
|
|
||||||
|
|
||||||
|
integer(pInt), dimension(nIPneighbor(cellType(1)),nIP(1)), parameter, private :: IPneighbor1 = &
|
||||||
|
reshape(int([&
|
||||||
|
-2,-3,-1 &
|
||||||
|
],pInt),[nIPneighbor(cellType(1)),nIP(1)])
|
||||||
|
|
||||||
|
integer(pInt), dimension(nIPneighbor(cellType(2)),nIP(2)), parameter, private :: IPneighbor2 = &
|
||||||
|
reshape(int([&
|
||||||
|
2,-3, 3,-1, &
|
||||||
|
-2, 1, 3,-1, &
|
||||||
|
2,-3,-2, 1 &
|
||||||
|
],pInt),[nIPneighbor(cellType(2)),nIP(2)])
|
||||||
|
|
||||||
|
integer(pInt), dimension(nIPneighbor(cellType(3)),nIP(3)), parameter, private :: IPneighbor3 = &
|
||||||
|
reshape(int([&
|
||||||
|
2,-4, 3,-1, &
|
||||||
|
-2, 1, 4,-1, &
|
||||||
|
4,-4,-3, 1, &
|
||||||
|
-2, 3,-3, 2 &
|
||||||
|
],pInt),[nIPneighbor(cellType(3)),nIP(3)])
|
||||||
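! --------------------------------------------------------------------------------------------------
! Illustration only, not part of the committed element.f90: reading IPneighbor3 (geometry type 3,
! '2D 4node 4ip') with the +x,-x, +y,-y ordering described above. The row for IP 1 is 2,-4, 3,-1:
! the +x neighbor is IP 2 of the same element, the -x neighbor lies behind interface 4 (an
! extra-element neighbor), the +y neighbor is IP 3, and the -y neighbor lies behind interface 1.
! --------------------------------------------------------------------------------------------------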
|
|
||||||
|
integer(pInt), dimension(nIPneighbor(cellType(4)),nIP(4)), parameter, private :: IPneighbor4 = &
|
||||||
|
reshape(int([&
|
||||||
|
2,-4, 4,-1, &
|
||||||
|
3, 1, 5,-1, &
|
||||||
|
-2, 2, 6,-1, &
|
||||||
|
5,-4, 7, 1, &
|
||||||
|
6, 4, 8, 2, &
|
||||||
|
-2, 5, 9, 3, &
|
||||||
|
8,-4,-3, 4, &
|
||||||
|
9, 7,-3, 5, &
|
||||||
|
-2, 8,-3, 6 &
|
||||||
|
],pInt),[nIPneighbor(cellType(4)),nIP(4)])
|
||||||
|
|
||||||
|
integer(pInt), dimension(nIPneighbor(cellType(5)),nIP(5)), parameter, private :: IPneighbor5 = &
|
||||||
|
reshape(int([&
|
||||||
|
-1,-2,-3,-4 &
|
||||||
|
],pInt),[nIPneighbor(cellType(5)),nIP(5)])
|
||||||
|
|
||||||
|
integer(pInt), dimension(nIPneighbor(cellType(6)),nIP(6)), parameter, private :: IPneighbor6 = &
|
||||||
|
reshape(int([&
|
||||||
|
2,-4, 3,-2, 4,-1, &
|
||||||
|
-2, 1, 3,-2, 4,-1, &
|
||||||
|
2,-4,-3, 1, 4,-1, &
|
||||||
|
2,-4, 3,-2,-3, 1 &
|
||||||
|
],pInt),[nIPneighbor(cellType(6)),nIP(6)])
|
||||||
|
|
||||||
|
integer(pInt), dimension(nIPneighbor(cellType(7)),nIP(7)), parameter, private :: IPneighbor7 = &
|
||||||
|
reshape(int([&
|
||||||
|
2,-4, 3,-2, 4,-1, &
|
||||||
|
-3, 1, 3,-2, 5,-1, &
|
||||||
|
2,-4,-3, 1, 6,-1, &
|
||||||
|
5,-4, 6,-2,-5, 1, &
|
||||||
|
-3, 4, 6,-2,-5, 2, &
|
||||||
|
5,-4,-3, 4,-5, 3 &
|
||||||
|
],pInt),[nIPneighbor(cellType(7)),nIP(7)])
|
||||||
|
|
||||||
|
integer(pInt), dimension(nIPneighbor(cellType(8)),nIP(8)), parameter, private :: IPneighbor8 = &
|
||||||
|
reshape(int([&
|
||||||
|
-3,-5,-4,-2,-6,-1 &
|
||||||
|
],pInt),[nIPneighbor(cellType(8)),nIP(8)])
|
||||||
|
|
||||||
|
integer(pInt), dimension(nIPneighbor(cellType(9)),nIP(9)), parameter, private :: IPneighbor9 = &
|
||||||
|
reshape(int([&
|
||||||
|
2,-5, 3,-2, 5,-1, &
|
||||||
|
-3, 1, 4,-2, 6,-1, &
|
||||||
|
4,-5,-4, 1, 7,-1, &
|
||||||
|
-3, 3,-4, 2, 8,-1, &
|
||||||
|
6,-5, 7,-2,-6, 1, &
|
||||||
|
-3, 5, 8,-2,-6, 2, &
|
||||||
|
8,-5,-4, 5,-6, 3, &
|
||||||
|
-3, 7,-4, 6,-6, 4 &
|
||||||
|
],pInt),[nIPneighbor(cellType(9)),nIP(9)])
|
||||||
|
|
||||||
|
integer(pInt), dimension(nIPneighbor(cellType(10)),nIP(10)), parameter, private :: IPneighbor10 = &
|
||||||
|
reshape(int([&
|
||||||
|
2,-5, 4,-2,10,-1, &
|
||||||
|
3, 1, 5,-2,11,-1, &
|
||||||
|
-3, 2, 6,-2,12,-1, &
|
||||||
|
5,-5, 7, 1,13,-1, &
|
||||||
|
6, 4, 8, 2,14,-1, &
|
||||||
|
-3, 5, 9, 3,15,-1, &
|
||||||
|
8,-5,-4, 4,16,-1, &
|
||||||
|
9, 7,-4, 5,17,-1, &
|
||||||
|
-3, 8,-4, 6,18,-1, &
|
||||||
|
11,-5,13,-2,19, 1, &
|
||||||
|
12,10,14,-2,20, 2, &
|
||||||
|
-3,11,15,-2,21, 3, &
|
||||||
|
14,-5,16,10,22, 4, &
|
||||||
|
15,13,17,11,23, 5, &
|
||||||
|
-3,14,18,12,24, 6, &
|
||||||
|
17,-5,-4,13,25, 7, &
|
||||||
|
18,16,-4,14,26, 8, &
|
||||||
|
-3,17,-4,15,27, 9, &
|
||||||
|
20,-5,22,-2,-6,10, &
|
||||||
|
21,19,23,-2,-6,11, &
|
||||||
|
-3,20,24,-2,-6,12, &
|
||||||
|
23,-5,25,19,-6,13, &
|
||||||
|
24,22,26,20,-6,14, &
|
||||||
|
-3,23,27,21,-6,15, &
|
||||||
|
26,-5,-4,22,-6,16, &
|
||||||
|
27,25,-4,23,-6,17, &
|
||||||
|
-3,26,-4,24,-6,18 &
|
||||||
|
],pInt),[nIPneighbor(cellType(10)),nIP(10)])
|
||||||
|
|
||||||
|
|
||||||
|
real(pReal), dimension(nNode(1),NcellNode(geomType(1))), parameter :: cellNodeParentNodeWeights1 = &
|
||||||
|
reshape(real([&
|
||||||
|
1, 0, 0, &
|
||||||
|
0, 1, 0, &
|
||||||
|
0, 0, 1 &
|
||||||
|
],pReal),[nNode(1),NcellNode(geomType(1))]) ! 2D 3node 1ip
|
||||||
|
|
||||||
|
real(pReal), dimension(nNode(2),NcellNode(geomType(2))), parameter :: cellNodeParentNodeWeights2 = &
|
||||||
|
reshape(real([&
|
||||||
|
1, 0, 0, 0, 0, 0, &
|
||||||
|
0, 1, 0, 0, 0, 0, &
|
||||||
|
0, 0, 1, 0, 0, 0, &
|
||||||
|
0, 0, 0, 1, 0, 0, &
|
||||||
|
0, 0, 0, 0, 1, 0, &
|
||||||
|
0, 0, 0, 0, 0, 1, &
|
||||||
|
1, 1, 1, 2, 2, 2 &
|
||||||
|
],pReal),[nNode(2),NcellNode(geomType(2))]) ! 2D 6node 3ip
|
||||||
|
|
||||||
|
real(pReal), dimension(nNode(3),NcellNode(geomType(3))), parameter :: cellNodeParentNodeWeights3 = &
|
||||||
|
reshape(real([&
|
||||||
|
1, 0, 0, 0, &
|
||||||
|
0, 1, 0, 0, &
|
||||||
|
0, 0, 1, 0, &
|
||||||
|
0, 0, 0, 1, &
|
||||||
|
1, 1, 0, 0, &
|
||||||
|
0, 1, 1, 0, &
|
||||||
|
0, 0, 1, 1, &
|
||||||
|
1, 0, 0, 1, &
|
||||||
|
1, 1, 1, 1 &
|
||||||
|
],pReal),[nNode(3),NcellNode(geomType(3))]) ! 2D 4node 4ip
|
||||||
|
|
||||||
|
real(pReal), dimension(nNode(4),NcellNode(geomType(4))), parameter :: cellNodeParentNodeWeights4 = &
|
||||||
|
reshape(real([&
|
||||||
|
1, 0, 0, 0, 0, 0, 0, 0, &
|
||||||
|
0, 1, 0, 0, 0, 0, 0, 0, &
|
||||||
|
0, 0, 1, 0, 0, 0, 0, 0, &
|
||||||
|
0, 0, 0, 1, 0, 0, 0, 0, &
|
||||||
|
1, 0, 0, 0, 2, 0, 0, 0, &
|
||||||
|
0, 1, 0, 0, 2, 0, 0, 0, &
|
||||||
|
0, 1, 0, 0, 0, 2, 0, 0, &
|
||||||
|
0, 0, 1, 0, 0, 2, 0, 0, &
|
||||||
|
0, 0, 1, 0, 0, 0, 2, 0, &
|
||||||
|
0, 0, 0, 1, 0, 0, 2, 0, &
|
||||||
|
0, 0, 0, 1, 0, 0, 0, 2, &
|
||||||
|
1, 0, 0, 0, 0, 0, 0, 2, &
|
||||||
|
4, 1, 1, 1, 8, 2, 2, 8, &
|
||||||
|
1, 4, 1, 1, 8, 8, 2, 2, &
|
||||||
|
1, 1, 4, 1, 2, 8, 8, 2, &
|
||||||
|
1, 1, 1, 4, 2, 2, 8, 8 &
|
||||||
|
],pReal),[nNode(4),NcellNode(geomType(4))]) ! 2D 8node 9ip
|
||||||
|
|
||||||
|
real(pReal), dimension(nNode(5),NcellNode(geomType(5))), parameter :: cellNodeParentNodeWeights5 = &
|
||||||
|
reshape(real([&
|
||||||
|
1, 0, 0, 0, 0, 0, 0, 0, &
|
||||||
|
0, 1, 0, 0, 0, 0, 0, 0, &
|
||||||
|
0, 0, 1, 0, 0, 0, 0, 0, &
|
||||||
|
0, 0, 0, 1, 0, 0, 0, 0, &
|
||||||
|
0, 0, 0, 0, 1, 0, 0, 0, &
|
||||||
|
0, 0, 0, 0, 0, 1, 0, 0, &
|
||||||
|
0, 0, 0, 0, 0, 0, 1, 0, &
|
||||||
|
0, 0, 0, 0, 0, 0, 0, 1, &
|
||||||
|
1, 1, 1, 1, 2, 2, 2, 2 &
|
||||||
|
],pReal),[nNode(5),NcellNode(geomType(5))]) ! 2D 8node 4ip
|
||||||
|
|
||||||
|
real(pReal), dimension(nNode(6),NcellNode(geomType(6))), parameter :: cellNodeParentNodeWeights6 = &
|
||||||
|
reshape(real([&
|
||||||
|
1, 0, 0, 0, &
|
||||||
|
0, 1, 0, 0, &
|
||||||
|
0, 0, 1, 0, &
|
||||||
|
0, 0, 0, 1 &
|
||||||
|
],pReal),[nNode(6),NcellNode(geomType(6))]) ! 3D 4node 1ip
|
||||||
|
|
||||||
|
real(pReal), dimension(nNode(7),NcellNode(geomType(7))), parameter :: cellNodeParentNodeWeights7 = &
|
||||||
|
reshape(real([&
|
||||||
|
1, 0, 0, 0, 0, &
|
||||||
|
0, 1, 0, 0, 0, &
|
||||||
|
0, 0, 1, 0, 0, &
|
||||||
|
0, 0, 0, 1, 0, &
|
||||||
|
1, 1, 0, 0, 0, &
|
||||||
|
0, 1, 1, 0, 0, &
|
||||||
|
1, 0, 1, 0, 0, &
|
||||||
|
1, 0, 0, 1, 0, &
|
||||||
|
0, 1, 0, 1, 0, &
|
||||||
|
0, 0, 1, 1, 0, &
|
||||||
|
1, 1, 1, 0, 0, &
|
||||||
|
1, 1, 0, 1, 0, &
|
||||||
|
0, 1, 1, 1, 0, &
|
||||||
|
1, 0, 1, 1, 0, &
|
||||||
|
0, 0, 0, 0, 1 &
|
||||||
|
],pReal),[nNode(7),NcellNode(geomType(7))]) ! 3D 5node 4ip
|
||||||
|
|
||||||
|
real(pReal), dimension(nNode(8),NcellNode(geomType(8))), parameter :: cellNodeParentNodeWeights8 = &
|
||||||
|
reshape(real([&
|
||||||
|
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, &
|
||||||
|
0, 1, 0, 0, 0, 0, 0, 0, 0, 0, &
|
||||||
|
0, 0, 1, 0, 0, 0, 0, 0, 0, 0, &
|
||||||
|
0, 0, 0, 1, 0, 0, 0, 0, 0, 0, &
|
||||||
|
0, 0, 0, 0, 1, 0, 0, 0, 0, 0, &
|
||||||
|
0, 0, 0, 0, 0, 1, 0, 0, 0, 0, &
|
||||||
|
0, 0, 0, 0, 0, 0, 1, 0, 0, 0, &
|
||||||
|
0, 0, 0, 0, 0, 0, 0, 1, 0, 0, &
|
||||||
|
0, 0, 0, 0, 0, 0, 0, 0, 1, 0, &
|
||||||
|
0, 0, 0, 0, 0, 0, 0, 0, 0, 1, &
|
||||||
|
1, 1, 1, 0, 2, 2, 2, 0, 0, 0, &
|
||||||
|
1, 1, 0, 1, 2, 0, 0, 2, 2, 0, &
|
||||||
|
0, 1, 1, 1, 0, 2, 0, 0, 2, 2, &
|
||||||
|
1, 0, 1, 1, 0, 0, 2, 2, 0, 2, &
|
||||||
|
3, 3, 3, 3, 4, 4, 4, 4, 4, 4 &
|
||||||
|
],pReal),[nNode(8),NcellNode(geomType(8))]) ! 3D 10node 4ip
|
||||||
|
|
||||||
|
real(pReal), dimension(nNode(9),NcellNode(geomType(9))), parameter :: cellNodeParentNodeWeights9 = &
|
||||||
|
reshape(real([&
|
||||||
|
1, 0, 0, 0, 0, 0, &
|
||||||
|
0, 1, 0, 0, 0, 0, &
|
||||||
|
0, 0, 1, 0, 0, 0, &
|
||||||
|
0, 0, 0, 1, 0, 0, &
|
||||||
|
0, 0, 0, 0, 1, 0, &
|
||||||
|
0, 0, 0, 0, 0, 1, &
|
||||||
|
1, 1, 0, 0, 0, 0, &
|
||||||
|
0, 1, 1, 0, 0, 0, &
|
||||||
|
1, 0, 1, 0, 0, 0, &
|
||||||
|
1, 0, 0, 1, 0, 0, &
|
||||||
|
0, 1, 0, 0, 1, 0, &
|
||||||
|
0, 0, 1, 0, 0, 1, &
|
||||||
|
0, 0, 0, 1, 1, 0, &
|
||||||
|
0, 0, 0, 0, 1, 1, &
|
||||||
|
0, 0, 0, 1, 0, 1, &
|
||||||
|
1, 1, 1, 0, 0, 0, &
|
||||||
|
1, 1, 0, 1, 1, 0, &
|
||||||
|
0, 1, 1, 0, 1, 1, &
|
||||||
|
1, 0, 1, 1, 0, 1, &
|
||||||
|
0, 0, 0, 1, 1, 1, &
|
||||||
|
1, 1, 1, 1, 1, 1 &
|
||||||
|
],pReal),[nNode(9),NcellNode(geomType(9))]) ! 3D 6node 6ip
|
||||||
|
|
||||||
|
real(pReal), dimension(nNode(10),NcellNode(geomType(10))), parameter :: cellNodeParentNodeWeights10 = &
|
||||||
|
reshape(real([&
|
||||||
|
1, 0, 0, 0, 0, 0, 0, 0, &
|
||||||
|
0, 1, 0, 0, 0, 0, 0, 0, &
|
||||||
|
0, 0, 1, 0, 0, 0, 0, 0, &
|
||||||
|
0, 0, 0, 1, 0, 0, 0, 0, &
|
||||||
|
0, 0, 0, 0, 1, 0, 0, 0, &
|
||||||
|
0, 0, 0, 0, 0, 1, 0, 0, &
|
||||||
|
0, 0, 0, 0, 0, 0, 1, 0, &
|
||||||
|
0, 0, 0, 0, 0, 0, 0, 1 &
|
||||||
|
],pReal),[nNode(10),NcellNode(geomType(10))]) ! 3D 8node 1ip
|
||||||
|
|
||||||
|
real(pReal), dimension(nNode(11),NcellNode(geomType(11))), parameter :: cellNodeParentNodeWeights11 = &
|
||||||
|
reshape(real([&
|
||||||
|
1, 0, 0, 0, 0, 0, 0, 0, & !
|
||||||
|
0, 1, 0, 0, 0, 0, 0, 0, & !
|
||||||
|
0, 0, 1, 0, 0, 0, 0, 0, & !
|
||||||
|
0, 0, 0, 1, 0, 0, 0, 0, & !
|
||||||
|
0, 0, 0, 0, 1, 0, 0, 0, & ! 5
|
||||||
|
0, 0, 0, 0, 0, 1, 0, 0, & !
|
||||||
|
0, 0, 0, 0, 0, 0, 1, 0, & !
|
||||||
|
0, 0, 0, 0, 0, 0, 0, 1, & !
|
||||||
|
1, 1, 0, 0, 0, 0, 0, 0, & !
|
||||||
|
0, 1, 1, 0, 0, 0, 0, 0, & ! 10
|
||||||
|
0, 0, 1, 1, 0, 0, 0, 0, & !
|
||||||
|
1, 0, 0, 1, 0, 0, 0, 0, & !
|
||||||
|
1, 0, 0, 0, 1, 0, 0, 0, & !
|
||||||
|
0, 1, 0, 0, 0, 1, 0, 0, & !
|
||||||
|
0, 0, 1, 0, 0, 0, 1, 0, & ! 15
|
||||||
|
0, 0, 0, 1, 0, 0, 0, 1, & !
|
||||||
|
0, 0, 0, 0, 1, 1, 0, 0, & !
|
||||||
|
0, 0, 0, 0, 0, 1, 1, 0, & !
|
||||||
|
0, 0, 0, 0, 0, 0, 1, 1, & !
|
||||||
|
0, 0, 0, 0, 1, 0, 0, 1, & ! 20
|
||||||
|
1, 1, 1, 1, 0, 0, 0, 0, & !
|
||||||
|
1, 1, 0, 0, 1, 1, 0, 0, & !
|
||||||
|
0, 1, 1, 0, 0, 1, 1, 0, & !
|
||||||
|
0, 0, 1, 1, 0, 0, 1, 1, & !
|
||||||
|
1, 0, 0, 1, 1, 0, 0, 1, & ! 25
|
||||||
|
0, 0, 0, 0, 1, 1, 1, 1, & !
|
||||||
|
1, 1, 1, 1, 1, 1, 1, 1 & !
|
||||||
|
],pReal),[nNode(11),NcellNode(geomType(11))]) ! 3D 8node 8ip
|
||||||
|
|
||||||
|
real(pReal), dimension(nNode(12),NcellNode(geomType(12))), parameter :: cellNodeParentNodeWeights12 = &
|
||||||
|
reshape(real([&
|
||||||
|
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, & !
|
||||||
|
0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, & !
|
||||||
|
0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, & !
|
||||||
|
0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, & !
|
||||||
|
0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, & ! 5
|
||||||
|
0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, & !
|
||||||
|
0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, & !
|
||||||
|
0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, & !
|
||||||
|
0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, & !
|
||||||
|
0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, & ! 10
|
||||||
|
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, & !
|
||||||
|
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, & !
|
||||||
|
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, & !
|
||||||
|
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, & !
|
||||||
|
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, & ! 15
|
||||||
|
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, & !
|
||||||
|
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, & !
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, & !
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, & !
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, & ! 20
    1, 1, 1, 1, 0, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, & !
    1, 1, 0, 0, 1, 1, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0, 2, 2, 0, 0, & !
    0, 1, 1, 0, 0, 1, 1, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0, 2, 2, 0, & !
    0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0, 2, 2, & !
    1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 2, 2, 0, 0, 2, & ! 25
    0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 0, & !
    3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4 & !
    ],pReal),[nNode(12),NcellNode(geomType(12))]) ! 3D 20node 8ip

 real(pReal), dimension(nNode(13),NcellNode(geomType(13))), parameter :: cellNodeParentNodeWeights13 = &
    reshape(real([&
    1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, & !
    0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, & !
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, & !
    0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, & !
    0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, & ! 5
    0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, & !
    0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, & !
    0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, & !
    1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, & !
    0, 1, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, & ! 10
    0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, & !
    0, 0, 1, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, & !
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, & !
    0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, & !
    0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, & ! 15
    1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, & !
    1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, & !
    0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, & !
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, & !
    0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, & ! 20
    0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, & !
    0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, & !
    0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, & !
    0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, & !
    0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, & ! 25
    0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, & !
    0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, & !
    0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, & !
    0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, & !
    0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, & ! 30
    0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, & !
    0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, & !
    4, 1, 1, 1, 0, 0, 0, 0, 8, 2, 2, 8, 0, 0, 0, 0, 0, 0, 0, 0, & !
    1, 4, 1, 1, 0, 0, 0, 0, 8, 8, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, & !
    1, 1, 4, 1, 0, 0, 0, 0, 2, 8, 8, 2, 0, 0, 0, 0, 0, 0, 0, 0, & ! 35
    1, 1, 1, 4, 0, 0, 0, 0, 2, 2, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, & !
    4, 1, 0, 0, 1, 1, 0, 0, 8, 0, 0, 0, 2, 0, 0, 0, 8, 2, 0, 0, & !
    1, 4, 0, 0, 1, 1, 0, 0, 8, 0, 0, 0, 2, 0, 0, 0, 2, 8, 0, 0, & !
    0, 4, 1, 0, 0, 1, 1, 0, 0, 8, 0, 0, 0, 2, 0, 0, 0, 8, 2, 0, & !
    0, 1, 4, 0, 0, 1, 1, 0, 0, 8, 0, 0, 0, 2, 0, 0, 0, 2, 8, 0, & ! 40
    0, 0, 4, 1, 0, 0, 1, 1, 0, 0, 8, 0, 0, 0, 2, 0, 0, 0, 8, 2, & !
    0, 0, 1, 4, 0, 0, 1, 1, 0, 0, 8, 0, 0, 0, 2, 0, 0, 0, 2, 8, & !
    1, 0, 0, 4, 1, 0, 0, 1, 0, 0, 0, 8, 0, 0, 0, 2, 2, 0, 0, 8, & !
    4, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 8, 0, 0, 0, 2, 8, 0, 0, 2, & !
    1, 1, 0, 0, 4, 1, 0, 0, 2, 0, 0, 0, 8, 0, 0, 0, 8, 2, 0, 0, & ! 45
    1, 1, 0, 0, 1, 4, 0, 0, 2, 0, 0, 0, 8, 0, 0, 0, 2, 8, 0, 0, & !
    0, 1, 1, 0, 0, 4, 1, 0, 0, 2, 0, 0, 0, 8, 0, 0, 0, 8, 2, 0, & !
    0, 1, 1, 0, 0, 1, 4, 0, 0, 2, 0, 0, 0, 8, 0, 0, 0, 2, 8, 0, & !
    0, 0, 1, 1, 0, 0, 4, 1, 0, 0, 2, 0, 0, 0, 8, 0, 0, 0, 8, 2, & !
    0, 0, 1, 1, 0, 0, 1, 4, 0, 0, 2, 0, 0, 0, 8, 0, 0, 0, 2, 8, & ! 50
    1, 0, 0, 1, 1, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0, 8, 2, 0, 0, 8, & !
    1, 0, 0, 1, 4, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 8, 8, 0, 0, 2, & !
    0, 0, 0, 0, 4, 1, 1, 1, 0, 0, 0, 0, 8, 2, 2, 8, 0, 0, 0, 0, & !
    0, 0, 0, 0, 1, 4, 1, 1, 0, 0, 0, 0, 8, 8, 2, 2, 0, 0, 0, 0, & !
    0, 0, 0, 0, 1, 1, 4, 1, 0, 0, 0, 0, 2, 8, 8, 2, 0, 0, 0, 0, & ! 55
    0, 0, 0, 0, 1, 1, 1, 4, 0, 0, 0, 0, 2, 2, 8, 8, 0, 0, 0, 0, & !
   24, 8, 4, 8, 8, 4, 3, 4, 32,12,12,32, 12, 4, 4,12, 32,12, 4,12, & !
    8,24, 8, 4, 4, 8, 4, 3, 32,32,12,12, 12,12, 4, 4, 12,32,12, 4, & !
    4, 8,24, 8, 3, 4, 8, 4, 12,32,32,12, 4,12,12, 4, 4,12,32,12, & !
    8, 4, 8,24, 4, 3, 4, 8, 12,12,32,32, 4, 4,12,12, 12, 4,12,32, & ! 60
    8, 4, 3, 4, 24, 8, 4, 8, 12, 4, 4,12, 32,12,12,32, 32,12, 4,12, & !
    4, 8, 4, 3, 8,24, 8, 4, 12,12, 4, 4, 32,32,12,12, 12,32,12, 4, & !
    3, 4, 8, 4, 4, 8,24, 8, 4,12,12, 4, 12,32,32,12, 4,12,32,12, & !
    4, 3, 4, 8, 8, 4, 8,24, 4, 4,12,12, 12,12,32,32, 12, 4,12,32 & !
    ],pReal),[nNode(13),NcellNode(geomType(13))]) ! 3D 20node 27ip


 integer(pInt), dimension(NCELLNODEPERCELL(CELLTYPE(1)),NIP(1)), parameter :: CELL1 = &
    reshape(int([&
    1,2,3 &
    ],pInt),[NCELLNODEPERCELL(CELLTYPE(1)),NIP(1)])

 integer(pInt), dimension(NCELLNODEPERCELL(CELLTYPE(2)),NIP(2)), parameter :: CELL2 = &
    reshape(int([&
    1, 4, 7, 6, &
    2, 5, 7, 4, &
    3, 6, 7, 5 &
    ],pInt),[NCELLNODEPERCELL(CELLTYPE(2)),NIP(2)])

 integer(pInt), dimension(NCELLNODEPERCELL(CELLTYPE(3)),NIP(3)), parameter :: CELL3 = &
    reshape(int([&
    1, 5, 9, 8, &
    5, 2, 6, 9, &
    8, 9, 7, 4, &
    9, 6, 3, 7 &
    ],pInt),[NCELLNODEPERCELL(CELLTYPE(3)),NIP(3)])

 integer(pInt), dimension(NCELLNODEPERCELL(CELLTYPE(4)),NIP(4)), parameter :: CELL4 = &
    reshape(int([&
    1, 5,13,12, &
    5, 6,14,13, &
    6, 2, 7,14, &
   12,13,16,11, &
   13,14,15,16, &
   14, 7, 8,15, &
   11,16,10, 4, &
   16,15, 9,10, &
   15, 8, 3, 9 &
    ],pInt),[NCELLNODEPERCELL(CELLTYPE(4)),NIP(4)])

 integer(pInt), dimension(NCELLNODEPERCELL(CELLTYPE(5)),NIP(5)), parameter :: CELL5 = &
    reshape(int([&
    1, 2, 3, 4 &
    ],pInt),[NCELLNODEPERCELL(CELLTYPE(5)),NIP(5)])

 integer(pInt), dimension(NCELLNODEPERCELL(CELLTYPE(6)),NIP(6)), parameter :: CELL6 = &
    reshape(int([&
    1, 5,11, 7, 8,12,15,14, &
    5, 2, 6,11,12, 9,13,15, &
    7,11, 6, 3,14,15,13,10, &
    8,12,15,14, 4, 9,13,10 &
    ],pInt),[NCELLNODEPERCELL(CELLTYPE(6)),NIP(6)])

 integer(pInt), dimension(NCELLNODEPERCELL(CELLTYPE(7)),NIP(7)), parameter :: CELL7 = &
    reshape(int([&
    1, 7,16, 9,10,17,21,19, &
    7, 2, 8,16,17,11,18,21, &
    9,16, 8, 3,19,21,18,12, &
   10,17,21,19, 4,13,20,15, &
   17,11,18,21,13, 5,14,20, &
   19,21,18,12,15,20,14, 6 &
    ],pInt),[NCELLNODEPERCELL(CELLTYPE(7)),NIP(7)])

 integer(pInt), dimension(NCELLNODEPERCELL(CELLTYPE(8)),NIP(8)), parameter :: CELL8 = &
    reshape(int([&
    1, 2, 3, 4, 5, 6, 7, 8 &
    ],pInt),[NCELLNODEPERCELL(CELLTYPE(8)),NIP(8)])

 integer(pInt), dimension(NCELLNODEPERCELL(CELLTYPE(9)),NIP(9)), parameter :: CELL9 = &
    reshape(int([&
    1, 9,21,12,13,22,27,25, &
    9, 2,10,21,22,14,23,27, &
   12,21,11, 4,25,27,24,16, &
   21,10, 3,11,27,23,15,24, &
   13,22,27,25, 5,17,26,20, &
   22,14,23,27,17, 6,18,26, &
   25,27,24,16,20,26,19, 8, &
   27,23,15,24,26,18, 7,19 &
    ],pInt),[NCELLNODEPERCELL(CELLTYPE(9)),NIP(9)])

 integer(pInt), dimension(NCELLNODEPERCELL(CELLTYPE(10)),NIP(10)), parameter :: CELL10 = &
    reshape(int([&
    1, 9,33,16,17,37,57,44, &
    9,10,34,33,37,38,58,57, &
   10, 2,11,34,38,18,39,58, &
   16,33,36,15,44,57,60,43, &
   33,34,35,36,57,58,59,60, &
   34,11,12,35,58,39,40,59, &
   15,36,14, 4,43,60,42,20, &
   36,35,13,14,60,59,41,42, &
   35,12, 3,13,59,40,19,41, &
   17,37,57,44,21,45,61,52, &
   37,38,58,57,45,46,62,61, &
   38,18,39,58,46,22,47,62, &
   44,57,60,43,52,61,64,51, &
   57,58,59,60,61,62,63,64, &
   58,39,40,59,62,47,48,63, &
   43,60,42,20,51,64,50,24, &
   60,59,41,42,64,63,49,50, &
   59,40,19,41,63,48,23,49, &
   21,45,61,52, 5,25,53,32, &
   45,46,62,61,25,26,54,53, &
   46,22,47,62,26, 6,27,54, &
   52,61,64,51,32,53,56,31, &
   61,62,63,64,53,54,55,56, &
   62,47,48,63,54,27,28,55, &
   51,64,50,24,31,56,30, 8, &
   64,63,49,50,56,55,29,30, &
   63,48,23,49,55,28, 7,29 &
    ],pInt),[NCELLNODEPERCELL(CELLTYPE(10)),NIP(10)])


 integer(pInt), dimension(NCELLNODEPERCELLFACE(1),NIPNEIGHBOR(1)), parameter :: CELLFACE1 = &
    reshape(int([&
    2,3, &
    3,1, &
    1,2 &
    ],pInt),[NCELLNODEPERCELLFACE(1),NIPNEIGHBOR(1)]) ! 2D 3node, VTK_TRIANGLE (5)

 integer(pInt), dimension(NCELLNODEPERCELLFACE(2),NIPNEIGHBOR(2)), parameter :: CELLFACE2 = &
    reshape(int([&
    2,3, &
    4,1, &
    3,4, &
    1,2 &
    ],pInt),[NCELLNODEPERCELLFACE(2),NIPNEIGHBOR(2)]) ! 2D 4node, VTK_QUAD (9)

 integer(pInt), dimension(NCELLNODEPERCELLFACE(3),NIPNEIGHBOR(3)), parameter :: CELLFACE3 = &
    reshape(int([&
    1,3,2, &
    1,2,4, &
    2,3,4, &
    1,4,3 &
    ],pInt),[NCELLNODEPERCELLFACE(3),NIPNEIGHBOR(3)]) ! 3D 4node, VTK_TETRA (10)

 integer(pInt), dimension(NCELLNODEPERCELLFACE(4),NIPNEIGHBOR(4)), parameter :: CELLFACE4 = &
    reshape(int([&
    2,3,7,6, &
    4,1,5,8, &
    3,4,8,7, &
    1,2,6,5, &
    5,6,7,8, &
    1,4,3,2 &
    ],pInt),[NCELLNODEPERCELLFACE(4),NIPNEIGHBOR(4)]) ! 3D 8node, VTK_HEXAHEDRON (12)

contains


subroutine tElement_init(self,elemType)
 implicit none
 class(tElement) :: self
 integer(pInt), intent(in) :: elemType

 self%elemType = elemType

 self%Nnodes   = Nnode   (self%elemType)
 self%geomType = geomType(self%elemType)

 select case (self%elemType)
   case(1_pInt)
     self%cellNodeParentNodeWeights = cellNodeParentNodeWeights1
   case(2_pInt)
     self%cellNodeParentNodeWeights = cellNodeParentNodeWeights2
   case(3_pInt)
     self%cellNodeParentNodeWeights = cellNodeParentNodeWeights3
   case(4_pInt)
     self%cellNodeParentNodeWeights = cellNodeParentNodeWeights4
   case(5_pInt)
     self%cellNodeParentNodeWeights = cellNodeParentNodeWeights5
   case(6_pInt)
     self%cellNodeParentNodeWeights = cellNodeParentNodeWeights6
   case(7_pInt)
     self%cellNodeParentNodeWeights = cellNodeParentNodeWeights7
   case(8_pInt)
     self%cellNodeParentNodeWeights = cellNodeParentNodeWeights8
   case(9_pInt)
     self%cellNodeParentNodeWeights = cellNodeParentNodeWeights9
   case(10_pInt)
     self%cellNodeParentNodeWeights = cellNodeParentNodeWeights10
   case(11_pInt)
     self%cellNodeParentNodeWeights = cellNodeParentNodeWeights11
   case(12_pInt)
     self%cellNodeParentNodeWeights = cellNodeParentNodeWeights12
   case(13_pInt)
     self%cellNodeParentNodeWeights = cellNodeParentNodeWeights13
   case default
     print*, 'Mist'
 end select


 self%NcellNodes   = NcellNode   (self%geomType)
 self%maxNnodeAtIP = maxNnodeAtIP(self%geomType)
 self%nIPs         = nIP         (self%geomType)
 self%cellType     = cellType    (self%geomType)


 select case (self%geomType)
   case(1_pInt)
     self%NnodeAtIP  = NnodeAtIP1
     self%IPneighbor = IPneighbor1
     self%cell       = CELL1
   case(2_pInt)
     self%NnodeAtIP  = NnodeAtIP2
     self%IPneighbor = IPneighbor2
     self%cell       = CELL2
   case(3_pInt)
     self%NnodeAtIP  = NnodeAtIP3
     self%IPneighbor = IPneighbor3
     self%cell       = CELL3
   case(4_pInt)
     self%NnodeAtIP  = NnodeAtIP4
     self%IPneighbor = IPneighbor4
     self%cell       = CELL4
   case(5_pInt)
     self%NnodeAtIP  = NnodeAtIP5
     self%IPneighbor = IPneighbor5
     self%cell       = CELL5
   case(6_pInt)
     self%NnodeAtIP  = NnodeAtIP6
     self%IPneighbor = IPneighbor6
     self%cell       = CELL6
   case(7_pInt)
     self%NnodeAtIP  = NnodeAtIP7
     self%IPneighbor = IPneighbor7
     self%cell       = CELL7
   case(8_pInt)
     self%NnodeAtIP  = NnodeAtIP8
     self%IPneighbor = IPneighbor8
     self%cell       = CELL8
   case(9_pInt)
     self%NnodeAtIP  = NnodeAtIP9
     self%IPneighbor = IPneighbor9
     self%cell       = CELL9
   case(10_pInt)
     self%NnodeAtIP  = NnodeAtIP10
     self%IPneighbor = IPneighbor10
     self%cell       = CELL10
 end select
 self%NcellNodesPerCell = NCELLNODEPERCELL(self%cellType)

 select case(self%cellType)
   case(1_pInt)
     self%cellFace = CELLFACE1
   case(2_pInt)
     self%cellFace = CELLFACE2
   case(3_pInt)
     self%cellFace = CELLFACE3
   case(4_pInt)
     self%cellFace = CELLFACE4
 end select

 self%nIPneighbors = size(self%IPneighbor,1)

 write(6,'(/,a)') ' <<<+- element_init -+>>>'

 write(6,*)' element type ',self%elemType
 write(6,*)' geom type ',self%geomType
 write(6,*)' cell type ',self%cellType
 write(6,*)' # node ',self%Nnodes
 write(6,*)' # IP ',self%nIPs
 write(6,*)' # cellnode ',self%Ncellnodes
 write(6,*)' # cellnode/cell ',self%NcellnodesPerCell
 write(6,*)' # IP neighbor ',self%nIPneighbors
 write(6,*)' max # node at IP ',self%maxNnodeAtIP

end subroutine tElement_init

end module element
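For orientation, an editor's sketch (not part of this commit) of how the tElement_init constructor above might be exercised. It assumes tElement and tElement_init are public in the element module and that the pInt kind comes from the usual prec module; in the real code the mesh modules make this call.

! Editor's illustration only -- hypothetical driver, not part of the commit.
program demo_element_init
 use prec,    only: pInt        ! assumed kind module
 use element                    ! module defined above; assumes tElement/tElement_init are public
 implicit none
 type(tElement) :: elem

 call tElement_init(elem,10_pInt)                          ! pick element type 10 from the tables above
 write(6,*) ' cell nodes per cell ', elem%NcellNodesPerCell
end program demo_element_init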
@@ -71,11 +71,8 @@ subroutine homogenization_init
    debug_e, &
    debug_g
  use mesh, only: &
-   mesh_maxNips, &
-   mesh_NcpElems, &
-   mesh_element, &
-   FE_Nips, &
-   FE_geomtype
+   theMesh, &
+   mesh_element
  use constitutive, only: &
    constitutive_plasticity_maxSizePostResults, &
    constitutive_source_maxSizePostResults
@@ -111,30 +108,18 @@ subroutine homogenization_init
  logical :: valid
 
 
-!--------------------------------------------------------------------------------------------------
-! open material.config
- if (.not. IO_open_jobFile_stat(FILEUNIT,material_localFileExt)) & ! no local material configuration present...
-   call IO_open_file(FILEUNIT,material_configFile) ! ... open material.config file
-
-!--------------------------------------------------------------------------------------------------
-! parse homogenization from config file
  if (any(homogenization_type == HOMOGENIZATION_NONE_ID)) call homogenization_none_init
  if (any(homogenization_type == HOMOGENIZATION_ISOSTRAIN_ID)) call homogenization_isostrain_init
  if (any(homogenization_type == HOMOGENIZATION_RGC_ID)) call homogenization_RGC_init
 
-!--------------------------------------------------------------------------------------------------
-! parse thermal from config file
- call IO_checkAndRewind(FILEUNIT)
- if (any(thermal_type == THERMAL_isothermal_ID)) &
-   call thermal_isothermal_init()
- if (any(thermal_type == THERMAL_adiabatic_ID)) &
-   call thermal_adiabatic_init(FILEUNIT)
- if (any(thermal_type == THERMAL_conduction_ID)) &
-   call thermal_conduction_init(FILEUNIT)
+ if (any(thermal_type == THERMAL_isothermal_ID)) call thermal_isothermal_init
+ if (any(thermal_type == THERMAL_adiabatic_ID)) call thermal_adiabatic_init
+ if (any(thermal_type == THERMAL_conduction_ID)) call thermal_conduction_init
 
 !--------------------------------------------------------------------------------------------------
-! parse damage from config file
- call IO_checkAndRewind(FILEUNIT)
+! open material.config
+ if (.not. IO_open_jobFile_stat(FILEUNIT,material_localFileExt)) & ! no local material configuration present...
+   call IO_open_file(FILEUNIT,material_configFile) ! ... open material.config file
  if (any(damage_type == DAMAGE_none_ID)) &
    call damage_none_init()
  if (any(damage_type == DAMAGE_local_ID)) &
@@ -244,20 +229,20 @@ subroutine homogenization_init
 
 !--------------------------------------------------------------------------------------------------
 ! allocate and initialize global variables
- allocate(materialpoint_dPdF(3,3,3,3,mesh_maxNips,mesh_NcpElems), source=0.0_pReal)
- allocate(materialpoint_F0(3,3,mesh_maxNips,mesh_NcpElems), source=0.0_pReal)
- materialpoint_F0 = spread(spread(math_I3,3,mesh_maxNips),4,mesh_NcpElems) ! initialize to identity
- allocate(materialpoint_F(3,3,mesh_maxNips,mesh_NcpElems), source=0.0_pReal)
+ allocate(materialpoint_dPdF(3,3,3,3,theMesh%elem%nIPs,theMesh%nElems), source=0.0_pReal)
+ allocate(materialpoint_F0(3,3,theMesh%elem%nIPs,theMesh%nElems), source=0.0_pReal)
+ materialpoint_F0 = spread(spread(math_I3,3,theMesh%elem%nIPs),4,theMesh%nElems) ! initialize to identity
+ allocate(materialpoint_F(3,3,theMesh%elem%nIPs,theMesh%nElems), source=0.0_pReal)
  materialpoint_F = materialpoint_F0 ! initialize to identity
- allocate(materialpoint_subF0(3,3,mesh_maxNips,mesh_NcpElems), source=0.0_pReal)
- allocate(materialpoint_subF(3,3,mesh_maxNips,mesh_NcpElems), source=0.0_pReal)
- allocate(materialpoint_P(3,3,mesh_maxNips,mesh_NcpElems), source=0.0_pReal)
- allocate(materialpoint_subFrac(mesh_maxNips,mesh_NcpElems), source=0.0_pReal)
- allocate(materialpoint_subStep(mesh_maxNips,mesh_NcpElems), source=0.0_pReal)
- allocate(materialpoint_subdt(mesh_maxNips,mesh_NcpElems), source=0.0_pReal)
- allocate(materialpoint_requested(mesh_maxNips,mesh_NcpElems), source=.false.)
- allocate(materialpoint_converged(mesh_maxNips,mesh_NcpElems), source=.true.)
- allocate(materialpoint_doneAndHappy(2,mesh_maxNips,mesh_NcpElems), source=.true.)
+ allocate(materialpoint_subF0(3,3,theMesh%elem%nIPs,theMesh%nElems), source=0.0_pReal)
+ allocate(materialpoint_subF(3,3,theMesh%elem%nIPs,theMesh%nElems), source=0.0_pReal)
+ allocate(materialpoint_P(3,3,theMesh%elem%nIPs,theMesh%nElems), source=0.0_pReal)
+ allocate(materialpoint_subFrac(theMesh%elem%nIPs,theMesh%nElems), source=0.0_pReal)
+ allocate(materialpoint_subStep(theMesh%elem%nIPs,theMesh%nElems), source=0.0_pReal)
+ allocate(materialpoint_subdt(theMesh%elem%nIPs,theMesh%nElems), source=0.0_pReal)
+ allocate(materialpoint_requested(theMesh%elem%nIPs,theMesh%nElems), source=.false.)
+ allocate(materialpoint_converged(theMesh%elem%nIPs,theMesh%nElems), source=.true.)
+ allocate(materialpoint_doneAndHappy(2,theMesh%elem%nIPs,theMesh%nElems), source=.true.)
 
 !--------------------------------------------------------------------------------------------------
 ! allocate and initialize global state and postresutls variables
@@ -277,7 +262,7 @@ subroutine homogenization_init
    + homogenization_maxNgrains * (1 + crystallite_maxSizePostResults & ! crystallite size & crystallite results
    + 1 + constitutive_plasticity_maxSizePostResults & ! constitutive size & constitutive results
    + constitutive_source_maxSizePostResults)
- allocate(materialpoint_results(materialpoint_sizeResults,mesh_maxNips,mesh_NcpElems))
+ allocate(materialpoint_results(materialpoint_sizeResults,theMesh%elem%nIPs,theMesh%nElems))
 
  write(6,'(/,a)') ' <<<+- homogenization init -+>>>'
  write(6,'(a15,a)') ' Current time: ',IO_timeStamp()
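A side note on the allocation change above (mesh_maxNips/mesh_NcpElems replaced by theMesh%elem%nIPs/theMesh%nElems): the idiom is one entry per integration point and element, with spread used to stamp the identity tensor into every slot. A self-contained editor's sketch, using a hypothetical tDemoMesh stand-in instead of the real mesh object:

! Editor's illustration only -- stand-in types, not part of the commit.
program demo_materialpoint_alloc
 implicit none
 integer, parameter :: pReal = selected_real_kind(15)
 type :: tDemoElem
   integer :: nIPs = 8
 end type tDemoElem
 type :: tDemoMesh
   type(tDemoElem) :: elem
   integer :: nElems = 100
 end type tDemoMesh
 real(pReal), dimension(3,3), parameter :: math_I3 = reshape([1.0_pReal,0.0_pReal,0.0_pReal, &
                                                              0.0_pReal,1.0_pReal,0.0_pReal, &
                                                              0.0_pReal,0.0_pReal,1.0_pReal],[3,3])
 type(tDemoMesh) :: theMesh
 real(pReal), dimension(:,:,:,:), allocatable :: materialpoint_F0

 ! same shape as in homogenization_init: one 3x3 tensor per IP and element
 allocate(materialpoint_F0(3,3,theMesh%elem%nIPs,theMesh%nElems), source=0.0_pReal)
 materialpoint_F0 = spread(spread(math_I3,3,theMesh%elem%nIPs),4,theMesh%nElems)   ! identity everywhere
 write(6,*) materialpoint_F0(:,:,1,1)                                              ! prints the 3x3 identity
end program demo_materialpoint_alloc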
@@ -346,7 +331,6 @@ subroutine materialpoint_stressAndItsTangent(updateJaco,dt)
    crystallite_Lp, &
    crystallite_Li0, &
    crystallite_Li, &
-   crystallite_dPdF, &
    crystallite_Tstar0_v, &
    crystallite_Tstar_v, &
    crystallite_partionedF0, &
@@ -600,7 +584,7 @@ subroutine materialpoint_stressAndItsTangent(updateJaco,dt)
    IpLooping2: do i = FEsolving_execIP(1,e),FEsolving_execIP(2,e)
      if ( materialpoint_requested(i,e) .and. & ! process requested but...
          .not. materialpoint_doneAndHappy(1,i,e)) then ! ...not yet done material points
        call partitionDeformation(i,e) ! partition deformation onto constituents
        crystallite_dt(1:myNgrains,i,e) = materialpoint_subdt(i,e) ! propagate materialpoint dt to grains
        crystallite_requested(1:myNgrains,i,e) = .true. ! request calculation for constituents
      else
@@ -614,7 +598,8 @@ subroutine materialpoint_stressAndItsTangent(updateJaco,dt)
 ! crystallite integration
 ! based on crystallite_partionedF0,.._partionedF
 ! incrementing by crystallite_dt
- materialpoint_converged = crystallite_stress() !ToDo: MD not sure if that is the best logic
+ materialpoint_converged = crystallite_stress() !ToDo: MD not sure if that is the best logic
+
 
 !--------------------------------------------------------------------------------------------------
 ! state update
@@ -898,6 +883,8 @@ function postResults(ip,el)
  use mesh, only: &
    mesh_element
  use material, only: &
+   thermalMapping, &
+   thermal_typeInstance, &
    material_homogenizationAt, &
    homogenization_typeInstance,&
    mappingHomogenization, &
@@ -937,7 +924,7 @@ function postResults(ip,el)
    postResults
  integer(pInt) :: &
    startPos, endPos ,&
-   of, instance
+   of, instance, homog
 
 
  postResults = 0.0_pReal
@@ -957,10 +944,14 @@ function postResults(ip,el)
  chosenThermal: select case (thermal_type(mesh_element(3,el)))
 
    case (THERMAL_adiabatic_ID) chosenThermal
-     postResults(startPos:endPos) = thermal_adiabatic_postResults(ip, el)
+     homog = mappingHomogenization(2,ip,el)
+     postResults(startPos:endPos) = &
+       thermal_adiabatic_postResults(homog,thermal_typeInstance(homog),thermalMapping(homog)%p(ip,el))
    case (THERMAL_conduction_ID) chosenThermal
-     postResults(startPos:endPos) = thermal_conduction_postResults(ip, el)
+     homog = mappingHomogenization(2,ip,el)
+     postResults(startPos:endPos) = &
+       thermal_conduction_postResults(homog,thermal_typeInstance(homog),thermalMapping(homog)%p(ip,el))
 
  end select chosenThermal
 
  startPos = endPos + 1_pInt

@@ -11,20 +11,22 @@ module kinematics_cleavage_opening
 
  implicit none
  private
- integer(pInt), dimension(:), allocatable, public, protected :: &
-   kinematics_cleavage_opening_sizePostResults, & !< cumulative size of post results
-   kinematics_cleavage_opening_offset, & !< which kinematics is my current damage mechanism?
-   kinematics_cleavage_opening_instance !< instance of damage kinematics mechanism
+ integer(pInt), dimension(:), allocatable, private :: kinematics_cleavage_opening_instance
 
- integer(pInt), dimension(:,:), allocatable, target, public :: &
-   kinematics_cleavage_opening_sizePostResult !< size of each post result output
- character(len=64), dimension(:,:), allocatable, target, public :: &
-   kinematics_cleavage_opening_output !< name of each post result output
- integer(pInt), dimension(:), allocatable, target, public :: &
-   kinematics_cleavage_opening_Noutput !< number of outputs per instance of this damage
+ type, private :: tParameters !< container type for internal constitutive parameters
+   integer(pInt) :: &
+     totalNcleavage
+   integer(pInt), dimension(:), allocatable :: &
+     Ncleavage !< active number of cleavage systems per family
+   real(pReal) :: &
+     sdot0, &
+     n
+   real(pReal), dimension(:), allocatable :: &
+     critDisp, &
+     critLoad
+ end type
 
+! Begin Deprecated
  integer(pInt), dimension(:), allocatable, private :: &
    kinematics_cleavage_opening_totalNcleavage !< total number of cleavage systems
 
@@ -38,6 +40,7 @@ module kinematics_cleavage_opening
  real(pReal), dimension(:,:), allocatable, private :: &
    kinematics_cleavage_opening_critDisp, &
    kinematics_cleavage_opening_critLoad
+! End Deprecated
 
  public :: &
    kinematics_cleavage_opening_init, &
@@ -50,7 +53,7 @@ contains
 !> @brief module initialization
 !> @details reads in material parameters, allocates arrays, and does sanity checks
 !--------------------------------------------------------------------------------------------------
-subroutine kinematics_cleavage_opening_init(fileUnit)
+subroutine kinematics_cleavage_opening_init()
 #if defined(__GFORTRAN__) || __INTEL_COMPILER >= 1800
  use, intrinsic :: iso_fortran_env, only: &
    compiler_version, &
|
@ -60,41 +63,25 @@ subroutine kinematics_cleavage_opening_init(fileUnit)
|
||||||
debug_level,&
|
debug_level,&
|
||||||
debug_constitutive,&
|
debug_constitutive,&
|
||||||
debug_levelBasic
|
debug_levelBasic
|
||||||
|
use config, only: &
|
||||||
|
config_phase
|
||||||
use IO, only: &
|
use IO, only: &
|
||||||
IO_read, &
|
|
||||||
IO_lc, &
|
|
||||||
IO_getTag, &
|
|
||||||
IO_isBlank, &
|
|
||||||
IO_stringPos, &
|
|
||||||
IO_stringValue, &
|
|
||||||
IO_floatValue, &
|
|
||||||
IO_intValue, &
|
|
||||||
IO_warning, &
|
IO_warning, &
|
||||||
IO_error, &
|
IO_error, &
|
||||||
IO_timeStamp, &
|
IO_timeStamp
|
||||||
IO_EOF
|
|
||||||
use material, only: &
|
use material, only: &
|
||||||
phase_kinematics, &
|
phase_kinematics, &
|
||||||
phase_Nkinematics, &
|
|
||||||
phase_Noutput, &
|
|
||||||
KINEMATICS_cleavage_opening_label, &
|
KINEMATICS_cleavage_opening_label, &
|
||||||
KINEMATICS_cleavage_opening_ID
|
KINEMATICS_cleavage_opening_ID
|
||||||
use config, only: &
|
|
||||||
material_Nphase, &
|
|
||||||
MATERIAL_partPhase
|
|
||||||
use lattice, only: &
|
use lattice, only: &
|
||||||
lattice_maxNcleavageFamily, &
|
lattice_maxNcleavageFamily, &
|
||||||
lattice_NcleavageSystem
|
lattice_NcleavageSystem
|
||||||
|
|
||||||
implicit none
|
implicit none
|
||||||
integer(pInt), intent(in) :: fileUnit
|
integer(pInt), allocatable, dimension(:) :: tempInt
|
||||||
|
real(pReal), allocatable, dimension(:) :: tempFloat
|
||||||
|
|
||||||
integer(pInt), allocatable, dimension(:) :: chunkPos
|
integer(pInt) :: maxNinstance,p,instance,kinematics
|
||||||
integer(pInt) :: maxNinstance,phase,instance,kinematics
|
|
||||||
integer(pInt) :: Nchunks_CleavageFamilies = 0_pInt, j
|
|
||||||
character(len=65536) :: &
|
|
||||||
tag = '', &
|
|
||||||
line = ''
|
|
||||||
|
|
||||||
write(6,'(/,a)') ' <<<+- kinematics_'//KINEMATICS_cleavage_opening_LABEL//' init -+>>>'
|
write(6,'(/,a)') ' <<<+- kinematics_'//KINEMATICS_cleavage_opening_LABEL//' init -+>>>'
|
||||||
write(6,'(a15,a)') ' Current time: ',IO_timeStamp()
|
write(6,'(a15,a)') ' Current time: ',IO_timeStamp()
|
||||||
|
@@ -106,21 +93,11 @@ subroutine kinematics_cleavage_opening_init(fileUnit)
  if (iand(debug_level(debug_constitutive),debug_levelBasic) /= 0_pInt) &
    write(6,'(a16,1x,i5,/)') '# instances:',maxNinstance
 
- allocate(kinematics_cleavage_opening_offset(material_Nphase), source=0_pInt)
- allocate(kinematics_cleavage_opening_instance(material_Nphase), source=0_pInt)
- do phase = 1, material_Nphase
-   kinematics_cleavage_opening_instance(phase) = count(phase_kinematics(:,1:phase) == kinematics_cleavage_opening_ID)
-   do kinematics = 1, phase_Nkinematics(phase)
-     if (phase_kinematics(kinematics,phase) == kinematics_cleavage_opening_ID) &
-       kinematics_cleavage_opening_offset(phase) = kinematics
-   enddo
+ allocate(kinematics_cleavage_opening_instance(size(config_phase)), source=0_pInt)
+ do p = 1_pInt, size(config_phase)
+   kinematics_cleavage_opening_instance(p) = count(phase_kinematics(:,1:p) == kinematics_cleavage_opening_ID) ! ToDo: count correct?
  enddo
 
- allocate(kinematics_cleavage_opening_sizePostResults(maxNinstance), source=0_pInt)
- allocate(kinematics_cleavage_opening_sizePostResult(maxval(phase_Noutput),maxNinstance), source=0_pInt)
- allocate(kinematics_cleavage_opening_output(maxval(phase_Noutput),maxNinstance))
- kinematics_cleavage_opening_output = ''
- allocate(kinematics_cleavage_opening_Noutput(maxNinstance), source=0_pInt)
 allocate(kinematics_cleavage_opening_critDisp(lattice_maxNcleavageFamily,maxNinstance), source=0.0_pReal)
 allocate(kinematics_cleavage_opening_critLoad(lattice_maxNcleavageFamily,maxNinstance), source=0.0_pReal)
 allocate(kinematics_cleavage_opening_Ncleavage(lattice_maxNcleavageFamily,maxNinstance), source=0_pInt)
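A note on the instance bookkeeping introduced above: kinematics_cleavage_opening_instance(p) is a running count of matching entries in phase_kinematics(:,1:p), which is why the new line carries the "! ToDo: count correct?" remark; a phase that listed the same mechanism twice would advance the count by two. Editor's stand-alone sketch of that counting idiom (all values hypothetical):

! Editor's illustration only -- not part of the commit.
program demo_instance_count
 implicit none
 integer, parameter :: KINEMATICS_cleavage_opening_ID = 3   ! hypothetical ID value
 integer, dimension(2,4) :: phase_kinematics                ! 4 phases, up to 2 kinematics each
 integer, dimension(4)   :: instance
 integer :: p

 phase_kinematics = reshape([3,0, 1,0, 3,3, 0,0],[2,4])
 do p = 1, size(phase_kinematics,2)
   ! running count of matching entries up to and including phase p
   instance(p) = count(phase_kinematics(:,1:p) == KINEMATICS_cleavage_opening_ID)
 enddo
 write(6,*) instance                                        ! 1 1 3 3: phase 3 bumps the count twice
end program demo_instance_count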
@@ -128,90 +105,51 @@ subroutine kinematics_cleavage_opening_init(fileUnit)
 allocate(kinematics_cleavage_opening_sdot_0(maxNinstance), source=0.0_pReal)
 allocate(kinematics_cleavage_opening_N(maxNinstance), source=0.0_pReal)
 
- rewind(fileUnit)
- phase = 0_pInt
- do while (trim(line) /= IO_EOF .and. IO_lc(IO_getTag(line,'<','>')) /= MATERIAL_partPhase) ! wind forward to <phase>
-   line = IO_read(fileUnit)
- enddo
-
- parsingFile: do while (trim(line) /= IO_EOF) ! read through sections of phase part
-   line = IO_read(fileUnit)
-   if (IO_isBlank(line)) cycle ! skip empty lines
-   if (IO_getTag(line,'<','>') /= '') then ! stop at next part
-     line = IO_read(fileUnit, .true.) ! reset IO_read
-     exit
-   endif
-   if (IO_getTag(line,'[',']') /= '') then ! next phase section
-     phase = phase + 1_pInt ! advance phase section counter
-     cycle ! skip to next line
-   endif
-   if (phase > 0_pInt ) then; if (any(phase_kinematics(:,phase) == KINEMATICS_cleavage_opening_ID)) then ! do not short-circuit here (.and. with next if statemen). It's not safe in Fortran
-     instance = kinematics_cleavage_opening_instance(phase) ! which instance of my damage is present phase
-     chunkPos = IO_stringPos(line)
-     tag = IO_lc(IO_stringValue(line,chunkPos,1_pInt)) ! extract key
-     select case(tag)
-       case ('anisobrittle_sdot0')
-         kinematics_cleavage_opening_sdot_0(instance) = IO_floatValue(line,chunkPos,2_pInt)
-
-       case ('anisobrittle_ratesensitivity')
-         kinematics_cleavage_opening_N(instance) = IO_floatValue(line,chunkPos,2_pInt)
-
-       case ('ncleavage') !
-         Nchunks_CleavageFamilies = chunkPos(1) - 1_pInt
-         do j = 1_pInt, Nchunks_CleavageFamilies
-           kinematics_cleavage_opening_Ncleavage(j,instance) = IO_intValue(line,chunkPos,1_pInt+j)
-         enddo
-
-       case ('anisobrittle_criticaldisplacement')
-         do j = 1_pInt, Nchunks_CleavageFamilies
-           kinematics_cleavage_opening_critDisp(j,instance) = IO_floatValue(line,chunkPos,1_pInt+j)
-         enddo
-
-       case ('anisobrittle_criticalload')
-         do j = 1_pInt, Nchunks_CleavageFamilies
-           kinematics_cleavage_opening_critLoad(j,instance) = IO_floatValue(line,chunkPos,1_pInt+j)
-         enddo
-
-     end select
-   endif; endif
- enddo parsingFile
-
-!--------------------------------------------------------------------------------------------------
-! sanity checks
- sanityChecks: do phase = 1_pInt, material_Nphase
-   myPhase: if (any(phase_kinematics(:,phase) == KINEMATICS_cleavage_opening_ID)) then
-     instance = kinematics_cleavage_opening_instance(phase)
-     kinematics_cleavage_opening_Ncleavage(1:lattice_maxNcleavageFamily,instance) = &
-       min(lattice_NcleavageSystem(1:lattice_maxNcleavageFamily,phase),& ! limit active cleavage systems per family to min of available and requested
+ do p = 1_pInt, size(config_phase)
+   if (all(phase_kinematics(:,p) /= KINEMATICS_cleavage_opening_ID)) cycle
+   instance = kinematics_cleavage_opening_instance(p)
+   kinematics_cleavage_opening_sdot_0(instance) = config_phase(p)%getFloat('anisobrittle_sdot0')
+   kinematics_cleavage_opening_N(instance) = config_phase(p)%getFloat('anisobrittle_ratesensitivity')
+   tempInt = config_phase(p)%getInts('ncleavage')
+   kinematics_cleavage_opening_Ncleavage(1:size(tempInt),instance) = tempInt
+   tempFloat = config_phase(p)%getFloats('anisobrittle_criticaldisplacement',requiredSize=size(tempInt))
+   kinematics_cleavage_opening_critDisp(1:size(tempInt),instance) = tempFloat
+   tempFloat = config_phase(p)%getFloats('anisobrittle_criticalload',requiredSize=size(tempInt))
+   kinematics_cleavage_opening_critLoad(1:size(tempInt),instance) = tempFloat
+   kinematics_cleavage_opening_Ncleavage(1:lattice_maxNcleavageFamily,instance) = &
+     min(lattice_NcleavageSystem(1:lattice_maxNcleavageFamily,p),& ! limit active cleavage systems per family to min of available and requested
        kinematics_cleavage_opening_Ncleavage(1:lattice_maxNcleavageFamily,instance))
    kinematics_cleavage_opening_totalNcleavage(instance) = sum(kinematics_cleavage_opening_Ncleavage(:,instance)) ! how many cleavage systems altogether
    if (kinematics_cleavage_opening_sdot_0(instance) <= 0.0_pReal) &
      call IO_error(211_pInt,el=instance,ext_msg='sdot_0 ('//KINEMATICS_cleavage_opening_LABEL//')')
-   if (any(kinematics_cleavage_opening_critDisp(1:Nchunks_CleavageFamilies,instance) < 0.0_pReal)) &
+   if (any(kinematics_cleavage_opening_critDisp(1:size(tempInt),instance) < 0.0_pReal)) &
      call IO_error(211_pInt,el=instance,ext_msg='critical_displacement ('//KINEMATICS_cleavage_opening_LABEL//')')
-   if (any(kinematics_cleavage_opening_critLoad(1:Nchunks_CleavageFamilies,instance) < 0.0_pReal)) &
+   if (any(kinematics_cleavage_opening_critLoad(1:size(tempInt),instance) < 0.0_pReal)) &
      call IO_error(211_pInt,el=instance,ext_msg='critical_load ('//KINEMATICS_cleavage_opening_LABEL//')')
    if (kinematics_cleavage_opening_N(instance) <= 0.0_pReal) &
      call IO_error(211_pInt,el=instance,ext_msg='rate_sensitivity ('//KINEMATICS_cleavage_opening_LABEL//')')
- endif myPhase
- enddo sanityChecks
+ enddo
 
 
 end subroutine kinematics_cleavage_opening_init
 
 !--------------------------------------------------------------------------------------------------
 !> @brief contains the constitutive equation for calculating the velocity gradient
 !--------------------------------------------------------------------------------------------------
-subroutine kinematics_cleavage_opening_LiAndItsTangent(Ld, dLd_dTstar3333, Tstar_v, ipc, ip, el)
+subroutine kinematics_cleavage_opening_LiAndItsTangent(Ld, dLd_dTstar, S, ipc, ip, el)
  use prec, only: &
    tol_math_check
+ use math, only: &
+   math_mul33xx33
  use material, only: &
-   phaseAt, phasememberAt, &
+   material_phase, &
    material_homog, &
    damage, &
    damageMapping
  use lattice, only: &
    lattice_Scleavage, &
-   lattice_Scleavage_v, &
    lattice_maxNcleavageFamily, &
    lattice_NcleavageSystem
 
@@ -220,36 +158,33 @@ subroutine kinematics_cleavage_opening_LiAndItsTangent(Ld, dLd_dTstar3333, Tstar_v, ipc, ip, el)
    ipc, & !< grain number
    ip, & !< integration point number
    el !< element number
- real(pReal), intent(in), dimension(6) :: &
-   Tstar_v !< 2nd Piola-Kirchhoff stress
+ real(pReal), intent(in), dimension(3,3) :: &
+   S
  real(pReal), intent(out), dimension(3,3) :: &
    Ld !< damage velocity gradient
  real(pReal), intent(out), dimension(3,3,3,3) :: &
-   dLd_dTstar3333 !< derivative of Ld with respect to Tstar (4th-order tensor)
+   dLd_dTstar !< derivative of Ld with respect to Tstar (4th-order tensor)
  integer(pInt) :: &
-   phase, &
-   constituent, &
-   instance, &
+   instance, phase, &
    homog, damageOffset, &
    f, i, index_myFamily, k, l, m, n
  real(pReal) :: &
    traction_d, traction_t, traction_n, traction_crit, &
    udotd, dudotd_dt, udott, dudott_dt, udotn, dudotn_dt
 
- phase = phaseAt(ipc,ip,el)
- constituent = phasememberAt(ipc,ip,el)
+ phase = material_phase(ipc,ip,el)
  instance = kinematics_cleavage_opening_instance(phase)
  homog = material_homog(ip,el)
  damageOffset = damageMapping(homog)%p(ip,el)
 
  Ld = 0.0_pReal
- dLd_dTstar3333 = 0.0_pReal
+ dLd_dTstar = 0.0_pReal
  do f = 1_pInt,lattice_maxNcleavageFamily
    index_myFamily = sum(lattice_NcleavageSystem(1:f-1_pInt,phase)) ! at which index starts my family
    do i = 1_pInt,kinematics_cleavage_opening_Ncleavage(f,instance) ! process each (active) cleavage system in family
-     traction_d = dot_product(Tstar_v,lattice_Scleavage_v(1:6,1,index_myFamily+i,phase))
-     traction_t = dot_product(Tstar_v,lattice_Scleavage_v(1:6,2,index_myFamily+i,phase))
-     traction_n = dot_product(Tstar_v,lattice_Scleavage_v(1:6,3,index_myFamily+i,phase))
+     traction_d = math_mul33xx33(S,lattice_Scleavage(1:3,1:3,1,index_myFamily+i,phase))
+     traction_t = math_mul33xx33(S,lattice_Scleavage(1:3,1:3,2,index_myFamily+i,phase))
+     traction_n = math_mul33xx33(S,lattice_Scleavage(1:3,1:3,3,index_myFamily+i,phase))
      traction_crit = kinematics_cleavage_opening_critLoad(f,instance)* &
        damage(homog)%p(damageOffset)*damage(homog)%p(damageOffset)
      udotd = &
@@ -261,7 +196,7 @@ subroutine kinematics_cleavage_opening_LiAndItsTangent(Ld, dLd_dTstar3333, Tstar_v, ipc, ip, el)
        dudotd_dt = sign(1.0_pReal,traction_d)*udotd*kinematics_cleavage_opening_N(instance)/ &
          max(0.0_pReal, abs(traction_d) - traction_crit)
        forall (k=1_pInt:3_pInt,l=1_pInt:3_pInt,m=1_pInt:3_pInt,n=1_pInt:3_pInt) &
-         dLd_dTstar3333(k,l,m,n) = dLd_dTstar3333(k,l,m,n) + &
+         dLd_dTstar(k,l,m,n) = dLd_dTstar(k,l,m,n) + &
            dudotd_dt*lattice_Scleavage(k,l,1,index_myFamily+i,phase)* &
            lattice_Scleavage(m,n,1,index_myFamily+i,phase)
      endif
@@ -275,7 +210,7 @@ subroutine kinematics_cleavage_opening_LiAndItsTangent(Ld, dLd_dTstar3333, Tstar_v, ipc, ip, el)
        dudott_dt = sign(1.0_pReal,traction_t)*udott*kinematics_cleavage_opening_N(instance)/ &
          max(0.0_pReal, abs(traction_t) - traction_crit)
        forall (k=1_pInt:3_pInt,l=1_pInt:3_pInt,m=1_pInt:3_pInt,n=1_pInt:3_pInt) &
-         dLd_dTstar3333(k,l,m,n) = dLd_dTstar3333(k,l,m,n) + &
+         dLd_dTstar(k,l,m,n) = dLd_dTstar(k,l,m,n) + &
           dudott_dt*lattice_Scleavage(k,l,2,index_myFamily+i,phase)* &
           lattice_Scleavage(m,n,2,index_myFamily+i,phase)
      endif
@@ -289,11 +224,10 @@ subroutine kinematics_cleavage_opening_LiAndItsTangent(Ld, dLd_dTstar3333, Tstar_v, ipc, ip, el)
        dudotn_dt = sign(1.0_pReal,traction_n)*udotn*kinematics_cleavage_opening_N(instance)/ &
          max(0.0_pReal, abs(traction_n) - traction_crit)
        forall (k=1_pInt:3_pInt,l=1_pInt:3_pInt,m=1_pInt:3_pInt,n=1_pInt:3_pInt) &
-         dLd_dTstar3333(k,l,m,n) = dLd_dTstar3333(k,l,m,n) + &
+         dLd_dTstar(k,l,m,n) = dLd_dTstar(k,l,m,n) + &
           dudotn_dt*lattice_Scleavage(k,l,3,index_myFamily+i,phase)* &
           lattice_Scleavage(m,n,3,index_myFamily+i,phase)
      endif
-
 
    enddo
  enddo
 
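The traction terms above switch from a Mandel/Voigt dot_product on Tstar_v to a full double contraction of the 3x3 stress with the cleavage projection tensors via math_mul33xx33. Editor's sketch of that contraction, not part of the commit; math_mul33xx33 is assumed to reduce to sum(A*B), and the numbers are made up:

! Editor's illustration only -- not part of the commit.
program demo_double_contraction
 implicit none
 integer, parameter :: pReal = selected_real_kind(15)
 real(pReal), dimension(3,3) :: S, Scleavage
 real(pReal) :: traction_n

 S = 0.0_pReal
 S(3,3) = 2.0_pReal                        ! pure normal stress on the cleavage plane
 Scleavage = 0.0_pReal
 Scleavage(3,3) = 1.0_pReal                ! projector picking out the (3,3) component
 traction_n = sum(S*Scleavage)             ! what math_mul33xx33(S,Scleavage) computes
 write(6,*) traction_n                     ! 2.0
end program demo_double_contraction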
@@ -11,20 +11,22 @@ module kinematics_slipplane_opening
 
  implicit none
  private
- integer(pInt), dimension(:), allocatable, public, protected :: &
-   kinematics_slipplane_opening_sizePostResults, & !< cumulative size of post results
-   kinematics_slipplane_opening_offset, & !< which kinematics is my current damage mechanism?
-   kinematics_slipplane_opening_instance !< instance of damage kinematics mechanism
+ integer(pInt), dimension(:), allocatable, private :: kinematics_slipplane_opening_instance
 
- integer(pInt), dimension(:,:), allocatable, target, public :: &
-   kinematics_slipplane_opening_sizePostResult !< size of each post result output
-
- character(len=64), dimension(:,:), allocatable, target, public :: &
-   kinematics_slipplane_opening_output !< name of each post result output
-
- integer(pInt), dimension(:), allocatable, target, public :: &
-   kinematics_slipplane_opening_Noutput !< number of outputs per instance of this damage
+ type, private :: tParameters !< container type for internal constitutive parameters
+   integer(pInt) :: &
+     totalNslip
+   integer(pInt), dimension(:), allocatable :: &
+     Nslip !< active number of slip systems per family
+   real(pReal) :: &
+     sdot0, &
+     n
+   real(pReal), dimension(:), allocatable :: &
+     critDisp, &
+     critPlasticStrain
+ end type
 
+! Begin Deprecated
  integer(pInt), dimension(:), allocatable, private :: &
    kinematics_slipplane_opening_totalNslip !< total number of slip systems
 
|
@ -38,6 +40,7 @@ module kinematics_slipplane_opening
|
||||||
real(pReal), dimension(:,:), allocatable, private :: &
|
real(pReal), dimension(:,:), allocatable, private :: &
|
||||||
kinematics_slipplane_opening_critPlasticStrain, &
|
kinematics_slipplane_opening_critPlasticStrain, &
|
||||||
kinematics_slipplane_opening_critLoad
|
kinematics_slipplane_opening_critLoad
|
||||||
|
! End Deprecated
|
||||||
|
|
||||||
public :: &
|
public :: &
|
||||||
kinematics_slipplane_opening_init, &
|
kinematics_slipplane_opening_init, &
|
||||||
|
@@ -50,7 +53,7 @@ contains
 !> @brief module initialization
 !> @details reads in material parameters, allocates arrays, and does sanity checks
 !--------------------------------------------------------------------------------------------------
-subroutine kinematics_slipplane_opening_init(fileUnit)
+subroutine kinematics_slipplane_opening_init()
 #if defined(__GFORTRAN__) || __INTEL_COMPILER >= 1800
  use, intrinsic :: iso_fortran_env, only: &
    compiler_version, &
|
@ -60,41 +63,25 @@ subroutine kinematics_slipplane_opening_init(fileUnit)
|
||||||
debug_level,&
|
debug_level,&
|
||||||
debug_constitutive,&
|
debug_constitutive,&
|
||||||
debug_levelBasic
|
debug_levelBasic
|
||||||
|
use config, only: &
|
||||||
|
config_phase
|
||||||
use IO, only: &
|
use IO, only: &
|
||||||
IO_read, &
|
|
||||||
IO_lc, &
|
|
||||||
IO_getTag, &
|
|
||||||
IO_isBlank, &
|
|
||||||
IO_stringPos, &
|
|
||||||
IO_stringValue, &
|
|
||||||
IO_floatValue, &
|
|
||||||
IO_intValue, &
|
|
||||||
IO_warning, &
|
IO_warning, &
|
||||||
IO_error, &
|
IO_error, &
|
||||||
IO_timeStamp, &
|
IO_timeStamp
|
||||||
IO_EOF
|
|
||||||
use material, only: &
|
use material, only: &
|
||||||
phase_kinematics, &
|
phase_kinematics, &
|
||||||
phase_Nkinematics, &
|
|
||||||
phase_Noutput, &
|
|
||||||
KINEMATICS_slipplane_opening_label, &
|
KINEMATICS_slipplane_opening_label, &
|
||||||
KINEMATICS_slipplane_opening_ID
|
KINEMATICS_slipplane_opening_ID
|
||||||
use config, only: &
|
|
||||||
material_Nphase, &
|
|
||||||
MATERIAL_partPhase
|
|
||||||
use lattice, only: &
|
use lattice, only: &
|
||||||
lattice_maxNslipFamily, &
|
lattice_maxNslipFamily, &
|
||||||
lattice_NslipSystem
|
lattice_NslipSystem
|
||||||
|
|
||||||
implicit none
|
implicit none
|
||||||
integer(pInt), intent(in) :: fileUnit
|
integer(pInt), allocatable, dimension(:) :: tempInt
|
||||||
|
real(pReal), allocatable, dimension(:) :: tempFloat
|
||||||
|
|
||||||
integer(pInt), allocatable, dimension(:) :: chunkPos
|
integer(pInt) :: maxNinstance,p,instance,kinematics
|
||||||
integer(pInt) :: maxNinstance,phase,instance,kinematics
|
|
||||||
integer(pInt) :: Nchunks_SlipFamilies = 0_pInt, j
|
|
||||||
character(len=65536) :: &
|
|
||||||
tag = '', &
|
|
||||||
line = ''
|
|
||||||
|
|
||||||
write(6,'(/,a)') ' <<<+- kinematics_'//KINEMATICS_slipplane_opening_LABEL//' init -+>>>'
|
write(6,'(/,a)') ' <<<+- kinematics_'//KINEMATICS_slipplane_opening_LABEL//' init -+>>>'
|
||||||
write(6,'(a15,a)') ' Current time: ',IO_timeStamp()
|
write(6,'(a15,a)') ' Current time: ',IO_timeStamp()
|
||||||
|
@@ -106,21 +93,11 @@ subroutine kinematics_slipplane_opening_init(fileUnit)
  if (iand(debug_level(debug_constitutive),debug_levelBasic) /= 0_pInt) &
    write(6,'(a16,1x,i5,/)') '# instances:',maxNinstance
 
- allocate(kinematics_slipplane_opening_offset(material_Nphase), source=0_pInt)
- allocate(kinematics_slipplane_opening_instance(material_Nphase), source=0_pInt)
- do phase = 1, material_Nphase
-   kinematics_slipplane_opening_instance(phase) = count(phase_kinematics(:,1:phase) == kinematics_slipplane_opening_ID)
-   do kinematics = 1, phase_Nkinematics(phase)
-     if (phase_kinematics(kinematics,phase) == kinematics_slipplane_opening_ID) &
-       kinematics_slipplane_opening_offset(phase) = kinematics
-   enddo
+ allocate(kinematics_slipplane_opening_instance(size(config_phase)), source=0_pInt)
+ do p = 1_pInt, size(config_phase)
+   kinematics_slipplane_opening_instance(p) = count(phase_kinematics(:,1:p) == kinematics_slipplane_opening_ID) ! ToDo: count correct?
  enddo
 
- allocate(kinematics_slipplane_opening_sizePostResults(maxNinstance), source=0_pInt)
- allocate(kinematics_slipplane_opening_sizePostResult(maxval(phase_Noutput),maxNinstance),source=0_pInt)
- allocate(kinematics_slipplane_opening_output(maxval(phase_Noutput),maxNinstance))
- kinematics_slipplane_opening_output = ''
- allocate(kinematics_slipplane_opening_Noutput(maxNinstance), source=0_pInt)
 allocate(kinematics_slipplane_opening_critLoad(lattice_maxNslipFamily,maxNinstance), source=0.0_pReal)
 allocate(kinematics_slipplane_opening_critPlasticStrain(lattice_maxNslipFamily,maxNinstance),source=0.0_pReal)
 allocate(kinematics_slipplane_opening_Nslip(lattice_maxNslipFamily,maxNinstance), source=0_pInt)
@@ -128,61 +105,22 @@ subroutine kinematics_slipplane_opening_init(fileUnit)
 allocate(kinematics_slipplane_opening_N(maxNinstance), source=0.0_pReal)
 allocate(kinematics_slipplane_opening_sdot_0(maxNinstance), source=0.0_pReal)
 
- rewind(fileUnit)
- phase = 0_pInt
- do while (trim(line) /= IO_EOF .and. IO_lc(IO_getTag(line,'<','>')) /= MATERIAL_partPhase) ! wind forward to <phase>
-   line = IO_read(fileUnit)
- enddo
-
- parsingFile: do while (trim(line) /= IO_EOF) ! read through sections of phase part
-   line = IO_read(fileUnit)
-   if (IO_isBlank(line)) cycle ! skip empty lines
-   if (IO_getTag(line,'<','>') /= '') then ! stop at next part
-     line = IO_read(fileUnit, .true.) ! reset IO_read
-     exit
-   endif
-   if (IO_getTag(line,'[',']') /= '') then ! next phase section
-     phase = phase + 1_pInt ! advance phase section counter
-     cycle ! skip to next line
-   endif
-   if (phase > 0_pInt ) then; if (any(phase_kinematics(:,phase) == KINEMATICS_slipplane_opening_ID)) then ! do not short-circuit here (.and. with next if statemen). It's not safe in Fortran
-     instance = kinematics_slipplane_opening_instance(phase) ! which instance of my damage is present phase
-     chunkPos = IO_stringPos(line)
-     tag = IO_lc(IO_stringValue(line,chunkPos,1_pInt)) ! extract key
-     select case(tag)
-       case ('nslip') !
-         Nchunks_SlipFamilies = chunkPos(1) - 1_pInt
-         do j = 1_pInt, Nchunks_SlipFamilies
-           kinematics_slipplane_opening_Nslip(j,instance) = IO_intValue(line,chunkPos,1_pInt+j)
-         enddo
-
-       case ('anisoductile_sdot0')
-         kinematics_slipplane_opening_sdot_0(instance) = IO_floatValue(line,chunkPos,2_pInt)
-
-       case ('anisoductile_criticalplasticstrain')
-         do j = 1_pInt, Nchunks_SlipFamilies
-           kinematics_slipplane_opening_critPlasticStrain(j,instance) = IO_floatValue(line,chunkPos,1_pInt+j)
-         enddo
-
-       case ('anisoductile_ratesensitivity')
-         kinematics_slipplane_opening_N(instance) = IO_floatValue(line,chunkPos,2_pInt)
-
-       case ('anisoductile_criticalload')
-         do j = 1_pInt, Nchunks_SlipFamilies
-           kinematics_slipplane_opening_critLoad(j,instance) = IO_floatValue(line,chunkPos,1_pInt+j)
-         enddo
-
-     end select
-   endif; endif
- enddo parsingFile
-
-!--------------------------------------------------------------------------------------------------
-! sanity checks
- sanityChecks: do phase = 1_pInt, material_Nphase
-   myPhase: if (any(phase_kinematics(:,phase) == KINEMATICS_slipplane_opening_ID)) then
-     instance = kinematics_slipplane_opening_instance(phase)
+ do p = 1_pInt, size(config_phase)
+   if (all(phase_kinematics(:,p) /= KINEMATICS_slipplane_opening_ID)) cycle
+   instance = kinematics_slipplane_opening_instance(p)
+   kinematics_slipplane_opening_sdot_0(instance) = config_phase(p)%getFloat('anisoductile_sdot0')
+   kinematics_slipplane_opening_N(instance) = config_phase(p)%getFloat('anisoductile_ratesensitivity')
+   tempInt = config_phase(p)%getInts('ncleavage')
+   kinematics_slipplane_opening_Nslip(1:size(tempInt),instance) = tempInt
+   tempFloat = config_phase(p)%getFloats('anisoductile_criticalplasticstrain',requiredSize=size(tempInt))
+   kinematics_slipplane_opening_critPlasticStrain(1:size(tempInt),instance) = tempFloat
+   tempFloat = config_phase(p)%getFloats('anisoductile_criticalload',requiredSize=size(tempInt))
+   kinematics_slipplane_opening_critLoad(1:size(tempInt),instance) = tempFloat
    kinematics_slipplane_opening_Nslip(1:lattice_maxNslipFamily,instance) = &
-     min(lattice_NslipSystem(1:lattice_maxNslipFamily,phase),& ! limit active cleavage systems per family to min of available and requested
+     min(lattice_NslipSystem(1:lattice_maxNslipFamily,p),& ! limit active cleavage systems per family to min of available and requested
        kinematics_slipplane_opening_Nslip(1:lattice_maxNslipFamily,instance))
    kinematics_slipplane_opening_totalNslip(instance) = sum(kinematics_slipplane_opening_Nslip(:,instance))
    if (kinematics_slipplane_opening_sdot_0(instance) <= 0.0_pReal) &
@ -191,18 +129,18 @@ subroutine kinematics_slipplane_opening_init(fileUnit)
|
||||||
call IO_error(211_pInt,el=instance,ext_msg='criticaPlasticStrain ('//KINEMATICS_slipplane_opening_LABEL//')')
|
call IO_error(211_pInt,el=instance,ext_msg='criticaPlasticStrain ('//KINEMATICS_slipplane_opening_LABEL//')')
|
||||||
if (kinematics_slipplane_opening_N(instance) <= 0.0_pReal) &
|
if (kinematics_slipplane_opening_N(instance) <= 0.0_pReal) &
|
||||||
call IO_error(211_pInt,el=instance,ext_msg='rate_sensitivity ('//KINEMATICS_slipplane_opening_LABEL//')')
|
call IO_error(211_pInt,el=instance,ext_msg='rate_sensitivity ('//KINEMATICS_slipplane_opening_LABEL//')')
|
||||||
endif myPhase
|
enddo
|
||||||
enddo sanityChecks
|
|
||||||
|
|
||||||
|
|
||||||
end subroutine kinematics_slipplane_opening_init
|
end subroutine kinematics_slipplane_opening_init
|
||||||
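For orientation, the keys read above via config_phase(p)%getFloat/getFloats/getInts correspond to a material.config phase section like the sketch below. The section name and all numbers are invented for illustration, the kinematics label follows the one used in this module, and the two per-family lists must provide one entry per ncleavage entry (requiredSize=size(tempInt)):

 [hypotheticalDuctilePhase]
 (kinematics) slipplane_opening
 ncleavage                           3
 anisoductile_sdot0                  0.001
 anisoductile_ratesensitivity        20.0
 anisoductile_criticalplasticstrain  0.5
 anisoductile_criticalload           5.0e6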
|
|
||||||
!--------------------------------------------------------------------------------------------------
|
!--------------------------------------------------------------------------------------------------
|
||||||
!> @brief contains the constitutive equation for calculating the velocity gradient
|
!> @brief contains the constitutive equation for calculating the velocity gradient
|
||||||
!--------------------------------------------------------------------------------------------------
|
!--------------------------------------------------------------------------------------------------
|
||||||
subroutine kinematics_slipplane_opening_LiAndItsTangent(Ld, dLd_dTstar3333, Tstar_v, ipc, ip, el)
|
subroutine kinematics_slipplane_opening_LiAndItsTangent(Ld, dLd_dTstar, S, ipc, ip, el)
|
||||||
use prec, only: &
|
use prec, only: &
|
||||||
tol_math_check
|
tol_math_check
|
||||||
|
use math, only: &
|
||||||
|
math_mul33xx33
|
||||||
use lattice, only: &
|
use lattice, only: &
|
||||||
lattice_maxNslipFamily, &
|
lattice_maxNslipFamily, &
|
||||||
lattice_NslipSystem, &
|
lattice_NslipSystem, &
|
||||||
|
@ -210,53 +148,41 @@ subroutine kinematics_slipplane_opening_LiAndItsTangent(Ld, dLd_dTstar3333, Tsta
|
||||||
lattice_st, &
|
lattice_st, &
|
||||||
lattice_sn
|
lattice_sn
|
||||||
use material, only: &
|
use material, only: &
|
||||||
phaseAt, phasememberAt, &
|
material_phase, &
|
||||||
material_homog, &
|
material_homog, &
|
||||||
damage, &
|
damage, &
|
||||||
damageMapping
|
damageMapping
|
||||||
use math, only: &
|
use math, only: &
|
||||||
math_Plain3333to99, &
|
math_tensorproduct33
|
||||||
math_I3, &
|
|
||||||
math_identity4th, &
|
|
||||||
math_symmetric33, &
|
|
||||||
math_Mandel33to6, &
|
|
||||||
math_tensorproduct33, &
|
|
||||||
math_det33, &
|
|
||||||
math_mul33x33
|
|
||||||
|
|
||||||
implicit none
|
implicit none
|
||||||
integer(pInt), intent(in) :: &
|
integer(pInt), intent(in) :: &
|
||||||
ipc, & !< grain number
|
ipc, & !< grain number
|
||||||
ip, & !< integration point number
|
ip, & !< integration point number
|
||||||
el !< element number
|
el !< element number
|
||||||
real(pReal), intent(in), dimension(6) :: &
|
real(pReal), intent(in), dimension(3,3) :: &
|
||||||
Tstar_v !< 2nd Piola-Kirchhoff stress
|
S
|
||||||
real(pReal), intent(out), dimension(3,3) :: &
|
real(pReal), intent(out), dimension(3,3) :: &
|
||||||
Ld !< damage velocity gradient
|
Ld !< damage velocity gradient
|
||||||
real(pReal), intent(out), dimension(3,3,3,3) :: &
|
real(pReal), intent(out), dimension(3,3,3,3) :: &
|
||||||
dLd_dTstar3333 !< derivative of Ld with respect to Tstar (4th-order tensor)
|
dLd_dTstar !< derivative of Ld with respect to Tstar (4th-order tensor)
|
||||||
real(pReal), dimension(3,3) :: &
|
real(pReal), dimension(3,3) :: &
|
||||||
projection_d, projection_t, projection_n !< projection modes 3x3 tensor
|
projection_d, projection_t, projection_n !< projection modes 3x3 tensor
|
||||||
real(pReal), dimension(6) :: &
|
|
||||||
projection_d_v, projection_t_v, projection_n_v !< projection modes 3x3 vector
|
|
||||||
integer(pInt) :: &
|
integer(pInt) :: &
|
||||||
phase, &
|
instance, phase, &
|
||||||
constituent, &
|
|
||||||
instance, &
|
|
||||||
homog, damageOffset, &
|
homog, damageOffset, &
|
||||||
f, i, index_myFamily, k, l, m, n
|
f, i, index_myFamily, k, l, m, n
|
||||||
real(pReal) :: &
|
real(pReal) :: &
|
||||||
traction_d, traction_t, traction_n, traction_crit, &
|
traction_d, traction_t, traction_n, traction_crit, &
|
||||||
udotd, dudotd_dt, udott, dudott_dt, udotn, dudotn_dt
|
udotd, dudotd_dt, udott, dudott_dt, udotn, dudotn_dt
|
||||||
|
|
||||||
phase = phaseAt(ipc,ip,el)
|
phase = material_phase(ipc,ip,el)
|
||||||
constituent = phasememberAt(ipc,ip,el)
|
|
||||||
instance = kinematics_slipplane_opening_instance(phase)
|
instance = kinematics_slipplane_opening_instance(phase)
|
||||||
homog = material_homog(ip,el)
|
homog = material_homog(ip,el)
|
||||||
damageOffset = damageMapping(homog)%p(ip,el)
|
damageOffset = damageMapping(homog)%p(ip,el)
|
||||||
|
|
||||||
Ld = 0.0_pReal
|
Ld = 0.0_pReal
|
||||||
dLd_dTstar3333 = 0.0_pReal
|
dLd_dTstar = 0.0_pReal
|
||||||
do f = 1_pInt,lattice_maxNslipFamily
|
do f = 1_pInt,lattice_maxNslipFamily
|
||||||
index_myFamily = sum(lattice_NslipSystem(1:f-1_pInt,phase)) ! at which index starts my family
|
index_myFamily = sum(lattice_NslipSystem(1:f-1_pInt,phase)) ! at which index starts my family
|
||||||
do i = 1_pInt,kinematics_slipplane_opening_Nslip(f,instance) ! process each (active) slip system in family
|
do i = 1_pInt,kinematics_slipplane_opening_Nslip(f,instance) ! process each (active) slip system in family
|
||||||
|
@ -267,13 +193,10 @@ subroutine kinematics_slipplane_opening_LiAndItsTangent(Ld, dLd_dTstar3333, Tsta
|
||||||
projection_n = math_tensorproduct33(lattice_sn(1:3,index_myFamily+i,phase),&
|
projection_n = math_tensorproduct33(lattice_sn(1:3,index_myFamily+i,phase),&
|
||||||
lattice_sn(1:3,index_myFamily+i,phase))
|
lattice_sn(1:3,index_myFamily+i,phase))
|
||||||
|
|
||||||
projection_d_v(1:6) = math_Mandel33to6(math_symmetric33(projection_d(1:3,1:3)))
|
|
||||||
projection_t_v(1:6) = math_Mandel33to6(math_symmetric33(projection_t(1:3,1:3)))
|
|
||||||
projection_n_v(1:6) = math_Mandel33to6(math_symmetric33(projection_n(1:3,1:3)))
|
|
||||||
|
|
||||||
traction_d = dot_product(Tstar_v,projection_d_v(1:6))
|
traction_d = math_mul33xx33(S,projection_d)
|
||||||
traction_t = dot_product(Tstar_v,projection_t_v(1:6))
|
traction_t = math_mul33xx33(S,projection_t)
|
||||||
traction_n = dot_product(Tstar_v,projection_n_v(1:6))
|
traction_n = math_mul33xx33(S,projection_n)
|
||||||
|
|
||||||
traction_crit = kinematics_slipplane_opening_critLoad(f,instance)* &
|
traction_crit = kinematics_slipplane_opening_critLoad(f,instance)* &
|
||||||
damage(homog)%p(damageOffset) ! degrading critical load carrying capacity by damage
|
damage(homog)%p(damageOffset) ! degrading critical load carrying capacity by damage
|
||||||
|
@ -287,7 +210,7 @@ subroutine kinematics_slipplane_opening_LiAndItsTangent(Ld, dLd_dTstar3333, Tsta
|
||||||
Ld = Ld + udotd*projection_d
|
Ld = Ld + udotd*projection_d
|
||||||
dudotd_dt = udotd*kinematics_slipplane_opening_N(instance)/traction_d
|
dudotd_dt = udotd*kinematics_slipplane_opening_N(instance)/traction_d
|
||||||
forall (k=1_pInt:3_pInt,l=1_pInt:3_pInt,m=1_pInt:3_pInt,n=1_pInt:3_pInt) &
|
forall (k=1_pInt:3_pInt,l=1_pInt:3_pInt,m=1_pInt:3_pInt,n=1_pInt:3_pInt) &
|
||||||
dLd_dTstar3333(k,l,m,n) = dLd_dTstar3333(k,l,m,n) + &
|
dLd_dTstar(k,l,m,n) = dLd_dTstar(k,l,m,n) + &
|
||||||
dudotd_dt*projection_d(k,l)*projection_d(m,n)
|
dudotd_dt*projection_d(k,l)*projection_d(m,n)
|
||||||
endif
|
endif
|
||||||
|
|
||||||
|
@ -300,9 +223,10 @@ subroutine kinematics_slipplane_opening_LiAndItsTangent(Ld, dLd_dTstar3333, Tsta
|
||||||
Ld = Ld + udott*projection_t
|
Ld = Ld + udott*projection_t
|
||||||
dudott_dt = udott*kinematics_slipplane_opening_N(instance)/traction_t
|
dudott_dt = udott*kinematics_slipplane_opening_N(instance)/traction_t
|
||||||
forall (k=1_pInt:3_pInt,l=1_pInt:3_pInt,m=1_pInt:3_pInt,n=1_pInt:3_pInt) &
|
forall (k=1_pInt:3_pInt,l=1_pInt:3_pInt,m=1_pInt:3_pInt,n=1_pInt:3_pInt) &
|
||||||
dLd_dTstar3333(k,l,m,n) = dLd_dTstar3333(k,l,m,n) + &
|
dLd_dTstar(k,l,m,n) = dLd_dTstar(k,l,m,n) + &
|
||||||
dudott_dt*projection_t(k,l)*projection_t(m,n)
|
dudott_dt*projection_t(k,l)*projection_t(m,n)
|
||||||
endif
|
endif
|
||||||
|
|
||||||
udotn = &
|
udotn = &
|
||||||
kinematics_slipplane_opening_sdot_0(instance)* &
|
kinematics_slipplane_opening_sdot_0(instance)* &
|
||||||
(max(0.0_pReal,traction_n)/traction_crit - &
|
(max(0.0_pReal,traction_n)/traction_crit - &
|
||||||
|
@ -311,7 +235,7 @@ subroutine kinematics_slipplane_opening_LiAndItsTangent(Ld, dLd_dTstar3333, Tsta
|
||||||
Ld = Ld + udotn*projection_n
|
Ld = Ld + udotn*projection_n
|
||||||
dudotn_dt = udotn*kinematics_slipplane_opening_N(instance)/traction_n
|
dudotn_dt = udotn*kinematics_slipplane_opening_N(instance)/traction_n
|
||||||
forall (k=1_pInt:3_pInt,l=1_pInt:3_pInt,m=1_pInt:3_pInt,n=1_pInt:3_pInt) &
|
forall (k=1_pInt:3_pInt,l=1_pInt:3_pInt,m=1_pInt:3_pInt,n=1_pInt:3_pInt) &
|
||||||
dLd_dTstar3333(k,l,m,n) = dLd_dTstar3333(k,l,m,n) + &
|
dLd_dTstar(k,l,m,n) = dLd_dTstar(k,l,m,n) + &
|
||||||
dudotn_dt*projection_n(k,l)*projection_n(m,n)
|
dudotn_dt*projection_n(k,l)*projection_n(m,n)
|
||||||
endif
|
endif
|
||||||
enddo
|
enddo
|
||||||
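Stated as an equation, the traction computation above switched from Mandel 6-vector dot products to the direct double contraction provided by math_mul33xx33; for the symmetric second Piola-Kirchhoff stress S the two forms are equivalent. This is a restatement of the replaced lines, not new behaviour:

\[
  t_x \;=\; \mathbf{S} : \mathbf{P}_x \,, \qquad x \in \{d,\,t,\,n\}\,, \qquad \text{e.g. } \mathbf{P}_n = \mathbf{n} \otimes \mathbf{n},
\]

and the tangent accumulated in the forall loops is \( \partial \mathbf{L}_d / \partial \mathbf{S} \mathrel{+}= (\partial \dot{u}_x/\partial t_x)\, \mathbf{P}_x \otimes \mathbf{P}_x \).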
|
|
|
@ -4,34 +4,24 @@
|
||||||
!> @details to be done
|
!> @details to be done
|
||||||
!--------------------------------------------------------------------------------------------------
|
!--------------------------------------------------------------------------------------------------
|
||||||
module kinematics_thermal_expansion
|
module kinematics_thermal_expansion
|
||||||
use prec, only: &
|
use prec, only: &
|
||||||
pReal, &
|
pReal, &
|
||||||
pInt
|
pInt
|
||||||
|
|
||||||
implicit none
|
implicit none
|
||||||
private
|
private
|
||||||
integer(pInt), dimension(:), allocatable, public, protected :: &
|
|
||||||
kinematics_thermal_expansion_sizePostResults, & !< cumulative size of post results
|
type, private :: tParameters
|
||||||
kinematics_thermal_expansion_offset, & !< which kinematics is my current damage mechanism?
|
real(pReal), allocatable, dimension(:,:,:) :: &
|
||||||
kinematics_thermal_expansion_instance !< instance of damage kinematics mechanism
|
expansion
|
||||||
|
end type tParameters
|
||||||
integer(pInt), dimension(:,:), allocatable, target, public :: &
|
|
||||||
kinematics_thermal_expansion_sizePostResult !< size of each post result output
|
type(tParameters), dimension(:), allocatable :: param
|
||||||
|
|
||||||
character(len=64), dimension(:,:), allocatable, target, public :: &
|
public :: &
|
||||||
kinematics_thermal_expansion_output !< name of each post result output
|
kinematics_thermal_expansion_init, &
|
||||||
|
kinematics_thermal_expansion_initialStrain, &
|
||||||
integer(pInt), dimension(:), allocatable, target, public :: &
|
kinematics_thermal_expansion_LiAndItsTangent
|
||||||
kinematics_thermal_expansion_Noutput !< number of outputs per instance of this damage
|
|
||||||
|
|
||||||
! enum, bind(c) ! ToDo kinematics need state machinery to deal with sizePostResult
|
|
||||||
! enumerator :: undefined_ID, & ! possible remedy is to decouple having state vars from having output
|
|
||||||
! thermalexpansionrate_ID ! which means to separate user-defined types tState + tOutput...
|
|
||||||
! end enum
|
|
||||||
public :: &
|
|
||||||
kinematics_thermal_expansion_init, &
|
|
||||||
kinematics_thermal_expansion_initialStrain, &
|
|
||||||
kinematics_thermal_expansion_LiAndItsTangent
|
|
||||||
|
|
||||||
contains
|
contains
|
||||||
|
|
||||||
|
@ -40,197 +30,129 @@ contains
|
||||||
!> @brief module initialization
|
!> @brief module initialization
|
||||||
!> @details reads in material parameters, allocates arrays, and does sanity checks
|
!> @details reads in material parameters, allocates arrays, and does sanity checks
|
||||||
!--------------------------------------------------------------------------------------------------
|
!--------------------------------------------------------------------------------------------------
|
||||||
subroutine kinematics_thermal_expansion_init(fileUnit)
|
subroutine kinematics_thermal_expansion_init()
|
||||||
#if defined(__GFORTRAN__) || __INTEL_COMPILER >= 1800
|
use debug, only: &
|
||||||
use, intrinsic :: iso_fortran_env, only: &
|
debug_level,&
|
||||||
compiler_version, &
|
debug_constitutive,&
|
||||||
compiler_options
|
debug_levelBasic
|
||||||
#endif
|
use material, only: &
|
||||||
use debug, only: &
|
phase_kinematics, &
|
||||||
debug_level,&
|
KINEMATICS_thermal_expansion_label, &
|
||||||
debug_constitutive,&
|
KINEMATICS_thermal_expansion_ID
|
||||||
debug_levelBasic
|
use config, only: &
|
||||||
use IO, only: &
|
config_phase
|
||||||
IO_read, &
|
|
||||||
IO_lc, &
|
|
||||||
IO_getTag, &
|
|
||||||
IO_isBlank, &
|
|
||||||
IO_stringPos, &
|
|
||||||
IO_stringValue, &
|
|
||||||
IO_floatValue, &
|
|
||||||
IO_intValue, &
|
|
||||||
IO_warning, &
|
|
||||||
IO_error, &
|
|
||||||
IO_timeStamp, &
|
|
||||||
IO_EOF
|
|
||||||
use material, only: &
|
|
||||||
phase_kinematics, &
|
|
||||||
phase_Nkinematics, &
|
|
||||||
phase_Noutput, &
|
|
||||||
KINEMATICS_thermal_expansion_label, &
|
|
||||||
KINEMATICS_thermal_expansion_ID
|
|
||||||
use config, only: &
|
|
||||||
material_Nphase, &
|
|
||||||
MATERIAL_partPhase
|
|
||||||
|
|
||||||
implicit none
|
|
||||||
integer(pInt), intent(in) :: fileUnit
|
|
||||||
|
|
||||||
integer(pInt), allocatable, dimension(:) :: chunkPos
|
|
||||||
integer(pInt) :: maxNinstance,phase,instance,kinematics
|
|
||||||
character(len=65536) :: &
|
|
||||||
tag = '', &
|
|
||||||
line = ''
|
|
||||||
|
|
||||||
write(6,'(/,a)') ' <<<+- kinematics_'//KINEMATICS_thermal_expansion_LABEL//' init -+>>>'
|
|
||||||
write(6,'(a15,a)') ' Current time: ',IO_timeStamp()
|
|
||||||
#include "compilation_info.f90"
|
|
||||||
|
|
||||||
maxNinstance = int(count(phase_kinematics == KINEMATICS_thermal_expansion_ID),pInt)
|
|
||||||
if (maxNinstance == 0_pInt) return
|
|
||||||
|
|
||||||
if (iand(debug_level(debug_constitutive),debug_levelBasic) /= 0_pInt) &
|
implicit none
|
||||||
write(6,'(a16,1x,i5,/)') '# instances:',maxNinstance
|
integer(pInt) :: &
|
||||||
|
Ninstance, &
|
||||||
|
p, i
|
||||||
|
real(pReal), dimension(:), allocatable :: &
|
||||||
|
temp
|
||||||
|
|
||||||
|
write(6,'(/,a)') ' <<<+- kinematics_'//KINEMATICS_thermal_expansion_LABEL//' init -+>>>'
|
||||||
|
|
||||||
allocate(kinematics_thermal_expansion_offset(material_Nphase), source=0_pInt)
|
Ninstance = int(count(phase_kinematics == KINEMATICS_thermal_expansion_ID),pInt)
|
||||||
allocate(kinematics_thermal_expansion_instance(material_Nphase), source=0_pInt)
|
|
||||||
do phase = 1, material_Nphase
|
if (iand(debug_level(debug_constitutive),debug_levelBasic) /= 0_pInt) &
|
||||||
kinematics_thermal_expansion_instance(phase) = count(phase_kinematics(:,1:phase) == kinematics_thermal_expansion_ID)
|
write(6,'(a16,1x,i5,/)') '# instances:',Ninstance
|
||||||
do kinematics = 1, phase_Nkinematics(phase)
|
|
||||||
if (phase_kinematics(kinematics,phase) == kinematics_thermal_expansion_ID) &
|
allocate(param(Ninstance))
|
||||||
kinematics_thermal_expansion_offset(phase) = kinematics
|
|
||||||
enddo
|
do p = 1_pInt, size(phase_kinematics)
|
||||||
enddo
|
if (all(phase_kinematics(:,p) /= KINEMATICS_thermal_expansion_ID)) cycle
|
||||||
|
|
||||||
allocate(kinematics_thermal_expansion_sizePostResults(maxNinstance), source=0_pInt)
|
! ToDo: Here we need to decide how to extend the concept of instances to
|
||||||
allocate(kinematics_thermal_expansion_sizePostResult(maxval(phase_Noutput),maxNinstance),source=0_pInt)
|
! kinetics and sources. I would suggest that the same mechanism exists at maximum once per phase
|
||||||
allocate(kinematics_thermal_expansion_output(maxval(phase_Noutput),maxNinstance))
|
|
||||||
kinematics_thermal_expansion_output = ''
|
! read up to three parameters (constant, linear, quadratic with T)
|
||||||
allocate(kinematics_thermal_expansion_Noutput(maxNinstance), source=0_pInt)
|
temp = config_phase(p)%getFloats('thermal_expansion11')
|
||||||
|
!lattice_thermalExpansion33(1,1,1:size(temp),p) = temp
|
||||||
rewind(fileUnit)
|
temp = config_phase(p)%getFloats('thermal_expansion22', &
|
||||||
phase = 0_pInt
|
defaultVal=[(0.0_pReal, i=1,size(temp))],requiredSize=size(temp))
|
||||||
do while (trim(line) /= IO_EOF .and. IO_lc(IO_getTag(line,'<','>')) /= MATERIAL_partPhase) ! wind forward to <phase>
|
!lattice_thermalExpansion33(2,2,1:size(temp),p) = temp
|
||||||
line = IO_read(fileUnit)
|
temp = config_phase(p)%getFloats('thermal_expansion33', &
|
||||||
enddo
|
defaultVal=[(0.0_pReal, i=1,size(temp))],requiredSize=size(temp))
|
||||||
|
enddo
|
||||||
parsingFile: do while (trim(line) /= IO_EOF) ! read through sections of phase part
|
|
||||||
line = IO_read(fileUnit)
|
|
||||||
if (IO_isBlank(line)) cycle ! skip empty lines
|
|
||||||
if (IO_getTag(line,'<','>') /= '') then ! stop at next part
|
|
||||||
line = IO_read(fileUnit, .true.) ! reset IO_read
|
|
||||||
exit
|
|
||||||
endif
|
|
||||||
if (IO_getTag(line,'[',']') /= '') then ! next phase section
|
|
||||||
phase = phase + 1_pInt ! advance phase section counter
|
|
||||||
cycle ! skip to next line
|
|
||||||
endif
|
|
||||||
if (phase > 0_pInt ) then; if (any(phase_kinematics(:,phase) == KINEMATICS_thermal_expansion_ID)) then ! do not short-circuit here (.and. with next if statemen). It's not safe in Fortran
|
|
||||||
instance = kinematics_thermal_expansion_instance(phase) ! which instance of my damage is present phase
|
|
||||||
chunkPos = IO_stringPos(line)
|
|
||||||
tag = IO_lc(IO_stringValue(line,chunkPos,1_pInt)) ! extract key...
|
|
||||||
select case(tag)
|
|
||||||
! case ('(output)')
|
|
||||||
! output = IO_lc(IO_stringValue(line,chunkPos,2_pInt)) ! ...and corresponding output
|
|
||||||
! select case(output)
|
|
||||||
! case ('thermalexpansionrate')
|
|
||||||
! kinematics_thermal_expansion_Noutput(instance) = kinematics_thermal_expansion_Noutput(instance) + 1_pInt
|
|
||||||
! kinematics_thermal_expansion_outputID(kinematics_thermal_expansion_Noutput(instance),instance) = &
|
|
||||||
! thermalexpansionrate_ID
|
|
||||||
! kinematics_thermal_expansion_output(kinematics_thermal_expansion_Noutput(instance),instance) = output
|
|
||||||
! ToDo add sizePostResult loop afterwards...
|
|
||||||
|
|
||||||
end select
|
|
||||||
endif; endif
|
|
||||||
enddo parsingFile
|
|
||||||
|
|
||||||
end subroutine kinematics_thermal_expansion_init
|
end subroutine kinematics_thermal_expansion_init
|
||||||
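For orientation only, a material.config phase section that the rewritten reader above would accept might contain the lines below; the up to three numbers per key are the constant, linear, and quadratic coefficients in (T - Tref), and all values are invented for illustration:

 [hypotheticalThermalPhase]
 (kinematics) thermal_expansion
 thermal_expansion11   23.1e-6  0.0  0.0
 thermal_expansion22   23.1e-6
 thermal_expansion33   23.1e-6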
|
|
||||||
|
|
||||||
!--------------------------------------------------------------------------------------------------
|
!--------------------------------------------------------------------------------------------------
|
||||||
!> @brief report initial thermal strain based on current temperature deviation from reference
|
!> @brief report initial thermal strain based on current temperature deviation from reference
|
||||||
!--------------------------------------------------------------------------------------------------
|
!--------------------------------------------------------------------------------------------------
|
||||||
pure function kinematics_thermal_expansion_initialStrain(ipc, ip, el)
|
pure function kinematics_thermal_expansion_initialStrain(homog,phase,offset)
|
||||||
use material, only: &
|
use material, only: &
|
||||||
material_phase, &
|
temperature
|
||||||
material_homog, &
|
use lattice, only: &
|
||||||
temperature, &
|
lattice_thermalExpansion33, &
|
||||||
thermalMapping
|
lattice_referenceTemperature
|
||||||
use lattice, only: &
|
|
||||||
lattice_thermalExpansion33, &
|
implicit none
|
||||||
lattice_referenceTemperature
|
integer(pInt), intent(in) :: &
|
||||||
|
phase, &
|
||||||
|
homog, offset
|
||||||
|
real(pReal), dimension(3,3) :: &
|
||||||
|
kinematics_thermal_expansion_initialStrain !< initial thermal strain (should be small strain, though)
|
||||||
|
|
||||||
implicit none
|
|
||||||
integer(pInt), intent(in) :: &
|
kinematics_thermal_expansion_initialStrain = &
|
||||||
ipc, & !< grain number
|
(temperature(homog)%p(offset) - lattice_referenceTemperature(phase))**1 / 1. * &
|
||||||
ip, & !< integration point number
|
lattice_thermalExpansion33(1:3,1:3,1,phase) + & ! constant coefficient
|
||||||
el !< element number
|
(temperature(homog)%p(offset) - lattice_referenceTemperature(phase))**2 / 2. * &
|
||||||
real(pReal), dimension(3,3) :: &
|
lattice_thermalExpansion33(1:3,1:3,2,phase) + & ! linear coefficient
|
||||||
kinematics_thermal_expansion_initialStrain !< initial thermal strain (should be small strain, though)
|
(temperature(homog)%p(offset) - lattice_referenceTemperature(phase))**3 / 3. * &
|
||||||
integer(pInt) :: &
|
lattice_thermalExpansion33(1:3,1:3,3,phase) ! quadratic coefficient
|
||||||
phase, &
|
|
||||||
homog, offset
|
|
||||||
|
|
||||||
phase = material_phase(ipc,ip,el)
|
|
||||||
homog = material_homog(ip,el)
|
|
||||||
offset = thermalMapping(homog)%p(ip,el)
|
|
||||||
|
|
||||||
kinematics_thermal_expansion_initialStrain = &
|
|
||||||
(temperature(homog)%p(offset) - lattice_referenceTemperature(phase))**1 / 1. * &
|
|
||||||
lattice_thermalExpansion33(1:3,1:3,1,phase) + & ! constant coefficient
|
|
||||||
(temperature(homog)%p(offset) - lattice_referenceTemperature(phase))**2 / 2. * &
|
|
||||||
lattice_thermalExpansion33(1:3,1:3,2,phase) + & ! linear coefficient
|
|
||||||
(temperature(homog)%p(offset) - lattice_referenceTemperature(phase))**3 / 3. * &
|
|
||||||
lattice_thermalExpansion33(1:3,1:3,3,phase) ! quadratic coefficient
|
|
||||||
|
|
||||||
end function kinematics_thermal_expansion_initialStrain
|
end function kinematics_thermal_expansion_initialStrain
|
||||||
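Written out, the initial strain assembled by the new function body is the temperature integral of the expansion series stored in lattice_thermalExpansion33; this is a restatement of the lines above, not added behaviour:

\[
  \boldsymbol{\varepsilon}_{th}(T) \;=\; \sum_{k=1}^{3} \frac{(T - T_{ref})^{k}}{k}\,\mathbf{A}_k\,,
  \qquad \mathbf{A}_k = \texttt{lattice\_thermalExpansion33}(1\!:\!3,1\!:\!3,k,\mathrm{phase}).
\]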
|
|
||||||
|
|
||||||
!--------------------------------------------------------------------------------------------------
|
!--------------------------------------------------------------------------------------------------
|
||||||
!> @brief contains the constitutive equation for calculating the velocity gradient
|
!> @brief contains the constitutive equation for calculating the velocity gradient
|
||||||
!--------------------------------------------------------------------------------------------------
|
!--------------------------------------------------------------------------------------------------
|
||||||
subroutine kinematics_thermal_expansion_LiAndItsTangent(Li, dLi_dTstar3333, ipc, ip, el)
|
subroutine kinematics_thermal_expansion_LiAndItsTangent(Li, dLi_dTstar, ipc, ip, el)
|
||||||
use material, only: &
|
use material, only: &
|
||||||
material_phase, &
|
material_phase, &
|
||||||
material_homog, &
|
material_homog, &
|
||||||
temperature, &
|
temperature, &
|
||||||
temperatureRate, &
|
temperatureRate, &
|
||||||
thermalMapping
|
thermalMapping
|
||||||
use lattice, only: &
|
use lattice, only: &
|
||||||
lattice_thermalExpansion33, &
|
lattice_thermalExpansion33, &
|
||||||
lattice_referenceTemperature
|
lattice_referenceTemperature
|
||||||
|
|
||||||
implicit none
|
implicit none
|
||||||
integer(pInt), intent(in) :: &
|
integer(pInt), intent(in) :: &
|
||||||
ipc, & !< grain number
|
ipc, & !< grain number
|
||||||
ip, & !< integration point number
|
ip, & !< integration point number
|
||||||
el !< element number
|
el !< element number
|
||||||
real(pReal), intent(out), dimension(3,3) :: &
|
real(pReal), intent(out), dimension(3,3) :: &
|
||||||
Li !< thermal velocity gradient
|
Li !< thermal velocity gradient
|
||||||
real(pReal), intent(out), dimension(3,3,3,3) :: &
|
real(pReal), intent(out), dimension(3,3,3,3) :: &
|
||||||
dLi_dTstar3333 !< derivative of Li with respect to Tstar (4th-order tensor defined to be zero)
|
dLi_dTstar !< derivative of Li with respect to Tstar (4th-order tensor defined to be zero)
|
||||||
integer(pInt) :: &
|
integer(pInt) :: &
|
||||||
phase, &
|
phase, &
|
||||||
homog, offset
|
homog, offset
|
||||||
real(pReal) :: &
|
real(pReal) :: &
|
||||||
T, TRef, TDot
|
T, TRef, TDot
|
||||||
|
|
||||||
phase = material_phase(ipc,ip,el)
|
phase = material_phase(ipc,ip,el)
|
||||||
homog = material_homog(ip,el)
|
homog = material_homog(ip,el)
|
||||||
offset = thermalMapping(homog)%p(ip,el)
|
offset = thermalMapping(homog)%p(ip,el)
|
||||||
T = temperature(homog)%p(offset)
|
T = temperature(homog)%p(offset)
|
||||||
TDot = temperatureRate(homog)%p(offset)
|
TDot = temperatureRate(homog)%p(offset)
|
||||||
TRef = lattice_referenceTemperature(phase)
|
TRef = lattice_referenceTemperature(phase)
|
||||||
|
|
||||||
Li = TDot * ( &
|
Li = TDot * ( &
|
||||||
lattice_thermalExpansion33(1:3,1:3,1,phase)*(T - TRef)**0 & ! constant coefficient
|
lattice_thermalExpansion33(1:3,1:3,1,phase)*(T - TRef)**0 & ! constant coefficient
|
||||||
+ lattice_thermalExpansion33(1:3,1:3,2,phase)*(T - TRef)**1 & ! linear coefficient
|
+ lattice_thermalExpansion33(1:3,1:3,2,phase)*(T - TRef)**1 & ! linear coefficient
|
||||||
+ lattice_thermalExpansion33(1:3,1:3,3,phase)*(T - TRef)**2 & ! quadratic coefficient
|
+ lattice_thermalExpansion33(1:3,1:3,3,phase)*(T - TRef)**2 & ! quadratic coefficient
|
||||||
) / &
|
) / &
|
||||||
(1.0_pReal &
|
(1.0_pReal &
|
||||||
+ lattice_thermalExpansion33(1:3,1:3,1,phase)*(T - TRef)**1 / 1. &
|
+ lattice_thermalExpansion33(1:3,1:3,1,phase)*(T - TRef)**1 / 1. &
|
||||||
+ lattice_thermalExpansion33(1:3,1:3,2,phase)*(T - TRef)**2 / 2. &
|
+ lattice_thermalExpansion33(1:3,1:3,2,phase)*(T - TRef)**2 / 2. &
|
||||||
+ lattice_thermalExpansion33(1:3,1:3,3,phase)*(T - TRef)**3 / 3. &
|
+ lattice_thermalExpansion33(1:3,1:3,3,phase)*(T - TRef)**3 / 3. &
|
||||||
)
|
)
|
||||||
dLi_dTstar3333 = 0.0_pReal
|
dLi_dTstar = 0.0_pReal
|
||||||
|
|
||||||
end subroutine kinematics_thermal_expansion_LiAndItsTangent
|
end subroutine kinematics_thermal_expansion_LiAndItsTangent
|
||||||
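Likewise, the velocity gradient computed above is the rate of that eigenstrain divided (component-wise, as in the Fortran expression) by the accumulated stretch:

\[
  \mathbf{L}_i \;=\; \dot{T}\,
  \frac{\sum_{k=1}^{3} (T - T_{ref})^{\,k-1}\,\mathbf{A}_k}
       {1 \;+\; \sum_{k=1}^{3} \frac{(T - T_{ref})^{k}}{k}\,\mathbf{A}_k}\,,
\]

with the same coefficients \(\mathbf{A}_k\) as in the initial-strain expression, and \( \partial\mathbf{L}_i/\partial\mathbf{S} = 0 \) as set explicitly at the end.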
|
|
||||||
|
|
src/lattice.f90 (725 changes; diff suppressed because it is too large)
src/material.f90 (106 changes)
|
@ -235,6 +235,7 @@ module material
|
||||||
public :: &
|
public :: &
|
||||||
material_init, &
|
material_init, &
|
||||||
material_allocatePlasticState, &
|
material_allocatePlasticState, &
|
||||||
|
material_allocateSourceState, &
|
||||||
ELASTICITY_hooke_ID ,&
|
ELASTICITY_hooke_ID ,&
|
||||||
PLASTICITY_none_ID, &
|
PLASTICITY_none_ID, &
|
||||||
PLASTICITY_isotropic_ID, &
|
PLASTICITY_isotropic_ID, &
|
||||||
|
@ -305,9 +306,7 @@ subroutine material_init()
|
||||||
texture_name
|
texture_name
|
||||||
use mesh, only: &
|
use mesh, only: &
|
||||||
mesh_homogenizationAt, &
|
mesh_homogenizationAt, &
|
||||||
mesh_NipsPerElem, &
|
theMesh
|
||||||
mesh_NcpElems, &
|
|
||||||
FE_geomtype
|
|
||||||
|
|
||||||
implicit none
|
implicit none
|
||||||
integer(pInt), parameter :: FILEUNIT = 210_pInt
|
integer(pInt), parameter :: FILEUNIT = 210_pInt
|
||||||
|
@ -399,10 +398,10 @@ subroutine material_init()
|
||||||
call material_populateGrains
|
call material_populateGrains
|
||||||
|
|
||||||
! BEGIN DEPRECATED
|
! BEGIN DEPRECATED
|
||||||
allocate(phaseAt ( homogenization_maxNgrains,mesh_nIPsPerElem,mesh_NcpElems),source=0_pInt)
|
allocate(phaseAt ( homogenization_maxNgrains,theMesh%elem%nIPs,theMesh%Nelems),source=0_pInt)
|
||||||
allocate(phasememberAt ( homogenization_maxNgrains,mesh_nIPsPerElem,mesh_NcpElems),source=0_pInt)
|
allocate(phasememberAt ( homogenization_maxNgrains,theMesh%elem%nIPs,theMesh%Nelems),source=0_pInt)
|
||||||
allocate(mappingHomogenization (2, mesh_nIPsPerElem,mesh_NcpElems),source=0_pInt)
|
allocate(mappingHomogenization (2, theMesh%elem%nIPs,theMesh%Nelems),source=0_pInt)
|
||||||
allocate(mappingHomogenizationConst( mesh_nIPsPerElem,mesh_NcpElems),source=1_pInt)
|
allocate(mappingHomogenizationConst( theMesh%elem%nIPs,theMesh%Nelems),source=1_pInt)
|
||||||
! END DEPRECATED
|
! END DEPRECATED
|
||||||
|
|
||||||
allocate(material_homogenizationAt,source=mesh_homogenizationAt)
|
allocate(material_homogenizationAt,source=mesh_homogenizationAt)
|
||||||
|
@ -410,9 +409,9 @@ subroutine material_init()
|
||||||
allocate(CounterHomogenization(size(config_homogenization)),source=0_pInt)
|
allocate(CounterHomogenization(size(config_homogenization)),source=0_pInt)
|
||||||
|
|
||||||
! BEGIN DEPRECATED
|
! BEGIN DEPRECATED
|
||||||
do e = 1_pInt,mesh_NcpElems
|
do e = 1_pInt,theMesh%Nelems
|
||||||
myHomog = mesh_homogenizationAt(e)
|
myHomog = mesh_homogenizationAt(e)
|
||||||
do i = 1_pInt, mesh_NipsPerElem
|
do i = 1_pInt, theMesh%elem%nIPs
|
||||||
CounterHomogenization(myHomog) = CounterHomogenization(myHomog) + 1_pInt
|
CounterHomogenization(myHomog) = CounterHomogenization(myHomog) + 1_pInt
|
||||||
mappingHomogenization(1:2,i,e) = [CounterHomogenization(myHomog),myHomog]
|
mappingHomogenization(1:2,i,e) = [CounterHomogenization(myHomog),myHomog]
|
||||||
do g = 1_pInt,homogenization_Ngrains(myHomog)
|
do g = 1_pInt,homogenization_Ngrains(myHomog)
|
||||||
|
@ -553,7 +552,7 @@ subroutine material_parseMicrostructure
|
||||||
microstructure_name
|
microstructure_name
|
||||||
use mesh, only: &
|
use mesh, only: &
|
||||||
mesh_microstructureAt, &
|
mesh_microstructureAt, &
|
||||||
mesh_NcpElems
|
theMesh
|
||||||
|
|
||||||
implicit none
|
implicit none
|
||||||
character(len=65536), dimension(:), allocatable :: &
|
character(len=65536), dimension(:), allocatable :: &
|
||||||
|
@ -571,7 +570,7 @@ subroutine material_parseMicrostructure
|
||||||
if(any(mesh_microstructureAt > size(config_microstructure))) &
|
if(any(mesh_microstructureAt > size(config_microstructure))) &
|
||||||
call IO_error(155_pInt,ext_msg='More microstructures in geometry than sections in material.config')
|
call IO_error(155_pInt,ext_msg='More microstructures in geometry than sections in material.config')
|
||||||
|
|
||||||
forall (e = 1_pInt:mesh_NcpElems) &
|
forall (e = 1_pInt:theMesh%Nelems) &
|
||||||
microstructure_active(mesh_microstructureAt(e)) = .true. ! current microstructure used in model? Elementwise view, maximum N operations for N elements
|
microstructure_active(mesh_microstructureAt(e)) = .true. ! current microstructure used in model? Elementwise view, maximum N operations for N elements
|
||||||
|
|
||||||
do m=1_pInt, size(config_microstructure)
|
do m=1_pInt, size(config_microstructure)
|
||||||
|
@ -922,7 +921,7 @@ subroutine material_allocatePlasticState(phase,NofMyPhase,&
|
||||||
sizeState,sizeDotState,sizeDeltaState,&
|
sizeState,sizeDotState,sizeDeltaState,&
|
||||||
Nslip,Ntwin,Ntrans)
|
Nslip,Ntwin,Ntrans)
|
||||||
use numerics, only: &
|
use numerics, only: &
|
||||||
numerics_integrator2 => numerics_integrator ! compatibility hack
|
numerics_integrator
|
||||||
|
|
||||||
implicit none
|
implicit none
|
||||||
integer(pInt), intent(in) :: &
|
integer(pInt), intent(in) :: &
|
||||||
|
@ -934,8 +933,6 @@ subroutine material_allocatePlasticState(phase,NofMyPhase,&
|
||||||
Nslip, &
|
Nslip, &
|
||||||
Ntwin, &
|
Ntwin, &
|
||||||
Ntrans
|
Ntrans
|
||||||
integer(pInt) :: numerics_integrator ! compatibility hack
|
|
||||||
numerics_integrator = numerics_integrator2(1) ! compatibility hack
|
|
||||||
|
|
||||||
plasticState(phase)%sizeState = sizeState
|
plasticState(phase)%sizeState = sizeState
|
||||||
plasticState(phase)%sizeDotState = sizeDotState
|
plasticState(phase)%sizeDotState = sizeDotState
|
||||||
|
@ -966,6 +963,47 @@ subroutine material_allocatePlasticState(phase,NofMyPhase,&
|
||||||
end subroutine material_allocatePlasticState
|
end subroutine material_allocatePlasticState
|
||||||
|
|
||||||
|
|
||||||
+!--------------------------------------------------------------------------------------------------
+!> @brief allocates the source state of a phase
+!--------------------------------------------------------------------------------------------------
+subroutine material_allocateSourceState(phase,of,NofMyPhase,&
+                                        sizeState,sizeDotState,sizeDeltaState)
+ use numerics, only: &
+   numerics_integrator
+
+ implicit none
+ integer(pInt), intent(in) :: &
+   phase, &
+   of, &
+   NofMyPhase, &
+   sizeState, sizeDotState,sizeDeltaState
+
+ sourceState(phase)%p(of)%sizeState      = sizeState
+ sourceState(phase)%p(of)%sizeDotState   = sizeDotState
+ sourceState(phase)%p(of)%sizeDeltaState = sizeDeltaState
+ plasticState(phase)%offsetDeltaState    = sizeState-sizeDeltaState                ! deltaState occupies latter part of state by definition
+
+ allocate(sourceState(phase)%p(of)%aTolState       (sizeState),             source=0.0_pReal)
+ allocate(sourceState(phase)%p(of)%state0          (sizeState,NofMyPhase),  source=0.0_pReal)
+ allocate(sourceState(phase)%p(of)%partionedState0 (sizeState,NofMyPhase),  source=0.0_pReal)
+ allocate(sourceState(phase)%p(of)%subState0       (sizeState,NofMyPhase),  source=0.0_pReal)
+ allocate(sourceState(phase)%p(of)%state           (sizeState,NofMyPhase),  source=0.0_pReal)
+
+ allocate(sourceState(phase)%p(of)%dotState        (sizeDotState,NofMyPhase), source=0.0_pReal)
+ if (numerics_integrator == 1_pInt) then
+   allocate(sourceState(phase)%p(of)%previousDotState (sizeDotState,NofMyPhase), source=0.0_pReal)
+   allocate(sourceState(phase)%p(of)%previousDotState2(sizeDotState,NofMyPhase), source=0.0_pReal)
+ endif
+ if (numerics_integrator == 4_pInt) &
+   allocate(sourceState(phase)%p(of)%RK4dotState      (sizeDotState,NofMyPhase), source=0.0_pReal)
+ if (numerics_integrator == 5_pInt) &
+   allocate(sourceState(phase)%p(of)%RKCK45dotState   (6,sizeDotState,NofMyPhase), source=0.0_pReal)
+
+ allocate(sourceState(phase)%p(of)%deltaState         (sizeDeltaState,NofMyPhase), source=0.0_pReal)
+
+end subroutine material_allocateSourceState
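A minimal sketch of how a source module might invoke the new allocator; the caller-side names (p, sourceOffset, NipcMyPhase) and the state sizes are placeholders for illustration and are not taken from this commit:

 ! hypothetical call site, e.g. in a source_*_init routine
 call material_allocateSourceState(p, sourceOffset, NipcMyPhase, &
                                   sizeState      = 1_pInt, &
                                   sizeDotState   = 1_pInt, &
                                   sizeDeltaState = 0_pInt)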
|
|
||||||
|
|
||||||
!--------------------------------------------------------------------------------------------------
|
!--------------------------------------------------------------------------------------------------
|
||||||
!> @brief populates the grains
|
!> @brief populates the grains
|
||||||
!> @details populates the grains by identifying active microstructure/homogenization pairs,
|
!> @details populates the grains by identifying active microstructure/homogenization pairs,
|
||||||
|
@ -984,13 +1022,10 @@ subroutine material_populateGrains
|
||||||
math_sampleFiberOri, &
|
math_sampleFiberOri, &
|
||||||
math_symmetricEulers
|
math_symmetricEulers
|
||||||
use mesh, only: &
|
use mesh, only: &
|
||||||
mesh_NipsPerElem, &
|
|
||||||
mesh_elemType, &
|
|
||||||
mesh_homogenizationAt, &
|
mesh_homogenizationAt, &
|
||||||
mesh_microstructureAt, &
|
mesh_microstructureAt, &
|
||||||
mesh_NcpElems, &
|
theMesh, &
|
||||||
mesh_ipVolume, &
|
mesh_ipVolume
|
||||||
FE_geomtype
|
|
||||||
use config, only: &
|
use config, only: &
|
||||||
config_homogenization, &
|
config_homogenization, &
|
||||||
config_microstructure, &
|
config_microstructure, &
|
||||||
|
@ -1026,24 +1061,24 @@ subroutine material_populateGrains
|
||||||
|
|
||||||
myDebug = debug_level(debug_material)
|
myDebug = debug_level(debug_material)
|
||||||
|
|
||||||
allocate(material_volume(homogenization_maxNgrains,mesh_nIPsPerElem,mesh_NcpElems), source=0.0_pReal)
|
allocate(material_volume(homogenization_maxNgrains,theMesh%elem%nIPs,theMesh%Nelems), source=0.0_pReal)
|
||||||
allocate(material_phase(homogenization_maxNgrains,mesh_nIPsPerElem,mesh_NcpElems), source=0_pInt)
|
allocate(material_phase(homogenization_maxNgrains,theMesh%elem%nIPs,theMesh%Nelems), source=0_pInt)
|
||||||
allocate(material_homog(mesh_nIPsPerElem,mesh_NcpElems), source=0_pInt)
|
allocate(material_homog(theMesh%elem%nIPs,theMesh%Nelems), source=0_pInt)
|
||||||
allocate(material_texture(homogenization_maxNgrains,mesh_nIPsPerElem,mesh_NcpElems), source=0_pInt)
|
allocate(material_texture(homogenization_maxNgrains,theMesh%elem%nIPs,theMesh%Nelems), source=0_pInt)
|
||||||
allocate(material_EulerAngles(3,homogenization_maxNgrains,mesh_nIPsPerElem,mesh_NcpElems),source=0.0_pReal)
|
allocate(material_EulerAngles(3,homogenization_maxNgrains,theMesh%elem%nIPs,theMesh%Nelems),source=0.0_pReal)
|
||||||
|
|
||||||
allocate(Ngrains(size(config_homogenization),size(config_microstructure)), source=0_pInt)
|
allocate(Ngrains(size(config_homogenization),size(config_microstructure)), source=0_pInt)
|
||||||
allocate(Nelems (size(config_homogenization),size(config_microstructure)), source=0_pInt)
|
allocate(Nelems (size(config_homogenization),size(config_microstructure)), source=0_pInt)
|
||||||
|
|
||||||
! populating homogenization schemes in each
|
! populating homogenization schemes in each
|
||||||
!--------------------------------------------------------------------------------------------------
|
!--------------------------------------------------------------------------------------------------
|
||||||
do e = 1_pInt, mesh_NcpElems
|
do e = 1_pInt, theMesh%Nelems
|
||||||
material_homog(1_pInt:mesh_NipsPerElem,e) = mesh_homogenizationAt(e)
|
material_homog(1_pInt:theMesh%elem%nIPs,e) = mesh_homogenizationAt(e)
|
||||||
enddo
|
enddo
|
||||||
|
|
||||||
!--------------------------------------------------------------------------------------------------
|
!--------------------------------------------------------------------------------------------------
|
||||||
! precounting of elements for each homog/micro pair
|
! precounting of elements for each homog/micro pair
|
||||||
do e = 1_pInt, mesh_NcpElems
|
do e = 1_pInt, theMesh%Nelems
|
||||||
homog = mesh_homogenizationAt(e)
|
homog = mesh_homogenizationAt(e)
|
||||||
micro = mesh_microstructureAt(e)
|
micro = mesh_microstructureAt(e)
|
||||||
Nelems(homog,micro) = Nelems(homog,micro) + 1_pInt
|
Nelems(homog,micro) = Nelems(homog,micro) + 1_pInt
|
||||||
|
@ -1061,8 +1096,7 @@ subroutine material_populateGrains
|
||||||
!--------------------------------------------------------------------------------------------------
|
!--------------------------------------------------------------------------------------------------
|
||||||
! identify maximum grain count per IP (from element) and find grains per homog/micro pair
|
! identify maximum grain count per IP (from element) and find grains per homog/micro pair
|
||||||
Nelems = 0_pInt ! reuse as counter
|
Nelems = 0_pInt ! reuse as counter
|
||||||
elementLooping: do e = 1_pInt,mesh_NcpElems
|
elementLooping: do e = 1_pInt,theMesh%Nelems
|
||||||
t = mesh_elemType
|
|
||||||
homog = mesh_homogenizationAt(e)
|
homog = mesh_homogenizationAt(e)
|
||||||
micro = mesh_microstructureAt(e)
|
micro = mesh_microstructureAt(e)
|
||||||
if (homog < 1_pInt .or. homog > size(config_homogenization)) & ! out of bounds
|
if (homog < 1_pInt .or. homog > size(config_homogenization)) & ! out of bounds
|
||||||
|
@ -1072,7 +1106,7 @@ subroutine material_populateGrains
|
||||||
if (microstructure_elemhomo(micro)) then ! how many grains are needed at this element?
|
if (microstructure_elemhomo(micro)) then ! how many grains are needed at this element?
|
||||||
dGrains = homogenization_Ngrains(homog) ! only one set of Ngrains (other IPs are plain copies)
|
dGrains = homogenization_Ngrains(homog) ! only one set of Ngrains (other IPs are plain copies)
|
||||||
else
|
else
|
||||||
dGrains = homogenization_Ngrains(homog) * mesh_NipsPerElem ! each IP has Ngrains
|
dGrains = homogenization_Ngrains(homog) * theMesh%elem%nIPs ! each IP has Ngrains
|
||||||
endif
|
endif
|
||||||
Ngrains(homog,micro) = Ngrains(homog,micro) + dGrains ! total grain count
|
Ngrains(homog,micro) = Ngrains(homog,micro) + dGrains ! total grain count
|
||||||
Nelems(homog,micro) = Nelems(homog,micro) + 1_pInt ! total element count
|
Nelems(homog,micro) = Nelems(homog,micro) + 1_pInt ! total element count
|
||||||
|
@ -1106,16 +1140,15 @@ subroutine material_populateGrains
|
||||||
|
|
||||||
do hme = 1_pInt, Nelems(homog,micro)
|
do hme = 1_pInt, Nelems(homog,micro)
|
||||||
e = elemsOfHomogMicro(homog,micro)%p(hme) ! my combination of homog and micro, only perform calculations for elements with homog, micro combinations which is indexed in cpElemsindex
|
e = elemsOfHomogMicro(homog,micro)%p(hme) ! my combination of homog and micro, only perform calculations for elements with homog, micro combinations which is indexed in cpElemsindex
|
||||||
t = mesh_elemType
|
|
||||||
if (microstructure_elemhomo(micro)) then ! homogeneous distribution of grains over each element's IPs
|
if (microstructure_elemhomo(micro)) then ! homogeneous distribution of grains over each element's IPs
|
||||||
volumeOfGrain(grain+1_pInt:grain+dGrains) = sum(mesh_ipVolume(1:mesh_NipsPerElem,e))/&
|
volumeOfGrain(grain+1_pInt:grain+dGrains) = sum(mesh_ipVolume(1:theMesh%elem%nIPs,e))/&
|
||||||
real(dGrains,pReal) ! each grain combines size of all IPs in that element
|
real(dGrains,pReal) ! each grain combines size of all IPs in that element
|
||||||
grain = grain + dGrains ! wind forward by Ngrains@IP
|
grain = grain + dGrains ! wind forward by Ngrains@IP
|
||||||
else
|
else
|
||||||
forall (i = 1_pInt:mesh_NipsPerElem) & ! loop over IPs
|
forall (i = 1_pInt:theMesh%elem%nIPs) & ! loop over IPs
|
||||||
volumeOfGrain(grain+(i-1)*dGrains+1_pInt:grain+i*dGrains) = &
|
volumeOfGrain(grain+(i-1)*dGrains+1_pInt:grain+i*dGrains) = &
|
||||||
mesh_ipVolume(i,e)/real(dGrains,pReal) ! assign IPvolume/Ngrains@IP to all grains of IP
|
mesh_ipVolume(i,e)/real(dGrains,pReal) ! assign IPvolume/Ngrains@IP to all grains of IP
|
||||||
grain = grain + mesh_NipsPerElem * dGrains ! wind forward by Nips*Ngrains@IP
|
grain = grain + theMesh%elem%nIPs * dGrains ! wind forward by Nips*Ngrains@IP
|
||||||
endif
|
endif
|
||||||
enddo
|
enddo
|
||||||
|
|
||||||
|
@ -1261,11 +1294,10 @@ subroutine material_populateGrains
|
||||||
|
|
||||||
do hme = 1_pInt, Nelems(homog,micro)
|
do hme = 1_pInt, Nelems(homog,micro)
|
||||||
e = elemsOfHomogMicro(homog,micro)%p(hme) ! only perform calculations for elements with homog, micro combinations which is indexed in cpElemsindex
|
e = elemsOfHomogMicro(homog,micro)%p(hme) ! only perform calculations for elements with homog, micro combinations which is indexed in cpElemsindex
|
||||||
t = mesh_elemType
|
|
||||||
if (microstructure_elemhomo(micro)) then ! homogeneous distribution of grains over each element's IPs
|
if (microstructure_elemhomo(micro)) then ! homogeneous distribution of grains over each element's IPs
|
||||||
m = 1_pInt ! process only first IP
|
m = 1_pInt ! process only first IP
|
||||||
else
|
else
|
||||||
m = mesh_NipsPerElem
|
m = theMesh%elem%nIPs
|
||||||
endif
|
endif
|
||||||
|
|
||||||
do i = 1_pInt, m ! loop over necessary IPs
|
do i = 1_pInt, m ! loop over necessary IPs
|
||||||
|
@ -1303,7 +1335,7 @@ subroutine material_populateGrains
|
||||||
|
|
||||||
enddo
|
enddo
|
||||||
|
|
||||||
do i = i, mesh_NipsPerElem ! loop over IPs to (possibly) distribute copies from first IP
|
do i = i, theMesh%elem%nIPs ! loop over IPs to (possibly) distribute copies from first IP
|
||||||
material_volume (1_pInt:dGrains,i,e) = material_volume (1_pInt:dGrains,1,e)
|
material_volume (1_pInt:dGrains,i,e) = material_volume (1_pInt:dGrains,1,e)
|
||||||
material_phase (1_pInt:dGrains,i,e) = material_phase (1_pInt:dGrains,1,e)
|
material_phase (1_pInt:dGrains,i,e) = material_phase (1_pInt:dGrains,1,e)
|
||||||
material_texture(1_pInt:dGrains,i,e) = material_texture(1_pInt:dGrains,1,e)
|
material_texture(1_pInt:dGrains,i,e) = material_texture(1_pInt:dGrains,1,e)
|
||||||
|
|
src/math.f90 (121 changes)
|
@ -70,6 +70,10 @@ module math
|
||||||
!--------------------------------------------------------------------------------------------------
|
!--------------------------------------------------------------------------------------------------
|
||||||
! Provide deprecated names for compatibility
|
! Provide deprecated names for compatibility
|
||||||
|
|
||||||
+ interface math_cross
+  module procedure math_crossproduct
+ end interface math_cross
|
|
||||||
! ToDo MD: Our naming scheme was a little bit odd: We use essentially the re-ordering according to Nye
|
! ToDo MD: Our naming scheme was a little bit odd: We use essentially the re-ordering according to Nye
|
||||||
! (convenient because Abaqus and Marc want to have 12 on position 4)
|
! (convenient because Abaqus and Marc want to have 12 on position 4)
|
||||||
! but weight the shear components according to Mandel (convenient for matrix multiplications)
|
! but weight the shear components according to Mandel (convenient for matrix multiplications)
|
||||||
|
@ -98,26 +102,19 @@ module math
|
||||||
module procedure math_99to3333
|
module procedure math_99to3333
|
||||||
end interface math_Plain99to3333
|
end interface math_Plain99to3333
|
||||||
|
|
||||||
interface math_Mandel3333to66
|
|
||||||
module procedure math_sym3333to66
|
|
||||||
end interface math_Mandel3333to66
|
|
||||||
|
|
||||||
interface math_Mandel66to3333
|
|
||||||
module procedure math_66toSym3333
|
|
||||||
end interface math_Mandel66to3333
|
|
||||||
|
|
||||||
public :: &
|
public :: &
|
||||||
math_Plain33to9, &
|
math_Plain33to9, &
|
||||||
math_Plain9to33, &
|
math_Plain9to33, &
|
||||||
math_Mandel33to6, &
|
math_Mandel33to6, &
|
||||||
math_Mandel6to33, &
|
math_Mandel6to33, &
|
||||||
math_Plain3333to99, &
|
math_Plain3333to99, &
|
||||||
math_Plain99to3333, &
|
math_Plain99to3333
|
||||||
math_Mandel3333to66, &
|
|
||||||
math_Mandel66to3333
|
|
||||||
!---------------------------------------------------------------------------------------------------
|
!---------------------------------------------------------------------------------------------------
|
||||||
|
|
||||||
public :: &
|
public :: &
|
||||||
+#if defined(__PGI)
+  norm2, &
+#endif
math_init, &
|
math_init, &
|
||||||
math_qsort, &
|
math_qsort, &
|
||||||
math_expand, &
|
math_expand, &
|
||||||
|
@ -126,6 +123,7 @@ module math
|
||||||
math_identity4th, &
|
math_identity4th, &
|
||||||
math_civita, &
|
math_civita, &
|
||||||
math_delta, &
|
math_delta, &
|
||||||
|
math_cross, &
|
||||||
math_crossproduct, &
|
math_crossproduct, &
|
||||||
math_tensorproduct33, &
|
math_tensorproduct33, &
|
||||||
math_mul3x3, &
|
math_mul3x3, &
|
||||||
|
@ -351,20 +349,38 @@ end subroutine math_check
|
||||||
|
|
||||||
 !--------------------------------------------------------------------------------------------------
 !> @brief Quicksort algorithm for two-dimensional integer arrays
-! Sorting is done with respect to array(1,:)
-! and keeps array(2:N,:) linked to it.
+! Sorting is done with respect to array(sort,:) and keeps array(/=sort,:) linked to it.
+! default: sort=1
 !--------------------------------------------------------------------------------------------------
-recursive subroutine math_qsort(a, istart, iend)
+recursive subroutine math_qsort(a, istart, iend, sortDim)

  implicit none
  integer(pInt), dimension(:,:), intent(inout) :: a
- integer(pInt), intent(in) :: istart,iend
- integer(pInt) :: ipivot
+ integer(pInt), intent(in),optional :: istart,iend, sortDim
+ integer(pInt) :: ipivot,s,e,d

- if (istart < iend) then
-   ipivot = qsort_partition(a,istart, iend)
-   call math_qsort(a, istart, ipivot-1_pInt)
-   call math_qsort(a, ipivot+1_pInt, iend)
+ if(present(istart)) then
+   s = istart
+ else
+   s = lbound(a,2)
+ endif
+
+ if(present(iend)) then
+   e = iend
+ else
+   e = ubound(a,2)
+ endif
+
+ if(present(sortDim)) then
+   d = sortDim
+ else
+   d = 1
+ endif
+
+ if (s < e) then
+   ipivot = qsort_partition(a,s, e, d)
+   call math_qsort(a, s, ipivot-1_pInt, d)
+   call math_qsort(a, ipivot+1_pInt, e, d)
  endif
|
|
||||||
!--------------------------------------------------------------------------------------------------
|
!--------------------------------------------------------------------------------------------------
|
||||||
|
@ -373,37 +389,34 @@ recursive subroutine math_qsort(a, istart, iend)
|
||||||
!-------------------------------------------------------------------------------------------------
|
!-------------------------------------------------------------------------------------------------
|
||||||
!> @brief Partitioning required for quicksort
|
!> @brief Partitioning required for quicksort
|
||||||
!-------------------------------------------------------------------------------------------------
|
!-------------------------------------------------------------------------------------------------
|
||||||
integer(pInt) function qsort_partition(a, istart, iend)
|
integer(pInt) function qsort_partition(a, istart, iend, sort)
|
||||||
|
|
||||||
implicit none
|
implicit none
|
||||||
integer(pInt), dimension(:,:), intent(inout) :: a
|
integer(pInt), dimension(:,:), intent(inout) :: a
|
||||||
integer(pInt), intent(in) :: istart,iend
|
integer(pInt), intent(in) :: istart,iend,sort
|
||||||
integer(pInt) :: i,j,k,tmp
|
integer(pInt), dimension(size(a,1)) :: tmp
|
||||||
|
integer(pInt) :: i,j
|
||||||
|
|
||||||
do
|
do
|
||||||
! find the first element on the right side less than or equal to the pivot point
|
! find the first element on the right side less than or equal to the pivot point
|
||||||
do j = iend, istart, -1_pInt
|
do j = iend, istart, -1_pInt
|
||||||
if (a(1,j) <= a(1,istart)) exit
|
if (a(sort,j) <= a(sort,istart)) exit
|
||||||
enddo
|
enddo
|
||||||
! find the first element on the left side greater than the pivot point
|
! find the first element on the left side greater than the pivot point
|
||||||
do i = istart, iend
|
do i = istart, iend
|
||||||
if (a(1,i) > a(1,istart)) exit
|
if (a(sort,i) > a(sort,istart)) exit
|
||||||
enddo
|
enddo
|
||||||
if (i < j) then ! if the indexes do not cross, exchange values
|
cross: if (i >= j) then ! if the indices cross, exchange left value with pivot and return with the partition index
|
||||||
do k = 1_pInt, int(size(a,1_pInt), pInt)
|
tmp = a(:,istart)
|
||||||
tmp = a(k,i)
|
a(:,istart) = a(:,j)
|
||||||
a(k,i) = a(k,j)
|
a(:,j) = tmp
|
||||||
a(k,j) = tmp
|
|
||||||
enddo
|
|
||||||
else ! if they do cross, exchange left value with pivot and return with the partition index
|
|
||||||
do k = 1_pInt, int(size(a,1_pInt), pInt)
|
|
||||||
tmp = a(k,istart)
|
|
||||||
a(k,istart) = a(k,j)
|
|
||||||
a(k,j) = tmp
|
|
||||||
enddo
|
|
||||||
qsort_partition = j
|
qsort_partition = j
|
||||||
return
|
return
|
||||||
endif
|
else cross ! if they do not cross, exchange values
|
||||||
|
tmp = a(:,i)
|
||||||
|
a(:,i) = a(:,j)
|
||||||
|
a(:,j) = tmp
|
||||||
|
endif cross
|
||||||
enddo
|
enddo
|
||||||
|
|
||||||
end function qsort_partition
|
end function qsort_partition
|
||||||
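A minimal usage sketch of the generalized interface (array contents invented for illustration): with no optional arguments the whole array is sorted along dimension 2 using row 1 as the key, while sortDim selects another key row and keeps the remaining rows linked:

 integer(pInt), dimension(2,4) :: pairs
 pairs = reshape([3_pInt,30_pInt, 1_pInt,10_pInt, 4_pInt,40_pInt, 2_pInt,20_pInt],[2,4])
 call math_qsort(pairs)                          ! sort columns by row 1 (default)
 call math_qsort(pairs, sortDim=2_pInt)          ! sort columns by row 2 instead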
|
@ -1869,7 +1882,6 @@ function math_sampleGaussOri(center,FWHM)
|
||||||
math_sampleGaussOri = math_RtoEuler(math_mul33x33(R,math_EulerToR(center)))
|
math_sampleGaussOri = math_RtoEuler(math_mul33x33(R,math_EulerToR(center)))
|
||||||
endif
|
endif
|
||||||
|
|
||||||
|
|
||||||
end function math_sampleGaussOri
|
end function math_sampleGaussOri
|
||||||
|
|
||||||
|
|
||||||
|
@ -1942,11 +1954,11 @@ real(pReal) function math_sampleGaussVar(meanvalue, stddev, width)
|
||||||
tol_math_check
|
tol_math_check
|
||||||
|
|
||||||
implicit none
|
implicit none
|
||||||
real(pReal), intent(in) :: meanvalue, & ! meanvalue of gauss distribution
|
real(pReal), intent(in) :: meanvalue, & ! meanvalue of gauss distribution
|
||||||
stddev ! standard deviation of gauss distribution
|
stddev ! standard deviation of gauss distribution
|
||||||
real(pReal), intent(in), optional :: width ! width of considered values as multiples of standard deviation
|
real(pReal), intent(in), optional :: width ! width of considered values as multiples of standard deviation
|
||||||
real(pReal), dimension(2) :: rnd ! random numbers
|
real(pReal), dimension(2) :: rnd ! random numbers
|
||||||
real(pReal) :: scatter, & ! normalized scatter around meanvalue
|
real(pReal) :: scatter, & ! normalized scatter around meanvalue
|
||||||
myWidth
|
myWidth
|
||||||
|
|
||||||
if (abs(stddev) < tol_math_check) then
|
if (abs(stddev) < tol_math_check) then
|
||||||
|
@ -2707,4 +2719,19 @@ real(pReal) pure elemental function math_clip(a, left, right)
|
||||||
|
|
||||||
end function math_clip
|
end function math_clip
|
||||||
|
|
||||||
|
|
||||||
+#if defined(__PGI)
+!--------------------------------------------------------------------------------------------------
+!> @brief substitute for the norm2 intrinsic which is not available in PGI 18.10
+!--------------------------------------------------------------------------------------------------
+real(pReal) pure function norm2(v)
+
+ implicit none
+ real(pReal), intent(in), dimension(3) :: v
+
+ norm2 = sqrt(sum(v**2))
+
+end function norm2
+#endif
+
 end module math
|
|
Some files were not shown because too many files have changed in this diff.