Merge branch 'lattice-structure-rename' into ShowGrainBoundaries

This commit is contained in:
Martin Diehl 2020-11-29 20:50:41 +01:00
commit 4d2bf37193
189 changed files with 7093 additions and 5469 deletions

View File

@ -2,14 +2,10 @@
stages: stages:
- prepareAll - prepareAll
- python - python
- preprocessing - deprecated
- postprocessing - compile
- compilePETSc
- prepareGrid
- grid - grid
- compileMarc
- marc - marc
- example
- performance - performance
- createPackage - createPackage
- createDocumentation - createDocumentation
@ -20,12 +16,12 @@ stages:
################################################################################################### ###################################################################################################
before_script: before_script:
- if [ $(awk "/$CI_PIPELINE_ID/{print NR}" $TESTROOT/GitLabCI.queue)x == 'x' ]; - if [ $(awk "/$CI_PIPELINE_ID/{print NR}" $LOCAL_HOME/GitLabCI.queue)x == 'x' ];
then echo $CI_PIPELINE_ID >> $TESTROOT/GitLabCI.queue; then echo $CI_PIPELINE_ID >> $LOCAL_HOME/GitLabCI.queue;
fi fi
- while [ $(awk "/$CI_PIPELINE_ID/{print NR}" $TESTROOT/GitLabCI.queue) != 1 ]; - while [ $(awk "/$CI_PIPELINE_ID/{print NR}" $LOCAL_HOME/GitLabCI.queue) != 1 ];
do sleep 5m; do sleep 5m;
echo -e "Currently queued pipelines:\n$(cat $TESTROOT/GitLabCI.queue)\n"; echo -e "Currently queued pipelines:\n$(cat $LOCAL_HOME/GitLabCI.queue)\n";
done done
- source $DAMASKROOT/env/DAMASK.sh - source $DAMASKROOT/env/DAMASK.sh
- cd $DAMASKROOT/PRIVATE/testing - cd $DAMASKROOT/PRIVATE/testing
@ -45,61 +41,55 @@ variables:
# =============================================================================================== # ===============================================================================================
# Shortcut names # Shortcut names
# =============================================================================================== # ===============================================================================================
DAMASKROOT: "$TESTROOT/GitLabCI_Pipeline_$CI_PIPELINE_ID/DAMASK" DAMASKROOT: "$LOCAL_HOME/GitLabCI_Pipeline_$CI_PIPELINE_ID/DAMASK"
TESTROOT: "$LOCAL_HOME/GitLabCI_Pipeline_$CI_PIPELINE_ID/tests"
# =============================================================================================== # ===============================================================================================
# Names of module files to load # Names of module files to load
# =============================================================================================== # ===============================================================================================
# ++++++++++++ Compiler +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ # ++++++++++++ Compiler +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
IntelCompiler17_8: "Compiler/Intel/17.8 Libraries/IMKL/2017" IntelCompiler19_1: "Compiler/Intel/19.1.2 Libraries/IMKL/2020"
IntelCompiler18_4: "Compiler/Intel/18.4 Libraries/IMKL/2018" GNUCompiler10: "Compiler/GNU/10"
GNUCompiler8_2: "Compiler/GNU/8.2"
# ------------ Defaults ---------------------------------------------- # ------------ Defaults ----------------------------------------------
IntelCompiler: "$IntelCompiler18_4" IntelCompiler: "$IntelCompiler19_1"
GNUCompiler: "$GNUCompiler8_2" GNUCompiler: "$GNUCompiler10"
# ++++++++++++ MPI ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ # ++++++++++++ MPI ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
IMPI2018Intel18_4: "MPI/Intel/18.4/IntelMPI/2018" IMPI2020Intel19_1: "MPI/Intel/19.1.2/IntelMPI/2019"
MPICH3_3GNU8_2: "MPI/GNU/8.2/MPICH/3.3" OMPI4_0GNU10: "MPI/GNU/10/OpenMPI/4.0.5"
# ------------ Defaults ---------------------------------------------- # ------------ Defaults ----------------------------------------------
MPICH_Intel: "$IMPI2018Intel18_4" MPI_Intel: "$IMPI2020Intel19_1"
MPICH_GNU: "$MPICH3_3GNU8_2" MPI_GNU: "$OMPI4_0GNU10"
# ++++++++++++ PETSc ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ # ++++++++++++ PETSc ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
PETSc3_10_3IMPI2018Intel18_4: "Libraries/PETSc/3.10.3/Intel-18.4-IntelMPI-2018" PETSc3_14_0IMPI2020Intel19_1: "Libraries/PETSc/3.14.0/Intel-19.1.2-IntelMPI-2019"
PETSc3_10_3MPICH3_3GNU8_2: "Libraries/PETSc/3.10.3/GNU-8.2-MPICH-3.3" PETSc3_14_0OMPI4_0GNU10: "Libraries/PETSc/3.14.0/GNU-10-OpenMPI-4.0.5"
# ------------ Defaults ---------------------------------------------- # ------------ Defaults ----------------------------------------------
PETSc_MPICH_Intel: "$PETSc3_10_3IMPI2018Intel18_4" PETSc_Intel: "$PETSc3_14_0IMPI2020Intel19_1"
PETSc_MPICH_GNU: "$PETSc3_10_3MPICH3_3GNU8_2" PETSc_GNU: "$PETSc3_14_0OMPI4_0GNU10"
# ++++++++++++ commercial FEM ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ # ++++++++++++ commercial FEM ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
MSC2019_1: "FEM/MSC/2019.1" MSC2020: "FEM/MSC/2020"
# ------------ Defaults ---------------------------------------------- # ------------ Defaults ----------------------------------------------
MSC: "$MSC2019_1" MSC: "$MSC2020"
IntelMarc: "$IntelCompiler17_8" IntelMarc: "$IntelCompiler19_1"
HDF5Marc: "HDF5/1.10.5/Intel-17.8" HDF5Marc: "HDF5/1.12.0/Intel-19.1.2"
# ++++++++++++ Documentation ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Doxygen1_8_17: "Documentation/Doxygen/1.8.17"
# ------------ Defaults ----------------------------------------------
Doxygen: "$Doxygen1_8_17"
################################################################################################### ###################################################################################################
checkout: checkout:
stage: prepareAll stage: prepareAll
before_script: before_script:
- echo $CI_PIPELINE_ID >> $TESTROOT/GitLabCI.queue - echo $CI_PIPELINE_ID >> $LOCAL_HOME/GitLabCI.queue
- while [ $(awk "/$CI_PIPELINE_ID/{print NR}" $TESTROOT/GitLabCI.queue) != 1 ]; - while [ $(awk "/$CI_PIPELINE_ID/{print NR}" $LOCAL_HOME/GitLabCI.queue) != 1 ];
do sleep 5m; do sleep 5m;
echo -e "Currently queued pipelines:\n$(cat $TESTROOT/GitLabCI.queue)\n"; echo -e "Currently queued pipelines:\n$(cat $LOCAL_HOME/GitLabCI.queue)\n";
done done
script: script:
- mkdir -p $DAMASKROOT - mkdir -p $DAMASKROOT
- mkdir -p $TESTROOT
- cd $DAMASKROOT - cd $DAMASKROOT
- if [ -d DAMASK ]; then rm -rf DAMASK; fi # there might be some leftovers from a failed clone
- git clone -q git@magit1.mpie.de:damask/DAMASK.git . - git clone -q git@magit1.mpie.de:damask/DAMASK.git .
- git checkout $CI_COMMIT_SHA - git checkout $CI_COMMIT_SHA
- git submodule update --init - git submodule update --init
- source env/DAMASK.sh - source env/DAMASK.sh
- make processing - make processing
- mkdir /tmp/$CI_PIPELINE_ID
except: except:
- master - master
- release - release
@ -109,78 +99,77 @@ Pytest_python:
stage: python stage: python
script: script:
- cd $DAMASKROOT/python - cd $DAMASKROOT/python
- pytest --basetemp=/tmp/${CI_PIPELINE_ID}/python - pytest --basetemp=${TESTROOT}/python -v
except: except:
- master - master
- release - release
################################################################################################### ###################################################################################################
Pre_SeedGeneration: Pre_SeedGeneration:
stage: preprocessing stage: deprecated
script: PreProcessing_SeedGeneration/test.py script: PreProcessing_SeedGeneration/test.py
except: except:
- master - master
- release - release
Pre_GeomGeneration: Pre_GeomGeneration:
stage: preprocessing stage: deprecated
script: PreProcessing_GeomGeneration/test.py script: PreProcessing_GeomGeneration/test.py
except: except:
- master - master
- release - release
Pre_GeomModification: Pre_GeomModification:
stage: preprocessing stage: deprecated
script: PreProcessing_GeomModification/test.py script: PreProcessing_GeomModification/test.py
except: except:
- master - master
- release - release
Pre_General: Pre_General:
stage: preprocessing stage: deprecated
script: PreProcessing/test.py script: PreProcessing/test.py
except: except:
- master - master
- release - release
###################################################################################################
Post_General: Post_General:
stage: postprocessing stage: deprecated
script: PostProcessing/test.py script: PostProcessing/test.py
except: except:
- master - master
- release - release
Post_GeometryReconstruction: Post_GeometryReconstruction:
stage: postprocessing stage: deprecated
script: spectral_geometryReconstruction/test.py script: spectral_geometryReconstruction/test.py
except: except:
- master - master
- release - release
Post_addCurl: Post_addCurl:
stage: postprocessing stage: deprecated
script: addCurl/test.py script: addCurl/test.py
except: except:
- master - master
- release - release
Post_addDivergence: Post_addDivergence:
stage: postprocessing stage: deprecated
script: addDivergence/test.py script: addDivergence/test.py
except: except:
- master - master
- release - release
Post_addGradient: Post_addGradient:
stage: postprocessing stage: deprecated
script: addGradient/test.py script: addGradient/test.py
except: except:
- master - master
- release - release
Post_OrientationAverageMisorientation: Post_OrientationAverageMisorientation:
stage: postprocessing stage: deprecated
script: script:
- OrientationAverageMisorientation/test.py - OrientationAverageMisorientation/test.py
except: except:
@ -188,61 +177,76 @@ Post_OrientationAverageMisorientation:
- release - release
################################################################################################### ###################################################################################################
grid_mech_compile_Intel: compile_grid_Intel:
stage: compilePETSc stage: compile
script: script:
- module load $IntelCompiler $MPICH_Intel $PETSc_MPICH_Intel - module load $IntelCompiler $MPI_Intel $PETSc_Intel
- cp -r grid_mech_compile grid_mech_compile_Intel
- grid_mech_compile_Intel/test.py
- cd pytest - cd pytest
- pytest -k 'compile and grid' --basetemp=/tmp/${CI_PIPELINE_ID}/compile_grid_Intel - pytest -k 'compile and grid' --basetemp=${TESTROOT}/compile_grid_Intel
except: except:
- master - master
- release - release
Compile_FEM_Intel: compile_mesh_Intel:
stage: compilePETSc stage: compile
script: script:
- module load $IntelCompiler $MPICH_Intel $PETSc_MPICH_Intel - module load $IntelCompiler $MPI_Intel $PETSc_Intel
- cp -r FEM_compile FEM_compile_Intel
- FEM_compile_Intel/test.py
- cd pytest - cd pytest
- pytest -k 'compile and mesh' --basetemp=/tmp/${CI_PIPELINE_ID}/compile_mesh_Intel - pytest -k 'compile and mesh' --basetemp=${TESTROOT}/compile_mesh_Intel
except: except:
- master - master
- release - release
grid_mech_compile_GNU: compile_grid_GNU:
stage: compilePETSc stage: compile
script: script:
- module load $GNUCompiler $MPICH_GNU $PETSc_MPICH_GNU - module load $GNUCompiler $MPI_GNU $PETSc_GNU
- cp -r grid_mech_compile grid_mech_compile_GNU
- grid_mech_compile_GNU/test.py
- cd pytest - cd pytest
- pytest -k 'compile and grid' --basetemp=/tmp/${CI_PIPELINE_ID}/compile_grid_GNU - pytest -k 'compile and grid' --basetemp=${TESTROOT}/compile_grid_GNU
except: except:
- master - master
- release - release
Compile_FEM_GNU: compile_mesh_GNU:
stage: compilePETSc stage: compile
script: script:
- module load $GNUCompiler $MPICH_GNU $PETSc_MPICH_GNU - module load $GNUCompiler $MPI_GNU $PETSc_GNU
- cp -r FEM_compile FEM_compile_GNU
- FEM_compile_GNU/test.py
- cd pytest - cd pytest
- pytest -k 'compile and mesh' --basetemp=/tmp/${CI_PIPELINE_ID}/compile_mesh_GNU - pytest -k 'compile and mesh' --basetemp=${TESTROOT}/compile_mesh_GNU
except: except:
- master - master
- release - release
################################################################################################### compile_MARC:
Compile_Intel_Prepare: stage: compile
stage: prepareGrid
script: script:
- module load $IntelCompiler $MPICH_Intel $PETSc_MPICH_Intel - module load $IntelMarc $HDF5Marc $MSC
- cd $DAMASKROOT - cd pytest
- make clean grid processing - pytest -k 'compile and Marc' --basetemp=${TESTROOT}/compile_Marc
except:
- master
- release
setup_grid:
stage: compile
script:
- module load $IntelCompiler $MPI_Intel $PETSc_Intel
- BUILD_DIR=$(mktemp -d)
- cd ${BUILD_DIR}
- cmake -DDAMASK_SOLVER=GRID -DCMAKE_INSTALL_PREFIX=${DAMASKROOT} ${DAMASKROOT}
- make -j2 all install
except:
- master
- release
setup_mesh:
stage: compile
script:
- module load $IntelCompiler $MPI_Intel $PETSc_Intel
- BUILD_DIR=$(mktemp -d)
- cd ${BUILD_DIR}
- cmake -DDAMASK_SOLVER=MESH -DCMAKE_INSTALL_PREFIX=${DAMASKROOT} ${DAMASKROOT}
- make -j2 all install
except: except:
- master - master
- release - release
@ -251,9 +255,9 @@ Compile_Intel_Prepare:
Pytest_grid: Pytest_grid:
stage: grid stage: grid
script: script:
- module load $IntelCompiler $MPICH_Intel $PETSc_MPICH_Intel - module load $IntelCompiler $MPI_Intel $PETSc_Intel
- cd pytest - cd pytest
- pytest -m 'not compile' --basetemp=/tmp/${CI_PIPELINE_ID}/fortran - pytest -k 'not compile' --basetemp=${TESTROOT}/fortran -v
except: except:
- master - master
- release - release
@ -265,13 +269,6 @@ Thermal:
- master - master
- release - release
grid_parsingArguments:
stage: grid
script: grid_parsingArguments/test.py
except:
- master
- release
Nonlocal_Damage_DetectChanges: Nonlocal_Damage_DetectChanges:
stage: grid stage: grid
script: Nonlocal_Damage_DetectChanges/test.py script: Nonlocal_Damage_DetectChanges/test.py
@ -279,22 +276,6 @@ Nonlocal_Damage_DetectChanges:
- master - master
- release - release
grid_all_restart:
stage: grid
script: grid_all_restart/test.py
except:
- master
- release
grid_all_restartMPI:
stage: grid
script:
- module load $IntelCompiler $MPICH_Intel $PETSc_MPICH_Intel
- grid_all_restartMPI/test.py
except:
- master
- release
Plasticity_DetectChanges: Plasticity_DetectChanges:
stage: grid stage: grid
script: Plasticity_DetectChanges/test.py script: Plasticity_DetectChanges/test.py
@ -309,46 +290,8 @@ Phenopowerlaw_singleSlip:
- master - master
- release - release
###################################################################################################
Marc_compileIfort:
stage: compileMarc
script:
- module load $IntelMarc $HDF5Marc $MSC
- Marc_compileIfort/test.py
- cd pytest
- pytest -k 'compile and Marc'
except:
- master
- release
################################################################################################### ###################################################################################################
Hex_elastic:
stage: marc
script:
- module load $IntelMarc $HDF5Marc $MSC
- Hex_elastic/test.py
except:
- master
- release
CubicFCC_elastic:
stage: marc
script:
- module load $IntelMarc $HDF5Marc $MSC
- CubicFCC_elastic/test.py
except:
- master
- release
CubicBCC_elastic:
stage: marc
script:
- module load $IntelMarc $HDF5Marc $MSC
- CubicBCC_elastic/test.py
except:
- master
- release
J2_plasticBehavior: J2_plasticBehavior:
stage: marc stage: marc
script: script:
@ -367,25 +310,17 @@ Marc_elementLib:
- master - master
- release - release
###################################################################################################
grid_all_example:
stage: example
script: grid_all_example/test.py
except:
- master
- release
################################################################################################### ###################################################################################################
SpectralRuntime: SpectralRuntime:
stage: performance stage: performance
script: script:
- module load $IntelCompiler $MPICH_Intel $PETSc_MPICH_Intel - module load $IntelCompiler $MPI_Intel $PETSc_Intel
- cd $DAMASKROOT - cd $DAMASKROOT
- make clean grid processing OPTIMIZATION=AGGRESSIVE - make clean grid processing OPTIMIZATION=AGGRESSIVE
- cd $TESTROOT/performance # location of old results - cd $LOCAL_HOME/performance # location of old results
- git checkout . # undo any changes (i.e. run time data from non-development branch) - git checkout . # undo any changes (i.e. run time data from non-development branch)
- cd $DAMASKROOT/PRIVATE/testing - cd $DAMASKROOT/PRIVATE/testing
- SpectralAll_runtime/test.py -d $TESTROOT/performance - SpectralAll_runtime/test.py -d $LOCAL_HOME/performance
except: except:
- master - master
- release - release
@ -401,20 +336,10 @@ createTar:
- release - release
################################################################################################### ###################################################################################################
Marc: Python:
stage: createDocumentation stage: createDocumentation
script: script:
- module load $IntelCompiler $MPICH_Intel $PETSc_MPICH_Intel $Doxygen - echo 'tbd one matesting1'
- $DAMASKROOT/PRIVATE/documenting/runDoxygen.sh $DAMASKROOT marc
except:
- master
- release
GridSolver:
stage: createDocumentation
script:
- module load $IntelCompiler $MPICH_Intel $PETSc_MPICH_Intel $Doxygen
- $DAMASKROOT/PRIVATE/documenting/runDoxygen.sh $DAMASKROOT grid
except: except:
- master - master
- release - release
@ -423,11 +348,11 @@ GridSolver:
backupData: backupData:
stage: saveDocumentation stage: saveDocumentation
script: script:
- cd $TESTROOT/performance # location of new runtime results - cd $LOCAL_HOME/performance # location of new runtime results
- git commit -am"${CI_PIPELINE_ID}_${CI_COMMIT_SHA}" - git commit -am"${CI_PIPELINE_ID}_${CI_COMMIT_SHA}"
- mkdir $BACKUP/${CI_PIPELINE_ID}_${CI_COMMIT_SHA} - mkdir $BACKUP/${CI_PIPELINE_ID}_${CI_COMMIT_SHA}
- mv $TESTROOT/performance/time.png $BACKUP/${CI_PIPELINE_ID}_${CI_COMMIT_SHA}/ - mv $LOCAL_HOME/performance/time.png $BACKUP/${CI_PIPELINE_ID}_${CI_COMMIT_SHA}/
- mv $TESTROOT/performance/memory.png $BACKUP/${CI_PIPELINE_ID}_${CI_COMMIT_SHA}/ - mv $LOCAL_HOME/performance/memory.png $BACKUP/${CI_PIPELINE_ID}_${CI_COMMIT_SHA}/
- mv $DAMASKROOT/PRIVATE/documenting/DAMASK_* $BACKUP/${CI_PIPELINE_ID}_${CI_COMMIT_SHA}/ - mv $DAMASKROOT/PRIVATE/documenting/DAMASK_* $BACKUP/${CI_PIPELINE_ID}_${CI_COMMIT_SHA}/
only: only:
- development - development
@ -457,8 +382,8 @@ removeData:
before_script: before_script:
- echo "Removing data and lock of pipeline $CI_PIPELINE_ID" - echo "Removing data and lock of pipeline $CI_PIPELINE_ID"
script: script:
- rm -rf $TESTROOT/GitLabCI_Pipeline_$CI_PIPELINE_ID - rm -rf $LOCAL_HOME/GitLabCI_Pipeline_$CI_PIPELINE_ID
- sed -i "/$CI_PIPELINE_ID/d" $TESTROOT/GitLabCI.queue # in case pipeline was manually (web GUI) restarted and releaseLock was performed already - sed -i "/$CI_PIPELINE_ID/d" $LOCAL_HOME/GitLabCI.queue # in case pipeline was manually (web GUI) restarted and releaseLock was performed already
except: except:
- master - master
- release - release
@ -469,7 +394,7 @@ removeLock:
before_script: before_script:
- echo "Removing lock of pipeline $CI_PIPELINE_ID" - echo "Removing lock of pipeline $CI_PIPELINE_ID"
when: always when: always
script: sed -i "/$CI_PIPELINE_ID/d" $TESTROOT/GitLabCI.queue script: sed -i "/$CI_PIPELINE_ID/d" $LOCAL_HOME/GitLabCI.queue
except: except:
- master - master
- release - release

@ -1 +1 @@
Subproject commit 1e8c66897820468ab46958d995005e2b69204d0e Subproject commit 751f96155294e449b31c5d4913fea7bc02ce81b1

View File

@ -1 +1 @@
v3.0.0-alpha-556-gf379aecd8 v3.0.0-alpha-868-gb8344973d

View File

@ -98,11 +98,12 @@ set (DEBUG_FLAGS "${DEBUG_FLAGS} -ftrapuv")
set (DEBUG_FLAGS "${DEBUG_FLAGS} -fpe-all=0") set (DEBUG_FLAGS "${DEBUG_FLAGS} -fpe-all=0")
# ... capture all floating-point exceptions, sets -ftz automatically # ... capture all floating-point exceptions, sets -ftz automatically
set (DEBUG_FLAGS "${DEBUG_FLAGS} -warn") # disable due to compiler bug https://community.intel.com/t5/Intel-Fortran-Compiler/false-positive-stand-f18-and-IEEE-SELECTED-REAL-KIND/m-p/1227336
#set (DEBUG_FLAGS "${DEBUG_FLAGS} -warn")
# enables warnings ... # enables warnings ...
set (DEBUG_FLAGS "${DEBUG_FLAGS} errors") #set (DEBUG_FLAGS "${DEBUG_FLAGS} errors")
# ... warnings are changed to errors # ... warnings are changed to errors
set (DEBUG_FLAGS "${DEBUG_FLAGS},stderrors") #set (DEBUG_FLAGS "${DEBUG_FLAGS},stderrors")
# ... warnings about Fortran standard violations are changed to errors # ... warnings about Fortran standard violations are changed to errors
set (DEBUG_FLAGS "${DEBUG_FLAGS} -debug-parameters all") set (DEBUG_FLAGS "${DEBUG_FLAGS} -debug-parameters all")

2
env/CONFIG vendored
View File

@ -2,4 +2,4 @@
set DAMASK_NUM_THREADS = 4 set DAMASK_NUM_THREADS = 4
set MSC_ROOT = /opt/msc set MSC_ROOT = /opt/msc
set MARC_VERSION = 2019.1 set MSC_VERSION = 2020

2
env/DAMASK.csh vendored
View File

@ -51,4 +51,4 @@ else
setenv PYTHONPATH $DAMASK_ROOT/python:$PYTHONPATH setenv PYTHONPATH $DAMASK_ROOT/python:$PYTHONPATH
endif endif
setenv MSC_ROOT setenv MSC_ROOT
setenv MARC_VERSION setenv MSC_VERSION

3
examples/.gitignore vendored
View File

@ -3,6 +3,3 @@
*.xdmf *.xdmf
*.sta *.sta
*.vt* *.vt*
*.geom
*.seeds
postProc

View File

@ -1,16 +1,16 @@
# Kuo, J. C., Mikrostrukturmechanik von Bikristallen mit Kippkorngrenzen. Shaker-Verlag 2004. http://edoc.mpg.de/204079 # Kuo, J. C., Mikrostrukturmechanik von Bikristallen mit Kippkorngrenzen. Shaker-Verlag 2004. http://edoc.mpg.de/204079
Aluminum: Aluminum:
elasticity: {C_11: 110.9e9, C_12: 58.34e9, type: hooke} mechanics:
generic: lattice: aP
elasticity: {C_11: 110.9e9, C_12: 58.34e9, type: hooke}
output: [F, P, Fe, Fp, Lp] output: [F, P, Fe, Fp, Lp]
lattice: iso plasticity:
plasticity: type: isotropic
type: isotropic output: [xi]
output: [xi] xi_0: 31e6
xi_0: 31e6 xi_inf: 63e6
xi_inf: 63e6 dot_gamma_0: 0.001
dot_gamma_0: 0.001 n: 20
n: 20 M: 3
M: 3 h_0: 75e6
h_0: 75e6 a: 2.25
a: 2.25

View File

@ -1,17 +1,17 @@
# Maiti and Eisenlohr 2018 Scripta Materialia # Maiti and Eisenlohr 2018 Scripta Materialia
Air: Air:
elasticity: {C_11: 10e9, C_12: 0.0, type: hooke} mechanics:
generic: lattice: aP
elasticity: {C_11: 10e9, C_12: 0.0, type: hooke}
output: [F, P, Fe, Fp, Lp] output: [F, P, Fe, Fp, Lp]
lattice: iso plasticity:
plasticity: type: isotropic
type: isotropic output: [xi]
output: [xi] xi_0: 0.3e6
xi_0: 0.3e6 xi_inf: 0.6e6
xi_inf: 0.6e6 dot_gamma_0: 0.001
dot_gamma_0: 0.001 n: 5
n: 5 M: 3
M: 3 h_0: 1e6
h_0: 1e6 a: 2
a: 2 dilatation: true
dilatation: true

View File

@ -2,15 +2,16 @@
# Tasan et.al. 2015 International Journal of Plasticity # Tasan et.al. 2015 International Journal of Plasticity
# Diehl et.al. 2015 Meccanica # Diehl et.al. 2015 Meccanica
Ferrite: Ferrite:
elasticity: {C_11: 233.3e9, C_12: 135.5e9, C_44: 118.0e9, type: hooke} mechanics:
lattice: bcc lattice: cI
plasticity: elasticity: {C_11: 233.3e9, C_12: 135.5e9, C_44: 118.0e9, type: hooke}
N_sl: [12, 12] plasticity:
a_sl: 2.0 N_sl: [12, 12]
dot_gamma_0_sl: 0.001 a_sl: 2.0
h_0_sl_sl: 1000.0e6 dot_gamma_0_sl: 0.001
h_sl_sl: [1, 1, 1.4, 1.4, 1.4, 1.4] h_0_sl_sl: 1000.0e6
n_sl: 20 h_sl_sl: [1, 1, 1.4, 1.4, 1.4, 1.4]
type: phenopowerlaw n_sl: 20
xi_0_sl: [95.e6, 96.e6] type: phenopowerlaw
xi_inf_sl: [222.e6, 412.7e6] xi_0_sl: [95.e6, 96.e6]
xi_inf_sl: [222.e6, 412.7e6]

View File

@ -2,15 +2,16 @@
# Tasan et.al. 2015 International Journal of Plasticity # Tasan et.al. 2015 International Journal of Plasticity
# Diehl et.al. 2015 Meccanica # Diehl et.al. 2015 Meccanica
Martensite: Martensite:
elasticity: {C_11: 417.4e9, C_12: 242.4e9, C_44: 211.1e9, type: hooke} mechanics:
lattice: bcc lattice: cI
plasticity: elasticity: {C_11: 417.4e9, C_12: 242.4e9, C_44: 211.1e9, type: hooke}
N_sl: [12, 12] plasticity:
a_sl: 2.0 N_sl: [12, 12]
dot_gamma_0_sl: 0.001 a_sl: 2.0
h_0_sl_sl: 563.0e9 dot_gamma_0_sl: 0.001
h_sl_sl: [1, 1, 1.4, 1.4, 1.4, 1.4] h_0_sl_sl: 563.0e9
n_sl: 20 h_sl_sl: [1, 1, 1.4, 1.4, 1.4, 1.4]
type: phenopowerlaw n_sl: 20
xi_0_sl: [405.8e6, 456.7e6] type: phenopowerlaw
xi_inf_sl: [872.9e6, 971.2e6] xi_0_sl: [405.8e6, 456.7e6]
xi_inf_sl: [872.9e6, 971.2e6]

View File

@ -1,26 +1,26 @@
homogenization: homogenization:
SX: SX:
N_constituents: 1 N_constituents: 1
mech: {type: none} mechanics: {type: none}
phase: phase:
Aluminum: Aluminum:
elasticity: {C_11: 106.75e9, C_12: 60.41e9, C_44: 28.34e9, type: hooke} mechanics:
generic: lattice: cF
output: [F, P, Fe, Fp, Lp] output: [F, P, F_e, F_p, L_p]
lattice: fcc elasticity: {C_11: 106.75e9, C_12: 60.41e9, C_44: 28.34e9, type: hooke}
plasticity: plasticity:
N_sl: [12] N_sl: [12]
a_sl: 2.25 a_sl: 2.25
atol_xi: 1.0 atol_xi: 1.0
dot_gamma_0_sl: 0.001 dot_gamma_0_sl: 0.001
h_0_sl_sl: 75e6 h_0_sl_sl: 75e6
h_sl_sl: [1, 1, 1.4, 1.4, 1.4, 1.4] h_sl_sl: [1, 1, 1.4, 1.4, 1.4, 1.4]
n_sl: 20 n_sl: 20
output: [xi_sl] output: [xi_sl]
type: phenopowerlaw type: phenopowerlaw
xi_0_sl: [31e6] xi_0_sl: [31e6]
xi_inf_sl: [63e6] xi_inf_sl: [63e6]
material: material:
- constituents: - constituents:

View File

@ -2,7 +2,7 @@
homogenization: homogenization:
SX: SX:
N_constituents: 1 N_constituents: 1
mech: {type: none} mechanics: {type: none}
material: material:
- homogenization: SX - homogenization: SX
@ -108,19 +108,19 @@ material:
phase: phase:
Aluminum: Aluminum:
elasticity: {C_11: 106.75e9, C_12: 60.41e9, C_44: 28.34e9, type: hooke} lattice: cF
generic: mechanics:
output: [F, P, Fe, Fp, Lp, O] output: [F, P, F_e, F_p, L_p, O]
lattice: fcc elasticity: {C_11: 106.75e9, C_12: 60.41e9, C_44: 28.34e9, type: hooke}
plasticity: plasticity:
N_sl: [12] N_sl: [12]
a_sl: 2.25 a_sl: 2.25
atol_xi: 1.0 atol_xi: 1.0
dot_gamma_0_sl: 0.001 dot_gamma_0_sl: 0.001
h_0_sl_sl: 75e6 h_0_sl_sl: 75e6
h_sl_sl: [1, 1, 1.4, 1.4, 1.4, 1.4] h_sl_sl: [1, 1, 1.4, 1.4, 1.4, 1.4]
n_sl: 20 n_sl: 20
output: [xi_sl] output: [xi_sl]
type: phenopowerlaw type: phenopowerlaw
xi_0_sl: [31e6] xi_0_sl: [31e6]
xi_inf_sl: [63e6] xi_inf_sl: [63e6]

View File

@ -1,5 +1,5 @@
step: step:
- mech: - mechanics:
dot_F: [0, 0, 0, dot_F: [0, 0, 0,
1e-3, 0, 0, 1e-3, 0, 0,
0, 0, 0] 0, 0, 0]

View File

@ -1,6 +1,6 @@
--- ---
step: step:
- mech: - mechanics:
dot_F: [0, 0, 1e-3, dot_F: [0, 0, 1e-3,
0, 0, 0, 0, 0, 0,
0, 0, 0] 0, 0, 0]

View File

@ -1,7 +1,7 @@
--- ---
step: step:
- mech: - mechanics:
dot_F: [1.0e-3, 0, 0, dot_F: [1.0e-3, 0, 0,
0, x, 0, 0, x, 0,
0, 0, x] 0, 0, x]
@ -12,7 +12,7 @@ step:
t: 10 t: 10
N: 40 N: 40
f_out: 4 f_out: 4
- mech: - mechanics:
dot_F: [1.0e-3, 0, 0, dot_F: [1.0e-3, 0, 0,
0, x, 0, 0, x, 0,
0, 0, x] 0, 0, x]

View File

@ -7,15 +7,15 @@ from pathlib import Path
import damask import damask
marc_version = float(damask.environment.options['MARC_VERSION']) msc_version = float(damask.environment.options['MSC_VERSION'])
if int(marc_version) == marc_version: if int(msc_version) == msc_version:
marc_version = int(marc_version) msc_version = int(msc_version)
msc_root = Path(damask.environment.options['MSC_ROOT']) msc_root = Path(damask.environment.options['MSC_ROOT'])
damask_root = damask.environment.root_dir damask_root = damask.environment.root_dir
parser = argparse.ArgumentParser( parser = argparse.ArgumentParser(
description='Apply DAMASK modification to MSC.Marc/Mentat', description='Apply DAMASK modification to MSC.Marc/Mentat',
epilog = f'MSC_ROOT={msc_root} and MARC_VERSION={marc_version} (from {damask_root}/env/CONFIG)') epilog = f'MSC_ROOT={msc_root} and MSC_VERSION={msc_version} (from {damask_root}/env/CONFIG)')
parser.add_argument('--editor', dest='editor', metavar='string', default='vi', parser.add_argument('--editor', dest='editor', metavar='string', default='vi',
help='Name of the editor for MSC.Mentat (executable)') help='Name of the editor for MSC.Mentat (executable)')
@ -23,8 +23,8 @@ parser.add_argument('--editor', dest='editor', metavar='string', default='vi',
def copy_and_replace(in_file,dst): def copy_and_replace(in_file,dst):
with open(in_file) as f: with open(in_file) as f:
content = f.read() content = f.read()
content = content.replace('%INSTALLDIR%',msc_root) content = content.replace('%INSTALLDIR%',str(msc_root))
content = content.replace('%VERSION%',str(marc_version)) content = content.replace('%VERSION%',str(msc_version))
content = content.replace('%EDITOR%', parser.parse_args().editor) content = content.replace('%EDITOR%', parser.parse_args().editor)
with open(dst/Path(in_file).name,'w') as f: with open(dst/Path(in_file).name,'w') as f:
f.write(content) f.write(content)
@ -32,36 +32,36 @@ def copy_and_replace(in_file,dst):
print('adapting Marc tools...\n') print('adapting Marc tools...\n')
src = damask_root/f'installation/mods_MarcMentat/{marc_version}/Marc_tools' src = damask_root/f'installation/mods_MarcMentat/{msc_version}/Marc_tools'
dst = msc_root/f'marc{marc_version}/tools' dst = msc_root/f'marc{msc_version}/tools'
for in_file in glob.glob(str(src/'*damask*')) + [str(src/'include_linux64')]: for in_file in glob.glob(str(src/'*damask*')) + [str(src/'include_linux64')]:
copy_and_replace(in_file,dst) copy_and_replace(in_file,dst)
print('adapting Mentat scripts and menus...\n') print('adapting Mentat scripts and menus...\n')
src = damask_root/f'installation/mods_MarcMentat/{marc_version}/Mentat_bin' src = damask_root/f'installation/mods_MarcMentat/{msc_version}/Mentat_bin'
dst = msc_root/f'mentat{marc_version}/bin' dst = msc_root/f'mentat{msc_version}/bin'
for in_file in glob.glob(str(src/'*[!.original]')): for in_file in glob.glob(str(src/'*[!.original]')):
copy_and_replace(in_file,dst) copy_and_replace(in_file,dst)
src = damask_root/f'installation/mods_MarcMentat/{marc_version}/Mentat_menus' src = damask_root/f'installation/mods_MarcMentat/{msc_version}/Mentat_menus'
dst = msc_root/f'mentat{marc_version}/menus' dst = msc_root/f'mentat{msc_version}/menus'
for in_file in glob.glob(str(src/'job_run.ms')): for in_file in glob.glob(str(src/'job_run.ms')):
copy_and_replace(in_file,dst) copy_and_replace(in_file,dst)
print('compiling Mentat menu binaries...') print('compiling Mentat menu binaries...')
executable = str(msc_root/f'mentat{marc_version}/bin/mentat') executable = str(msc_root/f'mentat{msc_version}/bin/mentat')
menu_file = str(msc_root/f'mentat{marc_version}/menus/linux64/main.msb') menu_file = str(msc_root/f'mentat{msc_version}/menus/linux64/main.msb')
os.system(f'xvfb-run {executable} -compile {menu_file}') os.system(f'xvfb-run {executable} -compile {menu_file}')
print('setting file access rights...\n') print('setting file access rights...\n')
for pattern in [msc_root/f'marc{marc_version}/tools/*damask*', for pattern in [msc_root/f'marc{msc_version}/tools/*damask*',
msc_root/f'mentat{marc_version}/bin/submit?', msc_root/f'mentat{msc_version}/bin/submit?',
msc_root/f'mentat{marc_version}/bin/kill?']: msc_root/f'mentat{msc_version}/bin/kill?']:
for f in glob.glob(str(pattern)): for f in glob.glob(str(pattern)):
os.chmod(f,0o755) os.chmod(f,0o755)

View File

@ -20,9 +20,9 @@ parser.add_argument('filenames', nargs='+',
parser.add_argument('-d','--dir', dest='dir',default='postProc',metavar='string', parser.add_argument('-d','--dir', dest='dir',default='postProc',metavar='string',
help='name of subdirectory relative to the location of the DADF5 file to hold output') help='name of subdirectory relative to the location of the DADF5 file to hold output')
parser.add_argument('--mat', nargs='+', parser.add_argument('--mat', nargs='+',
help='labels for materialpoint',dest='mat') help='labels for homogenization',dest='mat')
parser.add_argument('--con', nargs='+', parser.add_argument('--con', nargs='+',
help='labels for constituent',dest='con') help='labels for phase',dest='con')
options = parser.parse_args() options = parser.parse_args()
@ -41,15 +41,15 @@ for filename in options.filenames:
table = damask.Table(np.ones(np.product(results.grid),dtype=int)*int(inc[3:]),{'inc':(1,)})\ table = damask.Table(np.ones(np.product(results.grid),dtype=int)*int(inc[3:]),{'inc':(1,)})\
.add('pos',coords.reshape(-1,3)) .add('pos',coords.reshape(-1,3))
results.pick('materialpoints',False) results.pick('homogenizations',False)
results.pick('constituents', True) results.pick('phases',True)
for label in options.con: for label in options.con:
x = results.get_dataset_location(label) x = results.get_dataset_location(label)
if len(x) != 0: if len(x) != 0:
table = table.add(label,results.read_dataset(x,0,plain=True).reshape(results.grid.prod(),-1)) table = table.add(label,results.read_dataset(x,0,plain=True).reshape(results.grid.prod(),-1))
results.pick('constituents', False) results.pick('phases',False)
results.pick('materialpoints',True) results.pick('homogenizations',True)
for label in options.mat: for label in options.mat:
x = results.get_dataset_location(label) x = results.get_dataset_location(label)
if len(x) != 0: if len(x) != 0:

View File

@ -13,85 +13,6 @@ import damask
scriptName = os.path.splitext(os.path.basename(__file__))[0] scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version]) scriptID = ' '.join([scriptName,damask.version])
def volTetrahedron(coords):
"""
Return the volume of the tetrahedron with given vertices or sides.
If vertices are given they must be in a NumPy array with shape (4,3): the
position vectors of the 4 vertices in 3 dimensions; if the six sides are
given, they must be an array of length 6. If both are given, the sides
will be used in the calculation.
This method implements
Tartaglia's formula using the Cayley-Menger determinant:
|0 1 1 1 1 |
|1 0 s1^2 s2^2 s3^2|
288 V^2 = |1 s1^2 0 s4^2 s5^2|
|1 s2^2 s4^2 0 s6^2|
|1 s3^2 s5^2 s6^2 0 |
where s1, s2, ..., s6 are the tetrahedron side lengths.
from http://codereview.stackexchange.com/questions/77593/calculating-the-volume-of-a-tetrahedron
"""
# The indexes of rows in the vertices array corresponding to all
# possible pairs of vertices
vertex_pair_indexes = np.array(((0, 1), (0, 2), (0, 3),
(1, 2), (1, 3), (2, 3)))
# Get all the squares of all side lengths from the differences between
# the 6 different pairs of vertex positions
vertices = np.concatenate((coords[0],coords[1],coords[2],coords[3])).reshape(4,3)
vertex1, vertex2 = vertex_pair_indexes[:,0], vertex_pair_indexes[:,1]
sides_squared = np.sum((vertices[vertex1] - vertices[vertex2])**2,axis=-1)
# Set up the Cayley-Menger determinant
M = np.zeros((5,5))
# Fill in the upper triangle of the matrix
M[0,1:] = 1
# The squared-side length elements can be indexed using the vertex
# pair indices (compare with the determinant illustrated above)
M[tuple(zip(*(vertex_pair_indexes + 1)))] = sides_squared
# The matrix is symmetric, so we can fill in the lower triangle by
# adding the transpose
M = M + M.T
return np.sqrt(np.linalg.det(M) / 288)
def volumeMismatch(size,F,nodes):
"""
Calculates the volume mismatch.
volume mismatch is defined as the difference between volume of reconstructed
(compatible) cube and determinant of deformation gradient at Fourier point.
"""
coords = np.empty([8,3])
vMismatch = np.empty(F.shape[:3])
#--------------------------------------------------------------------------------------------------
# calculate actual volume and volume resulting from deformation gradient
for k in range(grid[0]):
for j in range(grid[1]):
for i in range(grid[2]):
coords[0,0:3] = nodes[k, j, i ,0:3]
coords[1,0:3] = nodes[k ,j, i+1,0:3]
coords[2,0:3] = nodes[k ,j+1,i+1,0:3]
coords[3,0:3] = nodes[k, j+1,i ,0:3]
coords[4,0:3] = nodes[k+1,j, i ,0:3]
coords[5,0:3] = nodes[k+1,j, i+1,0:3]
coords[6,0:3] = nodes[k+1,j+1,i+1,0:3]
coords[7,0:3] = nodes[k+1,j+1,i ,0:3]
vMismatch[k,j,i] = \
( abs(volTetrahedron([coords[6,0:3],coords[0,0:3],coords[7,0:3],coords[3,0:3]])) \
+ abs(volTetrahedron([coords[6,0:3],coords[0,0:3],coords[7,0:3],coords[4,0:3]])) \
+ abs(volTetrahedron([coords[6,0:3],coords[0,0:3],coords[2,0:3],coords[3,0:3]])) \
+ abs(volTetrahedron([coords[6,0:3],coords[0,0:3],coords[2,0:3],coords[1,0:3]])) \
+ abs(volTetrahedron([coords[6,0:3],coords[4,0:3],coords[1,0:3],coords[5,0:3]])) \
+ abs(volTetrahedron([coords[6,0:3],coords[4,0:3],coords[1,0:3],coords[0,0:3]]))) \
/np.linalg.det(F[k,j,i,0:3,0:3])
return vMismatch/(size.prod()/grid.prod())
def shapeMismatch(size,F,nodes,centres): def shapeMismatch(size,F,nodes,centres):
""" """
@ -101,42 +22,23 @@ def shapeMismatch(size,F,nodes,centres):
the corners of reconstructed (combatible) volume element and the vectors calculated by deforming the corners of reconstructed (combatible) volume element and the vectors calculated by deforming
the initial volume element with the current deformation gradient. the initial volume element with the current deformation gradient.
""" """
sMismatch = np.empty(F.shape[:3])
#--------------------------------------------------------------------------------------------------
# initial positions
delta = size/grid*.5 delta = size/grid*.5
coordsInitial = np.vstack((delta * np.array((-1,-1,-1)),
delta * np.array((+1,-1,-1)),
delta * np.array((+1,+1,-1)),
delta * np.array((-1,+1,-1)),
delta * np.array((-1,-1,+1)),
delta * np.array((+1,-1,+1)),
delta * np.array((+1,+1,+1)),
delta * np.array((-1,+1,+1))))
#-------------------------------------------------------------------------------------------------- return + np.linalg.norm(nodes[:-1,:-1,:-1] -centres - np.dot(F,delta * np.array((-1,-1,-1))),axis=-1)\
# compare deformed original and deformed positions to actual positions + np.linalg.norm(nodes[+1:,:-1,:-1] -centres - np.dot(F,delta * np.array((+1,-1,-1))),axis=-1)\
for k in range(grid[0]): + np.linalg.norm(nodes[+1:,+1:,:-1] -centres - np.dot(F,delta * np.array((+1,+1,-1))),axis=-1)\
for j in range(grid[1]): + np.linalg.norm(nodes[:-1,+1:,:-1] -centres - np.dot(F,delta * np.array((-1,+1,-1))),axis=-1)\
for i in range(grid[2]): + np.linalg.norm(nodes[:-1,:-1,+1:] -centres - np.dot(F,delta * np.array((-1,-1,+1))),axis=-1)\
sMismatch[k,j,i] = \ + np.linalg.norm(nodes[+1:,:-1,+1:] -centres - np.dot(F,delta * np.array((+1,-1,+1))),axis=-1)\
+ np.linalg.norm(nodes[k, j, i ,0:3] - centres[k,j,i,0:3] - np.dot(F[k,j,i,:,:], coordsInitial[0,0:3]))\ + np.linalg.norm(nodes[+1:,+1:,+1:] -centres - np.dot(F,delta * np.array((+1,+1,+1))),axis=-1)\
+ np.linalg.norm(nodes[k+1,j, i ,0:3] - centres[k,j,i,0:3] - np.dot(F[k,j,i,:,:], coordsInitial[1,0:3]))\ + np.linalg.norm(nodes[:-1,+1:,+1:] -centres - np.dot(F,delta * np.array((-1,+1,+1))),axis=-1)
+ np.linalg.norm(nodes[k+1,j+1,i ,0:3] - centres[k,j,i,0:3] - np.dot(F[k,j,i,:,:], coordsInitial[2,0:3]))\
+ np.linalg.norm(nodes[k, j+1,i ,0:3] - centres[k,j,i,0:3] - np.dot(F[k,j,i,:,:], coordsInitial[3,0:3]))\
+ np.linalg.norm(nodes[k, j, i+1,0:3] - centres[k,j,i,0:3] - np.dot(F[k,j,i,:,:], coordsInitial[4,0:3]))\
+ np.linalg.norm(nodes[k+1,j, i+1,0:3] - centres[k,j,i,0:3] - np.dot(F[k,j,i,:,:], coordsInitial[5,0:3]))\
+ np.linalg.norm(nodes[k+1,j+1,i+1,0:3] - centres[k,j,i,0:3] - np.dot(F[k,j,i,:,:], coordsInitial[6,0:3]))\
+ np.linalg.norm(nodes[k ,j+1,i+1,0:3] - centres[k,j,i,0:3] - np.dot(F[k,j,i,:,:], coordsInitial[7,0:3]))
return sMismatch
# -------------------------------------------------------------------- # --------------------------------------------------------------------
# MAIN # MAIN
# -------------------------------------------------------------------- # --------------------------------------------------------------------
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """ parser = OptionParser(usage='%prog options [ASCIItable(s)]', description = """
Add column(s) containing the shape and volume mismatch resulting from given deformation gradient. Add column(s) containing the shape and volume mismatch resulting from given deformation gradient.
Operates on periodic three-dimensional x,y,z-ordered data sets. Operates on periodic three-dimensional x,y,z-ordered data sets.
@ -155,10 +57,6 @@ parser.add_option('--no-shape','-s',
dest = 'shape', dest = 'shape',
action = 'store_false', action = 'store_false',
help = 'omit shape mismatch') help = 'omit shape mismatch')
parser.add_option('--no-volume','-v',
dest = 'volume',
action = 'store_false',
help = 'omit volume mismatch')
parser.set_defaults(pos = 'pos', parser.set_defaults(pos = 'pos',
defgrad = 'f', defgrad = 'f',
shape = True, shape = True,
@ -185,10 +83,4 @@ for name in filenames:
shapeMismatch.reshape(-1,1,order='F'), shapeMismatch.reshape(-1,1,order='F'),
scriptID+' '+' '.join(sys.argv[1:])) scriptID+' '+' '.join(sys.argv[1:]))
if options.volume: table.save((sys.stdout if name is None else name))
volumeMismatch = volumeMismatch(size,F,nodes)
table = table.add('volMismatch(({}))'.format(options.defgrad),
volumeMismatch.reshape(-1,1,order='F'),
scriptID+' '+' '.join(sys.argv[1:]))
table.save((sys.stdout if name is None else name), legacy=True)

View File

@ -55,4 +55,4 @@ for name in filenames:
curl.reshape(tuple(grid)+(-1,)).reshape(-1,np.prod(shape),order='F'), curl.reshape(tuple(grid)+(-1,)).reshape(-1,np.prod(shape),order='F'),
scriptID+' '+' '.join(sys.argv[1:])) scriptID+' '+' '.join(sys.argv[1:]))
table.save((sys.stdout if name is None else name), legacy=True) table.save((sys.stdout if name is None else name))

View File

@ -1,74 +0,0 @@
#!/usr/bin/env python3
import os
import sys
from io import StringIO
from optparse import OptionParser
import numpy as np
import damask
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version])
def derivative(coordinates,what):
result = np.empty_like(what)
# use differentiation by interpolation
# as described in http://www2.math.umd.edu/~dlevy/classes/amsc466/lecture-notes/differentiation-chap.pdf
result[1:-1,:] = + what[1:-1,:] * (2.*coordinates[1:-1]-coordinates[:-2]-coordinates[2:]) / \
((coordinates[1:-1]-coordinates[:-2])*(coordinates[1:-1]-coordinates[2:])) \
+ what[2:,:] * (coordinates[1:-1]-coordinates[:-2]) / \
((coordinates[2:]-coordinates[1:-1])*(coordinates[2:]-coordinates[:-2])) \
+ what[:-2,:] * (coordinates[1:-1]-coordinates[2:]) / \
((coordinates[:-2]-coordinates[1:-1])*(coordinates[:-2]-coordinates[2:])) \
result[0,:] = (what[0,:] - what[1,:]) / \
(coordinates[0] - coordinates[1])
result[-1,:] = (what[-1,:] - what[-2,:]) / \
(coordinates[-1] - coordinates[-2])
return result
# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
Add column(s) containing numerical derivative of requested column(s) with respect to given coordinates.
""", version = scriptID)
parser.add_option('-c','--coordinates',
dest = 'coordinates',
type = 'string', metavar='string',
help = 'heading of coordinate column')
parser.add_option('-l','--label',
dest = 'labels',
action = 'extend', metavar = '<string LIST>',
help = 'heading of column(s) to differentiate')
(options,filenames) = parser.parse_args()
if filenames == []: filenames = [None]
if options.coordinates is None:
parser.error('no coordinate column specified.')
if options.labels is None:
parser.error('no data column specified.')
for name in filenames:
damask.util.report(scriptName,name)
table = damask.Table.load(StringIO(''.join(sys.stdin.read())) if name is None else name)
for label in options.labels:
table = table.add('d({})/d({})'.format(label,options.coordinates),
derivative(table.get(options.coordinates),table.get(label)),
scriptID+' '+' '.join(sys.argv[1:]))
table.save((sys.stdout if name is None else name), legacy=True)

View File

@ -16,7 +16,7 @@ scriptID = ' '.join([scriptName,damask.version])
# MAIN # MAIN
# -------------------------------------------------------------------- # --------------------------------------------------------------------
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """ parser = OptionParser(usage='%prog options [ASCIItable(s)]', description = """
Add displacments resulting from deformation gradient field. Add displacments resulting from deformation gradient field.
Operates on periodic three-dimensional x,y,z-ordered data sets. Operates on periodic three-dimensional x,y,z-ordered data sets.
Outputs at cell centers or cell nodes (into separate file). Outputs at cell centers or cell nodes (into separate file).
@ -60,7 +60,7 @@ for name in filenames:
.add('fluct({}).{}'.format(options.f,options.pos), .add('fluct({}).{}'.format(options.f,options.pos),
damask.grid_filters.node_displacement_fluct(size,F).reshape(-1,3,order='F'), damask.grid_filters.node_displacement_fluct(size,F).reshape(-1,3,order='F'),
scriptID+' '+' '.join(sys.argv[1:]))\ scriptID+' '+' '.join(sys.argv[1:]))\
.save((sys.stdout if name is None else os.path.splitext(name)[0]+'_nodal.txt'), legacy=True) .save((sys.stdout if name is None else os.path.splitext(name)[0]+'_nodal.txt'))
else: else:
table.add('avg({}).{}'.format(options.f,options.pos), table.add('avg({}).{}'.format(options.f,options.pos),
damask.grid_filters.cell_displacement_avg(size,F).reshape(-1,3,order='F'), damask.grid_filters.cell_displacement_avg(size,F).reshape(-1,3,order='F'),
@ -68,4 +68,4 @@ for name in filenames:
.add('fluct({}).{}'.format(options.f,options.pos), .add('fluct({}).{}'.format(options.f,options.pos),
damask.grid_filters.cell_displacement_fluct(size,F).reshape(-1,3,order='F'), damask.grid_filters.cell_displacement_fluct(size,F).reshape(-1,3,order='F'),
scriptID+' '+' '.join(sys.argv[1:]))\ scriptID+' '+' '.join(sys.argv[1:]))\
.save((sys.stdout if name is None else name), legacy=True) .save((sys.stdout if name is None else name))

View File

@ -55,4 +55,4 @@ for name in filenames:
div.reshape(tuple(grid)+(-1,)).reshape(-1,np.prod(shape)//3,order='F'), div.reshape(tuple(grid)+(-1,)).reshape(-1,np.prod(shape)//3,order='F'),
scriptID+' '+' '.join(sys.argv[1:])) scriptID+' '+' '.join(sys.argv[1:]))
table.save((sys.stdout if name is None else name), legacy=True) table.save((sys.stdout if name is None else name))

View File

@ -184,4 +184,4 @@ for name in filenames:
distance[i,:], distance[i,:],
scriptID+' '+' '.join(sys.argv[1:])) scriptID+' '+' '.join(sys.argv[1:]))
table.save((sys.stdout if name is None else name), legacy=True) table.save((sys.stdout if name is None else name))

View File

@ -55,4 +55,4 @@ for name in filenames:
grad.reshape(tuple(grid)+(-1,)).reshape(-1,np.prod(shape)*3,order='F'), grad.reshape(tuple(grid)+(-1,)).reshape(-1,np.prod(shape)*3,order='F'),
scriptID+' '+' '.join(sys.argv[1:])) scriptID+' '+' '.join(sys.argv[1:]))
table.save((sys.stdout if name is None else name), legacy=True) table.save((sys.stdout if name is None else name))

View File

@ -1,150 +0,0 @@
#!/usr/bin/env python3
import os
import sys
from io import StringIO
from optparse import OptionParser
import numpy as np
import damask
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version])
# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
Add quaternion and/or Bunge Euler angle representation of crystal lattice orientation.
Orientation is given by quaternion, Euler angles, rotation matrix, or crystal frame coordinates
(i.e. component vectors of rotation matrix).
Additional (globally fixed) rotations of the lab frame and/or crystal frame can be applied.
""", version = scriptID)
representations = ['quaternion', 'rodrigues', 'eulers', 'matrix', 'axisangle']
parser.add_option('-o',
'--output',
dest = 'output',
action = 'extend', metavar = '<string LIST>',
help = 'output orientation formats {{{}}}'.format(', '.join(representations)))
parser.add_option('-d',
'--degrees',
dest = 'degrees',
action = 'store_true',
help = 'all angles in degrees')
parser.add_option('-R',
'--labrotation',
dest='labrotation',
type = 'float', nargs = 4, metavar = ' '.join(['float']*4),
help = 'axis and angle of additional lab frame rotation [%default]')
parser.add_option('-r',
'--crystalrotation',
dest='crystalrotation',
type = 'float', nargs = 4, metavar = ' '.join(['float']*4),
help = 'axis and angle of additional crystal frame rotation [%default]')
parser.add_option('--eulers',
dest = 'eulers',
metavar = 'string',
help = 'Euler angles label')
parser.add_option('--rodrigues',
dest = 'rodrigues',
metavar = 'string',
help = 'Rodrigues vector label')
parser.add_option('--matrix',
dest = 'matrix',
metavar = 'string',
help = 'orientation matrix label')
parser.add_option('--quaternion',
dest = 'quaternion',
metavar = 'string',
help = 'quaternion label')
parser.add_option('-x',
dest = 'x',
metavar = 'string',
help = 'label of lab x vector (expressed in crystal coords)')
parser.add_option('-y',
dest = 'y',
metavar = 'string',
help = 'label of lab y vector (expressed in crystal coords)')
parser.add_option('-z',
dest = 'z',
metavar = 'string',
help = 'label of lab z vector (expressed in crystal coords)')
parser.add_option('--lattice',
dest = 'lattice',
metavar = 'string',
help = 'lattice structure to reduce rotation into fundamental zone')
parser.set_defaults(output = [],
labrotation = (1.,1.,1.,0.), # no rotation about (1,1,1)
crystalrotation = (1.,1.,1.,0.), # no rotation about (1,1,1)
lattice = None,
)
(options, filenames) = parser.parse_args()
if filenames == []: filenames = [None]
if options.output == [] or (not set(options.output).issubset(set(representations))):
parser.error('output must be chosen from {}.'.format(', '.join(representations)))
input = [options.eulers is not None,
options.rodrigues is not None,
options.x is not None and \
options.y is not None and \
options.z is not None,
options.matrix is not None,
options.quaternion is not None,
]
if np.sum(input) != 1: parser.error('needs exactly one input format.')
r = damask.Rotation.from_axis_angle(np.array(options.crystalrotation),options.degrees,normalize=True)
R = damask.Rotation.from_axis_angle(np.array(options.labrotation),options.degrees,normalize=True)
for name in filenames:
damask.util.report(scriptName,name)
table = damask.Table.load(StringIO(''.join(sys.stdin.read())) if name is None else name)
if options.eulers is not None:
label = options.eulers
print(np.max(table.get(options.eulers),axis=0))
o = damask.Rotation.from_Eulers(table.get(options.eulers), options.degrees)
elif options.rodrigues is not None:
label = options.rodrigues
o = damask.Rotation.from_Rodrigues(table.get(options.rodrigues))
elif options.matrix is not None:
label = options.matrix
o = damask.Rotation.from_matrix(table.get(options.matrix).reshape(-1,3,3))
elif options.x is not None:
label = '<{},{},{}>'.format(options.x,options.y,options.z)
M = np.block([table.get(options.x),table.get(options.y),table.get(options.z)]).reshape(-1,3,3)
o = damask.Rotation.from_matrix(M/np.linalg.norm(M,axis=0))
elif options.quaternion is not None:
label = options.quaternion
o = damask.Rotation.from_quaternion(table.get(options.quaternion))
o = r.broadcast_to(o.shape) @ o @ R.broadcast_to(o.shape)
#if options.lattice is not None:
# o = damask.Orientation(rotation = o,lattice = options.lattice).reduced().rotation
if 'rodrigues' in options.output:
table = table.add('ro({})'.format(label),o.as_Rodrigues(), scriptID+' '+' '.join(sys.argv[1:]))
if 'eulers' in options.output:
table = table.add('eu({})'.format(label),o.as_Eulers(options.degrees), scriptID+' '+' '.join(sys.argv[1:]))
if 'quaternion' in options.output:
table = table.add('qu({})'.format(label),o.as_quaternion(), scriptID+' '+' '.join(sys.argv[1:]))
if 'matrix' in options.output:
table = table.add('om({})'.format(label),o.as_matrix(), scriptID+' '+' '.join(sys.argv[1:]))
if 'axisangle' in options.output:
table = table.add('om({})'.format(label),o.as_axisangle(options.degrees), scriptID+' '+' '.join(sys.argv[1:]))
table.save((sys.stdout if name is None else name), legacy=True)

View File

@ -14,91 +14,16 @@ scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version]) scriptID = ' '.join([scriptName,damask.version])
slipSystems = { slipSystems = {
'fcc': 'fcc': damask.lattice.kinematics['cF']['slip'][:12],
np.array([ 'bcc': damask.lattice.kinematics['cI']['slip'],
[+0,+1,-1 , +1,+1,+1], 'hex': damask.lattice.kinematics['hP']['slip'],
[-1,+0,+1 , +1,+1,+1],
[+1,-1,+0 , +1,+1,+1],
[+0,-1,-1 , -1,-1,+1],
[+1,+0,+1 , -1,-1,+1],
[-1,+1,+0 , -1,-1,+1],
[+0,-1,+1 , +1,-1,-1],
[-1,+0,-1 , +1,-1,-1],
[+1,+1,+0 , +1,-1,-1],
[+0,+1,+1 , -1,+1,-1],
[+1,+0,-1 , -1,+1,-1],
[-1,-1,+0 , -1,+1,-1],
],'d'),
'bcc':
np.array([
[+1,-1,+1 , +0,+1,+1],
[-1,-1,+1 , +0,+1,+1],
[+1,+1,+1 , +0,-1,+1],
[-1,+1,+1 , +0,-1,+1],
[-1,+1,+1 , +1,+0,+1],
[-1,-1,+1 , +1,+0,+1],
[+1,+1,+1 , -1,+0,+1],
[+1,-1,+1 , -1,+0,+1],
[-1,+1,+1 , +1,+1,+0],
[-1,+1,-1 , +1,+1,+0],
[+1,+1,+1 , -1,+1,+0],
[+1,+1,-1 , -1,+1,+0],
[-1,+1,+1 , +2,+1,+1],
[+1,+1,+1 , -2,+1,+1],
[+1,+1,-1 , +2,-1,+1],
[+1,-1,+1 , +2,+1,-1],
[+1,-1,+1 , +1,+2,+1],
[+1,+1,-1 , -1,+2,+1],
[+1,+1,+1 , +1,-2,+1],
[-1,+1,+1 , +1,+2,-1],
[+1,+1,-1 , +1,+1,+2],
[+1,-1,+1 , -1,+1,+2],
[-1,+1,+1 , +1,-1,+2],
[+1,+1,+1 , +1,+1,-2],
],'d'),
'hex':
np.array([
[+2,-1,-1,+0 , +0,+0,+0,+1],
[-1,+2,-1,+0 , +0,+0,+0,+1],
[-1,-1,+2,+0 , +0,+0,+0,+1],
[+2,-1,-1,+0 , +0,+1,-1,+0],
[-1,+2,-1,+0 , -1,+0,+1,+0],
[-1,-1,+2,+0 , +1,-1,+0,+0],
[-1,+1,+0,+0 , +1,+1,-2,+0],
[+0,-1,+1,+0 , -2,+1,+1,+0],
[+1,+0,-1,+0 , +1,-2,+1,+0],
[-1,+2,-1,+0 , +1,+0,-1,+1],
[-2,+1,+1,+0 , +0,+1,-1,+1],
[-1,-1,+2,+0 , -1,+1,+0,+1],
[+1,-2,+1,+0 , -1,+0,+1,+1],
[+2,-1,-1,+0 , +0,-1,+1,+1],
[+1,+1,-2,+0 , +1,-1,+0,+1],
[-2,+1,+1,+3 , +1,+0,-1,+1],
[-1,-1,+2,+3 , +1,+0,-1,+1],
[-1,-1,+2,+3 , +0,+1,-1,+1],
[+1,-2,+1,+3 , +0,+1,-1,+1],
[+1,-2,+1,+3 , -1,+1,+0,+1],
[+2,-1,-1,+3 , -1,+1,+0,+1],
[+2,-1,-1,+3 , -1,+0,+1,+1],
[+1,+1,-2,+3 , -1,+0,+1,+1],
[+1,+1,-2,+3 , +0,-1,+1,+1],
[-1,+2,-1,+3 , +0,-1,+1,+1],
[-1,+2,-1,+3 , +1,-1,+0,+1],
[-2,+1,+1,+3 , +1,-1,+0,+1],
[-1,-1,+2,+3 , +1,+1,-2,+2],
[+1,-2,+1,+3 , -1,+2,-1,+2],
[+2,-1,-1,+3 , -2,+1,+1,+2],
[+1,+1,-2,+3 , -1,-1,+2,+2],
[-1,+2,-1,+3 , +1,-2,+1,+2],
[-2,+1,+1,+3 , +2,-1,-1,+2],
],'d'),
} }
# -------------------------------------------------------------------- # --------------------------------------------------------------------
# MAIN # MAIN
# -------------------------------------------------------------------- # --------------------------------------------------------------------
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """ parser = OptionParser(usage='%prog options [ASCIItable(s)]', description = """
Add columns listing Schmid factors (and optional trace vector of selected system) for given Euler angles. Add columns listing Schmid factors (and optional trace vector of selected system) for given Euler angles.
""", version = scriptID) """, version = scriptID)
@ -156,11 +81,11 @@ elif options.lattice == 'hex':
# convert 4 Miller index notation of hex to orthogonal 3 Miller index notation # convert 4 Miller index notation of hex to orthogonal 3 Miller index notation
for i in range(len(slip_direction)): for i in range(len(slip_direction)):
slip_direction[i] = np.array([slipSystems['hex'][i,0]*1.5, slip_direction[i] = np.array([slipSystems['hex'][i,0]*1.5,
(slipSystems['hex'][i,0] + 2.*slipSystems['hex'][i,1])*0.5*np.sqrt(3), (slipSystems['hex'][i,0] + 2.*slipSystems['hex'][i,1])*0.5*np.sqrt(3),
slipSystems['hex'][i,3]*options.CoverA, slipSystems['hex'][i,3]*options.CoverA,
]) ])
slip_normal[i] = np.array([slipSystems['hex'][i,4], slip_normal[i] = np.array([slipSystems['hex'][i,4],
(slipSystems['hex'][i,4] + 2.*slipSystems['hex'][i,5])/np.sqrt(3), (slipSystems['hex'][i,4] + 2.*slipSystems['hex'][i,5])/np.sqrt(3),
slipSystems['hex'][i,7]/options.CoverA, slipSystems['hex'][i,7]/options.CoverA,
]) ])
@ -189,4 +114,4 @@ for name in filenames:
for i,label in enumerate(labels): for i,label in enumerate(labels):
table = table.add(label,S[:,i],scriptID+' '+' '.join(sys.argv[1:])) table = table.add(label,S[:,i],scriptID+' '+' '.join(sys.argv[1:]))
table.save((sys.stdout if name is None else name), legacy=True) table.save((sys.stdout if name is None else name))

View File

@ -1,61 +0,0 @@
#!/usr/bin/env python3
import os
import sys
from io import StringIO
from optparse import OptionParser
import numpy as np
import damask
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version])
# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
Permute all values in given column(s).
""", version = scriptID)
parser.add_option('-l','--label',
dest = 'label',
action = 'extend', metavar = '<string LIST>',
help ='column(s) to permute')
parser.add_option('-u', '--unique',
dest = 'unique',
action = 'store_true',
help = 'shuffle unique values as group')
parser.add_option('-r', '--rnd',
dest = 'randomSeed',
type = 'int', metavar = 'int',
help = 'seed of random number generator [%default]')
parser.set_defaults(label = [],
unique = False,
randomSeed = None,
)
(options,filenames) = parser.parse_args()
if filenames == []: filenames = [None]
for name in filenames:
damask.util.report(scriptName,name)
table = damask.Table.load(StringIO(''.join(sys.stdin.read())) if name is None else name)
randomSeed = int(os.urandom(4).hex(), 16) if options.randomSeed is None else options.randomSeed # random seed per file
rng = np.random.default_rng(randomSeed)
for label in options.label:
data = table.get(label)
uniques,inverse = np.unique(data,return_inverse=True,axis=0) if options.unique else (data,np.arange(len(data)))
rng.shuffle(uniques)
table = table.set(label,uniques[inverse], scriptID+' '+' '.join(sys.argv[1:]))
table.save((sys.stdout if name is None else name), legacy=True)

View File

@ -14,7 +14,7 @@ scriptID = ' '.join([scriptName,damask.version])
# MAIN # MAIN
#-------------------------------------------------------------------------------------------------- #--------------------------------------------------------------------------------------------------
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [DREAM.3Dfile(s)]', description = """ parser = OptionParser(usage='%prog options [DREAM.3Dfile(s)]', description = """
Converts DREAM.3D file. Input can be cell data (direct pointwise takeover) or grain data (individual Converts DREAM.3D file. Input can be cell data (direct pointwise takeover) or grain data (individual
grains are segmented). Requires orientation data as quaternion. grains are segmented). Requires orientation data as quaternion.

View File

@ -1,69 +0,0 @@
#!/usr/bin/env python3
import os
import sys
from optparse import OptionParser
import damask
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version])
minimal_surfaces = list(damask.Geom._minimal_surface.keys())
# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [geomfile]', description = """
Generate a bicontinuous structure of given type.
""", version = scriptID)
parser.add_option('-t','--type',
dest = 'type',
choices = minimal_surfaces, metavar = 'string',
help = 'type of minimal surface [primitive] {%s}' %(','.join(minimal_surfaces)))
parser.add_option('-f','--threshold',
dest = 'threshold',
type = 'float', metavar = 'float',
help = 'threshold value defining minimal surface [%default]')
parser.add_option('-g', '--grid',
dest = 'grid',
type = 'int', nargs = 3, metavar = 'int int int',
help = 'a,b,c grid of hexahedral box [%default]')
parser.add_option('-s', '--size',
dest = 'size',
type = 'float', nargs = 3, metavar = 'float float float',
help = 'x,y,z size of hexahedral box [%default]')
parser.add_option('-p', '--periods',
dest = 'periods',
type = 'int', metavar = 'int',
help = 'number of repetitions of unit cell [%default]')
parser.add_option('--m',
dest = 'microstructure',
type = 'int', nargs = 2, metavar = 'int int',
help = 'two microstructure indices to be used [%default]')
parser.set_defaults(type = minimal_surfaces[0],
threshold = 0.0,
periods = 1,
grid = (16,16,16),
size = (1.0,1.0,1.0),
microstructure = (1,2),
)
(options,filename) = parser.parse_args()
name = None if filename == [] else filename[0]
damask.util.report(scriptName,name)
geom=damask.Geom.from_minimal_surface(options.grid,options.size,options.type,options.threshold,
options.periods,options.microstructure)
damask.util.croak(geom)
geom.save_ASCII(sys.stdout if name is None else name)

View File

@ -17,7 +17,7 @@ scriptID = ' '.join([scriptName,damask.version])
# MAIN # MAIN
# -------------------------------------------------------------------- # --------------------------------------------------------------------
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [geomfile]', description = """ parser = OptionParser(usage='%prog options [geomfile]', description = """
Generate description of an osteon enclosing the Harvesian canal and separated by interstitial tissue. Generate description of an osteon enclosing the Harvesian canal and separated by interstitial tissue.
The osteon phase is lamellar with a twisted plywood structure. The osteon phase is lamellar with a twisted plywood structure.
Its fiber orientation is oscillating by +/- amplitude within one period. Its fiber orientation is oscillating by +/- amplitude within one period.

View File

@ -1,57 +0,0 @@
#!/usr/bin/env python3
import os
import sys
from optparse import OptionParser
import damask
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version])
# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
Converts ASCII table. Input can be microstructure or orientation (as quaternion). For the latter,
phase information can be given additionally.
""", version = scriptID)
parser.add_option('--coordinates',
dest = 'pos',
type = 'string', metavar = 'string',
help = 'coordinates label (%default)')
parser.add_option('--phase',
dest = 'phase',
type = 'string', metavar = 'string',
help = 'phase label')
parser.add_option('--microstructure',
dest = 'microstructure',
type = 'string', metavar = 'string',
help = 'microstructure label')
parser.add_option('-q', '--quaternion',
dest = 'quaternion',
type = 'string', metavar='string',
help = 'quaternion label')
parser.set_defaults(pos= 'pos')
(options,filenames) = parser.parse_args()
if filenames == []: filenames = [None]
for name in filenames:
damask.util.report(scriptName,name)
labels = []
for l in [options.quaternion,options.phase,options.microstructure]:
if l is not None: labels.append(l)
t = damask.Table.load(name)
geom = damask.Geom.from_table(t,options.pos,labels)
damask.util.croak(geom)
geom.save_ASCII(sys.stdout if name is None else os.path.splitext(name)[0]+'.geom')

View File

@ -15,8 +15,8 @@ scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version]) scriptID = ' '.join([scriptName,damask.version])
getInterfaceEnergy = lambda A,B: np.float32((A*B != 0)*(A != B)*1.0) # 1.0 if A & B are distinct & nonzero, 0.0 otherwise getInterfaceEnergy = lambda A,B: np.float32((A != B)*1.0) # 1.0 if A & B are distinct, 0.0 otherwise
struc = ndimage.generate_binary_structure(3,1) # 3D von Neumann neighborhood struc = ndimage.generate_binary_structure(3,1) # 3D von Neumann neighborhood
#-------------------------------------------------------------------------------------------------- #--------------------------------------------------------------------------------------------------
@ -62,7 +62,7 @@ if filenames == []: filenames = [None]
for name in filenames: for name in filenames:
damask.util.report(scriptName,name) damask.util.report(scriptName,name)
geom = damask.Geom.load_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name) geom = damask.Geom.load(StringIO(''.join(sys.stdin.read())) if name is None else name)
grid_original = geom.grid grid_original = geom.grid
damask.util.croak(geom) damask.util.croak(geom)
@ -174,4 +174,4 @@ for name in filenames:
origin = geom.origin, origin = geom.origin,
comments = geom.comments + [scriptID + ' ' + ' '.join(sys.argv[1:])], comments = geom.comments + [scriptID + ' ' + ' '.join(sys.argv[1:])],
)\ )\
.save_ASCII(sys.stdout if name is None else name) .save(sys.stdout if name is None else name)

View File

@ -11,8 +11,6 @@ import numpy as np
import damask import damask
sys.path.append(str(damask.solver.Marc().library_path))
scriptName = os.path.splitext(os.path.basename(__file__))[0] scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version]) scriptID = ' '.join([scriptName,damask.version])
@ -30,11 +28,11 @@ def parseMFD(dat):
# lines that start with a space are numerical data # lines that start with a space are numerical data
if line[0] == ' ': if line[0] == ' ':
formatted[section]['els'].append([]) formatted[section]['els'].append([])
# grab numbers # grab numbers
nums = re.split(r'\s+', line.strip()) nums = re.split(r'\s+', line.strip())
for num in nums: for num in nums:
# floating point has format ' -x.xxxxxxxxxxxxe+yy' # floating point has format ' -x.xxxxxxxxxxxxe+yy'
# scientific notation is used for float # scientific notation is used for float
if (len(num) >= 4) and (num[-4] == 'e'): if (len(num) >= 4) and (num[-4] == 'e'):
@ -47,7 +45,7 @@ def parseMFD(dat):
else: else:
formatted[section]['els'].append([]) formatted[section]['els'].append([])
formatted[section]['els'][-1] = line formatted[section]['els'][-1] = line
else: # Not in a section, we are looking for a =beg= now else: # Not in a section, we are looking for a =beg= now
search = re.search(r'=beg=\s+(\d+)\s\((.*?)\)', line) search = re.search(r'=beg=\s+(\d+)\s\((.*?)\)', line)
if search is not None: # found start of a new section if search is not None: # found start of a new section
@ -60,7 +58,7 @@ def parseMFD(dat):
section += 1 section += 1
formatted.append({'label': '', 'uid': -2, 'els': []}) # make dummy section to store unrecognized data formatted.append({'label': '', 'uid': -2, 'els': []}) # make dummy section to store unrecognized data
formatted[section]['els'].append(line) formatted[section]['els'].append(line)
return formatted return formatted
def asMFD(mfd_data): def asMFD(mfd_data):
@ -93,14 +91,14 @@ def add_servoLinks(mfd_data,active=[True,True,True]): # directions on which to
'max': np.zeros(3,dtype='d'), 'max': np.zeros(3,dtype='d'),
'delta': np.zeros(3,dtype='d'), 'delta': np.zeros(3,dtype='d'),
} }
mfd_dict = {} mfd_dict = {}
for i in range(len(mfd_data)): for i in range(len(mfd_data)):
mfd_dict[mfd_data[i]['label']] = i mfd_dict[mfd_data[i]['label']] = i
NodeCoords = np.array(mfd_data[mfd_dict['nodes']]['els'][1::4])[:,1:4] NodeCoords = np.array(mfd_data[mfd_dict['nodes']]['els'][1::4])[:,1:4]
Nnodes = NodeCoords.shape[0] Nnodes = NodeCoords.shape[0]
box['min'] = NodeCoords.min(axis=0) # find the bounding box box['min'] = NodeCoords.min(axis=0) # find the bounding box
box['max'] = NodeCoords.max(axis=0) box['max'] = NodeCoords.max(axis=0)
box['delta'] = box['max']-box['min'] box['delta'] = box['max']-box['min']
@ -131,7 +129,7 @@ def add_servoLinks(mfd_data,active=[True,True,True]): # directions on which to
elif (key[base[coord]] == "%.8e"%box['max'][coord]): # compare to max of bounding box (i.e. is on outer face?) elif (key[base[coord]] == "%.8e"%box['max'][coord]): # compare to max of bounding box (i.e. is on outer face?)
Nmax += 1 # count outer (front) face membership Nmax += 1 # count outer (front) face membership
maxFlag[coord] = True # remember face membership (for linked nodes) maxFlag[coord] = True # remember face membership (for linked nodes)
if Nmin > 0: # node is on a back face if Nmin > 0: # node is on a back face
# prepare for any non-existing entries in the data structure # prepare for any non-existing entries in the data structure
if key['x'] not in baseNode.keys(): if key['x'] not in baseNode.keys():
@ -140,17 +138,17 @@ def add_servoLinks(mfd_data,active=[True,True,True]): # directions on which to
baseNode[key['x']][key['y']] = {} baseNode[key['x']][key['y']] = {}
if key['z'] not in baseNode[key['x']][key['y']].keys(): if key['z'] not in baseNode[key['x']][key['y']].keys():
baseNode[key['x']][key['y']][key['z']] = 0 baseNode[key['x']][key['y']][key['z']] = 0
baseNode[key['x']][key['y']][key['z']] = node+1 # remember the base node id baseNode[key['x']][key['y']][key['z']] = node+1 # remember the base node id
if Nmax > 0 and Nmax >= Nmin: # node is on at least as many front than back faces if Nmax > 0 and Nmax >= Nmin: # node is on at least as many front than back faces
if any([maxFlag[i] and active[i] for i in range(3)]): if any([maxFlag[i] and active[i] for i in range(3)]):
linkNodes.append({'id': node+1,'coord': NodeCoords[node], 'faceMember': [maxFlag[i] and active[i] for i in range(3)]}) linkNodes.append({'id': node+1,'coord': NodeCoords[node], 'faceMember': [maxFlag[i] and active[i] for i in range(3)]})
mfd_data[mfd_dict['entities']]['els'][0][0] += len(linkNodes) * 3 mfd_data[mfd_dict['entities']]['els'][0][0] += len(linkNodes) * 3
baseCorner = baseNode["%.8e"%box['min'][0]]["%.8e"%box['min'][1]]["%.8e"%box['min'][2]] # detect ultimate base node baseCorner = baseNode["%.8e"%box['min'][0]]["%.8e"%box['min'][1]]["%.8e"%box['min'][2]] # detect ultimate base node
links = {'uid': 1705, 'label': 'links', 'els': [[7,0],[9,0]]} links = {'uid': 1705, 'label': 'links', 'els': [[7,0],[9,0]]}
linkID = 0 linkID = 0
for node in linkNodes: # loop over all linked nodes for node in linkNodes: # loop over all linked nodes
@ -165,7 +163,7 @@ def add_servoLinks(mfd_data,active=[True,True,True]): # directions on which to
for dof in [1,2,3]: for dof in [1,2,3]:
tied_node = node['id'] tied_node = node['id']
nterms = 1 + nLinks nterms = 1 + nLinks
linkID += 1 linkID += 1
# Link header # Link header
links['els'].append('link{0}\n'.format(linkID)) links['els'].append('link{0}\n'.format(linkID))
@ -173,10 +171,10 @@ def add_servoLinks(mfd_data,active=[True,True,True]): # directions on which to
links['els'].append([0]) links['els'].append([0])
links['els'].append([0]) links['els'].append([0])
links['els'].append([0, 0, 0, tied_node]) links['els'].append([0, 0, 0, tied_node])
# these need to be put in groups of four # these need to be put in groups of four
link_payload = [dof, 0, nterms] link_payload = [dof, 0, nterms]
# Individual node contributions (node, dof, coef.) # Individual node contributions (node, dof, coef.)
for i in range(nterms): for i in range(nterms):
if i == nLinks: if i == nLinks:
@ -190,7 +188,7 @@ def add_servoLinks(mfd_data,active=[True,True,True]): # directions on which to
link_payload.append(1.0 - nLinks) link_payload.append(1.0 - nLinks)
else: else:
link_payload.append(1.0) link_payload.append(1.0)
# Needs to be formatted 4 data points per row, character width of 20, so 80 total # Needs to be formatted 4 data points per row, character width of 20, so 80 total
for j in range(0, len(link_payload), 4): for j in range(0, len(link_payload), 4):
links['els'].append(link_payload[j:j+4]) links['els'].append(link_payload[j:j+4])
@ -206,9 +204,9 @@ def add_servoLinks(mfd_data,active=[True,True,True]): # directions on which to
#-------------------------------------------------------------------------------------------------- #--------------------------------------------------------------------------------------------------
# MAIN # MAIN
#-------------------------------------------------------------------------------------------------- #--------------------------------------------------------------------------------------------------
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """ parser = OptionParser(usage='%prog options [file[s]]', description = """
Set up servo linking to achieve periodic boundary conditions for a regular hexahedral mesh. Set up servo linking to achieve periodic boundary conditions for a regular hexahedral mesh.
Use *py_connection to operate on model presently opened in MSC.Mentat. Use *py_connection to operate on model presently opened in MSC.Mentat.
""", version = scriptID) """, version = scriptID)
@ -235,6 +233,7 @@ if remote and filenames != []:
if filenames == []: filenames = [None] if filenames == []: filenames = [None]
if remote: if remote:
sys.path.append(str(damask.solver.Marc().library_path))
import py_mentat import py_mentat
damask.util.report(scriptName, 'waiting to connect...') damask.util.report(scriptName, 'waiting to connect...')

View File

@ -9,7 +9,6 @@ import damask
scriptName = os.path.splitext(os.path.basename(__file__))[0] scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version]) scriptID = ' '.join([scriptName,damask.version])
sys.path.append(str(damask.solver.Marc().library_path))
#------------------------------------------------------------------------------------------------- #-------------------------------------------------------------------------------------------------
def outMentat(cmd,locals): def outMentat(cmd,locals):
@ -168,7 +167,7 @@ def initial_conditions(material):
# MAIN # MAIN
#-------------------------------------------------------------------------------------------------- #--------------------------------------------------------------------------------------------------
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """ parser = OptionParser(usage='%prog options [file[s]]', description = """
Generate MSC.Marc FE hexahedral mesh from geom file. Generate MSC.Marc FE hexahedral mesh from geom file.
""", version = scriptID) """, version = scriptID)
@ -185,9 +184,10 @@ parser.set_defaults(port = None,
if options.port is not None: if options.port is not None:
try: try:
sys.path.append(str(damask.solver.Marc().library_path))
import py_mentat import py_mentat
except ImportError: except ImportError:
parser.error('no valid Mentat release found.') parser.error('no valid Mentat release found')
# --- loop over input files ------------------------------------------------------------------------ # --- loop over input files ------------------------------------------------------------------------
@ -196,7 +196,7 @@ if filenames == []: filenames = [None]
for name in filenames: for name in filenames:
damask.util.report(scriptName,name) damask.util.report(scriptName,name)
geom = damask.Geom.load_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name) geom = damask.Geom.load(StringIO(''.join(sys.stdin.read())) if name is None else name)
material = geom.material.flatten(order='F') material = geom.material.flatten(order='F')
cmds = [\ cmds = [\

View File

@ -164,7 +164,7 @@ class myThread (threading.Thread):
# MAIN # MAIN
# -------------------------------------------------------------------- # --------------------------------------------------------------------
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """ parser = OptionParser(usage='%prog options [file[s]]', description = """
Monte Carlo simulation to produce seed file that gives same size distribution like given geometry file. Monte Carlo simulation to produce seed file that gives same size distribution like given geometry file.
""", version = scriptID) """, version = scriptID)

View File

@ -16,7 +16,7 @@ scriptID = ' '.join([scriptName,damask.version])
# MAIN # MAIN
# -------------------------------------------------------------------- # --------------------------------------------------------------------
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """ parser = OptionParser(usage='%prog options [file[s]]', description = """
Create seeds file by poking at 45 degree through given geom file. Create seeds file by poking at 45 degree through given geom file.
Mimics APS Beamline 34-ID-E DAXM poking. Mimics APS Beamline 34-ID-E DAXM poking.

View File

@ -1,4 +1,12 @@
"""Tools for pre and post processing of DAMASK simulations.""" """
Tools for pre and post processing of DAMASK simulations.
Modules that contain only one class (of the same name),
are prefixed by a '_'. For example, '_colormap' contains
a class called 'Colormap' which is imported as 'damask.Colormap'.
"""
from pathlib import Path as _Path from pathlib import Path as _Path
import re as _re import re as _re
@ -12,15 +20,16 @@ from ._environment import Environment as _ # noqa
environment = _() environment = _()
from . import util # noqa from . import util # noqa
from . import seeds # noqa from . import seeds # noqa
from . import tensor # noqa
from . import mechanics # noqa from . import mechanics # noqa
from . import solver # noqa from . import solver # noqa
from . import grid_filters # noqa from . import grid_filters # noqa
from ._lattice import Symmetry, Lattice# noqa from . import lattice # noqa
from ._table import Table # noqa
from ._rotation import Rotation # noqa from ._rotation import Rotation # noqa
from ._orientation import Orientation # noqa
from ._table import Table # noqa
from ._vtk import VTK # noqa from ._vtk import VTK # noqa
from ._colormap import Colormap # noqa from ._colormap import Colormap # noqa
from ._orientation import Orientation # noqa
from ._config import Config # noqa from ._config import Config # noqa
from ._configmaterial import ConfigMaterial # noqa from ._configmaterial import ConfigMaterial # noqa
from ._geom import Geom # noqa from ._geom import Geom # noqa

View File

@ -385,38 +385,3 @@ class ASCIItable():
self.data = np.loadtxt(self.__IO__['in'],usecols=use,ndmin=2) self.data = np.loadtxt(self.__IO__['in'],usecols=use,ndmin=2)
return labels_missing return labels_missing
# ------------------------------------------------------------------
def data_write(self):
"""Write current data array and report alive output back."""
if len(self.data) == 0: return True
if isinstance(self.data[0],list):
return self.output_write(['\t'.join(map(self._quote,items)) for items in self.data])
else:
return self.output_write( '\t'.join(map(self._quote,self.data)))
# ------------------------------------------------------------------
def data_writeArray(self):
"""Write whole numpy array data."""
for row in self.data:
try:
output = list(map(repr,row))
except Exception:
output = [repr(row)]
try:
self.__IO__['out'].write('\t'.join(output) + '\n')
except Exception:
pass
# ------------------------------------------------------------------
def data_append(self,
what):
if isinstance(what, str):
self.data += [what]
else:
try:
for item in what: self.data_append(item)
except TypeError:
self.data += [str(what)]

View File

@ -22,6 +22,19 @@ _ref_white = np.array([.95047, 1.00000, 1.08883])
# - support NaN color (paraview) # - support NaN color (paraview)
class Colormap(mpl.colors.ListedColormap): class Colormap(mpl.colors.ListedColormap):
"""
Enhance matplotlib colormap functionality to be used within DAMASK.
References
----------
[1] DAMASK colormap theory
https://www.kennethmoreland.com/color-maps/ColorMapsExpanded.pdf
[2] DAMASK colormaps first use
https://doi.org/10.1016/j.ijplas.2012.09.012
[3] Matplotlib colormaps overview
https://matplotlib.org/tutorials/colors/colormaps.html
"""
def __add__(self,other): def __add__(self,other):
"""Concatenate colormaps.""" """Concatenate colormaps."""
@ -36,6 +49,17 @@ class Colormap(mpl.colors.ListedColormap):
"""Return inverted colormap.""" """Return inverted colormap."""
return self.reversed() return self.reversed()
def __repr__(self):
"""Show colormap as matplotlib figure."""
fig = plt.figure(self.name,figsize=(5,.5))
ax1 = fig.add_axes([0, 0, 1, 1])
ax1.set_axis_off()
ax1.imshow(np.linspace(0,1,self.N).reshape(1,-1),
aspect='auto', cmap=self, interpolation='nearest')
plt.show(block = False)
return self.name
@staticmethod @staticmethod
def from_range(low,high,name='DAMASK colormap',N=256,model='rgb'): def from_range(low,high,name='DAMASK colormap',N=256,model='rgb'):
""" """
@ -126,40 +150,16 @@ class Colormap(mpl.colors.ListedColormap):
""" """
# matplotlib presets # matplotlib presets
for cat in Colormap._predefined_mpl: try:
for n in cat[1]: colormap = cm.__dict__[name]
if n == name: return Colormap(np.array(list(map(colormap,np.linspace(0,1,N)))
colormap = cm.__dict__[name] if isinstance(colormap,mpl.colors.LinearSegmentedColormap) else
if isinstance(colormap,mpl.colors.LinearSegmentedColormap): colormap.colors),
return Colormap(np.array(list(map(colormap,np.linspace(0,1,N)))),name=name) name=name)
else: except KeyError:
return Colormap(np.array(colormap.colors),name=name) # DAMASK presets
definition = Colormap._predefined_DAMASK[name]
# DAMASK presets return Colormap.from_range(definition['low'],definition['high'],name,N)
definition = Colormap._predefined_DAMASK[name]
return Colormap.from_range(definition['low'],definition['high'],name,N)
@staticmethod
def list_predefined():
"""
List predefined colormaps by category.
References
----------
[1] DAMASK colormap theory
https://www.kennethmoreland.com/color-maps/ColorMapsExpanded.pdf
[2] DAMASK colormaps first use
https://doi.org/10.1016/j.ijplas.2012.09.012
[3] Matplotlib colormaps overview
https://matplotlib.org/tutorials/colors/colormaps.html
"""
print('DAMASK colormaps')
print(' '+', '.join(Colormap._predefined_DAMASK.keys()))
for cat in Colormap._predefined_mpl:
print(f'{cat[0]}')
print(' '+', '.join(cat[1]))
def shade(self,field,bounds=None,gap=None): def shade(self,field,bounds=None,gap=None):
@ -168,9 +168,9 @@ class Colormap(mpl.colors.ListedColormap):
Parameters Parameters
---------- ----------
field : numpy.array of shape(:,:) field : numpy.array of shape (:,:)
Data to be shaded. Data to be shaded.
bounds : iterable of len(2), optional bounds : iterable of len (2), optional
Colormap value range (low,high). Colormap value range (low,high).
gap : field.dtype, optional gap : field.dtype, optional
Transparent value. NaN will always be rendered transparent. Transparent value. NaN will always be rendered transparent.
@ -203,18 +203,6 @@ class Colormap(mpl.colors.ListedColormap):
mode='RGBA') mode='RGBA')
def show(self,aspect=10,vertical=False):
"""Show colormap as matplotlib figure."""
fig = plt.figure(figsize=(5/aspect,5) if vertical else (5,5/aspect))
ax1 = fig.add_axes([0, 0, 1, 1])
ax1.set_axis_off()
ax1.imshow(np.linspace(1 if vertical else 0,
0 if vertical else 1,
self.N).reshape((-1,1) if vertical else (1,-1)),
aspect='auto', cmap=self, interpolation='nearest')
plt.show()
def reversed(self,name=None): def reversed(self,name=None):
""" """
Make a reversed instance of the colormap. Make a reversed instance of the colormap.
@ -235,7 +223,6 @@ class Colormap(mpl.colors.ListedColormap):
return Colormap(np.array(rev.colors),rev.name[:-4] if rev.name.endswith('_r_r') else rev.name) return Colormap(np.array(rev.colors),rev.name[:-4] if rev.name.endswith('_r_r') else rev.name)
def save_paraview(self,fname=None): def save_paraview(self,fname=None):
""" """
Write colormap to JSON file for Paraview. Write colormap to JSON file for Paraview.
@ -247,13 +234,13 @@ class Colormap(mpl.colors.ListedColormap):
consist of the name of the colormap and extension '.json'. consist of the name of the colormap and extension '.json'.
""" """
if fname is not None: if fname is None:
fhandle = None
else:
try: try:
fhandle = open(fname,'w') fhandle = open(fname,'w')
except TypeError: except TypeError:
fhandle = fname fhandle = fname
else:
fhandle = None
colors = [] colors = []
for i,c in enumerate(np.round(self.colors,6).tolist()): for i,c in enumerate(np.round(self.colors,6).tolist()):
@ -266,11 +253,9 @@ class Colormap(mpl.colors.ListedColormap):
'DefaultMap':True, 'DefaultMap':True,
'RGBPoints':colors 'RGBPoints':colors
}] }]
if fhandle is None:
with open(self.name.replace(' ','_')+'.json', 'w') as f: with open(self.name.replace(' ','_')+'.json', 'w') if fhandle is None else fhandle as f:
json.dump(out, f,indent=4) json.dump(out, f,indent=4)
else:
json.dump(out,fhandle,indent=4)
def save_ASCII(self,fname=None): def save_ASCII(self,fname=None):
@ -284,22 +269,19 @@ class Colormap(mpl.colors.ListedColormap):
consist of the name of the colormap and extension '.txt'. consist of the name of the colormap and extension '.txt'.
""" """
if fname is not None: if fname is None:
fhandle = None
else:
try: try:
fhandle = open(fname,'w') fhandle = open(fname,'w')
except TypeError: except TypeError:
fhandle = fname fhandle = fname
else:
fhandle = None
labels = {'RGBA':4} if self.colors.shape[1] == 4 else {'RGB': 3} labels = {'RGBA':4} if self.colors.shape[1] == 4 else {'RGB': 3}
t = Table(self.colors,labels,f'Creator: {util.execution_stamp("Colormap")}') t = Table(self.colors,labels,f'Creator: {util.execution_stamp("Colormap")}')
if fhandle is None: with open(self.name.replace(' ','_')+'.txt', 'w') if fhandle is None else fhandle as f:
with open(self.name.replace(' ','_')+'.txt', 'w') as f: t.save(f)
t.save(f)
else:
t.save(fhandle)
def save_GOM(self,fname=None): def save_GOM(self,fname=None):
@ -313,24 +295,21 @@ class Colormap(mpl.colors.ListedColormap):
consist of the name of the colormap and extension '.legend'. consist of the name of the colormap and extension '.legend'.
""" """
if fname is not None: if fname is None:
fhandle = None
else:
try: try:
fhandle = open(fname,'w') fhandle = open(fname,'w')
except TypeError: except TypeError:
fhandle = fname fhandle = fname
else:
fhandle = None
# ToDo: test in GOM # ToDo: test in GOM
GOM_str = '1 1 {name} 9 {name} '.format(name=self.name.replace(" ","_")) \ GOM_str = '1 1 {name} 9 {name} '.format(name=self.name.replace(" ","_")) \
+ '0 1 0 3 0 0 -1 9 \\ 0 0 0 255 255 255 0 0 255 ' \ + '0 1 0 3 0 0 -1 9 \\ 0 0 0 255 255 255 0 0 255 ' \
+ f'30 NO_UNIT 1 1 64 64 64 255 1 0 0 0 0 0 0 3 0 {len(self.colors)}' \ + f'30 NO_UNIT 1 1 64 64 64 255 1 0 0 0 0 0 0 3 0 {len(self.colors)}' \
+ ' '.join([f' 0 {c[0]} {c[1]} {c[2]} 255 1' for c in reversed((self.colors*255).astype(int))]) \ + ' '.join([f' 0 {c[0]} {c[1]} {c[2]} 255 1' for c in reversed((self.colors*255).astype(int))]) \
+ '\n' + '\n'
if fhandle is None: with open(self.name.replace(' ','_')+'.legend', 'w') if fhandle is None else fhandle as f:
with open(self.name.replace(' ','_')+'.legend', 'w') as f: f.write(GOM_str)
f.write(GOM_str)
else:
fhandle.write(GOM_str)
def save_gmsh(self,fname=None): def save_gmsh(self,fname=None):
@ -344,22 +323,19 @@ class Colormap(mpl.colors.ListedColormap):
consist of the name of the colormap and extension '.msh'. consist of the name of the colormap and extension '.msh'.
""" """
if fname is not None: if fname is None:
fhandle = None
else:
try: try:
fhandle = open(fname,'w') fhandle = open(fname,'w')
except TypeError: except TypeError:
fhandle = fname fhandle = fname
else:
fhandle = None
# ToDo: test in gmsh # ToDo: test in gmsh
gmsh_str = 'View.ColorTable = {\n' \ gmsh_str = 'View.ColorTable = {\n' \
+'\n'.join([f'{c[0]},{c[1]},{c[2]},' for c in self.colors[:,:3]*255]) \ +'\n'.join([f'{c[0]},{c[1]},{c[2]},' for c in self.colors[:,:3]*255]) \
+'\n}\n' +'\n}\n'
if fhandle is None: with open(self.name.replace(' ','_')+'.msh', 'w') if fhandle is None else fhandle as f:
with open(self.name.replace(' ','_')+'.msh', 'w') as f: f.write(gmsh_str)
f.write(gmsh_str)
else:
fhandle.write(gmsh_str)
@staticmethod @staticmethod
@ -387,7 +363,6 @@ class Colormap(mpl.colors.ListedColormap):
if msh_sat[2] < - np.pi/3.0: hSpin *= -1.0 if msh_sat[2] < - np.pi/3.0: hSpin *= -1.0
return msh_sat[2] + hSpin return msh_sat[2] + hSpin
lo = np.array(low) lo = np.array(low)
hi = np.array(high) hi = np.array(high)
@ -407,28 +382,28 @@ class Colormap(mpl.colors.ListedColormap):
return (1.0 - frac) * lo + frac * hi return (1.0 - frac) * lo + frac * hi
_predefined_mpl= [('Perceptually Uniform Sequential', [ _predefined_mpl= {'Perceptually Uniform Sequential': [
'viridis', 'plasma', 'inferno', 'magma', 'cividis']), 'viridis', 'plasma', 'inferno', 'magma', 'cividis'],
('Sequential', [ 'Sequential': [
'Greys', 'Purples', 'Blues', 'Greens', 'Oranges', 'Reds', 'Greys', 'Purples', 'Blues', 'Greens', 'Oranges', 'Reds',
'YlOrBr', 'YlOrRd', 'OrRd', 'PuRd', 'RdPu', 'BuPu', 'YlOrBr', 'YlOrRd', 'OrRd', 'PuRd', 'RdPu', 'BuPu',
'GnBu', 'PuBu', 'YlGnBu', 'PuBuGn', 'BuGn', 'YlGn']), 'GnBu', 'PuBu', 'YlGnBu', 'PuBuGn', 'BuGn', 'YlGn'],
('Sequential (2)', [ 'Sequential (2)': [
'binary', 'gist_yarg', 'gist_gray', 'gray', 'bone', 'pink', 'binary', 'gist_yarg', 'gist_gray', 'gray', 'bone', 'pink',
'spring', 'summer', 'autumn', 'winter', 'cool', 'Wistia', 'spring', 'summer', 'autumn', 'winter', 'cool', 'Wistia',
'hot', 'afmhot', 'gist_heat', 'copper']), 'hot', 'afmhot', 'gist_heat', 'copper'],
('Diverging', [ 'Diverging': [
'PiYG', 'PRGn', 'BrBG', 'PuOr', 'RdGy', 'RdBu', 'PiYG', 'PRGn', 'BrBG', 'PuOr', 'RdGy', 'RdBu',
'RdYlBu', 'RdYlGn', 'Spectral', 'coolwarm', 'bwr', 'seismic']), 'RdYlBu', 'RdYlGn', 'Spectral', 'coolwarm', 'bwr', 'seismic'],
('Cyclic', ['twilight', 'twilight_shifted', 'hsv']), 'Cyclic': ['twilight', 'twilight_shifted', 'hsv'],
('Qualitative', [ 'Qualitative': [
'Pastel1', 'Pastel2', 'Paired', 'Accent', 'Pastel1', 'Pastel2', 'Paired', 'Accent',
'Dark2', 'Set1', 'Set2', 'Set3', 'Dark2', 'Set1', 'Set2', 'Set3',
'tab10', 'tab20', 'tab20b', 'tab20c']), 'tab10', 'tab20', 'tab20b', 'tab20c'],
('Miscellaneous', [ 'Miscellaneous': [
'flag', 'prism', 'ocean', 'gist_earth', 'terrain', 'gist_stern', 'flag', 'prism', 'ocean', 'gist_earth', 'terrain', 'gist_stern',
'gnuplot', 'gnuplot2', 'CMRmap', 'cubehelix', 'brg', 'gnuplot', 'gnuplot2', 'CMRmap', 'cubehelix', 'brg',
'gist_rainbow', 'rainbow', 'jet', 'nipy_spectral', 'gist_ncar'])] 'gist_rainbow', 'rainbow', 'jet', 'nipy_spectral', 'gist_ncar']}
_predefined_DAMASK = {'orientation': {'low': [0.933334,0.878432,0.878431], _predefined_DAMASK = {'orientation': {'low': [0.933334,0.878432,0.878431],
'high': [0.250980,0.007843,0.000000]}, 'high': [0.250980,0.007843,0.000000]},
@ -437,6 +412,9 @@ class Colormap(mpl.colors.ListedColormap):
'stress': {'low': [0.878432,0.874511,0.949019], 'stress': {'low': [0.878432,0.874511,0.949019],
'high': [0.000002,0.000000,0.286275]}} 'high': [0.000002,0.000000,0.286275]}}
predefined = dict(**{'DAMASK':list(_predefined_DAMASK)},**_predefined_mpl)
@staticmethod @staticmethod
def _hsv2rgb(hsv): def _hsv2rgb(hsv):
""" """

View File

@ -1,6 +1,7 @@
from io import StringIO from io import StringIO
import abc import abc
import numpy as np
import yaml import yaml
class NiceDumper(yaml.SafeDumper): class NiceDumper(yaml.SafeDumper):
@ -15,6 +16,11 @@ class NiceDumper(yaml.SafeDumper):
def increase_indent(self, flow=False, indentless=False): def increase_indent(self, flow=False, indentless=False):
return super().increase_indent(flow, False) return super().increase_indent(flow, False)
def represent_data(self, data):
"""Cast Config objects and its subclasses to dict."""
return self.represent_data(dict(data)) if isinstance(data, dict) and type(data) != dict else \
super().represent_data(data)
class Config(dict): class Config(dict):
"""YAML-based configuration.""" """YAML-based configuration."""
@ -64,7 +70,20 @@ class Config(dict):
kwargs['width'] = 256 kwargs['width'] = 256
if 'default_flow_style' not in kwargs: if 'default_flow_style' not in kwargs:
kwargs['default_flow_style'] = None kwargs['default_flow_style'] = None
fhandle.write(yaml.dump(dict(self),Dumper=NiceDumper,**kwargs)) if 'sort_keys' not in kwargs:
kwargs['sort_keys'] = False
def array_representer(dumper, data):
"""Convert numpy array to list of native types."""
return dumper.represent_list([d.item() for d in data])
NiceDumper.add_representer(np.ndarray, array_representer)
try:
fhandle.write(yaml.dump(self,Dumper=NiceDumper,**kwargs))
except TypeError: # compatibility with old pyyaml
del kwargs['sort_keys']
fhandle.write(yaml.dump(self,Dumper=NiceDumper,**kwargs))
@property @property

View File

@ -2,10 +2,9 @@ import copy
import numpy as np import numpy as np
from . import grid_filters
from . import Config from . import Config
from . import Lattice
from . import Rotation from . import Rotation
from . import Orientation
class ConfigMaterial(Config): class ConfigMaterial(Config):
"""Material configuration.""" """Material configuration."""
@ -25,8 +24,22 @@ class ConfigMaterial(Config):
super().save(fname,**kwargs) super().save(fname,**kwargs)
@classmethod
def load(cls,fname='material.yaml'):
"""
Load from yaml file.
Parameters
----------
fname : file, str, or pathlib.Path, optional
Filename or file for writing. Defaults to 'material.yaml'.
"""
return super(ConfigMaterial,cls).load(fname)
@staticmethod @staticmethod
def from_table(table,coordinates=None,constituents={},**kwargs): def from_table(table,constituents={},**kwargs):
""" """
Load from an ASCII table. Load from an ASCII table.
@ -34,10 +47,6 @@ class ConfigMaterial(Config):
---------- ----------
table : damask.Table table : damask.Table
Table that contains material information. Table that contains material information.
coordinates : str, optional
Label of spatial coordiates. Used for sorting and performing a
sanity check. Default to None, in which case no sorting or checking is
peformed.
constituents : dict, optional constituents : dict, optional
Entries for 'constituents'. The key is the name and the value specifies Entries for 'constituents'. The key is the name and the value specifies
the label of the data column in the table the label of the data column in the table
@ -54,7 +63,7 @@ class ConfigMaterial(Config):
pos pos pos qu qu qu qu phase homog pos pos pos qu qu qu qu phase homog
0 0 0 0 0.19 0.8 0.24 -0.51 Aluminum SX 0 0 0 0 0.19 0.8 0.24 -0.51 Aluminum SX
1 1 0 0 0.8 0.19 0.24 -0.51 Steel SX 1 1 0 0 0.8 0.19 0.24 -0.51 Steel SX
>>> cm.from_table(t,'pos',{'O':'qu','phase':'phase'},homogenization='homog') >>> cm.from_table(t,{'O':'qu','phase':'phase'},homogenization='homog')
material: material:
- constituents: - constituents:
- O: [0.19, 0.8, 0.24, -0.51] - O: [0.19, 0.8, 0.24, -0.51]
@ -68,19 +77,14 @@ class ConfigMaterial(Config):
homogenization: SX homogenization: SX
""" """
if coordinates is not None: constituents_ = {k:table.get(v) for k,v in constituents.items()}
t = table.sort_by([f'{i}_{coordinates}' for i in range(3,0,-1)]) kwargs_ = {k:table.get(v) for k,v in kwargs.items()}
grid_filters.coord0_check(t.get(coordinates))
else:
t = table
constituents_ = {k:t.get(v) for k,v in constituents.items()}
kwargs_ = {k:t.get(v) for k,v in kwargs.items()}
_,idx = np.unique(np.hstack(list({**constituents_,**kwargs_}.values())),return_index=True,axis=0) _,idx = np.unique(np.hstack(list({**constituents_,**kwargs_}.values())),return_index=True,axis=0)
constituents_ = {k:v[idx].squeeze() for k,v in constituents_.items()} idx = np.sort(idx)
kwargs_ = {k:v[idx].squeeze() for k,v in kwargs_.items()} constituents_ = {k:np.atleast_1d(v[idx].squeeze()) for k,v in constituents_.items()}
kwargs_ = {k:np.atleast_1d(v[idx].squeeze()) for k,v in kwargs_.items()}
return ConfigMaterial().material_add(constituents_,**kwargs_) return ConfigMaterial().material_add(constituents_,**kwargs_)
@ -148,7 +152,7 @@ class ConfigMaterial(Config):
for k,v in self['phase'].items(): for k,v in self['phase'].items():
if 'lattice' in v: if 'lattice' in v:
try: try:
Lattice(v['lattice']) Orientation(lattice=v['lattice'])
except KeyError: except KeyError:
s = v['lattice'] s = v['lattice']
print(f"Invalid lattice: '{s}' in phase '{k}'") print(f"Invalid lattice: '{s}' in phase '{k}'")
@ -222,26 +226,25 @@ class ConfigMaterial(Config):
return dup return dup
def material_add(self,constituents,**kwargs): def material_add(self,constituents=None,**kwargs):
""" """
Add material entries. Add material entries.
Parameters Parameters
---------- ----------
constituents : dict constituents : dict, optional
Entries for 'constituents'. The key is the name and the value specifies Entries for 'constituents' as key-value pair.
the label of the data column in the table
**kwargs **kwargs
Keyword arguments where the key is the name and the value specifies Key-value pairs.
the label of the data column in the table
Examples Examples
-------- --------
>>> import damask >>> import damask
>>> m = damask.ConfigMaterial()
>>> O = damask.Rotation.from_random(3).as_quaternion() >>> O = damask.Rotation.from_random(3).as_quaternion()
>>> phase = ['Aluminum','Steel','Aluminum'] >>> phase = ['Aluminum','Steel','Aluminum']
>>> m.material_add(constituents={'phase':phase,'O':O},homogenization='SX') >>> m = damask.ConfigMaterial().material_add(constituents={'phase':phase,'O':O},
... homogenization='SX')
>>> m
material: material:
- constituents: - constituents:
- O: [0.577764, -0.146299, -0.617669, 0.513010] - O: [0.577764, -0.146299, -0.617669, 0.513010]
@ -260,19 +263,31 @@ class ConfigMaterial(Config):
homogenization: SX homogenization: SX
""" """
c = [{'constituents':u} for u in ConfigMaterial._constituents(**constituents)] length = -1
for v in kwargs.values():
if hasattr(v,'__len__') and not isinstance(v,str):
if length != -1 and len(v) != length:
raise ValueError('Cannot add entries of different length')
else:
length = len(v)
length = max(1,length)
c = [{} for _ in range(length)] if constituents is None else \
[{'constituents':u} for u in ConfigMaterial._constituents(**constituents)]
if len(c) == 1: c = [copy.deepcopy(c[0]) for _ in range(length)]
if length != 1 and length != len(c):
raise ValueError('Cannot add entries of different length')
for k,v in kwargs.items(): for k,v in kwargs.items():
if hasattr(v,'__len__') and not isinstance(v,str): if hasattr(v,'__len__') and not isinstance(v,str):
if len(v) != len(c):
raise ValueError('len mismatch 1')
for i,vv in enumerate(v): for i,vv in enumerate(v):
c[i][k] = [w.item() for w in vv] if isinstance(vv,np.ndarray) else vv.item() c[i][k] = vv.item() if isinstance(vv,np.generic) else vv
else: else:
for i in range(len(c)): for i in range(len(c)):
c[i][k] = v c[i][k] = v
dup = copy.deepcopy(self) dup = copy.deepcopy(self)
if 'material' not in dup: dup['material'] = [] dup['material'] = dup['material'] + c if 'material' in dup else c
dup['material'] +=c
return dup return dup
@ -280,6 +295,7 @@ class ConfigMaterial(Config):
@staticmethod @staticmethod
def _constituents(N=1,**kwargs): def _constituents(N=1,**kwargs):
"""Construct list of constituents.""" """Construct list of constituents."""
N_material=1
for v in kwargs.values(): for v in kwargs.values():
if hasattr(v,'__len__') and not isinstance(v,str): N_material = len(v) if hasattr(v,'__len__') and not isinstance(v,str): N_material = len(v)
@ -288,9 +304,9 @@ class ConfigMaterial(Config):
for k,v in kwargs.items(): for k,v in kwargs.items():
if hasattr(v,'__len__') and not isinstance(v,str): if hasattr(v,'__len__') and not isinstance(v,str):
if len(v) != N_material: if len(v) != N_material:
raise ValueError('len mismatch 2') raise ValueError('Cannot add entries of different length')
for i,vv in enumerate(np.array(v)): for i,vv in enumerate(np.array(v)):
m[i][0][k] = [w.item() for w in vv] if isinstance(vv,np.ndarray) else vv.item() m[i][0][k] = vv.item() if isinstance(vv,np.generic) else vv
else: else:
for i in range(N_material): for i in range(N_material):
m[i][0][k] = v m[i][0][k] = v

View File

@ -3,14 +3,8 @@ from pathlib import Path
class Environment: class Environment:
def __init__(self):
"""Do Nothing."""
pass
@property @property
def screen_size(self): def screen_size(self):
width = 1024
height = 768
try: try:
import wx import wx
_ = wx.App(False) # noqa _ = wx.App(False) # noqa
@ -23,7 +17,9 @@ class Environment:
height = tk.winfo_screenheight() height = tk.winfo_screenheight()
tk.destroy() tk.destroy()
except Exception as e: except Exception as e:
pass width = 1024
height = 768
return (width,height) return (width,height)
@ -32,7 +28,7 @@ class Environment:
options = {} options = {}
for item in ['DAMASK_NUM_THREADS', for item in ['DAMASK_NUM_THREADS',
'MSC_ROOT', 'MSC_ROOT',
'MARC_VERSION', 'MSC_VERSION',
]: ]:
options[item] = os.environ[item] if item in os.environ else None options[item] = os.environ[item] if item in os.environ else None
@ -43,8 +39,3 @@ class Environment:
def root_dir(self): def root_dir(self):
"""Return DAMASK root path.""" """Return DAMASK root path."""
return Path(__file__).parents[2] return Path(__file__).parents[2]
# for compatibility
def rootDir(self):
return str(self.root_dir)

View File

@ -2,8 +2,10 @@ import copy
import multiprocessing as mp import multiprocessing as mp
from functools import partial from functools import partial
from os import path from os import path
import warnings
import numpy as np import numpy as np
import pandas as pd
import h5py import h5py
from scipy import ndimage,spatial from scipy import ndimage,spatial
@ -19,7 +21,7 @@ class Geom:
def __init__(self,material,size,origin=[0.0,0.0,0.0],comments=[]): def __init__(self,material,size,origin=[0.0,0.0,0.0],comments=[]):
""" """
New geometry definition from array of material, size, and origin. New geometry definition from array of materials, size, and origin.
Parameters Parameters
---------- ----------
@ -33,28 +35,10 @@ class Geom:
Comment lines. Comment lines.
""" """
if len(material.shape) != 3: self.material = material
raise ValueError(f'Invalid material shape {material.shape}.') self.size = size
elif material.dtype not in np.sctypes['float'] + np.sctypes['int']: self.origin = origin
raise TypeError(f'Invalid material data type {material.dtype}.') self.comments = comments
else:
self.material = np.copy(material)
if self.material.dtype in np.sctypes['float'] and \
np.all(self.material == self.material.astype(int).astype(float)):
self.material = self.material.astype(int)
if len(size) != 3 or any(np.array(size) <= 0):
raise ValueError(f'Invalid size {size}.')
else:
self.size = np.array(size)
if len(origin) != 3:
raise ValueError(f'Invalid origin {origin}.')
else:
self.origin = np.array(origin)
self.comments = [str(c) for c in comments] if isinstance(comments,list) else [str(comments)]
def __repr__(self): def __repr__(self):
@ -90,35 +74,90 @@ class Geom:
""" """
message = [] message = []
if np.any(other.grid != self.grid): if np.any(other.grid != self.grid):
message.append(util.delete(f'grid a b c: {util.srepr(other.grid," x ")}')) message.append(util.deemph(f'grid a b c: {util.srepr(other.grid," x ")}'))
message.append(util.emph( f'grid a b c: {util.srepr( self.grid," x ")}')) message.append(util.emph( f'grid a b c: {util.srepr( self.grid," x ")}'))
if not np.allclose(other.size,self.size): if not np.allclose(other.size,self.size):
message.append(util.delete(f'size x y z: {util.srepr(other.size," x ")}')) message.append(util.deemph(f'size x y z: {util.srepr(other.size," x ")}'))
message.append(util.emph( f'size x y z: {util.srepr( self.size," x ")}')) message.append(util.emph( f'size x y z: {util.srepr( self.size," x ")}'))
if not np.allclose(other.origin,self.origin): if not np.allclose(other.origin,self.origin):
message.append(util.delete(f'origin x y z: {util.srepr(other.origin," ")}')) message.append(util.deemph(f'origin x y z: {util.srepr(other.origin," ")}'))
message.append(util.emph( f'origin x y z: {util.srepr( self.origin," ")}')) message.append(util.emph( f'origin x y z: {util.srepr( self.origin," ")}'))
if other.N_materials != self.N_materials: if other.N_materials != self.N_materials:
message.append(util.delete(f'# materials: {other.N_materials}')) message.append(util.deemph(f'# materials: {other.N_materials}'))
message.append(util.emph( f'# materials: { self.N_materials}')) message.append(util.emph( f'# materials: { self.N_materials}'))
if np.nanmax(other.material) != np.nanmax(self.material): if np.nanmax(other.material) != np.nanmax(self.material):
message.append(util.delete(f'max material: {np.nanmax(other.material)}')) message.append(util.deemph(f'max material: {np.nanmax(other.material)}'))
message.append(util.emph( f'max material: {np.nanmax( self.material)}')) message.append(util.emph( f'max material: {np.nanmax( self.material)}'))
return util.return_message(message) return util.return_message(message)
@property
def material(self):
"""Material indices."""
return self._material
@material.setter
def material(self,material):
if len(material.shape) != 3:
raise ValueError(f'Invalid material shape {material.shape}.')
elif material.dtype not in np.sctypes['float'] + np.sctypes['int']:
raise TypeError(f'Invalid material data type {material.dtype}.')
else:
self._material = np.copy(material)
if self.material.dtype in np.sctypes['float'] and \
np.all(self.material == self.material.astype(int).astype(float)):
self._material = self.material.astype(int)
@property
def size(self):
"""Physical size of geometry in meter."""
return self._size
@size.setter
def size(self,size):
if len(size) != 3 or any(np.array(size) <= 0):
raise ValueError(f'Invalid size {size}.')
else:
self._size = np.array(size)
@property
def origin(self):
"""Coordinates of geometry origin in meter."""
return self._origin
@origin.setter
def origin(self,origin):
if len(origin) != 3:
raise ValueError(f'Invalid origin {origin}.')
else:
self._origin = np.array(origin)
@property
def comments(self):
"""Comments/history of geometry."""
return self._comments
@comments.setter
def comments(self,comments):
self._comments = [str(c) for c in comments] if isinstance(comments,list) else [str(comments)]
@property @property
def grid(self): def grid(self):
"""Grid dimension of geometry."""
return np.asarray(self.material.shape) return np.asarray(self.material.shape)
@property @property
def N_materials(self): def N_materials(self):
"""Number of (unique) material indices within geometry."""
return np.unique(self.material).size return np.unique(self.material).size
@ -131,7 +170,7 @@ class Geom:
---------- ----------
fname : str or or pathlib.Path fname : str or or pathlib.Path
Geometry file to read. Geometry file to read.
Valid extension is .vtr, it will be appended if not given. Valid extension is .vtr, which will be appended if not given.
""" """
v = VTK.load(fname if str(fname).endswith('.vtr') else str(fname)+'.vtr') v = VTK.load(fname if str(fname).endswith('.vtr') else str(fname)+'.vtr')
@ -150,12 +189,16 @@ class Geom:
""" """
Read a geom file. Read a geom file.
Storing geometry files in ASCII format is deprecated.
This function will be removed in a future version of DAMASK.
Parameters Parameters
---------- ----------
fname : str or file handle fname : str, pathlib.Path, or file handle
Geometry file to read. Geometry file to read.
""" """
warnings.warn('Support for ASCII-based geom format will be removed in DAMASK 3.1.0', DeprecationWarning)
try: try:
f = open(fname) f = open(fname)
except TypeError: except TypeError:
@ -170,9 +213,8 @@ class Geom:
if not keyword.startswith('head') or header_length < 3: if not keyword.startswith('head') or header_length < 3:
raise TypeError('Header length information missing or invalid') raise TypeError('Header length information missing or invalid')
content = f.readlines()
comments = [] comments = []
content = f.readlines()
for i,line in enumerate(content[:header_length]): for i,line in enumerate(content[:header_length]):
items = line.split('#')[0].lower().strip().split() items = line.split('#')[0].lower().strip().split()
key = items[0] if items else '' key = items[0] if items else ''
@ -185,7 +227,7 @@ class Geom:
else: else:
comments.append(line.strip()) comments.append(line.strip())
material = np.empty(grid.prod()) # initialize as flat array material = np.empty(grid.prod()) # initialize as flat array
i = 0 i = 0
for line in content[header_length:]: for line in content[header_length:]:
items = line.split('#')[0].split() items = line.split('#')[0].split()
@ -203,13 +245,12 @@ class Geom:
if i != grid.prod(): if i != grid.prod():
raise TypeError(f'Invalid file: expected {grid.prod()} entries, found {i}') raise TypeError(f'Invalid file: expected {grid.prod()} entries, found {i}')
if not np.any(np.mod(material,1) != 0.0): # no float present if not np.any(np.mod(material,1) != 0.0): # no float present
material = material.astype('int') material = material.astype('int') - (1 if material.min() > 0 else 0)
return Geom(material.reshape(grid,order='F'),size,origin,comments) return Geom(material.reshape(grid,order='F'),size,origin,comments)
@staticmethod @staticmethod
def load_DREAM3D(fname,base_group,point_data=None,material='FeatureIds'): def load_DREAM3D(fname,base_group,point_data=None,material='FeatureIds'):
""" """
@ -220,26 +261,26 @@ class Geom:
fname : str fname : str
Filename of the DREAM.3D file Filename of the DREAM.3D file
base_group : str base_group : str
Name of the group (folder) below 'DataContainers'. For example Name of the group (folder) below 'DataContainers',
'SyntheticVolumeDataContainer'. for example 'SyntheticVolumeDataContainer'.
point_data : str, optional point_data : str, optional
Name of the group (folder) containing the point wise material data, Name of the group (folder) containing the pointwise material data,
for example 'CellData'. Defaults to None, in which case points consecutively numbered. for example 'CellData'. Defaults to None, in which case points are consecutively numbered.
material : str, optional material : str, optional
Name of the dataset containing the material ID. Defaults to Name of the dataset containing the material ID.
'FeatureIds'. Defaults to 'FeatureIds'.
""" """
root_dir ='DataContainers' root_dir ='DataContainers'
f = h5py.File(fname, 'r') f = h5py.File(fname, 'r')
g = path.join(root_dir,base_group,'_SIMPL_GEOMETRY') g = path.join(root_dir,base_group,'_SIMPL_GEOMETRY')
size = f[path.join(g,'DIMENSIONS')][()] * f[path.join(g,'SPACING')][()]
grid = f[path.join(g,'DIMENSIONS')][()] grid = f[path.join(g,'DIMENSIONS')][()]
size = f[path.join(g,'SPACING')][()] * grid
origin = f[path.join(g,'ORIGIN')][()] origin = f[path.join(g,'ORIGIN')][()]
group_pointwise = path.join(root_dir,base_group,point_data)
ma = np.arange(1,np.product(grid)+1,dtype=int) if point_data is None else \ ma = np.arange(grid.prod(),dtype=int) \
np.reshape(f[path.join(group_pointwise,material)],grid.prod()) if point_data is None else \
np.reshape(f[path.join(root_dir,base_group,point_data,material)],grid.prod())
return Geom(ma.reshape(grid,order='F'),size,origin,util.execution_stamp('Geom','load_DREAM3D')) return Geom(ma.reshape(grid,order='F'),size,origin,util.execution_stamp('Geom','load_DREAM3D'))
@ -247,28 +288,29 @@ class Geom:
@staticmethod @staticmethod
def from_table(table,coordinates,labels): def from_table(table,coordinates,labels):
""" """
Load an ASCII table. Derive geometry from an ASCII table.
Parameters Parameters
---------- ----------
table : damask.Table table : damask.Table
Table that contains material information. Table that contains material information.
coordinates : str coordinates : str
Label of the column containing the spatial coordinates. Label of the vector column containing the spatial coordinates.
Need to be ordered (1./x fast, 3./z slow).
labels : str or list of str labels : str or list of str
Label(s) of the columns containing the material definition. Label(s) of the columns containing the material definition.
Each unique combintation of values results in a material. Each unique combintation of values results in one material ID.
""" """
t = table.sort_by([f'{i}_{coordinates}' for i in range(3,0,-1)]) grid,size,origin = grid_filters.cell_coord0_gridSizeOrigin(table.get(coordinates))
grid,size,origin = grid_filters.cell_coord0_gridSizeOrigin(t.get(coordinates))
labels_ = [labels] if isinstance(labels,str) else labels labels_ = [labels] if isinstance(labels,str) else labels
_,unique_inverse = np.unique(np.hstack([t.get(l) for l in labels_]),return_inverse=True,axis=0) unique,unique_inverse = np.unique(np.hstack([table.get(l) for l in labels_]),return_inverse=True,axis=0)
ma = unique_inverse.reshape(grid,order='F') + 1
return Geom(ma,size,origin,util.execution_stamp('Geom','from_table')) ma = np.arange(grid.prod()) if len(unique) == grid.prod() else \
np.arange(unique.size)[np.argsort(pd.unique(unique_inverse))][unique_inverse]
return Geom(ma.reshape(grid,order='F'),size,origin,util.execution_stamp('Geom','from_table'))
@staticmethod @staticmethod
@ -291,8 +333,8 @@ class Geom:
weights : numpy.ndarray of shape (seeds.shape[0]) weights : numpy.ndarray of shape (seeds.shape[0])
Weights of the seeds. Setting all weights to 1.0 gives a standard Voronoi tessellation. Weights of the seeds. Setting all weights to 1.0 gives a standard Voronoi tessellation.
material : numpy.ndarray of shape (seeds.shape[0]), optional material : numpy.ndarray of shape (seeds.shape[0]), optional
Material ID of the seeds. Defaults to None, in which case materials are Material ID of the seeds.
consecutively numbered. Defaults to None, in which case materials are consecutively numbered.
periodic : Boolean, optional periodic : Boolean, optional
Perform a periodic tessellation. Defaults to True. Perform a periodic tessellation. Defaults to True.
@ -340,8 +382,8 @@ class Geom:
seeds : numpy.ndarray of shape (:,3) seeds : numpy.ndarray of shape (:,3)
Position of the seed points in meter. All points need to lay within the box. Position of the seed points in meter. All points need to lay within the box.
material : numpy.ndarray of shape (seeds.shape[0]), optional material : numpy.ndarray of shape (seeds.shape[0]), optional
Material ID of the seeds. Defaults to None, in which case materials are Material ID of the seeds.
consecutively numbered. Defaults to None, in which case materials are consecutively numbered.
periodic : Boolean, optional periodic : Boolean, optional
Perform a periodic tessellation. Defaults to True. Perform a periodic tessellation. Defaults to True.
@ -417,7 +459,7 @@ class Geom:
Number of periods per unit cell. Defaults to 1. Number of periods per unit cell. Defaults to 1.
materials : (int, int), optional materials : (int, int), optional
Material IDs. Defaults to (1,2). Material IDs. Defaults to (1,2).
Notes Notes
----- -----
The following triply-periodic minimal surfaces are implemented: The following triply-periodic minimal surfaces are implemented:
@ -436,19 +478,19 @@ class Geom:
References References
---------- ----------
Surface curvature in triply-periodic minimal surface architectures as
a distinct design parameter in preparing advanced tissue engineering scaffolds
Sébastien B G Blanquer, Maike Werner, Markus Hannula, Shahriar Sharifi, Sébastien B G Blanquer, Maike Werner, Markus Hannula, Shahriar Sharifi,
Guillaume P R Lajoinie, David Eglin, Jari Hyttinen, André A Poot, and Dirk W Grijpma Guillaume P R Lajoinie, David Eglin, Jari Hyttinen, André A Poot, and Dirk W Grijpma
10.1088/1758-5090/aa6553 Surface curvature in triply-periodic minimal surface architectures as
a distinct design parameter in preparing advanced tissue engineering scaffolds
https://doi.org/10.1088/1758-5090/aa6553
Triply Periodic Bicontinuous Cubic Microdomain Morphologies by Symmetries
Meinhard Wohlgemuth, Nataliya Yufa, James Hoffman, and Edwin L. Thomas Meinhard Wohlgemuth, Nataliya Yufa, James Hoffman, and Edwin L. Thomas
10.1021/ma0019499 Triply Periodic Bicontinuous Cubic Microdomain Morphologies by Symmetries
https://doi.org/10.1021/ma0019499
Minisurf A minimal surface generator for finite element modeling and additive manufacturing
Meng-Ting Hsieh, Lorenzo Valdevit Meng-Ting Hsieh, Lorenzo Valdevit
10.1016/j.simpa.2020.100026 Minisurf A minimal surface generator for finite element modeling and additive manufacturing
https://doi.org/10.1016/j.simpa.2020.100026
""" """
x,y,z = np.meshgrid(periods*2.0*np.pi*(np.arange(grid[0])+0.5)/grid[0], x,y,z = np.meshgrid(periods*2.0*np.pi*(np.arange(grid[0])+0.5)/grid[0],
@ -463,17 +505,17 @@ class Geom:
def save(self,fname,compress=True): def save(self,fname,compress=True):
""" """
Store as vtk rectilinear grid. Store as VTK rectilinear grid.
Parameters Parameters
---------- ----------
fname : str or or pathlib.Path fname : str or pathlib.Path
Filename to write. Valid extension is .vtr, it will be appended if not given. Filename to write. Valid extension is .vtr, it will be appended if not given.
compress : bool, optional compress : bool, optional
Compress with zlib algorithm. Defaults to True. Compress with zlib algorithm. Defaults to True.
""" """
v = VTK.from_rectilinearGrid(self.grid,self.size,self.origin) v = VTK.from_rectilinear_grid(self.grid,self.size,self.origin)
v.add(self.material.flatten(order='F'),'material') v.add(self.material.flatten(order='F'),'material')
v.add_comments(self.comments) v.add_comments(self.comments)
@ -484,6 +526,9 @@ class Geom:
""" """
Write a geom file. Write a geom file.
Storing geometry files in ASCII format is deprecated.
This function will be removed in a future version of DAMASK.
Parameters Parameters
---------- ----------
fname : str or file handle fname : str or file handle
@ -492,6 +537,7 @@ class Geom:
Compress geometry with 'x of y' and 'a to b'. Compress geometry with 'x of y' and 'a to b'.
""" """
warnings.warn('Support for ASCII-based geom format will be removed in DAMASK 3.1.0', DeprecationWarning)
header = [f'{len(self.comments)+4} header'] + self.comments \ header = [f'{len(self.comments)+4} header'] + self.comments \
+ ['grid a {} b {} c {}'.format(*self.grid), + ['grid a {} b {} c {}'.format(*self.grid),
'size x {} y {} z {}'.format(*self.size), 'size x {} y {} z {}'.format(*self.size),
@ -508,8 +554,7 @@ class Geom:
def show(self): def show(self):
"""Show on screen.""" """Show on screen."""
v = VTK.from_rectilinearGrid(self.grid,self.size,self.origin) VTK.from_rectilinear_grid(self.grid,self.size,self.origin).show()
v.show()
def add_primitive(self,dimension,center,exponent, def add_primitive(self,dimension,center,exponent,
@ -519,20 +564,20 @@ class Geom:
Parameters Parameters
---------- ----------
dimension : int or float numpy.ndarray of shape(3) dimension : int or float numpy.ndarray of shape (3)
Dimension (diameter/side length) of the primitive. If given as Dimension (diameter/side length) of the primitive. If given as
integers, grid point locations (cell centers) are addressed. integers, grid point locations (cell centers) are addressed.
If given as floats, coordinates are addressed. If given as floats, coordinates are addressed.
center : int or float numpy.ndarray of shape(3) center : int or float numpy.ndarray of shape (3)
Center of the primitive. If given as integers, grid point Center of the primitive. If given as integers, grid point
locations (cell centers) are addressed. coordinates (cell centers) are addressed.
If given as floats, coordinates are addressed. If given as floats, coordinates in space are addressed.
exponent : numpy.ndarray of shape(3) or float exponent : numpy.ndarray of shape (3) or float
Exponents for the three axes. Exponents for the three axes.
0 gives octahedron (ǀxǀ^(2^0) + ǀyǀ^(2^0) + ǀzǀ^(2^0) < 1) 0 gives octahedron (ǀxǀ^(2^0) + ǀyǀ^(2^0) + ǀzǀ^(2^0) < 1)
1 gives sphere (ǀxǀ^(2^1) + ǀyǀ^(2^1) + ǀzǀ^(2^1) < 1) 1 gives sphere (ǀxǀ^(2^1) + ǀyǀ^(2^1) + ǀzǀ^(2^1) < 1)
fill : int, optional fill : int, optional
Fill value for primitive. Defaults to material.max() + 1. Fill value for primitive. Defaults to material.max()+1.
R : damask.Rotation, optional R : damask.Rotation, optional
Rotation of primitive. Defaults to no rotation. Rotation of primitive. Defaults to no rotation.
inverse : Boolean, optional inverse : Boolean, optional
@ -542,25 +587,27 @@ class Geom:
Repeat primitive over boundaries. Defaults to True. Repeat primitive over boundaries. Defaults to True.
""" """
# normalized 'radius' and center # radius and center
r = np.array(dimension)/self.grid/2.0 if np.array(dimension).dtype in np.sctypes['int'] else \ r = np.array(dimension)/2.0*self.size/self.grid if np.array(dimension).dtype in np.sctypes['int'] else \
np.array(dimension)/self.size/2.0 np.array(dimension)/2.0
c = (np.array(center) + .5)/self.grid if np.array(center).dtype in np.sctypes['int'] else \ c = (np.array(center) + .5)*self.size/self.grid if np.array(center).dtype in np.sctypes['int'] else \
(np.array(center) - self.origin)/self.size (np.array(center) - self.origin)
coords = grid_filters.cell_coord0(self.grid,np.ones(3)) \ coords = grid_filters.cell_coord0(self.grid,self.size,
- ((np.ones(3)-(1./self.grid if np.array(center).dtype in np.sctypes['int'] else 0))*0.5 if periodic else c) # periodic center is always at CoG -(0.5*(self.size + (self.size/self.grid
if np.array(center).dtype in np.sctypes['int'] else
0)) if periodic else c))
coords_rot = R.broadcast_to(tuple(self.grid))@coords coords_rot = R.broadcast_to(tuple(self.grid))@coords
with np.errstate(all='ignore'): with np.errstate(all='ignore'):
mask = np.sum(np.power(coords_rot/r,2.0**np.array(exponent)),axis=-1) > 1.0 mask = np.sum(np.power(coords_rot/r,2.0**np.array(exponent)),axis=-1) > 1.0
if periodic: # translate back to center if periodic: # translate back to center
mask = np.roll(mask,((c-np.ones(3)*.5)*self.grid).astype(int),(0,1,2)) mask = np.roll(mask,((c/self.size-0.5)*self.grid).round().astype(int),(0,1,2))
fill_ = np.full_like(self.material,np.nanmax(self.material)+1 if fill is None else fill) return Geom(material = np.where(np.logical_not(mask) if inverse else mask,
self.material,
return Geom(material = np.where(np.logical_not(mask) if inverse else mask, self.material,fill_), np.nanmax(self.material)+1 if fill is None else fill),
size = self.size, size = self.size,
origin = self.origin, origin = self.origin,
comments = self.comments+[util.execution_stamp('Geom','add_primitive')], comments = self.comments+[util.execution_stamp('Geom','add_primitive')],
@ -687,12 +734,10 @@ class Geom:
def renumber(self): def renumber(self):
"""Renumber sorted material indices to 1,...,N.""" """Renumber sorted material indices as 0,...,N-1."""
renumbered = np.empty(self.grid,dtype=self.material.dtype) _,renumbered = np.unique(self.material,return_inverse=True)
for i, oldID in enumerate(np.unique(self.material)):
renumbered = np.where(self.material == oldID, i+1, renumbered)
return Geom(material = renumbered, return Geom(material = renumbered.reshape(self.grid),
size = self.size, size = self.size,
origin = self.origin, origin = self.origin,
comments = self.comments+[util.execution_stamp('Geom','renumber')], comments = self.comments+[util.execution_stamp('Geom','renumber')],
@ -714,7 +759,7 @@ class Geom:
if fill is None: fill = np.nanmax(self.material) + 1 if fill is None: fill = np.nanmax(self.material) + 1
dtype = float if np.isnan(fill) or int(fill) != fill or self.material.dtype==np.float else int dtype = float if np.isnan(fill) or int(fill) != fill or self.material.dtype==np.float else int
Eulers = R.as_Eulers(degrees=True) Eulers = R.as_Euler_angles(degrees=True)
material_in = self.material.copy() material_in = self.material.copy()
# These rotations are always applied in the reference coordinate system, i.e. (z,x,z) not (z,x',z'') # These rotations are always applied in the reference coordinate system, i.e. (z,x,z) not (z,x',z'')
@ -783,17 +828,33 @@ class Geom:
New material indices. New material indices.
""" """
substituted = self.material.copy() def mp(entry,mapper):
for from_ms,to_ms in zip(from_material,to_material): return mapper[entry] if entry in mapper else entry
substituted[self.material==from_ms] = to_ms
return Geom(material = substituted, mp = np.vectorize(mp)
mapper = dict(zip(from_material,to_material))
return Geom(material = mp(self.material,mapper).reshape(self.grid),
size = self.size, size = self.size,
origin = self.origin, origin = self.origin,
comments = self.comments+[util.execution_stamp('Geom','substitute')], comments = self.comments+[util.execution_stamp('Geom','substitute')],
) )
def sort(self):
"""Sort material indices such that min(material) is located at (0,0,0)."""
a = self.material.flatten(order='F')
from_ma = pd.unique(a)
sort_idx = np.argsort(from_ma)
ma = np.unique(a)[sort_idx][np.searchsorted(from_ma,a,sorter = sort_idx)]
return Geom(material = ma.reshape(self.grid,order='F'),
size = self.size,
origin = self.origin,
comments = self.comments+[util.execution_stamp('Geom','sort')],
)
def vicinity_offset(self,vicinity=1,offset=None,trigger=[],periodic=True): def vicinity_offset(self,vicinity=1,offset=None,trigger=[],periodic=True):
""" """
Offset material index of points in the vicinity of xxx. Offset material index of points in the vicinity of xxx.
@ -809,7 +870,7 @@ class Geom:
Defaults to 1. Defaults to 1.
offset : int, optional offset : int, optional
Offset (positive or negative) to tag material indices, Offset (positive or negative) to tag material indices,
defaults to material.max() + 1. defaults to material.max()+1.
trigger : list of ints, optional trigger : list of ints, optional
List of material indices that trigger a change. List of material indices that trigger a change.
Defaults to [], meaning that any different neighbor triggers a change. Defaults to [], meaning that any different neighbor triggers a change.
@ -820,15 +881,11 @@ class Geom:
def tainted_neighborhood(stencil,trigger): def tainted_neighborhood(stencil,trigger):
me = stencil[stencil.shape[0]//2] me = stencil[stencil.shape[0]//2]
if len(trigger) == 0: return np.any(stencil != me
return np.any(stencil != me) if len(trigger) == 0 else
if me in trigger: np.in1d(stencil,np.array(list(set(trigger) - {me}))))
trigger = set(trigger)
trigger.remove(me)
trigger = list(trigger)
return np.any(np.in1d(stencil,np.array(trigger)))
offset_ = np.nanmax(self.material) if offset is None else offset offset_ = np.nanmax(self.material)+1 if offset is None else offset
mask = ndimage.filters.generic_filter(self.material, mask = ndimage.filters.generic_filter(self.material,
tainted_neighborhood, tainted_neighborhood,
size=1+2*vicinity, size=1+2*vicinity,
@ -850,28 +907,31 @@ class Geom:
---------- ----------
periodic : bool, optional periodic : bool, optional
Show boundaries across periodicity. Defaults to True. Show boundaries across periodicity. Defaults to True.
direction : string, optional directions : iterable containing str, optional
Directions to consider ('x','y','z'). Defaults to 'xyz'. Direction(s) along which the geometry is mirrored.
Valid entries are 'x', 'y', 'z'. Defaults to 'xyz'.
""" """
valid = ['x','y','z']
if not set(directions).issubset(valid):
raise ValueError(f'Invalid direction {set(directions).difference(valid)} specified.')
# offset (for connectivity)
o = [[0, self.grid[0]+1, np.prod(self.grid[:2]+1)+self.grid[0]+1, np.prod(self.grid[:2]+1)], o = [[0, self.grid[0]+1, np.prod(self.grid[:2]+1)+self.grid[0]+1, np.prod(self.grid[:2]+1)],
[0, np.prod(self.grid[:2]+1), np.prod(self.grid[:2]+1)+1, 1], [0, np.prod(self.grid[:2]+1), np.prod(self.grid[:2]+1)+1, 1],
[0, 1, self.grid[0]+1+1, self.grid[0]+1]] [0, 1, self.grid[0]+1+1, self.grid[0]+1]] # offset for connectivity
connectivity = [] connectivity = []
for i,d in enumerate(['x','y','z']): for i,d in enumerate(['x','y','z']):
if d not in directions.lower(): continue if d not in directions: continue
mask = self.material != np.roll(self.material,1,i) mask = self.material != np.roll(self.material,1,i)
for j in [0,1,2]: for j in [0,1,2]:
mask = np.concatenate((mask,np.take(mask,[0],j)*(i==j)),j) mask = np.concatenate((mask,np.take(mask,[0],j)*(i==j)),j)
if i == 0 and not periodic: mask[0,:,:] = mask[-1,:,:] = False if i == 0 and not periodic: mask[0,:,:] = mask[-1,:,:] = False
if i == 1 and not periodic: mask[:,0,:] = mask[:,-1,:] = False if i == 1 and not periodic: mask[:,0,:] = mask[:,-1,:] = False
if i == 2 and not periodic: mask[:,:,0] = mask[:,:,-1] = False if i == 2 and not periodic: mask[:,:,0] = mask[:,:,-1] = False
base_nodes = np.argwhere(mask.flatten(order='F')).reshape(-1,1) base_nodes = np.argwhere(mask.flatten(order='F')).reshape(-1,1)
connectivity.append(np.block([base_nodes + o[i][k] for k in range(4)])) connectivity.append(np.block([base_nodes + o[i][k] for k in range(4)]))
coords = grid_filters.node_coord0(self.grid,self.size,self.origin).reshape(-1,3,order='F') coords = grid_filters.node_coord0(self.grid,self.size,self.origin).reshape(-1,3,order='F')
return VTK.from_unstructuredGrid(coords,np.vstack(connectivity),'QUAD') return VTK.from_unstructured_grid(coords,np.vstack(connectivity),'QUAD')

View File

@ -1,646 +0,0 @@
import numpy as np
class Symmetry:
"""
Symmetry-related operations for crystal systems.
References
----------
https://en.wikipedia.org/wiki/Crystal_system
"""
crystal_systems = [None,'orthorhombic','tetragonal','hexagonal','cubic']
def __init__(self, system = None):
"""
Symmetry Definition.
Parameters
----------
system : {None,'orthorhombic','tetragonal','hexagonal','cubic'}, optional
Name of the crystal system. Defaults to 'None'.
"""
if system is not None and system.lower() not in self.crystal_systems:
raise KeyError(f'Crystal system "{system}" is unknown')
self.system = system.lower() if isinstance(system,str) else system
def __copy__(self):
"""Copy."""
return self.__class__(self.system)
copy = __copy__
def __repr__(self):
"""Readable string."""
return f'{self.system}'
def __eq__(self, other):
"""
Equal to other.
Parameters
----------
other : Symmetry
Symmetry to check for equality.
"""
return self.system == other.system
def __neq__(self, other):
"""
Not Equal to other.
Parameters
----------
other : Symmetry
Symmetry to check for inequality.
"""
return not self.__eq__(other)
def __cmp__(self,other):
"""
Linear ordering.
Parameters
----------
other : Symmetry
Symmetry to check for for order.
"""
myOrder = self.crystal_systems.index(self.system)
otherOrder = self.crystal_systems.index(other.system)
return (myOrder > otherOrder) - (myOrder < otherOrder)
@property
def symmetry_operations(self):
"""Symmetry operations as quaternions."""
if self.system == 'cubic':
sym_quats = [
[ 1.0, 0.0, 0.0, 0.0 ],
[ 0.0, 1.0, 0.0, 0.0 ],
[ 0.0, 0.0, 1.0, 0.0 ],
[ 0.0, 0.0, 0.0, 1.0 ],
[ 0.0, 0.0, 0.5*np.sqrt(2), 0.5*np.sqrt(2) ],
[ 0.0, 0.0, 0.5*np.sqrt(2),-0.5*np.sqrt(2) ],
[ 0.0, 0.5*np.sqrt(2), 0.0, 0.5*np.sqrt(2) ],
[ 0.0, 0.5*np.sqrt(2), 0.0, -0.5*np.sqrt(2) ],
[ 0.0, 0.5*np.sqrt(2),-0.5*np.sqrt(2), 0.0 ],
[ 0.0, -0.5*np.sqrt(2),-0.5*np.sqrt(2), 0.0 ],
[ 0.5, 0.5, 0.5, 0.5 ],
[-0.5, 0.5, 0.5, 0.5 ],
[-0.5, 0.5, 0.5, -0.5 ],
[-0.5, 0.5, -0.5, 0.5 ],
[-0.5, -0.5, 0.5, 0.5 ],
[-0.5, -0.5, 0.5, -0.5 ],
[-0.5, -0.5, -0.5, 0.5 ],
[-0.5, 0.5, -0.5, -0.5 ],
[-0.5*np.sqrt(2), 0.0, 0.0, 0.5*np.sqrt(2) ],
[ 0.5*np.sqrt(2), 0.0, 0.0, 0.5*np.sqrt(2) ],
[-0.5*np.sqrt(2), 0.0, 0.5*np.sqrt(2), 0.0 ],
[-0.5*np.sqrt(2), 0.0, -0.5*np.sqrt(2), 0.0 ],
[-0.5*np.sqrt(2), 0.5*np.sqrt(2), 0.0, 0.0 ],
[-0.5*np.sqrt(2),-0.5*np.sqrt(2), 0.0, 0.0 ],
]
elif self.system == 'hexagonal':
sym_quats = [
[ 1.0, 0.0, 0.0, 0.0 ],
[-0.5*np.sqrt(3), 0.0, 0.0, -0.5 ],
[ 0.5, 0.0, 0.0, 0.5*np.sqrt(3) ],
[ 0.0, 0.0, 0.0, 1.0 ],
[-0.5, 0.0, 0.0, 0.5*np.sqrt(3) ],
[-0.5*np.sqrt(3), 0.0, 0.0, 0.5 ],
[ 0.0, 1.0, 0.0, 0.0 ],
[ 0.0, -0.5*np.sqrt(3), 0.5, 0.0 ],
[ 0.0, 0.5, -0.5*np.sqrt(3), 0.0 ],
[ 0.0, 0.0, 1.0, 0.0 ],
[ 0.0, -0.5, -0.5*np.sqrt(3), 0.0 ],
[ 0.0, 0.5*np.sqrt(3), 0.5, 0.0 ],
]
elif self.system == 'tetragonal':
sym_quats = [
[ 1.0, 0.0, 0.0, 0.0 ],
[ 0.0, 1.0, 0.0, 0.0 ],
[ 0.0, 0.0, 1.0, 0.0 ],
[ 0.0, 0.0, 0.0, 1.0 ],
[ 0.0, 0.5*np.sqrt(2), 0.5*np.sqrt(2), 0.0 ],
[ 0.0, -0.5*np.sqrt(2), 0.5*np.sqrt(2), 0.0 ],
[ 0.5*np.sqrt(2), 0.0, 0.0, 0.5*np.sqrt(2) ],
[-0.5*np.sqrt(2), 0.0, 0.0, 0.5*np.sqrt(2) ],
]
elif self.system == 'orthorhombic':
sym_quats = [
[ 1.0,0.0,0.0,0.0 ],
[ 0.0,1.0,0.0,0.0 ],
[ 0.0,0.0,1.0,0.0 ],
[ 0.0,0.0,0.0,1.0 ],
]
else:
sym_quats = [
[ 1.0,0.0,0.0,0.0 ],
]
return np.array(sym_quats)
def in_FZ(self,rho):
"""
Check whether given Rodrigues-Frank vector falls into fundamental zone.
Fundamental zone in Rodrigues space is point symmetric around origin.
"""
if(rho.shape[-1] != 3):
raise ValueError('Input is not a Rodrigues-Frank vector field.')
rho_abs = np.abs(rho)
with np.errstate(invalid='ignore'):
# using '*'/prod for 'and'
if self.system == 'cubic':
return np.where(np.prod(np.sqrt(2)-1. >= rho_abs,axis=-1) *
(1. >= np.sum(rho_abs,axis=-1)),True,False)
elif self.system == 'hexagonal':
return np.where(np.prod(1. >= rho_abs,axis=-1) *
(2. >= np.sqrt(3)*rho_abs[...,0] + rho_abs[...,1]) *
(2. >= np.sqrt(3)*rho_abs[...,1] + rho_abs[...,0]) *
(2. >= np.sqrt(3) + rho_abs[...,2]),True,False)
elif self.system == 'tetragonal':
return np.where(np.prod(1. >= rho_abs[...,:2],axis=-1) *
(np.sqrt(2) >= rho_abs[...,0] + rho_abs[...,1]) *
(np.sqrt(2) >= rho_abs[...,2] + 1.),True,False)
elif self.system == 'orthorhombic':
return np.where(np.prod(1. >= rho_abs,axis=-1),True,False)
else:
return np.where(np.all(np.isfinite(rho_abs),axis=-1),True,False)
def in_disorientation_SST(self,rho):
"""
Check whether given Rodrigues-Frank vector (of misorientation) falls into standard stereographic triangle.
References
----------
A. Heinz and P. Neumann, Acta Crystallographica Section A 47:780-789, 1991
https://doi.org/10.1107/S0108767391006864
"""
if(rho.shape[-1] != 3):
raise ValueError('Input is not a Rodrigues-Frank vector field.')
with np.errstate(invalid='ignore'):
# using '*' for 'and'
if self.system == 'cubic':
return np.where((rho[...,0] >= rho[...,1]) * \
(rho[...,1] >= rho[...,2]) * \
(rho[...,2] >= 0),True,False)
elif self.system == 'hexagonal':
return np.where((rho[...,0] >= rho[...,1]*np.sqrt(3)) * \
(rho[...,1] >= 0) * \
(rho[...,2] >= 0),True,False)
elif self.system == 'tetragonal':
return np.where((rho[...,0] >= rho[...,1]) * \
(rho[...,1] >= 0) * \
(rho[...,2] >= 0),True,False)
elif self.system == 'orthorhombic':
return np.where((rho[...,0] >= 0) * \
(rho[...,1] >= 0) * \
(rho[...,2] >= 0),True,False)
else:
return np.ones_like(rho[...,0],dtype=bool)
#ToDo: IPF color in separate function
def in_SST(self,vector,proper=False,color=False):
"""
Check whether given vector falls into standard stereographic triangle of own symmetry.
proper considers only vectors with z >= 0, hence uses two neighboring SSTs.
Return inverse pole figure color if requested.
Bases are computed from
>>> basis = {'cubic' : np.linalg.inv(np.array([[0.,0.,1.], # direction of red
... [1.,0.,1.]/np.sqrt(2.), # direction of green
... [1.,1.,1.]/np.sqrt(3.)]).T), # direction of blue
... 'hexagonal' : np.linalg.inv(np.array([[0.,0.,1.], # direction of red
... [1.,0.,0.], # direction of green
... [np.sqrt(3.),1.,0.]/np.sqrt(4.)]).T), # direction of blue
... 'tetragonal' : np.linalg.inv(np.array([[0.,0.,1.], # direction of red
... [1.,0.,0.], # direction of green
... [1.,1.,0.]/np.sqrt(2.)]).T), # direction of blue
... 'orthorhombic': np.linalg.inv(np.array([[0.,0.,1.], # direction of red
... [1.,0.,0.], # direction of green
... [0.,1.,0.]]).T), # direction of blue
... }
"""
if(vector.shape[-1] != 3):
raise ValueError('Input is not a 3D vector field.')
if self.system == 'cubic':
basis = {'improper':np.array([ [-1. , 0. , 1. ],
[ np.sqrt(2.) , -np.sqrt(2.) , 0. ],
[ 0. , np.sqrt(3.) , 0. ] ]),
'proper':np.array([ [ 0. , -1. , 1. ],
[-np.sqrt(2.) , np.sqrt(2.) , 0. ],
[ np.sqrt(3.) , 0. , 0. ] ]),
}
elif self.system == 'hexagonal':
basis = {'improper':np.array([ [ 0. , 0. , 1. ],
[ 1. , -np.sqrt(3.) , 0. ],
[ 0. , 2. , 0. ] ]),
'proper':np.array([ [ 0. , 0. , 1. ],
[-1. , np.sqrt(3.) , 0. ],
[ np.sqrt(3.) , -1. , 0. ] ]),
}
elif self.system == 'tetragonal':
basis = {'improper':np.array([ [ 0. , 0. , 1. ],
[ 1. , -1. , 0. ],
[ 0. , np.sqrt(2.) , 0. ] ]),
'proper':np.array([ [ 0. , 0. , 1. ],
[-1. , 1. , 0. ],
[ np.sqrt(2.) , 0. , 0. ] ]),
}
elif self.system == 'orthorhombic':
basis = {'improper':np.array([ [ 0., 0., 1.],
[ 1., 0., 0.],
[ 0., 1., 0.] ]),
'proper':np.array([ [ 0., 0., 1.],
[-1., 0., 0.],
[ 0., 1., 0.] ]),
}
else: # direct exit for unspecified symmetry
if color:
return (np.ones_like(vector[...,0],bool),np.zeros_like(vector))
else:
return np.ones_like(vector[...,0],bool)
b_i = np.broadcast_to(basis['improper'],vector.shape+(3,))
if proper:
b_p = np.broadcast_to(basis['proper'], vector.shape+(3,))
improper = np.all(np.around(np.einsum('...ji,...i',b_i,vector),12)>=0.0,axis=-1,keepdims=True)
theComponents = np.where(np.broadcast_to(improper,vector.shape),
np.around(np.einsum('...ji,...i',b_i,vector),12),
np.around(np.einsum('...ji,...i',b_p,vector),12))
else:
vector_ = np.block([vector[...,0:2],np.abs(vector[...,2:3])]) # z component projects identical
theComponents = np.around(np.einsum('...ji,...i',b_i,vector_),12)
in_SST = np.all(theComponents >= 0.0,axis=-1)
if color: # have to return color array
with np.errstate(invalid='ignore',divide='ignore'):
rgb = (theComponents/np.linalg.norm(theComponents,axis=-1,keepdims=True))**0.5 # smoothen color ramps
rgb = np.minimum(1.,rgb) # limit to maximum intensity
rgb /= np.max(rgb,axis=-1,keepdims=True) # normalize to (HS)V = 1
rgb[np.broadcast_to(~in_SST.reshape(vector[...,0].shape+(1,)),vector.shape)] = 0.0
return (in_SST,rgb)
else:
return in_SST
# ******************************************************************************************
class Lattice: # ToDo: Make a subclass of Symmetry!
"""
Bravais lattice.
This contains only a mapping from Bravais lattice to symmetry
and orientation relationships. It could include twin and slip systems.
References
----------
https://en.wikipedia.org/wiki/Bravais_lattice
"""
lattices = {
'iso': {'system':None},
'triclinic':{'system':None},
'bct': {'system':'tetragonal'},
'hex': {'system':'hexagonal'},
'fcc': {'system':'cubic','c/a':1.0},
'bcc': {'system':'cubic','c/a':1.0},
}
def __init__(self,lattice,c_over_a=None):
"""
New lattice of given type.
Parameters
----------
lattice : str
Bravais lattice.
"""
self.lattice = lattice
self.symmetry = Symmetry(self.lattices[lattice]['system'])
# transition to subclass
self.system = self.symmetry.system
self.in_SST = self.symmetry.in_SST
self.in_FZ = self.symmetry.in_FZ
self.in_disorientation_SST = self.symmetry.in_disorientation_SST
def __repr__(self):
"""Report basic lattice information."""
return f'Bravais lattice {self.lattice} ({self.symmetry} crystal system)'
# Kurdjomov--Sachs orientation relationship for fcc <-> bcc transformation
# from S. Morito et al., Journal of Alloys and Compounds 577:s587-s592, 2013
# also see K. Kitahara et al., Acta Materialia 54:1279-1288, 2006
_KS = {'mapping':{'fcc':0,'bcc':1},
'planes': np.array([
[[ 1, 1, 1],[ 0, 1, 1]],
[[ 1, 1, 1],[ 0, 1, 1]],
[[ 1, 1, 1],[ 0, 1, 1]],
[[ 1, 1, 1],[ 0, 1, 1]],
[[ 1, 1, 1],[ 0, 1, 1]],
[[ 1, 1, 1],[ 0, 1, 1]],
[[ 1, -1, 1],[ 0, 1, 1]],
[[ 1, -1, 1],[ 0, 1, 1]],
[[ 1, -1, 1],[ 0, 1, 1]],
[[ 1, -1, 1],[ 0, 1, 1]],
[[ 1, -1, 1],[ 0, 1, 1]],
[[ 1, -1, 1],[ 0, 1, 1]],
[[ -1, 1, 1],[ 0, 1, 1]],
[[ -1, 1, 1],[ 0, 1, 1]],
[[ -1, 1, 1],[ 0, 1, 1]],
[[ -1, 1, 1],[ 0, 1, 1]],
[[ -1, 1, 1],[ 0, 1, 1]],
[[ -1, 1, 1],[ 0, 1, 1]],
[[ 1, 1, -1],[ 0, 1, 1]],
[[ 1, 1, -1],[ 0, 1, 1]],
[[ 1, 1, -1],[ 0, 1, 1]],
[[ 1, 1, -1],[ 0, 1, 1]],
[[ 1, 1, -1],[ 0, 1, 1]],
[[ 1, 1, -1],[ 0, 1, 1]]],dtype='float'),
'directions': np.array([
[[ -1, 0, 1],[ -1, -1, 1]],
[[ -1, 0, 1],[ -1, 1, -1]],
[[ 0, 1, -1],[ -1, -1, 1]],
[[ 0, 1, -1],[ -1, 1, -1]],
[[ 1, -1, 0],[ -1, -1, 1]],
[[ 1, -1, 0],[ -1, 1, -1]],
[[ 1, 0, -1],[ -1, -1, 1]],
[[ 1, 0, -1],[ -1, 1, -1]],
[[ -1, -1, 0],[ -1, -1, 1]],
[[ -1, -1, 0],[ -1, 1, -1]],
[[ 0, 1, 1],[ -1, -1, 1]],
[[ 0, 1, 1],[ -1, 1, -1]],
[[ 0, -1, 1],[ -1, -1, 1]],
[[ 0, -1, 1],[ -1, 1, -1]],
[[ -1, 0, -1],[ -1, -1, 1]],
[[ -1, 0, -1],[ -1, 1, -1]],
[[ 1, 1, 0],[ -1, -1, 1]],
[[ 1, 1, 0],[ -1, 1, -1]],
[[ -1, 1, 0],[ -1, -1, 1]],
[[ -1, 1, 0],[ -1, 1, -1]],
[[ 0, -1, -1],[ -1, -1, 1]],
[[ 0, -1, -1],[ -1, 1, -1]],
[[ 1, 0, 1],[ -1, -1, 1]],
[[ 1, 0, 1],[ -1, 1, -1]]],dtype='float')}
# Greninger--Troiano orientation relationship for fcc <-> bcc transformation
# from Y. He et al., Journal of Applied Crystallography 39:72-81, 2006
_GT = {'mapping':{'fcc':0,'bcc':1},
'planes': np.array([
[[ 1, 1, 1],[ 1, 0, 1]],
[[ 1, 1, 1],[ 1, 1, 0]],
[[ 1, 1, 1],[ 0, 1, 1]],
[[ -1, -1, 1],[ -1, 0, 1]],
[[ -1, -1, 1],[ -1, -1, 0]],
[[ -1, -1, 1],[ 0, -1, 1]],
[[ -1, 1, 1],[ -1, 0, 1]],
[[ -1, 1, 1],[ -1, 1, 0]],
[[ -1, 1, 1],[ 0, 1, 1]],
[[ 1, -1, 1],[ 1, 0, 1]],
[[ 1, -1, 1],[ 1, -1, 0]],
[[ 1, -1, 1],[ 0, -1, 1]],
[[ 1, 1, 1],[ 1, 1, 0]],
[[ 1, 1, 1],[ 0, 1, 1]],
[[ 1, 1, 1],[ 1, 0, 1]],
[[ -1, -1, 1],[ -1, -1, 0]],
[[ -1, -1, 1],[ 0, -1, 1]],
[[ -1, -1, 1],[ -1, 0, 1]],
[[ -1, 1, 1],[ -1, 1, 0]],
[[ -1, 1, 1],[ 0, 1, 1]],
[[ -1, 1, 1],[ -1, 0, 1]],
[[ 1, -1, 1],[ 1, -1, 0]],
[[ 1, -1, 1],[ 0, -1, 1]],
[[ 1, -1, 1],[ 1, 0, 1]]],dtype='float'),
'directions': np.array([
[[ -5,-12, 17],[-17, -7, 17]],
[[ 17, -5,-12],[ 17,-17, -7]],
[[-12, 17, -5],[ -7, 17,-17]],
[[ 5, 12, 17],[ 17, 7, 17]],
[[-17, 5,-12],[-17, 17, -7]],
[[ 12,-17, -5],[ 7,-17,-17]],
[[ -5, 12,-17],[-17, 7,-17]],
[[ 17, 5, 12],[ 17, 17, 7]],
[[-12,-17, 5],[ -7,-17, 17]],
[[ 5,-12,-17],[ 17, -7,-17]],
[[-17, -5, 12],[-17,-17, 7]],
[[ 12, 17, 5],[ 7, 17, 17]],
[[ -5, 17,-12],[-17, 17, -7]],
[[-12, -5, 17],[ -7,-17, 17]],
[[ 17,-12, -5],[ 17, -7,-17]],
[[ 5,-17,-12],[ 17,-17, -7]],
[[ 12, 5, 17],[ 7, 17, 17]],
[[-17, 12, -5],[-17, 7,-17]],
[[ -5,-17, 12],[-17,-17, 7]],
[[-12, 5,-17],[ -7, 17,-17]],
[[ 17, 12, 5],[ 17, 7, 17]],
[[ 5, 17, 12],[ 17, 17, 7]],
[[ 12, -5,-17],[ 7,-17,-17]],
[[-17,-12, 5],[-17,-7, 17]]],dtype='float')}
# Greninger--Troiano' orientation relationship for fcc <-> bcc transformation
# from Y. He et al., Journal of Applied Crystallography 39:72-81, 2006
_GTprime = {'mapping':{'fcc':0,'bcc':1},
'planes': np.array([
[[ 7, 17, 17],[ 12, 5, 17]],
[[ 17, 7, 17],[ 17, 12, 5]],
[[ 17, 17, 7],[ 5, 17, 12]],
[[ -7,-17, 17],[-12, -5, 17]],
[[-17, -7, 17],[-17,-12, 5]],
[[-17,-17, 7],[ -5,-17, 12]],
[[ 7,-17,-17],[ 12, -5,-17]],
[[ 17, -7,-17],[ 17,-12, -5]],
[[ 17,-17, -7],[ 5,-17,-12]],
[[ -7, 17,-17],[-12, 5,-17]],
[[-17, 7,-17],[-17, 12, -5]],
[[-17, 17, -7],[ -5, 17,-12]],
[[ 7, 17, 17],[ 12, 17, 5]],
[[ 17, 7, 17],[ 5, 12, 17]],
[[ 17, 17, 7],[ 17, 5, 12]],
[[ -7,-17, 17],[-12,-17, 5]],
[[-17, -7, 17],[ -5,-12, 17]],
[[-17,-17, 7],[-17, -5, 12]],
[[ 7,-17,-17],[ 12,-17, -5]],
[[ 17, -7,-17],[ 5, -12,-17]],
[[ 17,-17, -7],[ 17, -5,-12]],
[[ -7, 17,-17],[-12, 17, -5]],
[[-17, 7,-17],[ -5, 12,-17]],
[[-17, 17, -7],[-17, 5,-12]]],dtype='float'),
'directions': np.array([
[[ 0, 1, -1],[ 1, 1, -1]],
[[ -1, 0, 1],[ -1, 1, 1]],
[[ 1, -1, 0],[ 1, -1, 1]],
[[ 0, -1, -1],[ -1, -1, -1]],
[[ 1, 0, 1],[ 1, -1, 1]],
[[ 1, -1, 0],[ 1, -1, -1]],
[[ 0, 1, -1],[ -1, 1, -1]],
[[ 1, 0, 1],[ 1, 1, 1]],
[[ -1, -1, 0],[ -1, -1, 1]],
[[ 0, -1, -1],[ 1, -1, -1]],
[[ -1, 0, 1],[ -1, -1, 1]],
[[ -1, -1, 0],[ -1, -1, -1]],
[[ 0, -1, 1],[ 1, -1, 1]],
[[ 1, 0, -1],[ 1, 1, -1]],
[[ -1, 1, 0],[ -1, 1, 1]],
[[ 0, 1, 1],[ -1, 1, 1]],
[[ -1, 0, -1],[ -1, -1, -1]],
[[ -1, 1, 0],[ -1, 1, -1]],
[[ 0, -1, 1],[ -1, -1, 1]],
[[ -1, 0, -1],[ -1, 1, -1]],
[[ 1, 1, 0],[ 1, 1, 1]],
[[ 0, 1, 1],[ 1, 1, 1]],
[[ 1, 0, -1],[ 1, -1, -1]],
[[ 1, 1, 0],[ 1, 1, -1]]],dtype='float')}
# Nishiyama--Wassermann orientation relationship for fcc <-> bcc transformation
# from H. Kitahara et al., Materials Characterization 54:378-386, 2005
_NW = {'mapping':{'fcc':0,'bcc':1},
'planes': np.array([
[[ 1, 1, 1],[ 0, 1, 1]],
[[ 1, 1, 1],[ 0, 1, 1]],
[[ 1, 1, 1],[ 0, 1, 1]],
[[ -1, 1, 1],[ 0, 1, 1]],
[[ -1, 1, 1],[ 0, 1, 1]],
[[ -1, 1, 1],[ 0, 1, 1]],
[[ 1, -1, 1],[ 0, 1, 1]],
[[ 1, -1, 1],[ 0, 1, 1]],
[[ 1, -1, 1],[ 0, 1, 1]],
[[ -1, -1, 1],[ 0, 1, 1]],
[[ -1, -1, 1],[ 0, 1, 1]],
[[ -1, -1, 1],[ 0, 1, 1]]],dtype='float'),
'directions': np.array([
[[ 2, -1, -1],[ 0, -1, 1]],
[[ -1, 2, -1],[ 0, -1, 1]],
[[ -1, -1, 2],[ 0, -1, 1]],
[[ -2, -1, -1],[ 0, -1, 1]],
[[ 1, 2, -1],[ 0, -1, 1]],
[[ 1, -1, 2],[ 0, -1, 1]],
[[ 2, 1, -1],[ 0, -1, 1]],
[[ -1, -2, -1],[ 0, -1, 1]],
[[ -1, 1, 2],[ 0, -1, 1]],
[[ 2, -1, 1],[ 0, -1, 1]], #It is wrong in the paper, but matrix is correct
[[ -1, 2, 1],[ 0, -1, 1]],
[[ -1, -1, -2],[ 0, -1, 1]]],dtype='float')}
# Pitsch orientation relationship for fcc <-> bcc transformation
# from Y. He et al., Acta Materialia 53:1179-1190, 2005
_Pitsch = {'mapping':{'fcc':0,'bcc':1},
'planes': np.array([
[[ 0, 1, 0],[ -1, 0, 1]],
[[ 0, 0, 1],[ 1, -1, 0]],
[[ 1, 0, 0],[ 0, 1, -1]],
[[ 1, 0, 0],[ 0, -1, -1]],
[[ 0, 1, 0],[ -1, 0, -1]],
[[ 0, 0, 1],[ -1, -1, 0]],
[[ 0, 1, 0],[ -1, 0, -1]],
[[ 0, 0, 1],[ -1, -1, 0]],
[[ 1, 0, 0],[ 0, -1, -1]],
[[ 1, 0, 0],[ 0, -1, 1]],
[[ 0, 1, 0],[ 1, 0, -1]],
[[ 0, 0, 1],[ -1, 1, 0]]],dtype='float'),
'directions': np.array([
[[ 1, 0, 1],[ 1, -1, 1]],
[[ 1, 1, 0],[ 1, 1, -1]],
[[ 0, 1, 1],[ -1, 1, 1]],
[[ 0, 1, -1],[ -1, 1, -1]],
[[ -1, 0, 1],[ -1, -1, 1]],
[[ 1, -1, 0],[ 1, -1, -1]],
[[ 1, 0, -1],[ 1, -1, -1]],
[[ -1, 1, 0],[ -1, 1, -1]],
[[ 0, -1, 1],[ -1, -1, 1]],
[[ 0, 1, 1],[ -1, 1, 1]],
[[ 1, 0, 1],[ 1, -1, 1]],
[[ 1, 1, 0],[ 1, 1, -1]]],dtype='float')}
# Bain orientation relationship for fcc <-> bcc transformation
# from Y. He et al., Journal of Applied Crystallography 39:72-81, 2006
_Bain = {'mapping':{'fcc':0,'bcc':1},
'planes': np.array([
[[ 1, 0, 0],[ 1, 0, 0]],
[[ 0, 1, 0],[ 0, 1, 0]],
[[ 0, 0, 1],[ 0, 0, 1]]],dtype='float'),
'directions': np.array([
[[ 0, 1, 0],[ 0, 1, 1]],
[[ 0, 0, 1],[ 1, 0, 1]],
[[ 1, 0, 0],[ 1, 1, 0]]],dtype='float')}
def relation_operations(self,model):
"""
Crystallographic orientation relationships for phase transformations.
References
----------
S. Morito et al., Journal of Alloys and Compounds 577:s587-s592, 2013
https://doi.org/10.1016/j.jallcom.2012.02.004
K. Kitahara et al., Acta Materialia 54(5):1279-1288, 2006
https://doi.org/10.1016/j.actamat.2005.11.001
Y. He et al., Journal of Applied Crystallography 39:72-81, 2006
https://doi.org/10.1107/S0021889805038276
H. Kitahara et al., Materials Characterization 54(4-5):378-386, 2005
https://doi.org/10.1016/j.matchar.2004.12.015
Y. He et al., Acta Materialia 53(4):1179-1190, 2005
https://doi.org/10.1016/j.actamat.2004.11.021
"""
models={'KS':self._KS, 'GT':self._GT, 'GT_prime':self._GTprime,
'NW':self._NW, 'Pitsch': self._Pitsch, 'Bain':self._Bain}
try:
relationship = models[model]
except KeyError :
raise KeyError(f'Orientation relationship "{model}" is unknown')
if self.lattice not in relationship['mapping']:
raise ValueError(f'Relationship "{model}" not supported for lattice "{self.lattice}"')
r = {'lattice':Lattice((set(relationship['mapping'])-{self.lattice}).pop()), # target lattice
'rotations':[] }
myPlane_id = relationship['mapping'][self.lattice]
otherPlane_id = (myPlane_id+1)%2
myDir_id = myPlane_id +2
otherDir_id = otherPlane_id +2
for miller in np.hstack((relationship['planes'],relationship['directions'])):
myPlane = miller[myPlane_id]/ np.linalg.norm(miller[myPlane_id])
myDir = miller[myDir_id]/ np.linalg.norm(miller[myDir_id])
myMatrix = np.array([myDir,np.cross(myPlane,myDir),myPlane])
otherPlane = miller[otherPlane_id]/ np.linalg.norm(miller[otherPlane_id])
otherDir = miller[otherDir_id]/ np.linalg.norm(miller[otherDir_id])
otherMatrix = np.array([otherDir,np.cross(otherPlane,otherDir),otherPlane])
r['rotations'].append(np.dot(otherMatrix.T,myMatrix))
r['rotations'] = np.array(r['rotations'])
return r

File diff suppressed because it is too large Load Diff

View File

@ -7,6 +7,7 @@ import xml.etree.ElementTree as ET
import xml.dom.minidom import xml.dom.minidom
from pathlib import Path from pathlib import Path
from functools import partial from functools import partial
from collections import defaultdict
import h5py import h5py
import numpy as np import numpy as np
@ -15,12 +16,13 @@ from numpy.lib import recfunctions as rfn
import damask import damask
from . import VTK from . import VTK
from . import Table from . import Table
from . import Rotation
from . import Orientation from . import Orientation
from . import grid_filters from . import grid_filters
from . import mechanics from . import mechanics
from . import tensor
from . import util from . import util
h5py3 = h5py.__version__[0] == '3'
class Result: class Result:
""" """
@ -35,20 +37,16 @@ class Result:
Parameters Parameters
---------- ----------
fname : str fname : str or pathlib.Path
name of the DADF5 file to be opened. Name of the DADF5 file to be opened.
""" """
with h5py.File(fname,'r') as f: with h5py.File(fname,'r') as f:
try: self.version_major = f.attrs['DADF5_version_major']
self.version_major = f.attrs['DADF5_version_major'] self.version_minor = f.attrs['DADF5_version_minor']
self.version_minor = f.attrs['DADF5_version_minor']
except KeyError:
self.version_major = f.attrs['DADF5-major']
self.version_minor = f.attrs['DADF5-minor']
if self.version_major != 0 or not 2 <= self.version_minor <= 7: if self.version_major != 0 or not 7 <= self.version_minor <= 9:
raise TypeError(f'Unsupported DADF5 version {self.version_major}.{self.version_minor}') raise TypeError(f'Unsupported DADF5 version {self.version_major}.{self.version_minor}')
self.structured = 'grid' in f['geometry'].attrs.keys() self.structured = 'grid' in f['geometry'].attrs.keys()
@ -56,35 +54,33 @@ class Result:
if self.structured: if self.structured:
self.grid = f['geometry'].attrs['grid'] self.grid = f['geometry'].attrs['grid']
self.size = f['geometry'].attrs['size'] self.size = f['geometry'].attrs['size']
self.origin = f['geometry'].attrs['origin'] if self.version_major == 0 and self.version_minor >= 5 else \ self.origin = f['geometry'].attrs['origin']
np.zeros(3)
r=re.compile('inc[0-9]+') r=re.compile('inc[0-9]+')
increments_unsorted = {int(i[3:]):i for i in f.keys() if r.match(i)} increments_unsorted = {int(i[3:]):i for i in f.keys() if r.match(i)}
self.increments = [increments_unsorted[i] for i in sorted(increments_unsorted)] self.increments = [increments_unsorted[i] for i in sorted(increments_unsorted)]
self.times = [round(f[i].attrs['time/s'],12) for i in self.increments] self.times = [round(f[i].attrs['time/s'],12) for i in self.increments]
self.Nmaterialpoints, self.Nconstituents = np.shape(f['mapping/cellResults/constituent']) self.N_materialpoints, self.N_constituents = np.shape(f['mapping/phase'])
self.materialpoints = [m.decode() for m in np.unique(f['mapping/cellResults/materialpoint']['Name'])]
self.constituents = [c.decode() for c in np.unique(f['mapping/cellResults/constituent'] ['Name'])]
# faster, but does not work with (deprecated) DADF5_postResults self.homogenizations = [m.decode() for m in np.unique(f['mapping/homogenization']['Name'])]
#self.materialpoints = [m for m in f['inc0/materialpoint']] self.phases = [c.decode() for c in np.unique(f['mapping/phase']['Name'])]
#self.constituents = [c for c in f['inc0/constituent']]
self.con_physics = [] self.out_type_ph = []
for c in self.constituents: for c in self.phases:
self.con_physics += f['/'.join([self.increments[0],'constituent',c])].keys() self.out_type_ph += f['/'.join([self.increments[0],'phase',c])].keys()
self.con_physics = list(set(self.con_physics)) # make unique self.out_type_ph = list(set(self.out_type_ph)) # make unique
self.mat_physics = [] self.out_type_ho = []
for m in self.materialpoints: for m in self.homogenizations:
self.mat_physics += f['/'.join([self.increments[0],'materialpoint',m])].keys() self.out_type_ho += f['/'.join([self.increments[0],'homogenization',m])].keys()
self.mat_physics = list(set(self.mat_physics)) # make unique self.out_type_ho = list(set(self.out_type_ho)) # make unique
self.selection = {'increments': self.increments, self.selection = {'increments': self.increments,
'constituents': self.constituents,'materialpoints': self.materialpoints, 'phases': self.phases,
'con_physics': self.con_physics, 'mat_physics': self.mat_physics 'homogenizations': self.homogenizations,
'out_type_ph': self.out_type_ph,
'out_type_ho': self.out_type_ho
} }
self.fname = Path(fname).absolute() self.fname = Path(fname).absolute()
@ -93,7 +89,7 @@ class Result:
def __repr__(self): def __repr__(self):
"""Show selected data.""" """Show summary of file content."""
all_selected_increments = self.selection['increments'] all_selected_increments = self.selection['increments']
self.pick('increments',all_selected_increments[0:1]) self.pick('increments',all_selected_increments[0:1])
@ -117,14 +113,18 @@ class Result:
Parameters Parameters
---------- ----------
action : str action : str
select from 'set', 'add', and 'del' Select from 'set', 'add', and 'del'.
what : str what : str
attribute to change (must be from self.selection) Attribute to change (must be from self.selection).
datasets : list of str or bool datasets : list of str or bool
name of datasets as list, supports ? and * wildcards. Name of datasets as list, supports ? and * wildcards.
True is equivalent to [*], False is equivalent to [] True is equivalent to [*], False is equivalent to []
""" """
def natural_sort(key):
convert = lambda text: int(text) if text.isdigit() else text
return [ convert(c) for c in re.split('([0-9]+)', key) ]
# allow True/False and string arguments # allow True/False and string arguments
if datasets is True: if datasets is True:
datasets = ['*'] datasets = ['*']
@ -158,25 +158,60 @@ class Result:
self.selection[what] = valid self.selection[what] = valid
elif action == 'add': elif action == 'add':
add = existing.union(valid) add = existing.union(valid)
add_sorted = sorted(add, key=lambda x: int("".join([i for i in x if i.isdigit()]))) add_sorted = sorted(add, key=natural_sort)
self.selection[what] = add_sorted self.selection[what] = add_sorted
elif action == 'del': elif action == 'del':
diff = existing.difference(valid) diff = existing.difference(valid)
diff_sorted = sorted(diff, key=lambda x: int("".join([i for i in x if i.isdigit()]))) diff_sorted = sorted(diff, key=natural_sort)
self.selection[what] = diff_sorted self.selection[what] = diff_sorted
def _get_attribute(self,path,attr):
"""
Get the attribute of a dataset.
Parameters
----------
Path : str
Path to the dataset.
attr : str
Name of the attribute to get.
Returns
-------
attr at path, str or None.
The requested attribute, None if not found.
"""
with h5py.File(self.fname,'r') as f:
try:
return f[path].attrs[attr] if h5py3 else f[path].attrs[attr].decode()
except KeyError:
return None
def allow_modification(self): def allow_modification(self):
print(util.bcolors().WARNING+util.bcolors().BOLD+ """Allow to overwrite existing data."""
'Warning: Modification of existing datasets allowed!'+ print(util.warn('Warning: Modification of existing datasets allowed!'))
util.bcolors().ENDC)
self._allow_modification = True self._allow_modification = True
def disallow_modification(self): def disallow_modification(self):
"""Disllow to overwrite existing data (default case)."""
self._allow_modification = False self._allow_modification = False
def incs_in_range(self,start,end): def incs_in_range(self,start,end):
"""
Select all increments within a given range.
Parameters
----------
start : int or str
Start increment.
end : int or str
End increment.
"""
selected = [] selected = []
for i,inc in enumerate([int(i[3:]) for i in self.increments]): for i,inc in enumerate([int(i[3:]) for i in self.increments]):
s,e = map(lambda x: int(x[3:] if isinstance(x,str) and x.startswith('inc') else x), (start,end)) s,e = map(lambda x: int(x[3:] if isinstance(x,str) and x.startswith('inc') else x), (start,end))
@ -186,6 +221,17 @@ class Result:
def times_in_range(self,start,end): def times_in_range(self,start,end):
"""
Select all increments within a given time range.
Parameters
----------
start : float
Time of start increment.
end : float
Time of end increment.
"""
selected = [] selected = []
for i,time in enumerate(self.times): for i,time in enumerate(self.times):
if start <= time <= end: if start <= time <= end:
@ -200,7 +246,7 @@ class Result:
Parameters Parameters
---------- ----------
what : str what : str
attribute to change (must be from self.selection) Attribute to change (must be from self.selection).
""" """
datasets = self.selection[what] datasets = self.selection[what]
@ -280,43 +326,41 @@ class Result:
for path_old in self.get_dataset_location(name_old): for path_old in self.get_dataset_location(name_old):
path_new = os.path.join(os.path.dirname(path_old),name_new) path_new = os.path.join(os.path.dirname(path_old),name_new)
f[path_new] = f[path_old] f[path_new] = f[path_old]
f[path_new].attrs['Renamed'] = 'Original name: {}'.encode() f[path_new].attrs['Renamed'] = f'Original name: {name_old}' if h5py3 else \
f'Original name: {name_old}'.encode()
del f[path_old] del f[path_old]
else: else:
raise PermissionError('Rename operation not permitted') raise PermissionError('Rename operation not permitted')
# def datamerger(regular expression to filter groups into one copy) def place(self,datasets,constituent=0,tagged=False,split=True):
def place(self,datasets,component=0,tagged=False,split=True):
""" """
Distribute datasets onto geometry and return Table or (split) dictionary of Tables. Distribute datasets onto geometry and return Table or (split) dictionary of Tables.
Must not mix nodal end cell data. Must not mix nodal end cell data.
Only data within Only data within
- inc?????/constituent/*_*/* - inc*/phase/*/*
- inc?????/materialpoint/*_*/* - inc*/homogenization/*/*
- inc?????/geometry/* - inc*/geometry/*
are considered. are considered.
Parameters Parameters
---------- ----------
datasets : iterable or str datasets : iterable or str
component : int constituent : int
homogenization component to consider for constituent data Constituent to consider for phase data
tagged : bool tagged : bool
tag Table.column name with '#component' tag Table.column name with '#constituent'
defaults to False defaults to False
split : bool split : bool
split Table by increment and return dictionary of Tables split Table by increment and return dictionary of Tables
defaults to True defaults to True
""" """
sets = datasets if hasattr(datasets,'__iter__') and not isinstance(datasets,str) \ sets = datasets if hasattr(datasets,'__iter__') and not isinstance(datasets,str) else \
else [datasets] [datasets]
tag = f'#{component}' if tagged else '' tag = f'#{constituent}' if tagged else ''
tbl = {} if split else None tbl = {} if split else None
inGeom = {} inGeom = {}
inData = {} inData = {}
@ -328,15 +372,15 @@ class Result:
key = '/'.join([prop,name+tag]) key = '/'.join([prop,name+tag])
if key not in inGeom: if key not in inGeom:
if prop == 'geometry': if prop == 'geometry':
inGeom[key] = inData[key] = np.arange(self.Nmaterialpoints) inGeom[key] = inData[key] = np.arange(self.N_materialpoints)
elif prop == 'constituent': elif prop == 'phase':
inGeom[key] = np.where(f['mapping/cellResults/constituent'][:,component]['Name'] == str.encode(name))[0] inGeom[key] = np.where(f['mapping/phase'][:,constituent]['Name'] == str.encode(name))[0]
inData[key] = f['mapping/cellResults/constituent'][inGeom[key],component]['Position'] inData[key] = f['mapping/phase'][inGeom[key],constituent]['Position']
else: elif prop == 'homogenization':
inGeom[key] = np.where(f['mapping/cellResults/materialpoint']['Name'] == str.encode(name))[0] inGeom[key] = np.where(f['mapping/homogenization']['Name'] == str.encode(name))[0]
inData[key] = f['mapping/cellResults/materialpoint'][inGeom[key].tolist()]['Position'] inData[key] = f['mapping/homogenization'][inGeom[key].tolist()]['Position']
shape = np.shape(f[path]) shape = np.shape(f[path])
data = np.full((self.Nmaterialpoints,) + (shape[1:] if len(shape)>1 else (1,)), data = np.full((self.N_materialpoints,) + (shape[1:] if len(shape)>1 else (1,)),
np.nan, np.nan,
dtype=np.dtype(f[path])) dtype=np.dtype(f[path]))
data[inGeom[key]] = (f[path] if len(shape)>1 else np.expand_dims(f[path],1))[inData[key]] data[inGeom[key]] = (f[path] if len(shape)>1 else np.expand_dims(f[path],1))[inData[key]]
@ -345,12 +389,12 @@ class Result:
try: try:
tbl[inc].add(path,data) tbl[inc].add(path,data)
except KeyError: except KeyError:
tbl[inc] = Table(data.reshape(self.Nmaterialpoints,-1),{path:data.shape[1:]}) tbl[inc] = Table(data.reshape(self.N_materialpoints,-1),{path:data.shape[1:]})
else: else:
try: try:
tbl.add(path,data) tbl.add(path,data)
except AttributeError: except AttributeError:
tbl = Table(data.reshape(self.Nmaterialpoints,-1),{path:data.shape[1:]}) tbl = Table(data.reshape(self.N_materialpoints,-1),{path:data.shape[1:]})
return tbl return tbl
@ -360,8 +404,8 @@ class Result:
Return groups that contain all requested datasets. Return groups that contain all requested datasets.
Only groups within Only groups within
- inc*/constituent/*/* - inc*/phase/*/*
- inc*/materialpoint/*/* - inc*/homogenization/*/*
- inc*/geometry/* - inc*/geometry/*
are considered as they contain user-relevant data. are considered as they contain user-relevant data.
@ -393,7 +437,7 @@ class Result:
with h5py.File(self.fname,'r') as f: with h5py.File(self.fname,'r') as f:
for i in self.iterate('increments'): for i in self.iterate('increments'):
for o,p in zip(['constituents','materialpoints'],['con_physics','mat_physics']): for o,p in zip(['phases','homogenizations'],['out_type_ph','out_type_ho']):
for oo in self.iterate(o): for oo in self.iterate(o):
for pp in self.iterate(p): for pp in self.iterate(p):
group = '/'.join([i,o[:-1],oo,pp]) # o[:-1]: plural/singular issue group = '/'.join([i,o[:-1],oo,pp]) # o[:-1]: plural/singular issue
@ -412,7 +456,7 @@ class Result:
with h5py.File(self.fname,'r') as f: with h5py.File(self.fname,'r') as f:
for i in self.iterate('increments'): for i in self.iterate('increments'):
message += f'\n{i} ({self.times[self.increments.index(i)]}s)\n' message += f'\n{i} ({self.times[self.increments.index(i)]}s)\n'
for o,p in zip(['constituents','materialpoints'],['con_physics','mat_physics']): for o,p in zip(['phases','homogenizations'],['out_type_ph','out_type_ho']):
message += f' {o[:-1]}\n' message += f' {o[:-1]}\n'
for oo in self.iterate(o): for oo in self.iterate(o):
message += f' {oo}\n' message += f' {oo}\n'
@ -422,8 +466,13 @@ class Result:
for d in f[group].keys(): for d in f[group].keys():
try: try:
dataset = f['/'.join([group,d])] dataset = f['/'.join([group,d])]
unit = f" / {dataset.attrs['Unit'].decode()}" if 'Unit' in dataset.attrs else '' if 'Unit' in dataset.attrs:
description = dataset.attrs['Description'].decode() unit = f" / {dataset.attrs['Unit']}" if h5py3 else \
f" / {dataset.attrs['Unit'].decode()}"
else:
unit = ''
description = dataset.attrs['Description'] if h5py3 else \
dataset.attrs['Description'].decode()
message += f' {d}{unit}: {description}\n' message += f' {d}{unit}: {description}\n'
except KeyError: except KeyError:
pass pass
@ -441,7 +490,7 @@ class Result:
path.append(k) path.append(k)
except KeyError: except KeyError:
pass pass
for o,p in zip(['constituents','materialpoints'],['con_physics','mat_physics']): for o,p in zip(['phases','homogenizations'],['out_type_ph','out_type_ho']):
for oo in self.iterate(o): for oo in self.iterate(o):
for pp in self.iterate(p): for pp in self.iterate(p):
k = '/'.join([i,o[:-1],oo,pp,label]) k = '/'.join([i,o[:-1],oo,pp,label])
@ -453,19 +502,6 @@ class Result:
return path return path
def get_constituent_ID(self,c=0):
"""Pointwise constituent ID."""
with h5py.File(self.fname,'r') as f:
names = f['/mapping/cellResults/constituent']['Name'][:,c].astype('str')
return np.array([int(n.split('_')[0]) for n in names.tolist()],dtype=np.int32)
def get_crystal_structure(self): # ToDo: extension to multi constituents/phase
"""Info about the crystal structure."""
with h5py.File(self.fname,'r') as f:
return f[self.get_dataset_location('orientation')[0]].attrs['Lattice'].astype('str') # np.bytes_ to string
def enable_user_function(self,func): def enable_user_function(self,func):
globals()[func.__name__]=func globals()[func.__name__]=func
print(f'Function {func.__name__} enabled in add_calculation.') print(f'Function {func.__name__} enabled in add_calculation.')
@ -476,9 +512,21 @@ class Result:
Dataset for all points/cells. Dataset for all points/cells.
If more than one path is given, the dataset is composed of the individual contributions. If more than one path is given, the dataset is composed of the individual contributions.
Parameters
----------
path : list of strings
The name of the datasets to consider.
c : int, optional
The constituent to consider. Defaults to 0.
plain: boolean, optional
Convert into plain numpy datatype.
Only relevant for compound datatype, e.g. the orientation.
Defaults to False.
""" """
with h5py.File(self.fname,'r') as f: with h5py.File(self.fname,'r') as f:
shape = (self.Nmaterialpoints,) + np.shape(f[path[0]])[1:] shape = (self.N_materialpoints,) + np.shape(f[path[0]])[1:]
if len(shape) == 1: shape = shape +(1,) if len(shape) == 1: shape = shape +(1,)
dataset = np.full(shape,np.nan,dtype=np.dtype(f[path[0]])) dataset = np.full(shape,np.nan,dtype=np.dtype(f[path[0]]))
for pa in path: for pa in path:
@ -488,17 +536,17 @@ class Result:
dataset = np.array(f[pa]) dataset = np.array(f[pa])
continue continue
p = np.where(f['mapping/cellResults/constituent'][:,c]['Name'] == str.encode(label))[0] p = np.where(f['mapping/phase'][:,c]['Name'] == str.encode(label))[0]
if len(p)>0: if len(p)>0:
u = (f['mapping/cellResults/constituent']['Position'][p,c]) u = (f['mapping/phase']['Position'][p,c])
a = np.array(f[pa]) a = np.array(f[pa])
if len(a.shape) == 1: if len(a.shape) == 1:
a=a.reshape([a.shape[0],1]) a=a.reshape([a.shape[0],1])
dataset[p,:] = a[u,:] dataset[p,:] = a[u,:]
p = np.where(f['mapping/cellResults/materialpoint']['Name'] == str.encode(label))[0] p = np.where(f['mapping/homogenization']['Name'] == str.encode(label))[0]
if len(p)>0: if len(p)>0:
u = (f['mapping/cellResults/materialpoint']['Position'][p.tolist()]) u = (f['mapping/homogenization']['Position'][p.tolist()])
a = np.array(f[pa]) a = np.array(f[pa])
if len(a.shape) == 1: if len(a.shape) == 1:
a=a.reshape([a.shape[0],1]) a=a.reshape([a.shape[0],1])
@ -589,19 +637,19 @@ class Result:
@staticmethod @staticmethod
def _add_Cauchy(P,F): def _add_stress_Cauchy(P,F):
return { return {
'data': mechanics.Cauchy(P['data'],F['data']), 'data': mechanics.stress_Cauchy(P['data'],F['data']),
'label': 'sigma', 'label': 'sigma',
'meta': { 'meta': {
'Unit': P['meta']['Unit'], 'Unit': P['meta']['Unit'],
'Description': "Cauchy stress calculated " 'Description': "Cauchy stress calculated "
f"from {P['label']} ({P['meta']['Description']})" f"from {P['label']} ({P['meta']['Description']})"
f" and {F['label']} ({F['meta']['Description']})", f" and {F['label']} ({F['meta']['Description']})",
'Creator': 'add_Cauchy' 'Creator': 'add_stress_Cauchy'
} }
} }
def add_Cauchy(self,P='P',F='F'): def add_stress_Cauchy(self,P='P',F='F'):
""" """
Add Cauchy stress calculated from first Piola-Kirchhoff stress and deformation gradient. Add Cauchy stress calculated from first Piola-Kirchhoff stress and deformation gradient.
@ -613,7 +661,7 @@ class Result:
Label of the dataset containing the deformation gradient. Defaults to F. Label of the dataset containing the deformation gradient. Defaults to F.
""" """
self._add_generic_pointwise(self._add_Cauchy,{'P':P,'F':F}) self._add_generic_pointwise(self._add_stress_Cauchy,{'P':P,'F':F})
@staticmethod @staticmethod
@ -643,7 +691,7 @@ class Result:
@staticmethod @staticmethod
def _add_deviator(T): def _add_deviator(T):
return { return {
'data': mechanics.deviatoric_part(T['data']), 'data': tensor.deviatoric(T['data']),
'label': f"s_{T['label']}", 'label': f"s_{T['label']}",
'meta': { 'meta': {
'Unit': T['meta']['Unit'], 'Unit': T['meta']['Unit'],
@ -674,7 +722,7 @@ class Result:
label,p = 'Minimum',0 label,p = 'Minimum',0
return { return {
'data': mechanics.eigenvalues(T_sym['data'])[:,p], 'data': tensor.eigenvalues(T_sym['data'])[:,p],
'label': f"lambda_{eigenvalue}({T_sym['label']})", 'label': f"lambda_{eigenvalue}({T_sym['label']})",
'meta' : { 'meta' : {
'Unit': T_sym['meta']['Unit'], 'Unit': T_sym['meta']['Unit'],
@ -706,7 +754,7 @@ class Result:
elif eigenvalue == 'min': elif eigenvalue == 'min':
label,p = 'minimum',0 label,p = 'minimum',0
return { return {
'data': mechanics.eigenvectors(T_sym['data'])[:,p], 'data': tensor.eigenvectors(T_sym['data'])[:,p],
'label': f"v_{eigenvalue}({T_sym['label']})", 'label': f"v_{eigenvalue}({T_sym['label']})",
'meta' : { 'meta' : {
'Unit': '1', 'Unit': '1',
@ -734,9 +782,11 @@ class Result:
@staticmethod @staticmethod
def _add_IPF_color(q,l): def _add_IPF_color(q,l):
m = util.scale_to_coprime(np.array(l)) m = util.scale_to_coprime(np.array(l))
try:
o = Orientation(Rotation(rfn.structured_to_unstructured(q['data'])), lattice = {'fcc':'cF','bcc':'cI','hex':'hP'}[q['meta']['Lattice']]
lattice = q['meta']['Lattice']) except KeyError:
lattice = q['meta']['Lattice']
o = Orientation(rotation = (rfn.structured_to_unstructured(q['data'])),lattice=lattice)
return { return {
'data': np.uint8(o.IPF_color(l)*255), 'data': np.uint8(o.IPF_color(l)*255),
@ -788,20 +838,27 @@ class Result:
@staticmethod @staticmethod
def _add_Mises(T_sym): def _add_equivalent_Mises(T_sym,kind):
t = 'strain' if T_sym['meta']['Unit'] == '1' else \ k = kind
'stress' if k is None:
if T_sym['meta']['Unit'] == '1':
k = 'strain'
elif T_sym['meta']['Unit'] == 'Pa':
k = 'stress'
if k not in ['stress', 'strain']:
raise ValueError('invalid von Mises kind {kind}')
return { return {
'data': (mechanics.Mises_strain if t=='strain' else mechanics.Mises_stress)(T_sym['data']), 'data': (mechanics.equivalent_strain_Mises if k=='strain' else \
mechanics.equivalent_stress_Mises)(T_sym['data']),
'label': f"{T_sym['label']}_vM", 'label': f"{T_sym['label']}_vM",
'meta': { 'meta': {
'Unit': T_sym['meta']['Unit'], 'Unit': T_sym['meta']['Unit'],
'Description': f"Mises equivalent {t} of {T_sym['label']} ({T_sym['meta']['Description']})", 'Description': f"Mises equivalent {k} of {T_sym['label']} ({T_sym['meta']['Description']})",
'Creator': 'add_Mises' 'Creator': 'add_Mises'
} }
} }
def add_Mises(self,T_sym): def add_equivalent_Mises(self,T_sym,kind=None):
""" """
Add the equivalent Mises stress or strain of a symmetric tensor. Add the equivalent Mises stress or strain of a symmetric tensor.
@ -809,9 +866,12 @@ class Result:
---------- ----------
T_sym : str T_sym : str
Label of symmetric tensorial stress or strain dataset. Label of symmetric tensorial stress or strain dataset.
kind : {'stress', 'strain', None}, optional
Kind of the von Mises equivalent. Defaults to None, in which case
it is selected based on the unit of the dataset ('1' -> strain, 'Pa' -> stress').
""" """
self._add_generic_pointwise(self._add_Mises,{'T_sym':T_sym}) self._add_generic_pointwise(self._add_equivalent_Mises,{'T_sym':T_sym},{'kind':kind})
@staticmethod @staticmethod
@ -853,19 +913,19 @@ class Result:
@staticmethod @staticmethod
def _add_PK2(P,F): def _add_stress_second_Piola_Kirchhoff(P,F):
return { return {
'data': mechanics.PK2(P['data'],F['data']), 'data': mechanics.stress_second_Piola_Kirchhoff(P['data'],F['data']),
'label': 'S', 'label': 'S',
'meta': { 'meta': {
'Unit': P['meta']['Unit'], 'Unit': P['meta']['Unit'],
'Description': "2. Piola-Kirchhoff stress calculated " 'Description': "2. Piola-Kirchhoff stress calculated "
f"from {P['label']} ({P['meta']['Description']})" f"from {P['label']} ({P['meta']['Description']})"
f" and {F['label']} ({F['meta']['Description']})", f" and {F['label']} ({F['meta']['Description']})",
'Creator': 'add_PK2' 'Creator': 'add_stress_second_Piola_Kirchhoff'
} }
} }
def add_PK2(self,P='P',F='F'): def add_stress_second_Piola_Kirchhoff(self,P='P',F='F'):
""" """
Add second Piola-Kirchhoff stress calculated from first Piola-Kirchhoff stress and deformation gradient. Add second Piola-Kirchhoff stress calculated from first Piola-Kirchhoff stress and deformation gradient.
@ -877,59 +937,64 @@ class Result:
Label of deformation gradient dataset. Defaults to F. Label of deformation gradient dataset. Defaults to F.
""" """
self._add_generic_pointwise(self._add_PK2,{'P':P,'F':F}) self._add_generic_pointwise(self._add_stress_second_Piola_Kirchhoff,{'P':P,'F':F})
# The add_pole functionality needs discussion.
# The new Crystal object can perform such a calculation but the outcome depends on the lattice parameters
# as well as on whether a direction or plane is concerned (see the DAMASK_examples/pole_figure notebook).
# Below code appears to be too simplistic.
# @staticmethod
# def _add_pole(q,p,polar):
# pole = np.array(p)
# unit_pole = pole/np.linalg.norm(pole)
# m = util.scale_to_coprime(pole)
# rot = Rotation(q['data'].view(np.double).reshape(-1,4))
#
# rotatedPole = rot @ np.broadcast_to(unit_pole,rot.shape+(3,)) # rotate pole according to crystal orientation
# xy = rotatedPole[:,0:2]/(1.+abs(unit_pole[2])) # stereographic projection
# coords = xy if not polar else \
# np.block([np.sqrt(xy[:,0:1]*xy[:,0:1]+xy[:,1:2]*xy[:,1:2]),np.arctan2(xy[:,1:2],xy[:,0:1])])
# return {
# 'data': coords,
# 'label': 'p^{}_[{} {} {})'.format(u'rφ' if polar else 'xy',*m),
# 'meta' : {
# 'Unit': '1',
# 'Description': '{} coordinates of stereographic projection of pole (direction/plane) in crystal frame'\
# .format('Polar' if polar else 'Cartesian'),
# 'Creator': 'add_pole'
# }
# }
# def add_pole(self,q,p,polar=False):
# """
# Add coordinates of stereographic projection of given pole in crystal frame.
#
# Parameters
# ----------
# q : str
# Label of the dataset containing the crystallographic orientation as quaternions.
# p : numpy.array of shape (3)
# Crystallographic direction or plane.
# polar : bool, optional
# Give pole in polar coordinates. Defaults to False.
#
# """
# self._add_generic_pointwise(self._add_pole,{'q':q},{'p':p,'polar':polar})
@staticmethod @staticmethod
def _add_pole(q,p,polar): def _add_rotation(F):
pole = np.array(p)
unit_pole = pole/np.linalg.norm(pole)
m = util.scale_to_coprime(pole)
rot = Rotation(q['data'].view(np.double).reshape(-1,4))
rotatedPole = rot @ np.broadcast_to(unit_pole,rot.shape+(3,)) # rotate pole according to crystal orientation
xy = rotatedPole[:,0:2]/(1.+abs(unit_pole[2])) # stereographic projection
coords = xy if not polar else \
np.block([np.sqrt(xy[:,0:1]*xy[:,0:1]+xy[:,1:2]*xy[:,1:2]),np.arctan2(xy[:,1:2],xy[:,0:1])])
return { return {
'data': coords, 'data': mechanics.rotation(F['data']).as_matrix(),
'label': 'p^{}_[{} {} {})'.format(u'' if polar else 'xy',*m),
'meta' : {
'Unit': '1',
'Description': '{} coordinates of stereographic projection of pole (direction/plane) in crystal frame'\
.format('Polar' if polar else 'Cartesian'),
'Creator': 'add_pole'
}
}
def add_pole(self,q,p,polar=False):
"""
Add coordinates of stereographic projection of given pole in crystal frame.
Parameters
----------
q : str
Label of the dataset containing the crystallographic orientation as quaternions.
p : numpy.array of shape (3)
Crystallographic direction or plane.
polar : bool, optional
Give pole in polar coordinates. Defaults to False.
"""
self._add_generic_pointwise(self._add_pole,{'q':q},{'p':p,'polar':polar})
@staticmethod
def _add_rotational_part(F):
return {
'data': mechanics.rotational_part(F['data']),
'label': f"R({F['label']})", 'label': f"R({F['label']})",
'meta': { 'meta': {
'Unit': F['meta']['Unit'], 'Unit': F['meta']['Unit'],
'Description': f"Rotational part of {F['label']} ({F['meta']['Description']})", 'Description': f"Rotational part of {F['label']} ({F['meta']['Description']})",
'Creator': 'add_rotational_part' 'Creator': 'add_rotation'
} }
} }
def add_rotational_part(self,F): def add_rotation(self,F):
""" """
Add rotational part of a deformation gradient. Add rotational part of a deformation gradient.
@ -939,13 +1004,13 @@ class Result:
Label of deformation gradient dataset. Label of deformation gradient dataset.
""" """
self._add_generic_pointwise(self._add_rotational_part,{'F':F}) self._add_generic_pointwise(self._add_rotation,{'F':F})
@staticmethod @staticmethod
def _add_spherical(T): def _add_spherical(T):
return { return {
'data': mechanics.spherical_part(T['data']), 'data': tensor.spherical(T['data'],False),
'label': f"p_{T['label']}", 'label': f"p_{T['label']}",
'meta': { 'meta': {
'Unit': T['meta']['Unit'], 'Unit': T['meta']['Unit'],
@ -967,21 +1032,21 @@ class Result:
@staticmethod @staticmethod
def _add_strain_tensor(F,t,m): def _add_strain(F,t,m):
return { return {
'data': mechanics.strain_tensor(F['data'],t,m), 'data': mechanics.strain(F['data'],t,m),
'label': f"epsilon_{t}^{m}({F['label']})", 'label': f"epsilon_{t}^{m}({F['label']})",
'meta': { 'meta': {
'Unit': F['meta']['Unit'], 'Unit': F['meta']['Unit'],
'Description': f"Strain tensor of {F['label']} ({F['meta']['Description']})", 'Description': f"Strain tensor of {F['label']} ({F['meta']['Description']})",
'Creator': 'add_strain_tensor' 'Creator': 'add_strain'
} }
} }
def add_strain_tensor(self,F='F',t='V',m=0.0): def add_strain(self,F='F',t='V',m=0.0):
""" """
Add strain tensor of a deformation gradient. Add strain tensor of a deformation gradient.
For details refer to damask.mechanics.strain_tensor For details refer to damask.mechanics.strain
Parameters Parameters
---------- ----------
@ -994,13 +1059,13 @@ class Result:
Order of the strain calculation. Defaults to 0.0. Order of the strain calculation. Defaults to 0.0.
""" """
self._add_generic_pointwise(self._add_strain_tensor,{'F':F},{'t':t,'m':m}) self._add_generic_pointwise(self._add_strain,{'F':F},{'t':t,'m':m})
@staticmethod @staticmethod
def _add_stretch_tensor(F,t): def _add_stretch_tensor(F,t):
return { return {
'data': (mechanics.left_stretch if t.upper() == 'V' else mechanics.right_stretch)(F['data']), 'data': (mechanics.stretch_left if t.upper() == 'V' else mechanics.stretch_right)(F['data']),
'label': f"{t}({F['label']})", 'label': f"{t}({F['label']})",
'meta': { 'meta': {
'Unit': F['meta']['Unit'], 'Unit': F['meta']['Unit'],
@ -1035,7 +1100,7 @@ class Result:
loc = f[group+'/'+label] loc = f[group+'/'+label]
datasets_in[arg]={'data' :loc[()], datasets_in[arg]={'data' :loc[()],
'label':label, 'label':label,
'meta': {k:v.decode() for k,v in loc.attrs.items()}} 'meta': {k:(v if h5py3 else v.decode()) for k,v in loc.attrs.items()}}
lock.release() lock.release()
r = func(**datasets_in,**args) r = func(**datasets_in,**args)
return [group,r] return [group,r]
@ -1080,17 +1145,21 @@ class Result:
if self._allow_modification and result[0]+'/'+result[1]['label'] in f: if self._allow_modification and result[0]+'/'+result[1]['label'] in f:
dataset = f[result[0]+'/'+result[1]['label']] dataset = f[result[0]+'/'+result[1]['label']]
dataset[...] = result[1]['data'] dataset[...] = result[1]['data']
dataset.attrs['Overwritten'] = 'Yes'.encode() dataset.attrs['Overwritten'] = 'Yes' if h5py3 else \
'Yes'.encode()
else: else:
dataset = f[result[0]].create_dataset(result[1]['label'],data=result[1]['data']) dataset = f[result[0]].create_dataset(result[1]['label'],data=result[1]['data'])
now = datetime.datetime.now().astimezone() now = datetime.datetime.now().astimezone()
dataset.attrs['Created'] = now.strftime('%Y-%m-%d %H:%M:%S%z').encode() dataset.attrs['Created'] = now.strftime('%Y-%m-%d %H:%M:%S%z') if h5py3 else \
now.strftime('%Y-%m-%d %H:%M:%S%z').encode()
for l,v in result[1]['meta'].items(): for l,v in result[1]['meta'].items():
dataset.attrs[l]=v.encode() dataset.attrs[l]=v if h5py3 else v.encode()
creator = f"damask.Result.{dataset.attrs['Creator'].decode()} v{damask.version}" creator = dataset.attrs['Creator'] if h5py3 else \
dataset.attrs['Creator'] = creator.encode() dataset.attrs['Creator'].decode()
dataset.attrs['Creator'] = f"damask.Result.{creator} v{damask.version}" if h5py3 else \
f"damask.Result.{creator} v{damask.version}".encode()
except (OSError,RuntimeError) as err: except (OSError,RuntimeError) as err:
print(f'Could not add dataset: {err}.') print(f'Could not add dataset: {err}.')
@ -1107,7 +1176,7 @@ class Result:
This works only for scalar, 3-vector and 3x3-tensor data. This works only for scalar, 3-vector and 3x3-tensor data.
Selection is not taken into account. Selection is not taken into account.
""" """
if len(self.constituents) != 1 or not self.structured: if self.N_constituents != 1 or not self.structured:
raise NotImplementedError('XDMF only available for grid results with 1 constituent.') raise NotImplementedError('XDMF only available for grid results with 1 constituent.')
xdmf=ET.Element('Xdmf') xdmf=ET.Element('Xdmf')
@ -1158,9 +1227,11 @@ class Result:
delta.text="{} {} {}".format(*(self.size/self.grid)) delta.text="{} {} {}".format(*(self.size/self.grid))
type_map = defaultdict(lambda:'Matrix', ( ((),'Scalar'), ((3,),'Vector'), ((3,3),'Tensor')) )
with h5py.File(self.fname,'r') as f: with h5py.File(self.fname,'r') as f:
attributes.append(ET.SubElement(grid, 'Attribute')) attributes.append(ET.SubElement(grid, 'Attribute'))
attributes[-1].attrib={'Name': 'u', attributes[-1].attrib={'Name': 'u / m',
'Center': 'Node', 'Center': 'Node',
'AttributeType': 'Vector'} 'AttributeType': 'Vector'}
data_items.append(ET.SubElement(attributes[-1], 'DataItem')) data_items.append(ET.SubElement(attributes[-1], 'DataItem'))
@ -1169,7 +1240,7 @@ class Result:
'Dimensions': '{} {} {} 3'.format(*(self.grid+1))} 'Dimensions': '{} {} {} 3'.format(*(self.grid+1))}
data_items[-1].text=f'{os.path.split(self.fname)[1]}:/{inc}/geometry/u_n' data_items[-1].text=f'{os.path.split(self.fname)[1]}:/{inc}/geometry/u_n'
for o,p in zip(['constituents','materialpoints'],['con_physics','mat_physics']): for o,p in zip(['phases','homogenizations'],['out_type_ph','out_type_ho']):
for oo in getattr(self,o): for oo in getattr(self,o):
for pp in getattr(self,p): for pp in getattr(self,p):
g = '/'.join([inc,o[:-1],oo,pp]) g = '/'.join([inc,o[:-1],oo,pp])
@ -1177,19 +1248,21 @@ class Result:
name = '/'.join([g,l]) name = '/'.join([g,l])
shape = f[name].shape[1:] shape = f[name].shape[1:]
dtype = f[name].dtype dtype = f[name].dtype
prec = f[name].dtype.itemsize
if (shape not in [(1,), (3,), (3,3)]) or dtype != np.float64: continue if dtype != np.float64: continue
prec = f[name].dtype.itemsize
unit = f[name].attrs['Unit'] if h5py3 else f[name].attrs['Unit'].decode()
attributes.append(ET.SubElement(grid, 'Attribute')) attributes.append(ET.SubElement(grid, 'Attribute'))
attributes[-1].attrib={'Name': name.split('/',2)[2], attributes[-1].attrib={'Name': name.split('/',2)[2]+f' / {unit}',
'Center': 'Cell', 'Center': 'Cell',
'AttributeType': 'Tensor'} 'AttributeType': type_map[shape]}
data_items.append(ET.SubElement(attributes[-1], 'DataItem')) data_items.append(ET.SubElement(attributes[-1], 'DataItem'))
data_items[-1].attrib={'Format': 'HDF', data_items[-1].attrib={'Format': 'HDF',
'NumberType': 'Float', 'NumberType': 'Float',
'Precision': f'{prec}', 'Precision': f'{prec}',
'Dimensions': '{} {} {} {}'.format(*self.grid,np.prod(shape))} 'Dimensions': '{} {} {} {}'.format(*self.grid,1 if shape == () else
np.prod(shape))}
data_items[-1].text=f'{os.path.split(self.fname)[1]}:{name}' data_items[-1].text=f'{os.path.split(self.fname)[1]}:{name}'
with open(self.fname.with_suffix('.xdmf').name,'w') as f: with open(self.fname.with_suffix('.xdmf').name,'w') as f:
@ -1212,58 +1285,53 @@ class Result:
if mode.lower()=='cell': if mode.lower()=='cell':
if self.structured: if self.structured:
v = VTK.from_rectilinearGrid(self.grid,self.size,self.origin) v = VTK.from_rectilinear_grid(self.grid,self.size,self.origin)
else: else:
with h5py.File(self.fname,'r') as f: with h5py.File(self.fname,'r') as f:
v = VTK.from_unstructuredGrid(f['/geometry/x_n'][()], v = VTK.from_unstructured_grid(f['/geometry/x_n'][()],
f['/geometry/T_c'][()]-1, f['/geometry/T_c'][()]-1,
f['/geometry/T_c'].attrs['VTK_TYPE'].decode()) f['/geometry/T_c'].attrs['VTK_TYPE'] if h5py3 else \
f['/geometry/T_c'].attrs['VTK_TYPE'].decode())
elif mode.lower()=='point': elif mode.lower()=='point':
v = VTK.from_polyData(self.cell_coordinates()) v = VTK.from_poly_data(self.cell_coordinates)
N_digits = int(np.floor(np.log10(max(1,int(self.increments[-1][3:])))))+1 N_digits = int(np.floor(np.log10(max(1,int(self.increments[-1][3:])))))+1
for inc in util.show_progress(self.iterate('increments'),len(self.selection['increments'])): for inc in util.show_progress(self.iterate('increments'),len(self.selection['increments'])):
materialpoints_backup = self.selection['materialpoints'].copy() picked_backup_ho = self.selection['homogenizations'].copy()
self.pick('materialpoints',False) self.pick('homogenizations',False)
for label in (labels if isinstance(labels,list) else [labels]): for label in (labels if isinstance(labels,list) else [labels]):
for p in self.iterate('con_physics'): for o in self.iterate('out_type_ph'):
if p != 'generic': for c in range(self.N_constituents):
for c in self.iterate('constituents'): prefix = '' if self.N_constituents == 1 else f'constituent{c}/'
x = self.get_dataset_location(label) if o != 'mechanics':
if len(x) == 0: for _ in self.iterate('phases'):
path = self.get_dataset_location(label)
if len(path) == 0:
continue
array = self.read_dataset(path,c)
v.add(array,prefix+path[0].split('/',1)[1]+f' / {self._get_attribute(path[0],"Unit")}')
else:
paths = self.get_dataset_location(label)
if len(paths) == 0:
continue continue
array = self.read_dataset(x,0) array = self.read_dataset(paths,c)
v.add(array,'1_'+x[0].split('/',1)[1]) #ToDo: hard coded 1! ph_name = re.compile(r'(?<=(phase\/))(.*?)(?=(mechanics))') # identify phase name
else: dset_name = prefix+re.sub(ph_name,r'',paths[0].split('/',1)[1]) # remove phase name
x = self.get_dataset_location(label) v.add(array,dset_name+f' / {self._get_attribute(paths[0],"Unit")}')
if len(x) == 0: self.pick('homogenizations',picked_backup_ho)
continue
array = self.read_dataset(x,0)
ph_name = re.compile(r'(?<=(constituent\/))(.*?)(?=(generic))') # identify phase name
dset_name = '1_' + re.sub(ph_name,r'',x[0].split('/',1)[1]) # removing phase name
v.add(array,dset_name)
self.pick('materialpoints',materialpoints_backup)
constituents_backup = self.selection['constituents'].copy() picked_backup_ph = self.selection['phases'].copy()
self.pick('constituents',False) self.pick('phases',False)
for label in (labels if isinstance(labels,list) else [labels]): for label in (labels if isinstance(labels,list) else [labels]):
for p in self.iterate('mat_physics'): for _ in self.iterate('out_type_ho'):
if p != 'generic': paths = self.get_dataset_location(label)
for m in self.iterate('materialpoints'): if len(paths) == 0:
x = self.get_dataset_location(label) continue
if len(x) == 0: array = self.read_dataset(paths)
continue v.add(array,paths[0].split('/',1)[1]+f' / {self._get_attribute(paths[0],"Unit")}')
array = self.read_dataset(x,0) self.pick('phases',picked_backup_ph)
v.add(array,'1_'+x[0].split('/',1)[1]) #ToDo: why 1_?
else:
x = self.get_dataset_location(label)
if len(x) == 0:
continue
array = self.read_dataset(x,0)
v.add(array,'1_'+x[0].split('/',1)[1])
self.pick('constituents',constituents_backup)
u = self.read_dataset(self.get_dataset_location('u_n' if mode.lower() == 'cell' else 'u_p')) u = self.read_dataset(self.get_dataset_location('u_n' if mode.lower() == 'cell' else 'u_p'))
v.add(u,'u') v.add(u,'u')

View File

@ -1,6 +1,6 @@
import numpy as np import numpy as np
from . import mechanics from . import tensor
from . import util from . import util
from . import grid_filters from . import grid_filters
@ -13,18 +13,18 @@ _R1 = (3.*np.pi/4.)**(1./3.)
class Rotation: class Rotation:
u""" u"""
Orientation stored with functionality for conversion to different representations. Rotation with functionality for conversion between different representations.
The following conventions apply: The following conventions apply:
- coordinate frames are right-handed. - Coordinate frames are right-handed.
- a rotation angle ω is taken to be positive for a counterclockwise rotation - A rotation angle ω is taken to be positive for a counterclockwise rotation
when viewing from the end point of the rotation axis towards the origin. when viewing from the end point of the rotation axis towards the origin.
- rotations will be interpreted in the passive sense. - Rotations will be interpreted in the passive sense.
- Euler angle triplets are implemented using the Bunge convention, - Euler angle triplets are implemented using the Bunge convention,
with the angular ranges as [0,2π], [0,π], [0,2π]. with angular ranges of [0,2π], [0,π], [0,2π].
- the rotation angle ω is limited to the interval [0,π]. - The rotation angle ω is limited to the interval [0,π].
- the real part of a quaternion is positive, Re(q) > 0 - The real part of a quaternion is positive, Re(q) > 0
- P = -1 (as default). - P = -1 (as default).
Examples Examples
@ -33,7 +33,7 @@ class Rotation:
coordinates "b" expressed in system "B": coordinates "b" expressed in system "B":
- b = Q @ a - b = Q @ a
- b = np.dot(Q.asMatrix(),a) - b = np.dot(Q.as_matrix(),a)
References References
---------- ----------
@ -44,20 +44,70 @@ class Rotation:
__slots__ = ['quaternion'] __slots__ = ['quaternion']
def __init__(self,quaternion = np.array([1.0,0.0,0.0,0.0])): def __init__(self,rotation = np.array([1.0,0.0,0.0,0.0])):
""" """
Initializes to identity unless specified. Initialize rotation object.
Parameters Parameters
---------- ----------
quaternion : numpy.ndarray, optional rotation : list, numpy.ndarray, Rotation, optional
Unit quaternion in positive real hemisphere. Unit quaternion in positive real hemisphere.
Use .from_quaternion to perform a sanity check. Use .from_quaternion to perform a sanity check.
Defaults to no rotation.
""" """
if quaternion.shape[-1] != 4: if isinstance(rotation,Rotation):
raise ValueError('Not a quaternion') self.quaternion = rotation.quaternion.copy()
self.quaternion = quaternion.copy() elif np.array(rotation).shape[-1] == 4:
self.quaternion = np.array(rotation)
else:
raise TypeError('"rotation" is neither a Rotation nor a quaternion')
def __repr__(self):
"""Represent rotation as unit quaternion, rotation matrix, and Bunge-Euler angles."""
if self == Rotation():
return 'Rotation()'
else:
return f'Quaternions {self.shape}:\n'+str(self.quaternion) \
if self.quaternion.shape != (4,) else \
'\n'.join([
'Quaternion: (real={:.3f}, imag=<{:+.3f}, {:+.3f}, {:+.3f}>)'.format(*(self.quaternion)),
'Matrix:\n{}'.format(np.round(self.as_matrix(),8)),
'Bunge Eulers / deg: ({:3.2f}, {:3.2f}, {:3.2f})'.format(*self.as_Euler_angles(degrees=True)),
])
# ToDo: Check difference __copy__ vs __deepcopy__
def __copy__(self,**kwargs):
"""Copy."""
return self.__class__(rotation=kwargs['rotation'] if 'rotation' in kwargs else self.quaternion)
copy = __copy__
def __getitem__(self,item):
"""Return slice according to item."""
return self.copy() \
if self.shape == () else \
self.copy(rotation=self.quaternion[item+(slice(None),)] if isinstance(item,tuple) else self.quaternion[item])
def __eq__(self,other):
"""
Equal to other.
Equality is determined taking limited floating point precision into
account. See numpy.allclose for details.
Parameters
----------
other : Rotation
Rotation to check for equality.
"""
return np.prod(self.shape,dtype=int) == np.prod(other.shape,dtype=int) \
and np.allclose(self.quaternion,other.quaternion)
@property @property
@ -65,39 +115,36 @@ class Rotation:
return self.quaternion.shape[:-1] return self.quaternion.shape[:-1]
# ToDo: Check difference __copy__ vs __deepcopy__
def __copy__(self):
"""Copy."""
return self.__class__(self.quaternion)
copy = __copy__
def __repr__(self):
"""Orientation displayed as unit quaternion, rotation matrix, and Bunge-Euler angles."""
if self.quaternion.shape != (4,):
return 'Quaternions:\n'+str(self.quaternion) # ToDo: could be nicer ...
return '\n'.join([
'Quaternion: (real={:.3f}, imag=<{:+.3f}, {:+.3f}, {:+.3f}>)'.format(*(self.quaternion)),
'Matrix:\n{}'.format(np.round(self.as_matrix(),8)),
'Bunge Eulers / deg: ({:3.2f}, {:3.2f}, {:3.2f})'.format(*self.as_Eulers(degrees=True)),
])
def __getitem__(self,item):
"""Iterate over leading/leftmost dimension of Rotation array."""
if self.shape == (): return self.copy()
if isinstance(item,tuple) and len(item) >= len(self):
raise IndexError('Too many indices')
return self.__class__(self.quaternion[item])
def __len__(self): def __len__(self):
"""Length of leading/leftmost dimension of Rotation array.""" """Length of leading/leftmost dimension of Rotation array."""
return 0 if self.shape == () else self.shape[0] return 0 if self.shape == () else self.shape[0]
def __matmul__(self, other): def __invert__(self):
"""Inverse rotation (backward rotation)."""
dup = self.copy()
dup.quaternion[...,1:] *= -1
return dup
def __pow__(self,pwr):
"""
Raise quaternion to power.
Equivalent to performing the rotation 'pwr' times.
Parameters
----------
pwr : float
Power to raise quaternion to.
"""
phi = np.arccos(self.quaternion[...,0:1])
p = self.quaternion[...,1:]/np.linalg.norm(self.quaternion[...,1:],axis=-1,keepdims=True)
return self.copy(rotation=Rotation(np.block([np.cos(pwr*phi),np.sin(pwr*phi)*p]))._standardize())
def __matmul__(self,other):
""" """
Rotation of vector, second or fourth order tensor, or rotation object. Rotation of vector, second or fourth order tensor, or rotation object.
@ -112,14 +159,14 @@ class Rotation:
Rotated vector, second or fourth order tensor, or rotation object. Rotated vector, second or fourth order tensor, or rotation object.
""" """
if isinstance(other, Rotation): if isinstance(other,Rotation):
q_m = self.quaternion[...,0:1] q_m = self.quaternion[...,0:1]
p_m = self.quaternion[...,1:] p_m = self.quaternion[...,1:]
q_o = other.quaternion[...,0:1] q_o = other.quaternion[...,0:1]
p_o = other.quaternion[...,1:] p_o = other.quaternion[...,1:]
q = (q_m*q_o - np.einsum('...i,...i',p_m,p_o).reshape(self.shape+(1,))) q = (q_m*q_o - np.einsum('...i,...i',p_m,p_o).reshape(self.shape+(1,)))
p = q_m*p_o + q_o*p_m + _P * np.cross(p_m,p_o) p = q_m*p_o + q_o*p_m + _P * np.cross(p_m,p_o)
return self.__class__(np.block([q,p]))._standardize() return Rotation(np.block([q,p]))._standardize()
elif isinstance(other,np.ndarray): elif isinstance(other,np.ndarray):
if self.shape + (3,) == other.shape: if self.shape + (3,) == other.shape:
@ -146,27 +193,89 @@ class Rotation:
def _standardize(self): def _standardize(self):
"""Standardize (ensure positive real hemisphere).""" """Standardize quaternion (ensure positive real hemisphere)."""
self.quaternion[self.quaternion[...,0] < 0.0] *= -1 self.quaternion[self.quaternion[...,0] < 0.0] *= -1
return self return self
def inverse(self):
"""In-place inverse rotation (backward rotation)."""
self.quaternion[...,1:] *= -1
return self
def __invert__(self): def append(self,other):
"""Inverse rotation (backward rotation).""" """Extend rotation array along first dimension with other array."""
return self.copy().inverse() return self.copy(rotation=np.vstack((self.quaternion,other.quaternion)))
def inversed(self):
"""Inverse rotation (backward rotation).""" def flatten(self,order = 'C'):
return ~ self """Flatten quaternion array."""
return self.copy(rotation=self.quaternion.reshape((-1,4),order=order))
def reshape(self,shape,order = 'C'):
"""Reshape quaternion array."""
if isinstance(shape,(int,np.integer)): shape = (shape,)
return self.copy(rotation=self.quaternion.reshape(tuple(shape)+(4,),order=order))
def broadcast_to(self,shape,mode = 'right'):
"""
Broadcast quaternion array to shape.
Parameters
----------
shape : tuple
Shape of broadcasted array.
mode : str, optional
Where to preferentially locate missing dimensions.
Either 'left' or 'right' (default).
"""
if isinstance(shape,(int,np.integer)): shape = (shape,)
return self.copy(rotation=np.broadcast_to(self.quaternion.reshape(util.shapeshifter(self.shape,shape,mode)+(4,)),
shape+(4,)))
def average(self,weights = None):
"""
Average rotations along last dimension.
Parameters
----------
weights : list of floats, optional
Relative weight of each rotation.
Returns
-------
average : Rotation
Weighted average of original Rotation field.
References
----------
Quaternion averaging
F. Landis Markley, Yang Cheng, John L. Crassidis, Yaakov Oshman
Journal of Guidance, Control, and Dynamics 30(4):1193-1197, 2007
10.2514/1.28949
"""
def _M(quat):
"""Intermediate representation supporting quaternion averaging."""
return np.einsum('...i,...j',quat,quat)
if not weights:
weights = np.ones(self.shape,dtype=float)
eig, vec = np.linalg.eig(np.sum(_M(self.quaternion) * weights[...,np.newaxis,np.newaxis],axis=-3) \
/np.sum( weights[...,np.newaxis,np.newaxis],axis=-3))
return Rotation.from_quaternion(np.real(
np.squeeze(
np.take_along_axis(vec,
eig.argmax(axis=-1)[...,np.newaxis,np.newaxis],
axis=-1),
axis=-1)),
accept_homomorph = True)
def misorientation(self,other): def misorientation(self,other):
""" """
Get Misorientation. Calculate misorientation from self to other Rotation.
Parameters Parameters
---------- ----------
@ -177,33 +286,6 @@ class Rotation:
return other@~self return other@~self
def broadcast_to(self,shape):
if isinstance(shape,(int,np.integer)): shape = (shape,)
if self.shape == ():
q = np.broadcast_to(self.quaternion,shape+(4,))
else:
q = np.block([np.broadcast_to(self.quaternion[...,0:1],shape).reshape(shape+(1,)),
np.broadcast_to(self.quaternion[...,1:2],shape).reshape(shape+(1,)),
np.broadcast_to(self.quaternion[...,2:3],shape).reshape(shape+(1,)),
np.broadcast_to(self.quaternion[...,3:4],shape).reshape(shape+(1,))])
return self.__class__(q)
def average(self,other): #ToDo: discuss calling for vectors
"""
Calculate the average rotation.
Parameters
----------
other : Rotation
Rotation from which the average is rotated.
"""
if self.quaternion.shape != (4,) or other.quaternion.shape != (4,):
raise NotImplementedError('Support for multiple rotations missing')
return Rotation.from_average([self,other])
################################################################################################ ################################################################################################
# convert to different orientation representations (numpy arrays) # convert to different orientation representations (numpy arrays)
@ -219,8 +301,8 @@ class Rotation:
""" """
return self.quaternion.copy() return self.quaternion.copy()
def as_Eulers(self, def as_Euler_angles(self,
degrees = False): degrees = False):
""" """
Represent as Bunge-Euler angles. Represent as Bunge-Euler angles.
@ -277,8 +359,8 @@ class Rotation:
""" """
return Rotation._qu2om(self.quaternion) return Rotation._qu2om(self.quaternion)
def as_Rodrigues(self, def as_Rodrigues_vector(self,
vector = False): compact = False):
""" """
Represent as Rodrigues-Frank vector with separated axis and angle argument. Represent as Rodrigues-Frank vector with separated axis and angle argument.
@ -296,7 +378,7 @@ class Rotation:
""" """
ro = Rotation._qu2ro(self.quaternion) ro = Rotation._qu2ro(self.quaternion)
if vector: if compact:
with np.errstate(invalid='ignore'): with np.errstate(invalid='ignore'):
return ro[...,:3]*ro[...,3:4] return ro[...,:3]*ro[...,3:4]
else: else:
@ -309,7 +391,7 @@ class Rotation:
Returns Returns
------- -------
h : numpy.ndarray of shape (...,3) h : numpy.ndarray of shape (...,3)
Homochoric vector: (h_1, h_2, h_3), ǀhǀ < 1/2*π^(2/3). Homochoric vector: (h_1, h_2, h_3), ǀhǀ < (3/4*π)^(1/3).
""" """
return Rotation._qu2ho(self.quaternion) return Rotation._qu2ho(self.quaternion)
@ -326,20 +408,6 @@ class Rotation:
""" """
return Rotation._qu2cu(self.quaternion) return Rotation._qu2cu(self.quaternion)
@property
def M(self): # ToDo not sure about the name: as_M or M? we do not have a from_M
"""
Intermediate representation supporting quaternion averaging.
References
----------
F. Landis Markley et al., Journal of Guidance, Control, and Dynamics 30(4):1193-1197, 2007
https://doi.org/10.2514/1.28949
"""
return np.einsum('...i,...j',self.quaternion,self.quaternion)
################################################################################################ ################################################################################################
# Static constructors. The input data needs to follow the conventions, options allow to # Static constructors. The input data needs to follow the conventions, options allow to
# relax the conventions. # relax the conventions.
@ -347,7 +415,7 @@ class Rotation:
def from_quaternion(q, def from_quaternion(q,
accept_homomorph = False, accept_homomorph = False,
P = -1, P = -1,
acceptHomomorph = None): # old name (for compatibility) **kwargs):
""" """
Initialize from quaternion. Initialize from quaternion.
@ -363,15 +431,13 @@ class Rotation:
Convention used. Defaults to -1. Convention used. Defaults to -1.
""" """
if acceptHomomorph is not None:
accept_homomorph = acceptHomomorph # for compatibility
qu = np.array(q,dtype=float) qu = np.array(q,dtype=float)
if qu.shape[:-2:-1] != (4,): if qu.shape[:-2:-1] != (4,):
raise ValueError('Invalid shape.') raise ValueError('Invalid shape.')
if abs(P) != 1: if abs(P) != 1:
raise ValueError('P ∉ {-1,1}') raise ValueError('P ∉ {-1,1}')
if P == 1: qu[...,1:4] *= -1 qu[...,1:4] *= -P
if accept_homomorph: if accept_homomorph:
qu[qu[...,0] < 0.0] *= -1 qu[qu[...,0] < 0.0] *= -1
else: else:
@ -383,8 +449,9 @@ class Rotation:
return Rotation(qu) return Rotation(qu)
@staticmethod @staticmethod
def from_Eulers(phi, def from_Euler_angles(phi,
degrees = False): degrees = False,
**kwargs):
""" """
Initialize from Bunge-Euler angles. Initialize from Bunge-Euler angles.
@ -411,7 +478,8 @@ class Rotation:
def from_axis_angle(axis_angle, def from_axis_angle(axis_angle,
degrees = False, degrees = False,
normalize = False, normalize = False,
P = -1): P = -1,
**kwargs):
""" """
Initialize from Axis angle pair. Initialize from Axis angle pair.
@ -434,7 +502,7 @@ class Rotation:
if abs(P) != 1: if abs(P) != 1:
raise ValueError('P ∉ {-1,1}') raise ValueError('P ∉ {-1,1}')
if P == 1: ax[...,0:3] *= -1 ax[...,0:3] *= -P
if degrees: ax[..., 3] = np.radians(ax[...,3]) if degrees: ax[..., 3] = np.radians(ax[...,3])
if normalize: ax[...,0:3] /= np.linalg.norm(ax[...,0:3],axis=-1,keepdims=True) if normalize: ax[...,0:3] /= np.linalg.norm(ax[...,0:3],axis=-1,keepdims=True)
if np.any(ax[...,3] < 0.0) or np.any(ax[...,3] > np.pi): if np.any(ax[...,3] < 0.0) or np.any(ax[...,3] > np.pi):
@ -448,14 +516,15 @@ class Rotation:
@staticmethod @staticmethod
def from_basis(basis, def from_basis(basis,
orthonormal = True, orthonormal = True,
reciprocal = False): reciprocal = False,
**kwargs):
""" """
Initialize from lattice basis vectors. Initialize from lattice basis vectors.
Parameters Parameters
---------- ----------
basis : numpy.ndarray of shape (...,3,3) basis : numpy.ndarray of shape (...,3,3)
Three lattice basis vectors in three dimensions. Three three-dimensional lattice basis vectors.
orthonormal : boolean, optional orthonormal : boolean, optional
Basis is strictly orthonormal, i.e. is free of stretch components. Defaults to True. Basis is strictly orthonormal, i.e. is free of stretch components. Defaults to True.
reciprocal : boolean, optional reciprocal : boolean, optional
@ -463,15 +532,15 @@ class Rotation:
""" """
om = np.array(basis,dtype=float) om = np.array(basis,dtype=float)
if om.shape[:-3:-1] != (3,3): if om.shape[-2:] != (3,3):
raise ValueError('Invalid shape.') raise ValueError('Invalid shape.')
if reciprocal: if reciprocal:
om = np.linalg.inv(mechanics.transpose(om)/np.pi) # transform reciprocal basis set om = np.linalg.inv(tensor.transpose(om)/np.pi) # transform reciprocal basis set
orthonormal = False # contains stretch orthonormal = False # contains stretch
if not orthonormal: if not orthonormal:
(U,S,Vh) = np.linalg.svd(om) # singular value decomposition (U,S,Vh) = np.linalg.svd(om) # singular value decomposition
om = np.einsum('...ij,...jl->...il',U,Vh) om = np.einsum('...ij,...jl',U,Vh)
if not np.all(np.isclose(np.linalg.det(om),1.0)): if not np.all(np.isclose(np.linalg.det(om),1.0)):
raise ValueError('Orientation matrix has determinant ≠ 1.') raise ValueError('Orientation matrix has determinant ≠ 1.')
if not np.all(np.isclose(np.einsum('...i,...i',om[...,0],om[...,1]), 0.0)) \ if not np.all(np.isclose(np.einsum('...i,...i',om[...,0],om[...,1]), 0.0)) \
@ -482,7 +551,7 @@ class Rotation:
return Rotation(Rotation._om2qu(om)) return Rotation(Rotation._om2qu(om))
@staticmethod @staticmethod
def from_matrix(R): def from_matrix(R,**kwargs):
""" """
Initialize from rotation matrix. Initialize from rotation matrix.
@ -495,17 +564,46 @@ class Rotation:
return Rotation.from_basis(R) return Rotation.from_basis(R)
@staticmethod @staticmethod
def from_Rodrigues(rho, def from_parallel(a,b,
normalize = False, **kwargs):
P = -1):
""" """
Initialize from Rodrigues-Frank vector. Initialize from pairs of two orthogonal lattice basis vectors.
Parameters
----------
a : numpy.ndarray of shape (...,2,3)
Two three-dimensional lattice vectors of first orthogonal basis.
b : numpy.ndarray of shape (...,2,3)
Corresponding three-dimensional lattice vectors of second basis.
"""
a_ = np.array(a)
b_ = np.array(b)
if a_.shape[-2:] != (2,3) or b_.shape[-2:] != (2,3) or a_.shape != b_.shape:
raise ValueError('Invalid shape.')
am = np.stack([ a_[...,0,:],
a_[...,1,:],
np.cross(a_[...,0,:],a_[...,1,:]) ],axis=-2)
bm = np.stack([ b_[...,0,:],
b_[...,1,:],
np.cross(b_[...,0,:],b_[...,1,:]) ],axis=-2)
return Rotation.from_basis(np.swapaxes(am/np.linalg.norm(am,axis=-1,keepdims=True),-1,-2))\
.misorientation(Rotation.from_basis(np.swapaxes(bm/np.linalg.norm(bm,axis=-1,keepdims=True),-1,-2)))
@staticmethod
def from_Rodrigues_vector(rho,
normalize = False,
P = -1,
**kwargs):
"""
Initialize from Rodrigues-Frank vector (angle separated from axis).
Parameters Parameters
---------- ----------
rho : numpy.ndarray of shape (...,4) rho : numpy.ndarray of shape (...,4)
Rodrigues-Frank vector (angle separated from axis). Rodrigues-Frank vector. (n_1, n_2, n_3, tan(ω/2)), ǀnǀ = 1 and ω [0,π].
(n_1, n_2, n_3, tan(ω/2)), ǀnǀ = 1 and ω [0,π].
normalize : boolean, optional normalize : boolean, optional
Allow ǀnǀ 1. Defaults to False. Allow ǀnǀ 1. Defaults to False.
P : int {-1,1}, optional P : int {-1,1}, optional
@ -518,7 +616,7 @@ class Rotation:
if abs(P) != 1: if abs(P) != 1:
raise ValueError('P ∉ {-1,1}') raise ValueError('P ∉ {-1,1}')
if P == 1: ro[...,0:3] *= -1 ro[...,0:3] *= -P
if normalize: ro[...,0:3] /= np.linalg.norm(ro[...,0:3],axis=-1,keepdims=True) if normalize: ro[...,0:3] /= np.linalg.norm(ro[...,0:3],axis=-1,keepdims=True)
if np.any(ro[...,3] < 0.0): if np.any(ro[...,3] < 0.0):
raise ValueError('Rodrigues vector rotation angle not positive.') raise ValueError('Rodrigues vector rotation angle not positive.')
@ -529,7 +627,8 @@ class Rotation:
@staticmethod @staticmethod
def from_homochoric(h, def from_homochoric(h,
P = -1): P = -1,
**kwargs):
""" """
Initialize from homochoric vector. Initialize from homochoric vector.
@ -547,7 +646,7 @@ class Rotation:
if abs(P) != 1: if abs(P) != 1:
raise ValueError('P ∉ {-1,1}') raise ValueError('P ∉ {-1,1}')
if P == 1: ho *= -1 ho *= -P
if np.any(np.linalg.norm(ho,axis=-1) >_R1+1e-9): if np.any(np.linalg.norm(ho,axis=-1) >_R1+1e-9):
raise ValueError('Homochoric coordinate outside of the sphere.') raise ValueError('Homochoric coordinate outside of the sphere.')
@ -556,7 +655,8 @@ class Rotation:
@staticmethod @staticmethod
def from_cubochoric(c, def from_cubochoric(c,
P = -1): P = -1,
**kwargs):
""" """
Initialize from cubochoric vector. Initialize from cubochoric vector.
@ -577,46 +677,15 @@ class Rotation:
if np.abs(np.max(cu)) > np.pi**(2./3.) * 0.5+1e-9: if np.abs(np.max(cu)) > np.pi**(2./3.) * 0.5+1e-9:
raise ValueError('Cubochoric coordinate outside of the cube.') raise ValueError('Cubochoric coordinate outside of the cube.')
ho = Rotation._cu2ho(cu) ho = -P * Rotation._cu2ho(cu)
if P == 1: ho *= -1
return Rotation(Rotation._ho2qu(ho)) return Rotation(Rotation._ho2qu(ho))
@staticmethod @staticmethod
def from_average(rotations,weights = None): def from_random(shape = None,
""" rng_seed = None,
Average rotation. **kwargs):
References
----------
F. Landis Markley et al., Journal of Guidance, Control, and Dynamics 30(4):1193-1197, 2007
https://doi.org/10.2514/1.28949
Parameters
----------
rotations : list of Rotations
Rotations to average from
weights : list of floats, optional
Weights for each rotation used for averaging
"""
if not all(isinstance(item, Rotation) for item in rotations):
raise TypeError('Only instances of Rotation can be averaged.')
N = len(rotations)
if not weights:
weights = np.ones(N,dtype='i')
for i,(r,n) in enumerate(zip(rotations,weights)):
M = r.M * n if i == 0 \
else M + r.M * n # noqa add (multiples) of this rotation to average noqa
eig, vec = np.linalg.eig(M/N)
return Rotation.from_quaternion(np.real(vec.T[eig.argmax()]),accept_homomorph = True)
@staticmethod
def from_random(shape=None,seed=None):
""" """
Draw random rotation. Draw random rotation.
@ -627,18 +696,13 @@ class Rotation:
shape : tuple of ints, optional shape : tuple of ints, optional
Shape of the sample. Defaults to None which gives a Shape of the sample. Defaults to None which gives a
single rotation single rotation
seed : {None, int, array_like[ints], SeedSequence, BitGenerator, Generator}, optional rng_seed : {None, int, array_like[ints], SeedSequence, BitGenerator, Generator}, optional
A seed to initialize the BitGenerator. Defaults to None. A seed to initialize the BitGenerator. Defaults to None.
If None, then fresh, unpredictable entropy will be pulled from the OS. If None, then fresh, unpredictable entropy will be pulled from the OS.
""" """
rng = np.random.default_rng(seed) rng = np.random.default_rng(rng_seed)
if shape is None: r = rng.random(3 if shape is None else tuple(shape)+(3,) if hasattr(shape, '__iter__') else (shape,3))
r = rng.random(3)
elif hasattr(shape, '__iter__'):
r = rng.random(tuple(shape)+(3,))
else:
r = rng.random((shape,3))
A = np.sqrt(r[...,2]) A = np.sqrt(r[...,2])
B = np.sqrt(1.0-r[...,2]) B = np.sqrt(1.0-r[...,2])
@ -647,14 +711,17 @@ class Rotation:
np.cos(2.0*np.pi*r[...,1])*B, np.cos(2.0*np.pi*r[...,1])*B,
np.sin(2.0*np.pi*r[...,0])*A],axis=-1) np.sin(2.0*np.pi*r[...,0])*A],axis=-1)
return Rotation(q.reshape(r.shape[:-1]+(4,)) if shape is not None else q)._standardize() return Rotation(q if shape is None else q.reshape(r.shape[:-1]+(4,)))._standardize()
# for compatibility
__mul__ = __matmul__
@staticmethod @staticmethod
def from_ODF(weights,Eulers,N=500,degrees=True,fractions=True,seed=None): def from_ODF(weights,
phi,
N = 500,
degrees = True,
fractions = True,
rng_seed = None,
**kwargs):
""" """
Sample discrete values from a binned ODF. Sample discrete values from a binned ODF.
@ -662,7 +729,7 @@ class Rotation:
---------- ----------
weights : numpy.ndarray of shape (n) weights : numpy.ndarray of shape (n)
Texture intensity values (probability density or volume fraction) at Euler grid points. Texture intensity values (probability density or volume fraction) at Euler grid points.
Eulers : numpy.ndarray of shape (n,3) phi : numpy.ndarray of shape (n,3)
Grid coordinates in Euler space at which weights are defined. Grid coordinates in Euler space at which weights are defined.
N : integer, optional N : integer, optional
Number of discrete orientations to be sampled from the given ODF. Number of discrete orientations to be sampled from the given ODF.
@ -672,7 +739,7 @@ class Rotation:
fractions : boolean, optional fractions : boolean, optional
ODF values correspond to volume fractions, not probability density. ODF values correspond to volume fractions, not probability density.
Defaults to True. Defaults to True.
seed: {None, int, array_like[ints], SeedSequence, BitGenerator, Generator}, optional rng_seed: {None, int, array_like[ints], SeedSequence, BitGenerator, Generator}, optional
A seed to initialize the BitGenerator. Defaults to None, i.e. unpredictable entropy A seed to initialize the BitGenerator. Defaults to None, i.e. unpredictable entropy
will be pulled from the OS. will be pulled from the OS.
@ -695,19 +762,24 @@ class Rotation:
""" """
def _dg(eu,deg): def _dg(eu,deg):
"""Return infinitesimal Euler space volume of bin(s).""" """Return infinitesimal Euler space volume of bin(s)."""
Eulers_sorted = eu[np.lexsort((eu[:,0],eu[:,1],eu[:,2]))] phi_sorted = eu[np.lexsort((eu[:,0],eu[:,1],eu[:,2]))]
steps,size,_ = grid_filters.cell_coord0_gridSizeOrigin(Eulers_sorted) steps,size,_ = grid_filters.cell_coord0_gridSizeOrigin(phi_sorted)
delta = np.radians(size/steps) if deg else size/steps delta = np.radians(size/steps) if deg else size/steps
return delta[0]*2.0*np.sin(delta[1]/2.0)*delta[2] / 8.0 / np.pi**2 * np.sin(np.radians(eu[:,1]) if deg else eu[:,1]) return delta[0]*2.0*np.sin(delta[1]/2.0)*delta[2] / 8.0 / np.pi**2 * np.sin(np.radians(eu[:,1]) if deg else eu[:,1])
dg = 1.0 if fractions else _dg(Eulers,degrees) dg = 1.0 if fractions else _dg(phi,degrees)
dV_V = dg * np.maximum(0.0,weights.squeeze()) dV_V = dg * np.maximum(0.0,weights.squeeze())
return Rotation.from_Eulers(Eulers[util.hybrid_IA(dV_V,N,seed)],degrees) return Rotation.from_Euler_angles(phi[util.hybrid_IA(dV_V,N,rng_seed)],degrees)
@staticmethod @staticmethod
def from_spherical_component(center,sigma,N=500,degrees=True,seed=None): def from_spherical_component(center,
sigma,
N = 500,
degrees = True,
rng_seed = None,
**kwargs):
""" """
Calculate set of rotations with Gaussian distribution around center. Calculate set of rotations with Gaussian distribution around center.
@ -721,12 +793,12 @@ class Rotation:
Number of samples, defaults to 500. Number of samples, defaults to 500.
degrees : boolean, optional degrees : boolean, optional
sigma is given in degrees. sigma is given in degrees.
seed : {None, int, array_like[ints], SeedSequence, BitGenerator, Generator}, optional rng_seed : {None, int, array_like[ints], SeedSequence, BitGenerator, Generator}, optional
A seed to initialize the BitGenerator. Defaults to None, i.e. unpredictable entropy A seed to initialize the BitGenerator. Defaults to None, i.e. unpredictable entropy
will be pulled from the OS. will be pulled from the OS.
""" """
rng = np.random.default_rng(seed) rng = np.random.default_rng(rng_seed)
sigma = np.radians(sigma) if degrees else sigma sigma = np.radians(sigma) if degrees else sigma
u,Theta = (rng.random((N,2)) * 2.0 * np.array([1,np.pi]) - np.array([1.0, 0])).T u,Theta = (rng.random((N,2)) * 2.0 * np.array([1,np.pi]) - np.array([1.0, 0])).T
omega = abs(rng.normal(scale=sigma,size=N)) omega = abs(rng.normal(scale=sigma,size=N))
@ -738,7 +810,13 @@ class Rotation:
@staticmethod @staticmethod
def from_fiber_component(alpha,beta,sigma=0.0,N=500,degrees=True,seed=None): def from_fiber_component(alpha,
beta,
sigma = 0.0,
N = 500,
degrees = True,
rng_seed = None,
**kwargs):
""" """
Calculate set of rotations with Gaussian distribution around direction. Calculate set of rotations with Gaussian distribution around direction.
@ -755,12 +833,12 @@ class Rotation:
Number of samples, defaults to 500. Number of samples, defaults to 500.
degrees : boolean, optional degrees : boolean, optional
sigma, alpha, and beta are given in degrees. sigma, alpha, and beta are given in degrees.
seed : {None, int, array_like[ints], SeedSequence, BitGenerator, Generator}, optional rng_seed : {None, int, array_like[ints], SeedSequence, BitGenerator, Generator}, optional
A seed to initialize the BitGenerator. Defaults to None, i.e. unpredictable entropy A seed to initialize the BitGenerator. Defaults to None, i.e. unpredictable entropy
will be pulled from the OS. will be pulled from the OS.
""" """
rng = np.random.default_rng(seed) rng = np.random.default_rng(rng_seed)
sigma_,alpha_,beta_ = map(np.radians,(sigma,alpha,beta)) if degrees else (sigma,alpha,beta) sigma_,alpha_,beta_ = map(np.radians,(sigma,alpha,beta)) if degrees else (sigma,alpha,beta)
d_cr = np.array([np.sin(alpha_[0])*np.cos(alpha_[1]), np.sin(alpha_[0])*np.sin(alpha_[1]), np.cos(alpha_[0])]) d_cr = np.array([np.sin(alpha_[0])*np.cos(alpha_[1]), np.sin(alpha_[0])*np.sin(alpha_[1]), np.cos(alpha_[0])])

View File

@ -31,7 +31,7 @@ class Table:
def __repr__(self): def __repr__(self):
"""Brief overview.""" """Brief overview."""
return util.srepr(self.comments)+'\n'+self.data.__repr__() return '\n'.join(['# '+c for c in self.comments])+'\n'+self.data.__repr__()
def __len__(self): def __len__(self):
"""Number of rows.""" """Number of rows."""
@ -159,7 +159,7 @@ class Table:
comments = [util.execution_stamp('Table','from_ang')] comments = [util.execution_stamp('Table','from_ang')]
for line in content: for line in content:
if line.startswith('#'): if line.startswith('#'):
comments.append(line.strip()) comments.append(line.split('#',1)[1].strip())
else: else:
break break
@ -175,7 +175,7 @@ class Table:
@property @property
def labels(self): def labels(self):
return list(self.shapes.keys()) return list(self.shapes)
def get(self,label): def get(self,label):
@ -222,6 +222,7 @@ class Table:
dup.data[label] = data.reshape(dup.data[label].shape) dup.data[label] = data.reshape(dup.data[label].shape)
return dup return dup
def add(self,label,data,info=None): def add(self,label,data,info=None):
""" """
Add column data. Add column data.

View File

@ -316,12 +316,6 @@ class Test:
return self.compare_Array(refName,curName) return self.compare_Array(refName,curName)
def compare_ArrayCurCur(self,cur0,cur1):
cur0Name = self.fileInCurrent(cur0)
cur1Name = self.fileInCurrent(cur1)
return self.compare_Array(cur0Name,cur1Name)
def compare_Table(self,headings0,file0, def compare_Table(self,headings0,file0,
headings1,file1, headings1,file1,
normHeadings='',normType=None, normHeadings='',normType=None,

View File

@ -1,3 +1,4 @@
import os
import multiprocessing as mp import multiprocessing as mp
from pathlib import Path from pathlib import Path
@ -36,7 +37,7 @@ class VTK:
@staticmethod @staticmethod
def from_rectilinearGrid(grid,size,origin=np.zeros(3)): def from_rectilinear_grid(grid,size,origin=np.zeros(3)):
""" """
Create VTK of type vtk.vtkRectilinearGrid. Create VTK of type vtk.vtkRectilinearGrid.
@ -64,7 +65,7 @@ class VTK:
@staticmethod @staticmethod
def from_unstructuredGrid(nodes,connectivity,cell_type): def from_unstructured_grid(nodes,connectivity,cell_type):
""" """
Create VTK of type vtk.vtkUnstructuredGrid. Create VTK of type vtk.vtkUnstructuredGrid.
@ -96,7 +97,7 @@ class VTK:
@staticmethod @staticmethod
def from_polyData(points): def from_poly_data(points):
""" """
Create VTK of type vtk.polyData. Create VTK of type vtk.polyData.
@ -108,11 +109,18 @@ class VTK:
Spatial position of the points. Spatial position of the points.
""" """
N = points.shape[0]
vtk_points = vtk.vtkPoints() vtk_points = vtk.vtkPoints()
vtk_points.SetData(np_to_vtk(points)) vtk_points.SetData(np_to_vtk(points))
vtk_cells = vtk.vtkCellArray()
vtk_cells.SetNumberOfCells(N)
vtk_cells.SetCells(N,np_to_vtkIdTypeArray(np.stack((np.ones (N,dtype=np.int64),
np.arange(N,dtype=np.int64)),axis=1).ravel(),deep=True))
vtk_data = vtk.vtkPolyData() vtk_data = vtk.vtkPolyData()
vtk_data.SetPoints(vtk_points) vtk_data.SetPoints(vtk_points)
vtk_data.SetVerts(vtk_cells)
return VTK(vtk_data) return VTK(vtk_data)
@ -131,6 +139,8 @@ class VTK:
vtkUnstructuredGrid, and vtkPolyData. vtkUnstructuredGrid, and vtkPolyData.
""" """
if not os.path.isfile(fname): # vtk has a strange error handling
raise FileNotFoundError(f'no such file: {fname}')
ext = Path(fname).suffix ext = Path(fname).suffix
if ext == '.vtk' or dataset_type is not None: if ext == '.vtk' or dataset_type is not None:
reader = vtk.vtkGenericDataObjectReader() reader = vtk.vtkGenericDataObjectReader()
@ -164,6 +174,7 @@ class VTK:
return VTK(vtk_data) return VTK(vtk_data)
@staticmethod @staticmethod
def _write(writer): def _write(writer):
"""Wrapper for parallel writing.""" """Wrapper for parallel writing."""
@ -189,11 +200,10 @@ class VTK:
elif isinstance(self.vtk_data,vtk.vtkPolyData): elif isinstance(self.vtk_data,vtk.vtkPolyData):
writer = vtk.vtkXMLPolyDataWriter() writer = vtk.vtkXMLPolyDataWriter()
default_ext = writer.GetDefaultFileExtension() default_ext = '.'+writer.GetDefaultFileExtension()
ext = Path(fname).suffix ext = Path(fname).suffix
if ext and ext != '.'+default_ext: writer.SetFileName(str(fname)+(default_ext if default_ext != ext else ''))
raise ValueError(f'Given extension {ext} does not match default .{default_ext}')
writer.SetFileName(str(Path(fname).with_suffix('.'+default_ext)))
if compress: if compress:
writer.SetCompressorTypeToZLib() writer.SetCompressorTypeToZLib()
else: else:
@ -238,10 +248,10 @@ class VTK:
else data).reshape(N_data,-1),deep=True) # avoid large files else data).reshape(N_data,-1),deep=True) # avoid large files
d.SetName(label) d.SetName(label)
if N_data == N_cells: if N_data == N_points:
self.vtk_data.GetCellData().AddArray(d)
elif N_data == N_points:
self.vtk_data.GetPointData().AddArray(d) self.vtk_data.GetPointData().AddArray(d)
elif N_data == N_cells:
self.vtk_data.GetCellData().AddArray(d)
else: else:
raise ValueError(f'Cell / point count ({N_cells} / {N_points}) differs from data ({N_data}).') raise ValueError(f'Cell / point count ({N_cells} / {N_points}) differs from data ({N_data}).')
elif isinstance(data,pd.DataFrame): elif isinstance(data,pd.DataFrame):

420
python/damask/lattice.py Normal file
View File

@ -0,0 +1,420 @@
import numpy as _np
kinematics = {
'cF': {
'slip' : _np.array([
[+0,+1,-1 , +1,+1,+1],
[-1,+0,+1 , +1,+1,+1],
[+1,-1,+0 , +1,+1,+1],
[+0,-1,-1 , -1,-1,+1],
[+1,+0,+1 , -1,-1,+1],
[-1,+1,+0 , -1,-1,+1],
[+0,-1,+1 , +1,-1,-1],
[-1,+0,-1 , +1,-1,-1],
[+1,+1,+0 , +1,-1,-1],
[+0,+1,+1 , -1,+1,-1],
[+1,+0,-1 , -1,+1,-1],
[-1,-1,+0 , -1,+1,-1],
[+1,+1,+0 , +1,-1,+0],
[+1,-1,+0 , +1,+1,+0],
[+1,+0,+1 , +1,+0,-1],
[+1,+0,-1 , +1,+0,+1],
[+0,+1,+1 , +0,+1,-1],
[+0,+1,-1 , +0,+1,+1],
],'d'),
'twin' : _np.array([
[-2, 1, 1, 1, 1, 1],
[ 1,-2, 1, 1, 1, 1],
[ 1, 1,-2, 1, 1, 1],
[ 2,-1, 1, -1,-1, 1],
[-1, 2, 1, -1,-1, 1],
[-1,-1,-2, -1,-1, 1],
[-2,-1,-1, 1,-1,-1],
[ 1, 2,-1, 1,-1,-1],
[ 1,-1, 2, 1,-1,-1],
[ 2, 1,-1, -1, 1,-1],
[-1,-2,-1, -1, 1,-1],
[-1, 1, 2, -1, 1,-1],
],dtype=float),
},
'cI': {
'slip' : _np.array([
[+1,-1,+1 , +0,+1,+1],
[-1,-1,+1 , +0,+1,+1],
[+1,+1,+1 , +0,-1,+1],
[-1,+1,+1 , +0,-1,+1],
[-1,+1,+1 , +1,+0,+1],
[-1,-1,+1 , +1,+0,+1],
[+1,+1,+1 , -1,+0,+1],
[+1,-1,+1 , -1,+0,+1],
[-1,+1,+1 , +1,+1,+0],
[-1,+1,-1 , +1,+1,+0],
[+1,+1,+1 , -1,+1,+0],
[+1,+1,-1 , -1,+1,+0],
[-1,+1,+1 , +2,+1,+1],
[+1,+1,+1 , -2,+1,+1],
[+1,+1,-1 , +2,-1,+1],
[+1,-1,+1 , +2,+1,-1],
[+1,-1,+1 , +1,+2,+1],
[+1,+1,-1 , -1,+2,+1],
[+1,+1,+1 , +1,-2,+1],
[-1,+1,+1 , +1,+2,-1],
[+1,+1,-1 , +1,+1,+2],
[+1,-1,+1 , -1,+1,+2],
[-1,+1,+1 , +1,-1,+2],
[+1,+1,+1 , +1,+1,-2],
],'d'),
'twin' : _np.array([
[-1, 1, 1, 2, 1, 1],
[ 1, 1, 1, -2, 1, 1],
[ 1, 1,-1, 2,-1, 1],
[ 1,-1, 1, 2, 1,-1],
[ 1,-1, 1, 1, 2, 1],
[ 1, 1,-1, -1, 2, 1],
[ 1, 1, 1, 1,-2, 1],
[-1, 1, 1, 1, 2,-1],
[ 1, 1,-1, 1, 1, 2],
[ 1,-1, 1, -1, 1, 2],
[-1, 1, 1, 1,-1, 2],
[ 1, 1, 1, 1, 1,-2],
],dtype=float),
},
'hP': {
'slip' : _np.array([
[+2,-1,-1,+0 , +0,+0,+0,+1],
[-1,+2,-1,+0 , +0,+0,+0,+1],
[-1,-1,+2,+0 , +0,+0,+0,+1],
[+2,-1,-1,+0 , +0,+1,-1,+0],
[-1,+2,-1,+0 , -1,+0,+1,+0],
[-1,-1,+2,+0 , +1,-1,+0,+0],
[-1,+1,+0,+0 , +1,+1,-2,+0],
[+0,-1,+1,+0 , -2,+1,+1,+0],
[+1,+0,-1,+0 , +1,-2,+1,+0],
[-1,+2,-1,+0 , +1,+0,-1,+1],
[-2,+1,+1,+0 , +0,+1,-1,+1],
[-1,-1,+2,+0 , -1,+1,+0,+1],
[+1,-2,+1,+0 , -1,+0,+1,+1],
[+2,-1,-1,+0 , +0,-1,+1,+1],
[+1,+1,-2,+0 , +1,-1,+0,+1],
[-2,+1,+1,+3 , +1,+0,-1,+1],
[-1,-1,+2,+3 , +1,+0,-1,+1],
[-1,-1,+2,+3 , +0,+1,-1,+1],
[+1,-2,+1,+3 , +0,+1,-1,+1],
[+1,-2,+1,+3 , -1,+1,+0,+1],
[+2,-1,-1,+3 , -1,+1,+0,+1],
[+2,-1,-1,+3 , -1,+0,+1,+1],
[+1,+1,-2,+3 , -1,+0,+1,+1],
[+1,+1,-2,+3 , +0,-1,+1,+1],
[-1,+2,-1,+3 , +0,-1,+1,+1],
[-1,+2,-1,+3 , +1,-1,+0,+1],
[-2,+1,+1,+3 , +1,-1,+0,+1],
[-1,-1,+2,+3 , +1,+1,-2,+2],
[+1,-2,+1,+3 , -1,+2,-1,+2],
[+2,-1,-1,+3 , -2,+1,+1,+2],
[+1,+1,-2,+3 , -1,-1,+2,+2],
[-1,+2,-1,+3 , +1,-2,+1,+2],
[-2,+1,+1,+3 , +2,-1,-1,+2],
],'d'),
'twin' : _np.array([
[-1, 0, 1, 1, 1, 0, -1, 2], # shear = (3-(c/a)^2)/(sqrt(3) c/a) <-10.1>{10.2}
[ 0, -1, 1, 1, 0, 1, -1, 2],
[ 1, -1, 0, 1, -1, 1, 0, 2],
[ 1, 0, -1, 1, -1, 0, 1, 2],
[ 0, 1, -1, 1, 0, -1, 1, 2],
[-1, 1, 0, 1, 1, -1, 0, 2],
[-1, -1, 2, 6, 1, 1, -2, 1], # shear = 1/(c/a) <11.6>{-1-1.1}
[ 1, -2, 1, 6, -1, 2, -1, 1],
[ 2, -1, -1, 6, -2, 1, 1, 1],
[ 1, 1, -2, 6, -1, -1, 2, 1],
[-1, 2, -1, 6, 1, -2, 1, 1],
[-2, 1, 1, 6, 2, -1, -1, 1],
[ 1, 0, -1, -2, 1, 0, -1, 1], # shear = (4(c/a)^2-9)/(4 sqrt(3) c/a) <10.-2>{10.1}
[ 0, 1, -1, -2, 0, 1, -1, 1],
[-1, 1, 0, -2, -1, 1, 0, 1],
[-1, 0, 1, -2, -1, 0, 1, 1],
[ 0, -1, 1, -2, 0, -1, 1, 1],
[ 1, -1, 0, -2, 1, -1, 0, 1],
[ 1, 1, -2, -3, 1, 1, -2, 2], # shear = 2((c/a)^2-2)/(3 c/a) <11.-3>{11.2}
[-1, 2, -1, -3, -1, 2, -1, 2],
[-2, 1, 1, -3, -2, 1, 1, 2],
[-1, -1, 2, -3, -1, -1, 2, 2],
[ 1, -2, 1, -3, 1, -2, 1, 2],
[ 2, -1, -1, -3, 2, -1, -1, 2],
],dtype=float),
},
}
# Kurdjomov--Sachs orientation relationship for fcc <-> bcc transformation
# from S. Morito et al., Journal of Alloys and Compounds 577:s587-s592, 2013
# also see K. Kitahara et al., Acta Materialia 54:1279-1288, 2006
#
# Orientation relationships between parent and product lattices.
# Each named relationship maps a Bravais-lattice symbol ('cF', 'cI', 'hP')
# to an array of [[direction],[plane]] index pairs; rows of the two arrays
# of one relationship correspond to each other.
# NOTE(review): the direction-vs-plane ordering of the inner pairs is
# inferred from the KS reference above -- confirm against usage.
relations = {
  'KS': {
    'cF' : _np.array([
      [[ -1, 0, 1],[ 1, 1, 1]],
      [[ -1, 0, 1],[ 1, 1, 1]],
      [[ 0, 1, -1],[ 1, 1, 1]],
      [[ 0, 1, -1],[ 1, 1, 1]],
      [[ 1, -1, 0],[ 1, 1, 1]],
      [[ 1, -1, 0],[ 1, 1, 1]],
      [[ 1, 0, -1],[ 1, -1, 1]],
      [[ 1, 0, -1],[ 1, -1, 1]],
      [[ -1, -1, 0],[ 1, -1, 1]],
      [[ -1, -1, 0],[ 1, -1, 1]],
      [[ 0, 1, 1],[ 1, -1, 1]],
      [[ 0, 1, 1],[ 1, -1, 1]],
      [[ 0, -1, 1],[ -1, 1, 1]],
      [[ 0, -1, 1],[ -1, 1, 1]],
      [[ -1, 0, -1],[ -1, 1, 1]],
      [[ -1, 0, -1],[ -1, 1, 1]],
      [[ 1, 1, 0],[ -1, 1, 1]],
      [[ 1, 1, 0],[ -1, 1, 1]],
      [[ -1, 1, 0],[ 1, 1, -1]],
      [[ -1, 1, 0],[ 1, 1, -1]],
      [[ 0, -1, -1],[ 1, 1, -1]],
      [[ 0, -1, -1],[ 1, 1, -1]],
      [[ 1, 0, 1],[ 1, 1, -1]],
      [[ 1, 0, 1],[ 1, 1, -1]],
      ],dtype=float),
    'cI' : _np.array([
      [[ -1, -1, 1],[ 0, 1, 1]],
      [[ -1, 1, -1],[ 0, 1, 1]],
      [[ -1, -1, 1],[ 0, 1, 1]],
      [[ -1, 1, -1],[ 0, 1, 1]],
      [[ -1, -1, 1],[ 0, 1, 1]],
      [[ -1, 1, -1],[ 0, 1, 1]],
      [[ -1, -1, 1],[ 0, 1, 1]],
      [[ -1, 1, -1],[ 0, 1, 1]],
      [[ -1, -1, 1],[ 0, 1, 1]],
      [[ -1, 1, -1],[ 0, 1, 1]],
      [[ -1, -1, 1],[ 0, 1, 1]],
      [[ -1, 1, -1],[ 0, 1, 1]],
      [[ -1, -1, 1],[ 0, 1, 1]],
      [[ -1, 1, -1],[ 0, 1, 1]],
      [[ -1, -1, 1],[ 0, 1, 1]],
      [[ -1, 1, -1],[ 0, 1, 1]],
      [[ -1, -1, 1],[ 0, 1, 1]],
      [[ -1, 1, -1],[ 0, 1, 1]],
      [[ -1, -1, 1],[ 0, 1, 1]],
      [[ -1, 1, -1],[ 0, 1, 1]],
      [[ -1, -1, 1],[ 0, 1, 1]],
      [[ -1, 1, -1],[ 0, 1, 1]],
      [[ -1, -1, 1],[ 0, 1, 1]],
      [[ -1, 1, -1],[ 0, 1, 1]],
      ],dtype=float),
    },
  # presumably the Greninger--Troiano relationship (name inferred from key)
  'GT': {
    'cF' : _np.array([
      [[ -5,-12, 17],[ 1, 1, 1]],
      [[ 17, -5,-12],[ 1, 1, 1]],
      [[-12, 17, -5],[ 1, 1, 1]],
      [[ 5, 12, 17],[ -1, -1, 1]],
      [[-17, 5,-12],[ -1, -1, 1]],
      [[ 12,-17, -5],[ -1, -1, 1]],
      [[ -5, 12,-17],[ -1, 1, 1]],
      [[ 17, 5, 12],[ -1, 1, 1]],
      [[-12,-17, 5],[ -1, 1, 1]],
      [[ 5,-12,-17],[ 1, -1, 1]],
      [[-17, -5, 12],[ 1, -1, 1]],
      [[ 12, 17, 5],[ 1, -1, 1]],
      [[ -5, 17,-12],[ 1, 1, 1]],
      [[-12, -5, 17],[ 1, 1, 1]],
      [[ 17,-12, -5],[ 1, 1, 1]],
      [[ 5,-17,-12],[ -1, -1, 1]],
      [[ 12, 5, 17],[ -1, -1, 1]],
      [[-17, 12, -5],[ -1, -1, 1]],
      [[ -5,-17, 12],[ -1, 1, 1]],
      [[-12, 5,-17],[ -1, 1, 1]],
      [[ 17, 12, 5],[ -1, 1, 1]],
      [[ 5, 17, 12],[ 1, -1, 1]],
      [[ 12, -5,-17],[ 1, -1, 1]],
      [[-17,-12, 5],[ 1, -1, 1]],
      ],dtype=float),
    'cI' : _np.array([
      [[-17, -7, 17],[ 1, 0, 1]],
      [[ 17,-17, -7],[ 1, 1, 0]],
      [[ -7, 17,-17],[ 0, 1, 1]],
      [[ 17, 7, 17],[ -1, 0, 1]],
      [[-17, 17, -7],[ -1, -1, 0]],
      [[ 7,-17,-17],[ 0, -1, 1]],
      [[-17, 7,-17],[ -1, 0, 1]],
      [[ 17, 17, 7],[ -1, 1, 0]],
      [[ -7,-17, 17],[ 0, 1, 1]],
      [[ 17, -7,-17],[ 1, 0, 1]],
      [[-17,-17, 7],[ 1, -1, 0]],
      [[ 7, 17, 17],[ 0, -1, 1]],
      [[-17, 17, -7],[ 1, 1, 0]],
      [[ -7,-17, 17],[ 0, 1, 1]],
      [[ 17, -7,-17],[ 1, 0, 1]],
      [[ 17,-17, -7],[ -1, -1, 0]],
      [[ 7, 17, 17],[ 0, -1, 1]],
      [[-17, 7,-17],[ -1, 0, 1]],
      [[-17,-17, 7],[ -1, 1, 0]],
      [[ -7, 17,-17],[ 0, 1, 1]],
      [[ 17, 7, 17],[ -1, 0, 1]],
      [[ 17, 17, 7],[ 1, -1, 0]],
      [[ 7,-17,-17],[ 0, -1, 1]],
      [[-17, -7, 17],[ 1, 0, 1]],
      ],dtype=float),
    },
  # presumably a variant of the GT relationship (name inferred from key)
  'GT_prime': {
    'cF' : _np.array([
      [[ 0, 1, -1],[ 7, 17, 17]],
      [[ -1, 0, 1],[ 17, 7, 17]],
      [[ 1, -1, 0],[ 17, 17, 7]],
      [[ 0, -1, -1],[ -7,-17, 17]],
      [[ 1, 0, 1],[-17, -7, 17]],
      [[ 1, -1, 0],[-17,-17, 7]],
      [[ 0, 1, -1],[ 7,-17,-17]],
      [[ 1, 0, 1],[ 17, -7,-17]],
      [[ -1, -1, 0],[ 17,-17, -7]],
      [[ 0, -1, -1],[ -7, 17,-17]],
      [[ -1, 0, 1],[-17, 7,-17]],
      [[ -1, -1, 0],[-17, 17, -7]],
      [[ 0, -1, 1],[ 7, 17, 17]],
      [[ 1, 0, -1],[ 17, 7, 17]],
      [[ -1, 1, 0],[ 17, 17, 7]],
      [[ 0, 1, 1],[ -7,-17, 17]],
      [[ -1, 0, -1],[-17, -7, 17]],
      [[ -1, 1, 0],[-17,-17, 7]],
      [[ 0, -1, 1],[ 7,-17,-17]],
      [[ -1, 0, -1],[ 17, -7,-17]],
      [[ 1, 1, 0],[ 17,-17, -7]],
      [[ 0, 1, 1],[ -7, 17,-17]],
      [[ 1, 0, -1],[-17, 7,-17]],
      [[ 1, 1, 0],[-17, 17, -7]],
      ],dtype=float),
    'cI' : _np.array([
      [[ 1, 1, -1],[ 12, 5, 17]],
      [[ -1, 1, 1],[ 17, 12, 5]],
      [[ 1, -1, 1],[ 5, 17, 12]],
      [[ -1, -1, -1],[-12, -5, 17]],
      [[ 1, -1, 1],[-17,-12, 5]],
      [[ 1, -1, -1],[ -5,-17, 12]],
      [[ -1, 1, -1],[ 12, -5,-17]],
      [[ 1, 1, 1],[ 17,-12, -5]],
      [[ -1, -1, 1],[ 5,-17,-12]],
      [[ 1, -1, -1],[-12, 5,-17]],
      [[ -1, -1, 1],[-17, 12, -5]],
      [[ -1, -1, -1],[ -5, 17,-12]],
      [[ 1, -1, 1],[ 12, 17, 5]],
      [[ 1, 1, -1],[ 5, 12, 17]],
      [[ -1, 1, 1],[ 17, 5, 12]],
      [[ -1, 1, 1],[-12,-17, 5]],
      [[ -1, -1, -1],[ -5,-12, 17]],
      [[ -1, 1, -1],[-17, -5, 12]],
      [[ -1, -1, 1],[ 12,-17, -5]],
      [[ -1, 1, -1],[ 5,-12,-17]],
      [[ 1, 1, 1],[ 17, -5,-12]],
      [[ 1, 1, 1],[-12, 17, -5]],
      [[ 1, -1, -1],[ -5, 12,-17]],
      [[ 1, 1, -1],[-17, 5,-12]],
      ],dtype=float),
    },
  # presumably the Nishiyama--Wassermann relationship (name inferred from key)
  'NW': {
    'cF' : _np.array([
      [[ 2, -1, -1],[ 1, 1, 1]],
      [[ -1, 2, -1],[ 1, 1, 1]],
      [[ -1, -1, 2],[ 1, 1, 1]],
      [[ -2, -1, -1],[ -1, 1, 1]],
      [[ 1, 2, -1],[ -1, 1, 1]],
      [[ 1, -1, 2],[ -1, 1, 1]],
      [[ 2, 1, -1],[ 1, -1, 1]],
      [[ -1, -2, -1],[ 1, -1, 1]],
      [[ -1, 1, 2],[ 1, -1, 1]],
      [[ 2, -1, 1],[ -1, -1, 1]],
      [[ -1, 2, 1],[ -1, -1, 1]],
      [[ -1, -1, -2],[ -1, -1, 1]],
      ],dtype=float),
    'cI' : _np.array([
      [[ 0, -1, 1],[ 0, 1, 1]],
      [[ 0, -1, 1],[ 0, 1, 1]],
      [[ 0, -1, 1],[ 0, 1, 1]],
      [[ 0, -1, 1],[ 0, 1, 1]],
      [[ 0, -1, 1],[ 0, 1, 1]],
      [[ 0, -1, 1],[ 0, 1, 1]],
      [[ 0, -1, 1],[ 0, 1, 1]],
      [[ 0, -1, 1],[ 0, 1, 1]],
      [[ 0, -1, 1],[ 0, 1, 1]],
      [[ 0, -1, 1],[ 0, 1, 1]],
      [[ 0, -1, 1],[ 0, 1, 1]],
      [[ 0, -1, 1],[ 0, 1, 1]],
      ],dtype=float),
    },
  # Pitsch relationship
  'Pitsch': {
    'cF' : _np.array([
      [[ 1, 0, 1],[ 0, 1, 0]],
      [[ 1, 1, 0],[ 0, 0, 1]],
      [[ 0, 1, 1],[ 1, 0, 0]],
      [[ 0, 1, -1],[ 1, 0, 0]],
      [[ -1, 0, 1],[ 0, 1, 0]],
      [[ 1, -1, 0],[ 0, 0, 1]],
      [[ 1, 0, -1],[ 0, 1, 0]],
      [[ -1, 1, 0],[ 0, 0, 1]],
      [[ 0, -1, 1],[ 1, 0, 0]],
      [[ 0, 1, 1],[ 1, 0, 0]],
      [[ 1, 0, 1],[ 0, 1, 0]],
      [[ 1, 1, 0],[ 0, 0, 1]],
      ],dtype=float),
    'cI' : _np.array([
      [[ 1, -1, 1],[ -1, 0, 1]],
      [[ 1, 1, -1],[ 1, -1, 0]],
      [[ -1, 1, 1],[ 0, 1, -1]],
      [[ -1, 1, -1],[ 0, -1, -1]],
      [[ -1, -1, 1],[ -1, 0, -1]],
      [[ 1, -1, -1],[ -1, -1, 0]],
      [[ 1, -1, -1],[ -1, 0, -1]],
      [[ -1, 1, -1],[ -1, -1, 0]],
      [[ -1, -1, 1],[ 0, -1, -1]],
      [[ -1, 1, 1],[ 0, -1, 1]],
      [[ 1, -1, 1],[ 1, 0, -1]],
      [[ 1, 1, -1],[ -1, 1, 0]],
      ],dtype=float),
    },
  # Bain relationship
  'Bain': {
    'cF' : _np.array([
      [[ 0, 1, 0],[ 1, 0, 0]],
      [[ 0, 0, 1],[ 0, 1, 0]],
      [[ 1, 0, 0],[ 0, 0, 1]],
      ],dtype=float),
    'cI' : _np.array([
      [[ 0, 1, 1],[ 1, 0, 0]],
      [[ 1, 0, 1],[ 0, 1, 0]],
      [[ 1, 1, 0],[ 0, 0, 1]],
      ],dtype=float),
    },
  # Burgers relationship (cI <-> hP); hP rows use four-index Miller--Bravais notation
  'Burgers' : {
    'cI' : _np.array([
      [[ -1, 1, 1],[ 1, 1, 0]],
      [[ -1, 1, -1],[ 1, 1, 0]],
      [[ 1, 1, 1],[ 1, -1, 0]],
      [[ 1, 1, -1],[ 1, -1, 0]],
      [[ 1, 1, -1],[ 1, 0, 1]],
      [[ -1, 1, 1],[ 1, 0, 1]],
      [[ 1, 1, 1],[ -1, 0, 1]],
      [[ 1, -1, 1],[ -1, 0, 1]],
      [[ -1, 1, -1],[ 0, 1, 1]],
      [[ 1, 1, -1],[ 0, 1, 1]],
      [[ -1, 1, 1],[ 0, -1, 1]],
      [[ 1, 1, 1],[ 0, -1, 1]],
      ],dtype=float),
    'hP' : _np.array([
      [[ -1, 2, -1, 0],[ 0, 0, 0, 1]],
      [[ -1, -1, 2, 0],[ 0, 0, 0, 1]],
      [[ -1, 2, -1, 0],[ 0, 0, 0, 1]],
      [[ -1, -1, 2, 0],[ 0, 0, 0, 1]],
      [[ -1, 2, -1, 0],[ 0, 0, 0, 1]],
      [[ -1, -1, 2, 0],[ 0, 0, 0, 1]],
      [[ -1, 2, -1, 0],[ 0, 0, 0, 1]],
      [[ -1, -1, 2, 0],[ 0, 0, 0, 1]],
      [[ -1, 2, -1, 0],[ 0, 0, 0, 1]],
      [[ -1, -1, 2, 0],[ 0, 0, 0, 1]],
      [[ -1, 2, -1, 0],[ 0, 0, 0, 1]],
      [[ -1, -1, 2, 0],[ 0, 0, 0, 1]],
      ],dtype=float),
    },
  }

View File

@ -1,307 +1,282 @@
"""Finite-strain continuum mechanics."""
from . import tensor as _tensor
from . import _rotation
import numpy as _np import numpy as _np
def Cauchy(P,F):
"""
Return Cauchy stress calculated from first Piola-Kirchhoff stress and deformation gradient.
Resulting tensor is symmetrized as the Cauchy stress needs to be symmetric. def deformation_Cauchy_Green_left(F):
"""
Calculate left Cauchy-Green deformation tensor (Finger deformation tensor).
Parameters Parameters
---------- ----------
F : numpy.ndarray of shape (:,3,3) or (3,3) F : numpy.ndarray of shape (...,3,3)
Deformation gradient. Deformation gradient.
P : numpy.ndarray of shape (:,3,3) or (3,3)
First Piola-Kirchhoff stress. Returns
-------
B : numpy.ndarray of shape (...,3,3)
Left Cauchy-Green deformation tensor.
""" """
if _np.shape(F) == _np.shape(P) == (3,3): return _np.matmul(F,_tensor.transpose(F))
sigma = 1.0/_np.linalg.det(F) * _np.dot(P,F.T)
else:
sigma = _np.einsum('i,ijk,ilk->ijl',1.0/_np.linalg.det(F),P,F)
return symmetric(sigma)
def deviatoric_part(T): def deformation_Cauchy_Green_right(F):
""" """
Return deviatoric part of a tensor. Calculate right Cauchy-Green deformation tensor.
Parameters Parameters
---------- ----------
T : numpy.ndarray of shape (:,3,3) or (3,3) F : numpy.ndarray of shape (...,3,3)
Tensor of which the deviatoric part is computed. Deformation gradient.
Returns
-------
C : numpy.ndarray of shape (...,3,3)
Right Cauchy-Green deformation tensor.
""" """
return T - _np.eye(3)*spherical_part(T) if _np.shape(T) == (3,3) else \ return _np.matmul(_tensor.transpose(F),F)
T - _np.einsum('ijk,i->ijk',_np.broadcast_to(_np.eye(3),[T.shape[0],3,3]),spherical_part(T))
def eigenvalues(T_sym): def equivalent_strain_Mises(epsilon):
""" """
Return the eigenvalues, i.e. principal components, of a symmetric tensor. Calculate the Mises equivalent of a strain tensor.
The eigenvalues are sorted in ascending order, each repeated according to
its multiplicity.
Parameters Parameters
---------- ----------
T_sym : numpy.ndarray of shape (:,3,3) or (3,3) epsilon : numpy.ndarray of shape (...,3,3)
Symmetric tensor of which the eigenvalues are computed. Symmetric strain tensor of which the von Mises equivalent is computed.
Returns
-------
epsilon_vM : numpy.ndarray of shape (...)
Von Mises equivalent strain of epsilon.
""" """
return _np.linalg.eigvalsh(symmetric(T_sym)) return _equivalent_Mises(epsilon,2.0/3.0)
def eigenvectors(T_sym,RHS=False): def equivalent_stress_Mises(sigma):
""" """
Return eigenvectors of a symmetric tensor. Calculate the Mises equivalent of a stress tensor.
The eigenvalues are sorted in ascending order of their associated eigenvalues.
Parameters Parameters
---------- ----------
T_sym : numpy.ndarray of shape (:,3,3) or (3,3) sigma : numpy.ndarray of shape (...,3,3)
Symmetric tensor of which the eigenvectors are computed. Symmetric stress tensor of which the von Mises equivalent is computed.
RHS: bool, optional
Enforce right-handed coordinate system. Default is False. Returns
-------
sigma_vM : numpy.ndarray of shape (...)
Von Mises equivalent stress of sigma.
""" """
(u,v) = _np.linalg.eigh(symmetric(T_sym)) return _equivalent_Mises(sigma,3.0/2.0)
if RHS:
if _np.shape(T_sym) == (3,3):
if _np.linalg.det(v) < 0.0: v[:,2] *= -1.0
else:
v[_np.linalg.det(v) < 0.0,:,2] *= -1.0
return v
def left_stretch(T):
"""
Return the left stretch of a tensor.
Parameters
----------
T : numpy.ndarray of shape (:,3,3) or (3,3)
Tensor of which the left stretch is computed.
"""
return _polar_decomposition(T,'V')[0]
def maximum_shear(T_sym): def maximum_shear(T_sym):
""" """
Return the maximum shear component of a symmetric tensor. Calculate the maximum shear component of a symmetric tensor.
Parameters Parameters
---------- ----------
T_sym : numpy.ndarray of shape (:,3,3) or (3,3) T_sym : numpy.ndarray of shape (...,3,3)
Symmetric tensor of which the maximum shear is computed. Symmetric tensor of which the maximum shear is computed.
Returns
-------
gamma_max : numpy.ndarray of shape (...)
Maximum shear of T_sym.
""" """
w = eigenvalues(T_sym) w = _tensor.eigenvalues(T_sym)
return (w[0] - w[2])*0.5 if _np.shape(T_sym) == (3,3) else \ return (w[...,0] - w[...,2])*0.5
(w[:,0] - w[:,2])*0.5
def Mises_strain(epsilon): def rotation(T):
""" """
Return the Mises equivalent of a strain tensor. Calculate the rotational part of a tensor.
Parameters Parameters
---------- ----------
epsilon : numpy.ndarray of shape (:,3,3) or (3,3) T : numpy.ndarray of shape (...,3,3)
Symmetric strain tensor of which the von Mises equivalent is computed.
"""
return _Mises(epsilon,2.0/3.0)
def Mises_stress(sigma):
"""
Return the Mises equivalent of a stress tensor.
Parameters
----------
sigma : numpy.ndarray of shape (:,3,3) or (3,3)
Symmetric stress tensor of which the von Mises equivalent is computed.
"""
return _Mises(sigma,3.0/2.0)
def PK2(P,F):
"""
Calculate second Piola-Kirchhoff stress from first Piola-Kirchhoff stress and deformation gradient.
Parameters
----------
P : numpy.ndarray of shape (...,3,3) or (3,3)
First Piola-Kirchhoff stress.
F : numpy.ndarray of shape (...,3,3) or (3,3)
Deformation gradient.
"""
if _np.shape(F) == _np.shape(P) == (3,3):
S = _np.dot(_np.linalg.inv(F),P)
else:
S = _np.einsum('...jk,...kl->...jl',_np.linalg.inv(F),P)
return symmetric(S)
def right_stretch(T):
"""
Return the right stretch of a tensor.
Parameters
----------
T : numpy.ndarray of shape (:,3,3) or (3,3)
Tensor of which the right stretch is computed.
"""
return _polar_decomposition(T,'U')[0]
def rotational_part(T):
"""
Return the rotational part of a tensor.
Parameters
----------
T : numpy.ndarray of shape (:,3,3) or (3,3)
Tensor of which the rotational part is computed. Tensor of which the rotational part is computed.
""" Returns
return _polar_decomposition(T,'R')[0] -------
R : damask.Rotation of shape (...)
Rotational part of the vector.
def spherical_part(T,tensor=False):
"""
Return spherical (hydrostatic) part of a tensor.
Parameters
----------
T : numpy.ndarray of shape (:,3,3) or (3,3)
Tensor of which the hydrostatic part is computed.
tensor : bool, optional
Map spherical part onto identity tensor. Default is false
""" """
if T.shape == (3,3): return _rotation.Rotation.from_matrix(_polar_decomposition(T,'R')[0])
sph = _np.trace(T)/3.0
return sph if not tensor else _np.eye(3)*sph
else:
sph = _np.trace(T,axis1=1,axis2=2)/3.0
if not tensor:
return sph
else:
return _np.einsum('ijk,i->ijk',_np.broadcast_to(_np.eye(3),(T.shape[0],3,3)),sph)
def strain_tensor(F,t,m): def strain(F,t,m):
""" """
Return strain tensor calculated from deformation gradient. Calculate strain tensor (SethHill family).
For details refer to https://en.wikipedia.org/wiki/Finite_strain_theory and For details refer to https://en.wikipedia.org/wiki/Finite_strain_theory and
https://de.wikipedia.org/wiki/Verzerrungstensor https://de.wikipedia.org/wiki/Verzerrungstensor
Parameters Parameters
---------- ----------
F : numpy.ndarray of shape (:,3,3) or (3,3) F : numpy.ndarray of shape (...,3,3)
Deformation gradient. Deformation gradient.
t : {V, U} t : {V, U}
Type of the polar decomposition, V for left stretch tensor and U for right stretch tensor. Type of the polar decomposition, V for left stretch tensor
and U for right stretch tensor.
m : float m : float
Order of the strain. Order of the strain.
Returns
-------
epsilon : numpy.ndarray of shape (...,3,3)
Strain of F.
""" """
F_ = F.reshape(1,3,3) if F.shape == (3,3) else F
if t == 'V': if t == 'V':
B = _np.matmul(F_,transpose(F_)) w,n = _np.linalg.eigh(deformation_Cauchy_Green_left(F))
w,n = _np.linalg.eigh(B)
elif t == 'U': elif t == 'U':
C = _np.matmul(transpose(F_),F_) w,n = _np.linalg.eigh(deformation_Cauchy_Green_right(F))
w,n = _np.linalg.eigh(C)
if m > 0.0: if m > 0.0:
eps = 1.0/(2.0*abs(m)) * (+ _np.matmul(n,_np.einsum('ij,ikj->ijk',w**m,n)) eps = 1.0/(2.0*abs(m)) * (+ _np.einsum('...j,...kj,...lj',w**m,n,n) - _np.eye(3))
- _np.broadcast_to(_np.eye(3),[F_.shape[0],3,3]))
elif m < 0.0: elif m < 0.0:
eps = 1.0/(2.0*abs(m)) * (- _np.matmul(n,_np.einsum('ij,ikj->ijk',w**m,n)) eps = 1.0/(2.0*abs(m)) * (- _np.einsum('...j,...kj,...lj',w**m,n,n) + _np.eye(3))
+ _np.broadcast_to(_np.eye(3),[F_.shape[0],3,3]))
else: else:
eps = _np.matmul(n,_np.einsum('ij,ikj->ijk',0.5*_np.log(w),n)) eps = _np.einsum('...j,...kj,...lj',0.5*_np.log(w),n,n)
return eps.reshape(3,3) if _np.shape(F) == (3,3) else \ return eps
eps
def symmetric(T):
def stress_Cauchy(P,F):
""" """
Return the symmetrized tensor. Calculate the Cauchy stress (true stress).
Resulting tensor is symmetrized as the Cauchy stress needs to be symmetric.
Parameters Parameters
---------- ----------
T : numpy.ndarray of shape (...,3,3) or (3,3) P : numpy.ndarray of shape (...,3,3)
Tensor of which the symmetrized values are computed. First Piola-Kirchhoff stress.
F : numpy.ndarray of shape (...,3,3)
Deformation gradient.
Returns
-------
sigma : numpy.ndarray of shape (...,3,3)
Cauchy stress.
""" """
return (T+transpose(T))*0.5 return _tensor.symmetric(_np.einsum('...,...ij,...kj',1.0/_np.linalg.det(F),P,F))
def transpose(T): def stress_second_Piola_Kirchhoff(P,F):
""" """
Return the transpose of a tensor. Calculate the second Piola-Kirchhoff stress.
Resulting tensor is symmetrized as the second Piola-Kirchhoff stress
needs to be symmetric.
Parameters Parameters
---------- ----------
T : numpy.ndarray of shape (...,3,3) or (3,3) P : numpy.ndarray of shape (...,3,3)
Tensor of which the transpose is computed. First Piola-Kirchhoff stress.
F : numpy.ndarray of shape (...,3,3)
Deformation gradient.
Returns
-------
S : numpy.ndarray of shape (...,3,3)
Second Piola-Kirchhoff stress.
""" """
return T.T if _np.shape(T) == (3,3) else \ return _tensor.symmetric(_np.einsum('...ij,...jk',_np.linalg.inv(F),P))
_np.swapaxes(T,axis2=-2,axis1=-1)
def stretch_left(T):
"""
Calculate left stretch of a tensor.
Parameters
----------
T : numpy.ndarray of shape (...,3,3)
Tensor of which the left stretch is computed.
Returns
-------
V : numpy.ndarray of shape (...,3,3)
Left stretch tensor from Polar decomposition of T.
"""
return _polar_decomposition(T,'V')[0]
def stretch_right(T):
"""
Calculate right stretch of a tensor.
Parameters
----------
T : numpy.ndarray of shape (...,3,3)
Tensor of which the right stretch is computed.
Returns
-------
U : numpy.ndarray of shape (...,3,3)
Left stretch tensor from Polar decomposition of T.
"""
return _polar_decomposition(T,'U')[0]
def _polar_decomposition(T,requested): def _polar_decomposition(T,requested):
""" """
Singular value decomposition. Perform singular value decomposition.
Parameters Parameters
---------- ----------
T : numpy.ndarray of shape (:,3,3) or (3,3) T : numpy.ndarray of shape (...,3,3)
Tensor of which the singular values are computed. Tensor of which the singular values are computed.
requested : iterable of str requested : iterable of str
Requested outputs: R for the rotation tensor, Requested outputs: R for the rotation tensor,
V for left stretch tensor and U for right stretch tensor. V for left stretch tensor and U for right stretch tensor.
""" """
u, s, vh = _np.linalg.svd(T) u, _, vh = _np.linalg.svd(T)
R = _np.dot(u,vh) if _np.shape(T) == (3,3) else \ R = _np.einsum('...ij,...jk',u,vh)
_np.einsum('ijk,ikl->ijl',u,vh)
output = [] output = []
if 'R' in requested: if 'R' in requested:
output.append(R) output.append(R)
if 'V' in requested: if 'V' in requested:
output.append(_np.dot(T,R.T) if _np.shape(T) == (3,3) else _np.einsum('ijk,ilk->ijl',T,R)) output.append(_np.einsum('...ij,...kj',T,R))
if 'U' in requested: if 'U' in requested:
output.append(_np.dot(R.T,T) if _np.shape(T) == (3,3) else _np.einsum('ikj,ikl->ijl',R,T)) output.append(_np.einsum('...ji,...jk',R,T))
if len(output) == 0:
raise ValueError('output needs to be out of V, R, U')
return tuple(output) return tuple(output)
def _Mises(T_sym,s): def _equivalent_Mises(T_sym,s):
""" """
Base equation for Mises equivalent of a stres or strain tensor. Base equation for Mises equivalent of a stress or strain tensor.
Parameters Parameters
---------- ----------
T_sym : numpy.ndarray of shape (:,3,3) or (3,3) T_sym : numpy.ndarray of shape (...,3,3)
Symmetric tensor of which the von Mises equivalent is computed. Symmetric tensor of which the von Mises equivalent is computed.
s : float s : float
Scaling factor (2/3 for strain, 3/2 for stress). Scaling factor (2/3 for strain, 3/2 for stress).
""" """
d = deviatoric_part(T_sym) d = _tensor.deviatoric(T_sym)
return _np.sqrt(s*(_np.sum(d**2.0))) if _np.shape(T_sym) == (3,3) else \ return _np.sqrt(s*_np.sum(d**2.0,axis=(-1,-2)))
_np.sqrt(s*_np.einsum('ijk->i',d**2.0))

View File

@ -1,3 +1,5 @@
"""Functionality for generation of seed points for Voronoi or Laguerre tessellation."""
from scipy import spatial as _spatial from scipy import spatial as _spatial
import numpy as _np import numpy as _np
@ -5,7 +7,7 @@ from . import util
from . import grid_filters from . import grid_filters
def from_random(size,N_seeds,grid=None,seed=None): def from_random(size,N_seeds,grid=None,rng_seed=None):
""" """
Random seeding in space. Random seeding in space.
@ -18,12 +20,12 @@ def from_random(size,N_seeds,grid=None,seed=None):
grid : numpy.ndarray of shape (3), optional. grid : numpy.ndarray of shape (3), optional.
If given, ensures that all seeds initiate one grain if using a If given, ensures that all seeds initiate one grain if using a
standard Voronoi tessellation. standard Voronoi tessellation.
seed : {None, int, array_like[ints], SeedSequence, BitGenerator, Generator}, optional rng_seed : {None, int, array_like[ints], SeedSequence, BitGenerator, Generator}, optional
A seed to initialize the BitGenerator. Defaults to None. A seed to initialize the BitGenerator. Defaults to None.
If None, then fresh, unpredictable entropy will be pulled from the OS. If None, then fresh, unpredictable entropy will be pulled from the OS.
""" """
rng = _np.random.default_rng(seed) rng = _np.random.default_rng(rng_seed)
if grid is None: if grid is None:
coords = rng.random((N_seeds,3)) * size coords = rng.random((N_seeds,3)) * size
else: else:
@ -34,7 +36,7 @@ def from_random(size,N_seeds,grid=None,seed=None):
return coords return coords
def from_Poisson_disc(size,N_seeds,N_candidates,distance,periodic=True,seed=None): def from_Poisson_disc(size,N_seeds,N_candidates,distance,periodic=True,rng_seed=None):
""" """
Seeding in space according to a Poisson disc distribution. Seeding in space according to a Poisson disc distribution.
@ -50,12 +52,12 @@ def from_Poisson_disc(size,N_seeds,N_candidates,distance,periodic=True,seed=None
Minimum acceptable distance to other seeds. Minimum acceptable distance to other seeds.
periodic : boolean, optional periodic : boolean, optional
Calculate minimum distance for periodically repeated grid. Calculate minimum distance for periodically repeated grid.
seed : {None, int, array_like[ints], SeedSequence, BitGenerator, Generator}, optional rng_seed : {None, int, array_like[ints], SeedSequence, BitGenerator, Generator}, optional
A seed to initialize the BitGenerator. Defaults to None. A seed to initialize the BitGenerator. Defaults to None.
If None, then fresh, unpredictable entropy will be pulled from the OS. If None, then fresh, unpredictable entropy will be pulled from the OS.
""" """
rng = _np.random.default_rng(seed) rng = _np.random.default_rng(rng_seed)
coords = _np.empty((N_seeds,3)) coords = _np.empty((N_seeds,3))
coords[0] = rng.random(3) * size coords[0] = rng.random(3) * size

View File

@ -1,6 +1,7 @@
import subprocess import subprocess
import shlex import shlex
import string import re
import io
from pathlib import Path from pathlib import Path
from .. import environment from .. import environment
@ -8,7 +9,7 @@ from .. import environment
class Marc: class Marc:
"""Wrapper to run DAMASK with MSCMarc.""" """Wrapper to run DAMASK with MSCMarc."""
def __init__(self,version=environment.options['MARC_VERSION']): def __init__(self,version=environment.options['MSC_VERSION']):
""" """
Create a Marc solver object. Create a Marc solver object.
@ -27,7 +28,10 @@ class Marc:
path_MSC = environment.options['MSC_ROOT'] path_MSC = environment.options['MSC_ROOT']
path_lib = Path(f'{path_MSC}/mentat{self.version}/shlib/linux64') path_lib = Path(f'{path_MSC}/mentat{self.version}/shlib/linux64')
return path_lib if path_lib.is_dir() else None if not path_lib.is_dir():
raise FileNotFoundError(f'library path "{path_lib}" not found')
return path_lib
@property @property
@ -36,10 +40,12 @@ class Marc:
path_MSC = environment.options['MSC_ROOT'] path_MSC = environment.options['MSC_ROOT']
path_tools = Path(f'{path_MSC}/marc{self.version}/tools') path_tools = Path(f'{path_MSC}/marc{self.version}/tools')
return path_tools if path_tools.is_dir() else None if not path_tools.is_dir():
raise FileNotFoundError(f'tools path "{path_tools}" not found')
return path_tools
#--------------------------
def submit_job(self, def submit_job(self,
model, model,
job = 'job1', job = 'job1',
@ -48,38 +54,37 @@ class Marc:
optimization = '', optimization = '',
): ):
usersub = environment.root_dir/'src/DAMASK_marc' usersub = environment.root_dir/'src/DAMASK_marc'
usersub = usersub.parent/(usersub.name + ('.f90' if compile else '.marc')) usersub = usersub.parent/(usersub.name + ('.f90' if compile else '.marc'))
if not usersub.is_file(): if not usersub.is_file():
raise FileNotFoundError("DAMASK4Marc ({}) '{}' not found".format(('source' if compile else 'binary'),usersub)) raise FileNotFoundError(f'subroutine ({"source" if compile else "binary"}) "{usersub}" not found')
# Define options [see Marc Installation and Operation Guide, pp 23] # Define options [see Marc Installation and Operation Guide, pp 23]
script = f'run_damask_{optimization}mp' script = f'run_damask_{optimization}mp'
cmd = str(self.tools_path/Path(script)) + \ cmd = str(self.tools_path/script) + \
' -jid ' + model + '_' + job + \ ' -jid ' + model+'_'+job + \
' -nprocd 1 -autorst 0 -ci n -cr n -dcoup 0 -b no -v no' ' -nprocd 1 -autorst 0 -ci n -cr n -dcoup 0 -b no -v no'
cmd += ' -u ' + str(usersub) + ' -save y' if compile else \
if compile: cmd += ' -u ' + str(usersub) + ' -save y' ' -prog ' + str(usersub.with_suffix(''))
else: cmd += ' -prog ' + str(usersub.with_suffix(''))
print('job submission {} compilation: {}'.format(('with' if compile else 'without'),usersub))
if logfile: log = open(logfile, 'w')
print(cmd) print(cmd)
process = subprocess.Popen(shlex.split(cmd),stdout = log,stderr = subprocess.STDOUT)
log.close()
process.wait()
#-------------------------- if logfile is not None:
def exit_number_from_outFile(self,outFile=None): try:
exitnumber = -1 f = open(logfile,'w+')
with open(outFile,'r') as fid_out: except TypeError:
for line in fid_out: f = logfile
if (string.find(line,'tress iteration') != -1): else:
print(line) f = io.StringIO()
elif (string.find(line,'Exit number') != -1):
substr = line[string.find(line,'Exit number'):len(line)]
exitnumber = int(substr[12:16])
return exitnumber proc = subprocess.Popen(shlex.split(cmd),stdout=f,stderr=subprocess.STDOUT)
proc.wait()
f.seek(0)
try:
v = int(re.search('Exit number ([0-9]+)',''.join(f.readlines())).group(1))
except (AttributeError,ValueError):
raise RuntimeError('Marc simulation failed (unknown return value)')
if v != 3004:
raise RuntimeError(f'Marc simulation failed ({v})')

131
python/damask/tensor.py Normal file
View File

@ -0,0 +1,131 @@
"""
Tensor operations.
Notes
-----
This is not a tensor class, but a collection of routines
to operate on numpy.ndarrays of shape (...,3,3).
"""
import numpy as _np
def deviatoric(T):
    """
    Calculate deviatoric part of a tensor.

    Parameters
    ----------
    T : numpy.ndarray of shape (...,3,3)
        Tensor of which the deviatoric part is computed.

    Returns
    -------
    T' : numpy.ndarray of shape (...,3,3)
        Deviatoric part of T.

    """
    # hydrostatic value = mean of the diagonal entries
    hydrostatic = _np.trace(T,axis1=-2,axis2=-1)/3.0
    # subtract the isotropic part (hydrostatic value mapped onto the identity)
    return T - hydrostatic[...,_np.newaxis,_np.newaxis]*_np.eye(3)
def eigenvalues(T_sym):
    """
    Eigenvalues, i.e. principal components, of a symmetric tensor.

    Parameters
    ----------
    T_sym : numpy.ndarray of shape (...,3,3)
        Symmetric tensor of which the eigenvalues are computed.

    Returns
    -------
    lambda : numpy.ndarray of shape (...,3)
        Eigenvalues of T_sym sorted in ascending order, each repeated
        according to its multiplicity.

    """
    # symmetrize explicitly so that numerical asymmetry cannot leak in
    sym = (T_sym + _np.swapaxes(T_sym,-2,-1))*0.5
    return _np.linalg.eigvalsh(sym)
def eigenvectors(T_sym,RHS=False):
    """
    Eigenvectors of a symmetric tensor.

    Parameters
    ----------
    T_sym : numpy.ndarray of shape (...,3,3)
        Symmetric tensor of which the eigenvectors are computed.
    RHS: bool, optional
        Enforce right-handed coordinate system. Defaults to False.

    Returns
    -------
    x : numpy.ndarray of shape (...,3,3)
        Eigenvectors of T_sym sorted in ascending order of their
        associated eigenvalues.

    """
    sym = (T_sym + _np.swapaxes(T_sym,-2,-1))*0.5
    _,vecs = _np.linalg.eigh(sym)
    if RHS:
        # flip the last eigenvector wherever the basis is left-handed
        vecs[_np.linalg.det(vecs) < 0.0,:,2] *= -1.0
    return vecs
def spherical(T,tensor=True):
"""
Calculate spherical part of a tensor.
Parameters
----------
T : numpy.ndarray of shape (...,3,3)
Tensor of which the spherical part is computed.
tensor : bool, optional
Map spherical part onto identity tensor. Defaults to True.
Returns
-------
p : numpy.ndarray of shape (...,3,3)
unless tensor == False: shape (...,)
Spherical part of tensor T. p is an isotropic tensor.
"""
sph = _np.trace(T,axis2=-2,axis1=-1)/3.0
return _np.einsum('...jk,...',_np.eye(3),sph) if tensor else sph
def symmetric(T):
    """
    Symmetrize tensor.

    Parameters
    ----------
    T : numpy.ndarray of shape (...,3,3)
        Tensor of which the symmetrized values are computed.

    Returns
    -------
    T_sym : numpy.ndarray of shape (...,3,3)
        Symmetrized tensor T.

    """
    # average the tensor with its transpose over the last two axes
    return 0.5*(T + _np.swapaxes(T,-2,-1))
def transpose(T):
"""
Transpose tensor.
Parameters
----------
T : numpy.ndarray of shape (...,3,3)
Tensor of which the transpose is computed.
Returns
-------
T.T : numpy.ndarray of shape (...,3,3)
Transpose of tensor T.
"""
return _np.swapaxes(T,axis2=-2,axis1=-1)

View File

@ -3,6 +3,7 @@ import datetime
import os import os
import subprocess import subprocess
import shlex import shlex
import re
import fractions import fractions
from functools import reduce from functools import reduce
from optparse import Option from optparse import Option
@ -16,14 +17,17 @@ __all__=[
'srepr', 'srepr',
'croak', 'croak',
'report', 'report',
'emph','deemph','delete','strikeout', 'emph','deemph','warn','strikeout',
'execute', 'execute',
'show_progress', 'show_progress',
'scale_to_coprime', 'scale_to_coprime',
'project_stereographic',
'hybrid_IA', 'hybrid_IA',
'return_message', 'return_message',
'extendableOption', 'extendableOption',
'execution_stamp' 'execution_stamp',
'shapeshifter', 'shapeblender',
'extend_docstring', 'extended_docstring'
] ]
#################################################################################################### ####################################################################################################
@ -38,7 +42,7 @@ def srepr(arg,glue = '\n'):
arg : iterable arg : iterable
Items to join. Items to join.
glue : str, optional glue : str, optional
Defaults to \n. Glue used for joining operation. Defaults to \n.
""" """
if (not hasattr(arg, "strip") and if (not hasattr(arg, "strip") and
@ -52,6 +56,8 @@ def croak(what, newline = True):
""" """
Write formated to stderr. Write formated to stderr.
DEPRECATED
Parameters Parameters
---------- ----------
what : str or iterable what : str or iterable
@ -68,7 +74,7 @@ def croak(what, newline = True):
def report(who = None, def report(who = None,
what = None): what = None):
""" """
Reports script and file name. Report script and file name.
DEPRECATED DEPRECATED
@ -80,16 +86,13 @@ def emph(what):
"""Formats string with emphasis.""" """Formats string with emphasis."""
return bcolors.BOLD+srepr(what)+bcolors.ENDC return bcolors.BOLD+srepr(what)+bcolors.ENDC
def deemph(what): def deemph(what):
"""Formats string with deemphasis.""" """Formats string with deemphasis."""
return bcolors.DIM+srepr(what)+bcolors.ENDC return bcolors.DIM+srepr(what)+bcolors.ENDC
def warn(what):
def delete(what): """Formats string for warning."""
"""Formats string as deleted.""" return bcolors.WARNING+emph(what)+bcolors.ENDC
return bcolors.DIM+srepr(what)+bcolors.ENDC
def strikeout(what): def strikeout(what):
"""Formats string as strikeout.""" """Formats string as strikeout."""
@ -160,7 +163,15 @@ def show_progress(iterable,N_iter=None,prefix='',bar_length=50):
def scale_to_coprime(v): def scale_to_coprime(v):
"""Scale vector to co-prime (relatively prime) integers.""" """
Scale vector to co-prime (relatively prime) integers.
Parameters
----------
v : numpy.ndarray of shape (:)
Vector to scale.
"""
MAX_DENOMINATOR = 1000000 MAX_DENOMINATOR = 1000000
def get_square_denominator(x): def get_square_denominator(x):
@ -169,6 +180,7 @@ def scale_to_coprime(v):
def lcm(a, b): def lcm(a, b):
"""Least common multiple.""" """Least common multiple."""
# Python 3.9 provides math.lcm, see https://stackoverflow.com/questions/51716916.
return a * b // np.gcd(a, b) return a * b // np.gcd(a, b)
m = (np.array(v) * reduce(lcm, map(lambda x: int(get_square_denominator(x)),v)) ** 0.5).astype(np.int) m = (np.array(v) * reduce(lcm, map(lambda x: int(get_square_denominator(x)),v)) ** 0.5).astype(np.int)
@ -181,6 +193,28 @@ def scale_to_coprime(v):
return m return m
def project_stereographic(vector,normalize=False):
"""
Apply stereographic projection to vector.
Parameters
----------
vector : numpy.ndarray of shape (...,3)
Vector coordinates to be projected.
normalize : bool
Ensure unit length for vector. Defaults to False.
Returns
-------
coordinates : numpy.ndarray of shape (...,2)
Projected coordinates.
"""
v_ = vector/np.linalg.norm(vector,axis=-1,keepdims=True) if normalize else vector
return np.block([v_[...,:2]/(1+np.abs(v_[...,2:3])),
np.zeros_like(v_[...,2:3])])
def execution_stamp(class_name,function_name=None): def execution_stamp(class_name,function_name=None):
"""Timestamp the execution of a (function within a) class.""" """Timestamp the execution of a (function within a) class."""
now = datetime.datetime.now().astimezone().strftime('%Y-%m-%d %H:%M:%S%z') now = datetime.datetime.now().astimezone().strftime('%Y-%m-%d %H:%M:%S%z')
@ -188,7 +222,21 @@ def execution_stamp(class_name,function_name=None):
return f'damask.{class_name}{_function_name} v{version} ({now})' return f'damask.{class_name}{_function_name} v{version} ({now})'
def hybrid_IA(dist,N,seed=None): def hybrid_IA(dist,N,rng_seed=None):
"""
Hybrid integer approximation.
Parameters
----------
dist : numpy.ndarray
Distribution to be approximated
N : int
Number of samples to draw.
rng_seed : {None, int, array_like[ints], SeedSequence, BitGenerator, Generator}, optional
A seed to initialize the BitGenerator. Defaults to None.
If None, then fresh, unpredictable entropy will be pulled from the OS.
"""
N_opt_samples,N_inv_samples = (max(np.count_nonzero(dist),N),0) # random subsampling if too little samples requested N_opt_samples,N_inv_samples = (max(np.count_nonzero(dist),N),0) # random subsampling if too little samples requested
scale_,scale,inc_factor = (0.0,float(N_opt_samples),1.0) scale_,scale,inc_factor = (0.0,float(N_opt_samples),1.0)
@ -199,7 +247,112 @@ def hybrid_IA(dist,N,seed=None):
if N_inv_samples < N_opt_samples else \ if N_inv_samples < N_opt_samples else \
(scale_,0.5*(scale_ + scale), 1.0) (scale_,0.5*(scale_ + scale), 1.0)
return np.repeat(np.arange(len(dist)),repeats)[np.random.default_rng(seed).permutation(N_inv_samples)[:N]] return np.repeat(np.arange(len(dist)),repeats)[np.random.default_rng(rng_seed).permutation(N_inv_samples)[:N]]
def shapeshifter(fro,to,mode='left',keep_ones=False):
"""
Return a tuple that reshapes 'fro' to become broadcastable to 'to'.
Parameters
----------
fro : tuple
Original shape of array.
to : tuple
Target shape of array after broadcasting.
len(to) cannot be less than len(fro).
mode : str, optional
Indicates whether new axes are preferably added to
either 'left' or 'right' of the original shape.
Defaults to 'left'.
keep_ones : bool, optional
Treat '1' in fro as literal value instead of dimensional placeholder.
Defaults to False.
"""
beg = dict(left ='(^.*\\b)',
right='(^.*?\\b)')
sep = dict(left ='(.*\\b)',
right='(.*?\\b)')
end = dict(left ='(.*?$)',
right='(.*$)')
fro = (1,) if not len(fro) else fro
to = (1,) if not len(to) else to
try:
grp = re.match(beg[mode]
+f',{sep[mode]}'.join(map(lambda x: f'{x}'
if x>1 or (keep_ones and len(fro)>1) else
'\\d+',fro))
+f',{end[mode]}',
','.join(map(str,to))+',').groups()
except AttributeError:
raise ValueError(f'Shapes can not be shifted {fro} --> {to}')
fill = ()
for g,d in zip(grp,fro+(None,)):
fill += (1,)*g.count(',')+(d,)
return fill[:-1]
def shapeblender(a,b):
"""
Return a shape that overlaps the rightmost entries of 'a' with the leftmost of 'b'.
Parameters
----------
a : tuple
Shape of first array.
b : tuple
Shape of second array.
Examples
--------
>>> shapeblender((4,4,3),(3,2,1))
(4,4,3,2,1)
>>> shapeblender((1,2),(1,2,3))
(1,2,3)
>>> shapeblender((1,),(2,2,1))
(1,2,2,1)
>>> shapeblender((3,2),(3,2))
(3,2)
"""
i = min(len(a),len(b))
while i > 0 and a[-i:] != b[:i]: i -= 1
return a + b[i:]
def extend_docstring(extra_docstring):
"""
Decorator: Append to function's docstring.
Parameters
----------
extra_docstring : str
Docstring to append.
"""
def _decorator(func):
func.__doc__ += extra_docstring
return func
return _decorator
def extended_docstring(f,extra_docstring):
"""
Decorator: Combine another function's docstring with a given docstring.
Parameters
----------
f : function
Function of which the docstring is taken.
extra_docstring : str
Docstring to append.
"""
def _decorator(func):
func.__doc__ = f.__doc__ + extra_docstring
return func
return _decorator
#################################################################################################### ####################################################################################################
@ -295,17 +448,6 @@ class bcolors:
UNDERLINE = '\033[4m' UNDERLINE = '\033[4m'
CROSSOUT = '\033[9m' CROSSOUT = '\033[9m'
def disable(self):
self.HEADER = ''
self.OKBLUE = ''
self.OKGREEN = ''
self.WARNING = ''
self.FAIL = ''
self.ENDC = ''
self.BOLD = ''
self.UNDERLINE = ''
self.CROSSOUT = ''
class return_message: class return_message:
"""Object with formatted return message.""" """Object with formatted return message."""

View File

@ -1,8 +1,13 @@
from pathlib import Path from pathlib import Path
import datetime import datetime
import os
import numpy as np import numpy as np
import pytest import pytest
import matplotlib as mpl
if os.name == 'posix' and 'DISPLAY' not in os.environ:
mpl.use('Agg')
import matplotlib.pyplot as plt
import damask import damask
@ -25,8 +30,9 @@ def patch_datetime_now(monkeypatch):
monkeypatch.setattr(datetime, 'datetime', mydatetime) monkeypatch.setattr(datetime, 'datetime', mydatetime)
@pytest.fixture @pytest.fixture
def execution_stamp(monkeypatch): def patch_execution_stamp(monkeypatch):
"""Set damask.util.execution_stamp for reproducible tests results.""" """Set damask.util.execution_stamp for reproducible tests results."""
def execution_stamp(class_name,function_name=None): def execution_stamp(class_name,function_name=None):
_function_name = '' if function_name is None else f'.{function_name}' _function_name = '' if function_name is None else f'.{function_name}'
@ -35,21 +41,31 @@ def execution_stamp(monkeypatch):
monkeypatch.setattr(damask.util, 'execution_stamp', execution_stamp) monkeypatch.setattr(damask.util, 'execution_stamp', execution_stamp)
@pytest.fixture
def patch_plt_show(monkeypatch):
def _None(block=None):
pass
monkeypatch.setattr(plt, 'show', _None, raising=True)
def pytest_addoption(parser): def pytest_addoption(parser):
parser.addoption("--update", parser.addoption("--update",
action="store_true", action="store_true",
default=False) default=False)
@pytest.fixture @pytest.fixture
def update(request): def update(request):
"""Store current results as new reference results.""" """Store current results as new reference results."""
return request.config.getoption("--update") return request.config.getoption("--update")
@pytest.fixture @pytest.fixture
def reference_dir_base(): def ref_path_base():
"""Directory containing reference results.""" """Directory containing reference results."""
return Path(__file__).parent/'reference' return Path(__file__).parent/'reference'
@pytest.fixture @pytest.fixture
def set_of_quaternions(): def set_of_quaternions():
"""A set of n random rotations.""" """A set of n random rotations."""
@ -66,7 +82,7 @@ def set_of_quaternions():
return qu return qu
n = 1100 n = 600
scatter=1.e-2 scatter=1.e-2
specials = np.array([ specials = np.array([
[1.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0],

View File

@ -33,12 +33,12 @@ material:
phase: phase:
Aluminum: Aluminum:
elasticity: {C_11: 106.75e9, C_12: 60.41e9, C_44: 28.34e9, type: hooke} lattice: cF
generic: mech:
output: [F, P, Fe, Fp, Lp] output: [F, P, F_e, F_p, L_p]
lattice: fcc elasticity: {C_11: 106.75e9, C_12: 60.41e9, C_44: 28.34e9, type: hooke}
Steel: Steel:
elasticity: {C_11: 233.3e9, C_12: 135.5e9, C_44: 118.0e9, type: hooke} lattice: cI
generic: mech:
output: [F, P, Fe, Fp, Lp] output: [F, P, F_e, F_p, L_p]
lattice: bcc elasticity: {C_11: 233.3e9, C_12: 135.5e9, C_44: 118.0e9, type: hooke}

View File

@ -1,30 +0,0 @@
<?xml version="1.0"?>
<VTKFile type="RectilinearGrid" version="0.1" byte_order="LittleEndian" header_type="UInt32" compressor="vtkZLibDataCompressor">
<RectilinearGrid WholeExtent="0 12 0 15 0 20">
<FieldData>
<Array type="String" Name="comments" NumberOfTuples="1" format="binary">
AQAAAACAAABdAAAAYAAAAA==eF4NwlsKgCAQAMCO4mcRK6tmrwt0g35jKXuQtqHR+WuYhQKlUw6Og1wjh2nkyBcf0+NSct7Tc/AlXiNRIpC/dwJra9hW03Tk5qUVuUaNoBRoKxT2lf6XqBCL7APiGxu3
</Array>
</FieldData>
<Piece Extent="0 12 0 15 0 20">
<PointData>
</PointData>
<CellData>
<DataArray type="Int64" Name="material" format="binary" RangeMin="0" RangeMax="3">
AQAAAACAAACAcAAAFwQAAA==eF7t2sFuYzkQQ9Gk+///uTddm0KIy6JUdmCbG2KAV0e2n6QBJvP19XO+//cf0X9FU77NnroVcnu7ISf1aZ663ocKzStPdYWcqdv31S3P3bc0R63cV/Er5KRuhbxTv0Lus32Kex6moXNX7X7OHnJ7uyEn9WnebRWaU03vm+anbt+v5Lieex623O37s4ec3+5XyEvdCrnKn8b1p6FzkYbc03XIS31yUp/m3VahObd76HlqtV9pbuqp80De1KXznHrTe4gc16eQp3w35PVOQ+70c/eQn4b2bRpyT9chL/XJSX2ad1uF5tzuoefd7vuVnndbnQeam3rqPPffLfXS80xO6lbIO/UrW25l21fv+zS0X9OQe7oOealPTurTvNsqNOd2Dz0/7Qo9N+1+Luh5avc8T7+X67oh79SvbLmVbb+y5dJ+T0P7KQ25p+uQl/rkpD7Nu61Cc2730PPTrtBzad+6n7unznP6vW7dQ+Slbs+2X9lyK1s+7fc0tI/SkHu6DnmpT07q07zbKjTndg89P+0KPfdbu+f0e53eE8rbuoe23Mq2T+8zjbtfpqH9k4bc03XIS31yUp/m3VahObd76PlpV+i5tKd/F5y2+ntg/+dpp3+f2nJVtu/P7fTf51bU738aer9pyD1dh7zUJyf1ad5tFZpzu4een3aFnkt7635W/1/F6fdy/38NlS1Xpe7lR/u30n+fW1Hv4TS0f9KQe7oOealPTurTvNsqNOd2Dz0/7Wfdn2krV90f5FXTPefeT8p1fTdbbg/5p6H3mob2TxraR6lPbu9pyEt9clKf5t1WoTm3e+h5t7fuT/LTdeiemK6j/rsweXT+6O925KY+ea5fSX03U38adz9Ok+53Cu1/tY4bcnu7ISf1ad5tFZpzu78Hep6azgPNk0u+u96te6ivc+v+7E1u6vfc8lVOXfIr276731P3tk/ni9xTn9ZRIY/WUaF5t1VoTjW9b5pXXnoepu7UrybvUT45r+pXbvs9W27llv/oz0/nmLxT312nh7zUJ8e9l04/9+l52HK7T17qV5P3Kj45Hz/rym33UT6dX5qnz033EDmpm65DjnsvpZ+bXPe9TL2pX01O6n58r8lL3XfxaZ5626d1aI6a7iGan7qn/55xPWp3HXJSn+apn+2n65D3rj45Hz9rcrd9mqeme47mXffWOq7ndvq7U9P3oHnqZ/vpOuR9/FmTl7of32u6f2hedYXuO3Km7uk6rue2+7nJmb4Xmqd+dT9dh7yPP2vyUnfb77nl97j3BLmu35u81J3e0xVy6Hfq3XPLpe+TeuRO9/27+uk65L2rT86jfJVTX8W9J2777j2tQh6to0Lz7u+lQs70Xq3vc8tz3/uWOz1Xv9VP1yHvVfwKeVNf5dSt/gdcjxTB
</DataArray>
</CellData>
<Coordinates>
<DataArray type="Float64" Name="x" format="binary" RangeMin="0" RangeMax="1">
AQAAAACAAABoAAAANAAAAA==eF5jYICAUDDYag+hj9pDRC9A+VftV4HBLaj4Ayj/EVT+KVT8BVT8FVT8LVT8gz0An9sjLg==
</DataArray>
<DataArray type="Float64" Name="y" format="binary" RangeMin="0" RangeMax="1">
AQAAAACAAACAAAAASAAAAA==eF5jYIAAQTDYaA+hD9rPmgkCJ6H8i/ahYHAVKn7T/t5dMIDKP7Q3BoPHUHVP7cvB4DlU/Uv7PbtB4DVU31t7iK0f7AHaRzOg
</DataArray>
<DataArray type="Float64" Name="z" format="binary" RangeMin="0" RangeMax="1">
AQAAAACAAACoAAAAVQAAAA==eF5jYICAWTNBYKU9hN5pb2IMAoeh/JP2EFUXoOKX7dPTQOAaVP6m/dkzIHAHqu4BVPwhVP1jqPwTqL5nUHUvoOpeQtW9hqp7A1X3Dqrugz0ASSZF3Q==
</DataArray>
</Coordinates>
</Piece>
</RectilinearGrid>
</VTKFile>

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,30 @@
<?xml version="1.0"?>
<VTKFile type="RectilinearGrid" version="0.1" byte_order="LittleEndian" header_type="UInt32" compressor="vtkZLibDataCompressor">
<RectilinearGrid WholeExtent="0 12 0 15 0 20">
<FieldData>
<Array type="String" Name="comments" NumberOfTuples="1" format="binary">
AQAAAACAAABJAAAATwAAAA==eF4FwVEKgCAMANCO4mcRG5sTUy/QOVZKQUqB4Pl7L2vT/uBe3ob91FrMECQk0PrdCsEHuI4gzsVNspktWQJmsNFwTOKT8EpMtEw/IaUSzA==
</Array>
</FieldData>
<Piece Extent="0 12 0 15 0 20">
<PointData>
</PointData>
<CellData>
<DataArray type="Int32" Name="material" format="binary" RangeMin="0" RangeMax="7">
AQAAAACAAABAOAAA2AAAAA==eF7t0TEOwgAQA8EkkPD/H1PQ4pMsbXdryd1U9nH8zzs0Rf/rFfqE6lmfQv27zbf761mfQv27zbf761mfQv27zbf761mfQv27zbf761mfQv27zbf761mfQv27zbf761mfQv27zbf761mfQv27zbf761mfQv27zbf761l/ht6h+tm/Qj+heta3f+ln3+6vZ337l3727f561rd/6Wff7q9nffuXfvbt/nrWt3/pZ9/ur2d9+5d+9u3+eta3f+ln3+6vZ337l3727f561rd/6Wff7q9n/ReDyzIp
</DataArray>
</CellData>
<Coordinates>
<DataArray type="Float64" Name="x" format="binary" RangeMin="0" RangeMax="1.2">
AQAAAACAAABoAAAAOQAAAA==eF5jYICAmWCw0x5Cn7Q3BoPLUP5N+/9gcB8q/tg+DQyeQeVf2p8BgzdQde+h4h+h6j/bAwAS5DYX
</DataArray>
<DataArray type="Float64" Name="y" format="binary" RangeMin="0" RangeMax="1.5">
AQAAAACAAACAAAAARQAAAA==eF5jYICAWTNBYKc9hD5pb2IMApeh/Jv2EFUPoOKP7dPTQOAZVP6l/dkzIPAGqu4DVPwjVP1nqPwXqL5vUHU/7AHEUzT8
</DataArray>
<DataArray type="Float64" Name="z" format="binary" RangeMin="0" RangeMax="2">
AQAAAACAAACoAAAAVQAAAA==eF5jYICAWTNBYKc9hD5pb2IMApeh/Jv2EFUPoOKP7dPTQOAZVP6l/dkzIPAGqu4DVPwjVP1nqPwXqL5vUHU/oOp+QtX9hqr7A1X3D6qOwQEAqKdGHg==
</DataArray>
</Coordinates>
</Piece>
</RectilinearGrid>
</VTKFile>

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,5 @@
1 header
1_Eulers 2_Eulers 3_Eulers 1_pos 2_pos
180.0 45.00000000000001 180.0 1 1
270.0 45.00000000000001 90.0 1 2
315.0 0.0 0.0 1 3

View File

@ -0,0 +1,26 @@
1 header
1_Eulers 2_Eulers 3_Eulers 1_pos 2_pos
146.75362934444064 9.976439066337804 256.395594327347 1 1
356.59977719102034 43.39784965440254 12.173896584899929 1 2
75.92521636876346 43.82007387041961 277.8843642946069 1 3
326.75362934444064 9.976439066337806 76.39559432734703 1 4
176.59977719102034 43.397849654402556 192.17389658489986 1 5
255.92521636876344 43.82007387041961 97.88436429460687 1 6
213.24637065555936 9.976439066337804 103.604405672653 1 7
3.400222808979685 43.39784965440255 347.8261034151001 1 8
284.0747836312365 43.82007387041961 82.11563570539313 1 9
33.24637065555936 9.976439066337804 283.60440567265294 1 10
183.40022280897963 43.397849654402556 167.8261034151001 1 11
104.07478363123654 43.82007387041961 262.1156357053931 1 12
273.4002228089796 43.397849654402556 77.82610341510008 1 13
123.24637065555939 9.976439066337806 193.60440567265297 1 14
194.07478363123653 43.82007387041961 172.11563570539317 1 15
93.40022280897969 43.39784965440255 257.8261034151001 1 16
303.24637065555936 9.976439066337804 13.604405672652977 1 17
14.074783631236542 43.82007387041961 352.1156357053931 1 18
86.59977719102032 43.39784965440254 282.17389658489986 1 19
236.75362934444058 9.976439066337804 166.39559432734703 1 20
165.92521636876344 43.82007387041961 187.88436429460683 1 21
266.59977719102034 43.39784965440254 102.17389658489992 1 22
56.75362934444064 9.976439066337804 346.395594327347 1 23
345.9252163687635 43.82007387041961 7.884364294606862 1 24

View File

@ -0,0 +1,26 @@
1 header
1_Eulers 2_Eulers 3_Eulers 1_pos 2_pos
166.39559432734697 9.976439066337804 236.75362934444058 1 1
352.1156357053931 43.82007387041961 14.074783631236542 1 2
77.82610341510008 43.397849654402556 273.4002228089796 1 3
346.395594327347 9.976439066337804 56.75362934444064 1 4
172.11563570539317 43.82007387041961 194.07478363123653 1 5
257.8261034151001 43.39784965440255 93.40022280897969 1 6
193.604405672653 9.976439066337804 123.24637065555939 1 7
7.884364294606862 43.82007387041961 345.9252163687635 1 8
282.17389658489986 43.39784965440254 86.59977719102032 1 9
13.604405672652977 9.976439066337804 303.24637065555936 1 10
187.88436429460683 43.82007387041961 165.92521636876344 1 11
102.17389658489992 43.39784965440254 266.59977719102034 1 12
277.8843642946069 43.82007387041961 75.92521636876346 1 13
103.604405672653 9.976439066337804 213.24637065555936 1 14
192.17389658489986 43.397849654402556 176.59977719102034 1 15
97.88436429460687 43.82007387041961 255.92521636876344 1 16
283.60440567265294 9.976439066337804 33.24637065555936 1 17
12.173896584899929 43.39784965440254 356.59977719102034 1 18
82.11563570539313 43.82007387041961 284.0747836312365 1 19
256.395594327347 9.976439066337804 146.75362934444064 1 20
167.8261034151001 43.397849654402556 183.40022280897963 1 21
262.1156357053931 43.82007387041961 104.07478363123654 1 22
76.39559432734703 9.976439066337806 326.75362934444064 1 23
347.8261034151001 43.39784965440255 3.400222808979685 1 24

View File

@ -0,0 +1,26 @@
1 header
1_Eulers 2_Eulers 3_Eulers 1_pos 2_pos
114.20342833932975 10.52877936550932 204.20342833932972 1 1
94.3573968784815 80.40593177313954 311.22729452432543 1 2
175.6426031215185 80.40593177313954 48.77270547567447 1 3
155.79657166067025 10.52877936550932 155.79657166067025 1 4
99.62136089109411 85.70366403943004 318.04510841542015 1 5
170.37863910890587 85.70366403943002 41.954891584579855 1 6
85.64260312151852 80.40593177313954 48.77270547567448 1 7
65.79657166067024 10.52877936550932 155.79657166067025 1 8
9.621360891094124 85.70366403943004 318.04510841542015 1 9
80.37863910890587 85.70366403943004 41.95489158457987 1 10
24.203428339329758 10.52877936550932 204.20342833932975 1 11
4.357396878481486 80.40593177313954 311.2272945243255 1 12
204.20342833932972 10.52877936550932 204.20342833932972 1 13
184.35739687848147 80.40593177313954 311.2272945243255 1 14
265.64260312151845 80.40593177313953 48.77270547567449 1 15
245.79657166067025 10.528779365509317 155.79657166067025 1 16
189.62136089109413 85.70366403943004 318.04510841542015 1 17
260.3786391089059 85.70366403943002 41.954891584579855 1 18
170.37863910890587 94.29633596056996 138.04510841542015 1 19
99.62136089109411 94.29633596056998 221.95489158457983 1 20
155.79657166067025 169.4712206344907 24.203428339329754 1 21
175.64260312151848 99.59406822686046 131.22729452432552 1 22
94.35739687848151 99.59406822686046 228.77270547567446 1 23
114.20342833932975 169.4712206344907 335.7965716606702 1 24

View File

@ -0,0 +1,14 @@
1 header
1_Eulers 2_Eulers 3_Eulers 1_pos 2_pos
96.91733794010702 83.13253115922213 314.5844440567886 1 1
173.082662059893 83.13253115922211 45.41555594321143 1 2
135.0 9.735610317245317 180.0 1 3
263.082662059893 83.13253115922213 45.415555943211444 1 4
186.91733794010702 83.13253115922211 314.5844440567886 1 5
224.99999999999997 9.735610317245317 180.0 1 6
83.082662059893 83.13253115922213 45.415555943211444 1 7
6.917337940106983 83.13253115922211 314.5844440567886 1 8
45.0 9.73561031724532 180.0 1 9
13.638707279476469 45.81931182053557 80.40196970123216 1 10
256.36129272052347 45.81931182053556 279.59803029876775 1 11
315.0 99.73561031724536 0.0 1 12

View File

@ -0,0 +1,14 @@
1 header
1_Eulers 2_Eulers 3_Eulers 1_pos 2_pos
135.41555594321144 83.13253115922213 173.082662059893 1 1
260.26438968275465 90.0 135.0 1 2
260.40196970123213 45.81931182053557 13.638707279476478 1 3
314.5844440567886 83.13253115922213 96.91733794010702 1 4
350.40196970123213 45.81931182053557 283.6387072794765 1 5
170.26438968275465 90.0 224.99999999999997 1 6
315.4155559432114 83.13253115922213 353.08266205989304 1 7
99.73561031724536 90.0 225.0 1 8
279.59803029876787 45.819311820535574 166.36129272052352 1 9
134.58444405678856 83.13253115922213 276.91733794010696 1 10
9.598030298767851 45.819311820535574 76.36129272052355 1 11
9.735610317245369 90.0 315.0 1 12

View File

@ -0,0 +1,19 @@
3x3:1_Schmid 3x3:2_Schmid 3x3:3_Schmid 3x3:4_Schmid 3x3:5_Schmid 3x3:6_Schmid 3x3:7_Schmid 3x3:8_Schmid 3x3:9_Schmid
0.0 0.0 0.0 0.408248290463863 0.408248290463863 0.40824829046386296 -0.4082482904638631 -0.4082482904638631 -0.408248290463863
-0.408248290463863 -0.408248290463863 -0.40824829046386296 2.4997998108697446e-17 2.4997998108697446e-17 2.499799810869744e-17 0.4082482904638631 0.4082482904638631 0.408248290463863
0.408248290463863 0.408248290463863 0.40824829046386296 -0.4082482904638631 -0.4082482904638631 -0.408248290463863 0.0 0.0 0.0
4.999599621739488e-17 4.9995996217394874e-17 -4.999599621739488e-17 0.408248290463863 0.40824829046386296 -0.408248290463863 0.408248290463863 0.40824829046386296 -0.408248290463863
-0.408248290463863 -0.40824829046386296 0.408248290463863 -2.499799810869744e-17 -2.4997998108697437e-17 2.499799810869744e-17 -0.408248290463863 -0.40824829046386296 0.408248290463863
0.408248290463863 0.40824829046386296 -0.408248290463863 -0.4082482904638631 -0.408248290463863 0.4082482904638631 0.0 0.0 0.0
0.0 0.0 0.0 -0.408248290463863 0.408248290463863 0.408248290463863 0.4082482904638631 -0.4082482904638631 -0.4082482904638631
-0.408248290463863 0.408248290463863 0.408248290463863 -2.499799810869744e-17 2.499799810869744e-17 2.499799810869744e-17 -0.408248290463863 0.408248290463863 0.408248290463863
0.408248290463863 -0.408248290463863 -0.408248290463863 0.408248290463863 -0.408248290463863 -0.408248290463863 0.0 0.0 0.0
-4.999599621739488e-17 4.999599621739488e-17 -4.9995996217394874e-17 -0.408248290463863 0.408248290463863 -0.40824829046386296 -0.408248290463863 0.408248290463863 -0.40824829046386296
-0.408248290463863 0.408248290463863 -0.40824829046386296 2.4997998108697446e-17 -2.4997998108697446e-17 2.499799810869744e-17 0.4082482904638631 -0.4082482904638631 0.408248290463863
0.408248290463863 -0.408248290463863 0.40824829046386296 0.408248290463863 -0.408248290463863 0.40824829046386296 0.0 0.0 0.0
0.4999999999999999 -0.4999999999999999 0.0 0.4999999999999999 -0.4999999999999999 0.0 0.0 0.0 0.0
0.5 0.4999999999999999 -6.123233995736766e-17 -0.5000000000000001 -0.5 6.123233995736767e-17 0.0 0.0 0.0
0.4999999999999999 -3.0616169978683824e-17 -0.4999999999999999 3.0616169978683824e-17 -1.874699728327322e-33 -3.0616169978683824e-17 0.4999999999999999 -3.0616169978683824e-17 -0.4999999999999999
0.5 -3.061616997868383e-17 0.4999999999999999 -3.0616169978683836e-17 1.8746997283273227e-33 -3.061616997868383e-17 -0.5000000000000001 3.0616169978683836e-17 -0.5
0.0 6.123233995736765e-17 -6.123233995736765e-17 0.0 0.4999999999999999 -0.4999999999999999 0.0 0.4999999999999999 -0.4999999999999999
0.0 0.0 0.0 0.0 0.5 0.4999999999999999 0.0 -0.5000000000000001 -0.5

View File

@ -0,0 +1,13 @@
3x3:1_Schmid 3x3:2_Schmid 3x3:3_Schmid 3x3:4_Schmid 3x3:5_Schmid 3x3:6_Schmid 3x3:7_Schmid 3x3:8_Schmid 3x3:9_Schmid
-0.4714045207910318 -0.4714045207910318 -0.47140452079103173 0.2357022603955159 0.2357022603955159 0.23570226039551587 0.2357022603955159 0.2357022603955159 0.23570226039551587
0.2357022603955159 0.2357022603955159 0.23570226039551587 -0.4714045207910318 -0.4714045207910318 -0.47140452079103173 0.2357022603955159 0.2357022603955159 0.23570226039551587
0.23570226039551587 0.23570226039551587 0.23570226039551584 0.23570226039551587 0.23570226039551587 0.23570226039551584 -0.4714045207910318 -0.4714045207910318 -0.47140452079103173
-0.4714045207910318 -0.47140452079103173 0.4714045207910318 0.23570226039551587 0.23570226039551584 -0.23570226039551587 -0.2357022603955159 -0.23570226039551587 0.2357022603955159
0.23570226039551584 0.23570226039551578 -0.23570226039551584 -0.4714045207910318 -0.47140452079103173 0.4714045207910318 -0.2357022603955159 -0.23570226039551587 0.2357022603955159
0.2357022603955159 0.23570226039551587 -0.2357022603955159 0.2357022603955159 0.23570226039551587 -0.2357022603955159 0.4714045207910317 0.47140452079103157 -0.4714045207910317
-0.4714045207910318 0.4714045207910318 0.4714045207910318 -0.2357022603955159 0.2357022603955159 0.2357022603955159 -0.2357022603955159 0.2357022603955159 0.2357022603955159
0.23570226039551595 -0.23570226039551595 -0.23570226039551595 0.4714045207910318 -0.4714045207910318 -0.4714045207910318 -0.2357022603955159 0.2357022603955159 0.2357022603955159
0.2357022603955159 -0.2357022603955159 -0.2357022603955159 -0.23570226039551587 0.23570226039551587 0.23570226039551587 0.4714045207910318 -0.4714045207910318 -0.4714045207910318
-0.4714045207910318 0.4714045207910318 -0.47140452079103173 -0.23570226039551587 0.23570226039551587 -0.23570226039551584 0.2357022603955159 -0.2357022603955159 0.23570226039551587
0.23570226039551595 -0.23570226039551595 0.2357022603955159 0.4714045207910318 -0.4714045207910318 0.47140452079103173 0.2357022603955159 -0.2357022603955159 0.23570226039551587
0.23570226039551584 -0.23570226039551584 0.23570226039551578 -0.23570226039551595 0.23570226039551595 -0.2357022603955159 -0.4714045207910318 0.4714045207910318 -0.47140452079103173

View File

@ -0,0 +1,5 @@
1 header
1_Eulers 2_Eulers 3_Eulers 1_pos 2_pos
0.0 45.00000000000001 0.0 1 1
90.0 45.00000000000001 270.0 1 2
45.00000000000001 0.0 0.0 1 3

View File

@ -0,0 +1,26 @@
1 header
1_Eulers 2_Eulers 3_Eulers 1_pos 2_pos
283.60440567265294 9.976439066337804 33.24637065555936 1 1
167.8261034151001 43.397849654402556 183.40022280897963 1 2
262.1156357053931 43.82007387041961 104.07478363123654 1 3
103.604405672653 9.976439066337804 213.24637065555936 1 4
347.8261034151001 43.39784965440255 3.400222808979685 1 5
82.11563570539313 43.82007387041961 284.0747836312365 1 6
76.39559432734703 9.976439066337806 326.75362934444064 1 7
192.17389658489986 43.397849654402556 176.59977719102034 1 8
97.88436429460687 43.82007387041961 255.92521636876344 1 9
256.395594327347 9.976439066337804 146.75362934444064 1 10
12.173896584899929 43.39784965440254 356.59977719102034 1 11
277.8843642946069 43.82007387041961 75.92521636876346 1 12
102.17389658489992 43.39784965440254 266.59977719102034 1 13
346.395594327347 9.976439066337804 56.75362934444064 1 14
7.884364294606862 43.82007387041961 345.9252163687635 1 15
282.17389658489986 43.39784965440254 86.59977719102032 1 16
166.39559432734703 9.976439066337804 236.75362934444058 1 17
187.88436429460683 43.82007387041961 165.92521636876344 1 18
257.8261034151001 43.39784965440255 93.40022280897969 1 19
13.604405672652977 9.976439066337804 303.24637065555936 1 20
352.1156357053931 43.82007387041961 14.074783631236542 1 21
77.82610341510008 43.397849654402556 273.4002228089796 1 22
193.60440567265297 9.976439066337806 123.24637065555939 1 23
172.11563570539317 43.82007387041961 194.07478363123653 1 24

View File

@ -0,0 +1,26 @@
1 header
1_Eulers 2_Eulers 3_Eulers 1_pos 2_pos
303.24637065555936 9.976439066337804 13.604405672652977 1 1
165.92521636876344 43.82007387041961 187.88436429460683 1 2
266.59977719102034 43.39784965440254 102.17389658489992 1 3
123.24637065555939 9.976439066337804 193.604405672653 1 4
345.9252163687635 43.82007387041961 7.884364294606862 1 5
86.59977719102032 43.39784965440254 282.17389658489986 1 6
56.75362934444064 9.976439066337804 346.395594327347 1 7
194.07478363123653 43.82007387041961 172.11563570539317 1 8
93.40022280897969 43.39784965440255 257.8261034151001 1 9
236.75362934444058 9.976439066337804 166.39559432734697 1 10
14.074783631236542 43.82007387041961 352.1156357053931 1 11
273.4002228089796 43.397849654402556 77.82610341510008 1 12
104.07478363123654 43.82007387041961 262.1156357053931 1 13
326.75362934444064 9.976439066337806 76.39559432734703 1 14
3.400222808979685 43.39784965440255 347.8261034151001 1 15
284.0747836312365 43.82007387041961 82.11563570539313 1 16
146.75362934444064 9.976439066337804 256.395594327347 1 17
183.40022280897963 43.397849654402556 167.8261034151001 1 18
255.92521636876344 43.82007387041961 97.88436429460687 1 19
33.24637065555936 9.976439066337804 283.60440567265294 1 20
356.59977719102034 43.39784965440254 12.173896584899929 1 21
75.92521636876346 43.82007387041961 277.8843642946069 1 22
213.24637065555936 9.976439066337804 103.604405672653 1 23
176.59977719102034 43.397849654402556 192.17389658489986 1 24

View File

@ -0,0 +1,26 @@
1 header
1_Eulers 2_Eulers 3_Eulers 1_pos 2_pos
335.7965716606702 10.528779365509317 65.79657166067024 1 1
228.77270547567446 80.40593177313953 85.64260312151849 1 2
131.22729452432552 80.40593177313954 4.357396878481506 1 3
24.20342833932977 10.52877936550932 24.20342833932976 1 4
221.95489158457983 85.70366403943002 80.37863910890589 1 5
138.04510841542015 85.70366403943004 9.621360891094124 1 6
131.22729452432552 80.40593177313953 94.35739687848151 1 7
24.203428339329765 10.52877936550932 114.20342833932976 1 8
221.95489158457983 85.70366403943004 170.37863910890587 1 9
138.04510841542015 85.70366403943004 99.62136089109411 1 10
335.7965716606702 10.52877936550932 155.79657166067025 1 11
228.77270547567448 80.40593177313954 175.6426031215185 1 12
335.7965716606702 10.52877936550932 335.7965716606702 1 13
228.77270547567448 80.40593177313954 355.6426031215185 1 14
131.2272945243255 80.40593177313954 274.35739687848144 1 15
24.203428339329747 10.52877936550932 294.2034283393298 1 16
221.95489158457985 85.70366403943004 350.3786391089059 1 17
138.04510841542015 85.70366403943004 279.6213608910941 1 18
41.95489158457986 94.29633596056998 9.621360891094133 1 19
318.04510841542015 94.29633596056996 80.37863910890589 1 20
155.79657166067025 169.4712206344907 24.203428339329754 1 21
48.77270547567448 99.59406822686046 4.357396878481504 1 22
311.2272945243255 99.59406822686046 85.64260312151852 1 23
204.20342833932975 169.4712206344907 65.79657166067024 1 24

View File

@ -0,0 +1,14 @@
1 header
1_Eulers 2_Eulers 3_Eulers 1_pos 2_pos
225.41555594321144 83.13253115922213 83.08266205989301 1 1
134.58444405678856 83.13253115922211 6.917337940107012 1 2
4.702125169424418e-15 9.735610317245317 45.0 1 3
134.58444405678856 83.13253115922213 276.91733794010696 1 4
225.4155559432114 83.13253115922213 353.082662059893 1 5
0.0 9.735610317245317 315.0 1 6
134.58444405678858 83.13253115922213 96.91733794010702 1 7
225.41555594321142 83.13253115922213 173.082662059893 1 8
0.0 9.735610317245317 135.0 1 9
99.59803029876785 45.81931182053557 166.36129272052355 1 10
260.40196970123213 45.81931182053556 283.6387072794765 1 11
180.0 99.73561031724535 225.0 1 12

View File

@ -0,0 +1,14 @@
1 header
1_Eulers 2_Eulers 3_Eulers 1_pos 2_pos
6.9173379401070045 83.13253115922213 44.58444405678856 1 1
45.0 89.99999999999999 279.7356103172453 1 2
166.36129272052352 45.819311820535574 279.59803029876787 1 3
83.08266205989301 83.13253115922213 225.41555594321144 1 4
256.3612927205235 45.819311820535574 189.59803029876787 1 5
315.0 90.0 9.735610317245369 1 6
186.917337940107 83.13253115922213 224.58444405678856 1 7
315.0 90.0 80.26438968275463 1 8
13.638707279476478 45.81931182053557 260.40196970123213 1 9
263.082662059893 83.13253115922213 45.415555943211444 1 10
103.63870727947646 45.819311820535574 170.40196970123213 1 11
224.99999999999997 90.0 170.26438968275465 1 12

View File

@ -0,0 +1,25 @@
3x3:1_Schmid 3x3:2_Schmid 3x3:3_Schmid 3x3:4_Schmid 3x3:5_Schmid 3x3:6_Schmid 3x3:7_Schmid 3x3:8_Schmid 3x3:9_Schmid
0.0 0.4082482904638631 0.408248290463863 0.0 -0.408248290463863 -0.40824829046386296 0.0 0.4082482904638631 0.408248290463863
0.0 -0.408248290463863 -0.40824829046386296 0.0 -0.408248290463863 -0.40824829046386296 0.0 0.4082482904638631 0.408248290463863
0.0 -0.408248290463863 0.408248290463863 0.0 -0.408248290463863 0.408248290463863 0.0 -0.408248290463863 0.408248290463863
0.0 0.40824829046386285 -0.40824829046386285 0.0 -0.408248290463863 0.408248290463863 0.0 -0.408248290463863 0.408248290463863
-0.40824829046386296 2.4997998108697434e-17 -0.40824829046386285 0.4082482904638631 -2.4997998108697446e-17 0.408248290463863 0.4082482904638631 -2.4997998108697446e-17 0.408248290463863
-0.408248290463863 2.499799810869744e-17 -0.40824829046386296 -0.408248290463863 2.499799810869744e-17 -0.40824829046386296 0.4082482904638631 -2.4997998108697446e-17 0.408248290463863
-0.408248290463863 2.499799810869744e-17 0.408248290463863 -0.408248290463863 2.499799810869744e-17 0.408248290463863 -0.408248290463863 2.499799810869744e-17 0.408248290463863
-0.408248290463863 2.499799810869744e-17 0.408248290463863 0.40824829046386296 -2.4997998108697437e-17 -0.40824829046386296 -0.408248290463863 2.499799810869744e-17 0.408248290463863
-0.40824829046386296 -0.40824829046386285 4.999599621739487e-17 0.4082482904638631 0.408248290463863 -4.999599621739489e-17 0.4082482904638631 0.408248290463863 -4.999599621739489e-17
-0.4082482904638631 -0.408248290463863 4.999599621739489e-17 0.408248290463863 0.40824829046386296 -4.999599621739488e-17 -0.4082482904638631 -0.408248290463863 4.999599621739489e-17
-0.408248290463863 0.408248290463863 0.0 -0.408248290463863 0.408248290463863 0.0 -0.408248290463863 0.408248290463863 0.0
-0.40824829046386296 0.40824829046386296 0.0 -0.40824829046386296 0.40824829046386296 0.0 0.408248290463863 -0.408248290463863 0.0
-0.4714045207910316 -0.23570226039551578 -0.23570226039551576 0.4714045207910318 0.23570226039551587 0.23570226039551584 0.4714045207910318 0.23570226039551587 0.23570226039551584
-0.4714045207910318 0.23570226039551595 0.2357022603955159 -0.4714045207910318 0.23570226039551595 0.2357022603955159 -0.4714045207910318 0.23570226039551595 0.2357022603955159
0.47140452079103173 -0.2357022603955159 0.23570226039551584 0.47140452079103173 -0.2357022603955159 0.23570226039551584 -0.4714045207910318 0.23570226039551595 -0.23570226039551587
0.4714045207910318 0.23570226039551587 -0.23570226039551595 -0.47140452079103173 -0.23570226039551584 0.2357022603955159 0.4714045207910318 0.23570226039551587 -0.23570226039551595
0.2357022603955159 0.4714045207910318 0.23570226039551584 -0.23570226039551587 -0.47140452079103173 -0.23570226039551578 0.2357022603955159 0.4714045207910318 0.23570226039551584
-0.23570226039551587 0.47140452079103173 0.23570226039551587 -0.23570226039551587 0.47140452079103173 0.23570226039551587 0.2357022603955159 -0.4714045207910318 -0.2357022603955159
0.2357022603955159 -0.4714045207910318 0.23570226039551595 0.2357022603955159 -0.4714045207910318 0.23570226039551595 0.2357022603955159 -0.4714045207910318 0.23570226039551595
-0.2357022603955158 -0.4714045207910316 0.23570226039551584 0.2357022603955159 0.4714045207910318 -0.23570226039551595 0.2357022603955159 0.4714045207910318 -0.23570226039551595
0.23570226039551587 0.23570226039551584 0.47140452079103173 0.23570226039551587 0.23570226039551584 0.47140452079103173 -0.2357022603955159 -0.23570226039551587 -0.4714045207910318
-0.2357022603955159 0.2357022603955159 0.4714045207910318 0.23570226039551587 -0.23570226039551587 -0.47140452079103173 -0.2357022603955159 0.2357022603955159 0.4714045207910318
-0.2357022603955158 0.2357022603955158 -0.4714045207910316 0.2357022603955159 -0.2357022603955159 0.4714045207910318 0.2357022603955159 -0.2357022603955159 0.4714045207910318
0.2357022603955159 0.23570226039551587 -0.4714045207910318 0.2357022603955159 0.23570226039551587 -0.4714045207910318 0.2357022603955159 0.23570226039551587 -0.4714045207910318

View File

@ -0,0 +1,13 @@
3x3:1_Schmid 3x3:2_Schmid 3x3:3_Schmid 3x3:4_Schmid 3x3:5_Schmid 3x3:6_Schmid 3x3:7_Schmid 3x3:8_Schmid 3x3:9_Schmid
-0.4714045207910316 -0.23570226039551578 -0.23570226039551576 0.4714045207910318 0.23570226039551587 0.23570226039551584 0.4714045207910318 0.23570226039551587 0.23570226039551584
-0.4714045207910318 0.23570226039551595 0.2357022603955159 -0.4714045207910318 0.23570226039551595 0.2357022603955159 -0.4714045207910318 0.23570226039551595 0.2357022603955159
0.47140452079103173 -0.2357022603955159 0.23570226039551584 0.47140452079103173 -0.2357022603955159 0.23570226039551584 -0.4714045207910318 0.23570226039551595 -0.23570226039551587
0.4714045207910318 0.23570226039551587 -0.23570226039551595 -0.47140452079103173 -0.23570226039551584 0.2357022603955159 0.4714045207910318 0.23570226039551587 -0.23570226039551595
0.2357022603955159 0.4714045207910318 0.23570226039551584 -0.23570226039551587 -0.47140452079103173 -0.23570226039551578 0.2357022603955159 0.4714045207910318 0.23570226039551584
-0.23570226039551587 0.47140452079103173 0.23570226039551587 -0.23570226039551587 0.47140452079103173 0.23570226039551587 0.2357022603955159 -0.4714045207910318 -0.2357022603955159
0.2357022603955159 -0.4714045207910318 0.23570226039551595 0.2357022603955159 -0.4714045207910318 0.23570226039551595 0.2357022603955159 -0.4714045207910318 0.23570226039551595
-0.2357022603955158 -0.4714045207910316 0.23570226039551584 0.2357022603955159 0.4714045207910318 -0.23570226039551595 0.2357022603955159 0.4714045207910318 -0.23570226039551595
0.23570226039551587 0.23570226039551584 0.47140452079103173 0.23570226039551587 0.23570226039551584 0.47140452079103173 -0.2357022603955159 -0.23570226039551587 -0.4714045207910318
-0.2357022603955159 0.2357022603955159 0.4714045207910318 0.23570226039551587 -0.23570226039551587 -0.47140452079103173 -0.2357022603955159 0.2357022603955159 0.4714045207910318
-0.2357022603955158 0.2357022603955158 -0.4714045207910316 0.2357022603955159 -0.2357022603955159 0.4714045207910318 0.2357022603955159 -0.2357022603955159 0.4714045207910318
0.2357022603955159 0.23570226039551587 -0.4714045207910318 0.2357022603955159 0.23570226039551587 -0.4714045207910318 0.2357022603955159 0.23570226039551587 -0.4714045207910318

View File

@ -0,0 +1,34 @@
3x3:1_Schmid 3x3:2_Schmid 3x3:3_Schmid 3x3:4_Schmid 3x3:5_Schmid 3x3:6_Schmid 3x3:7_Schmid 3x3:8_Schmid 3x3:9_Schmid
0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0
0.0 0.0 -0.4999999999999998 0.0 0.0 0.8660254037844388 0.0 0.0 0.0
0.0 0.0 -0.5000000000000001 0.0 0.0 -0.8660254037844386 0.0 0.0 0.0
0.0 1.0 -5.914589856893347e-17 0.0 0.0 0.0 0.0 0.0 0.0
0.4330127018922192 0.24999999999999975 -3.102315069664884e-17 -0.7500000000000002 -0.4330127018922192 5.373367321746164e-17 0.0 0.0 0.0
-0.43301270189221935 0.25000000000000006 1.4502014121821253e-18 -0.7499999999999998 0.4330127018922193 2.5118225271075755e-18 0.0 0.0 0.0
-0.4330127018922194 -0.7499999999999999 6.059609998111558e-17 0.2500000000000001 0.4330127018922194 -3.498517463593857e-17 0.0 0.0 0.0
2.563950248511418e-16 -5.693113199781536e-32 -9.614043519462407e-33 1.0 -2.220446049250313e-16 -3.7496997163046135e-17 0.0 0.0 0.0
0.4330127018922194 -0.75 2.8122747872284606e-17 0.25000000000000006 -0.43301270189221935 1.6236676054415494e-17 0.0 0.0 0.0
-0.38254602783800284 -0.22086305214969287 -0.23426064283290896 0.6625891564490795 0.38254602783800284 0.40575133560034454 0.0 0.0 0.0
0.0 -0.8834522085987724 -0.4685212856658182 0.0 0.0 0.0 0.0 0.0 0.0
0.38254602783800307 -0.22086305214969315 -0.23426064283290912 0.6625891564490792 -0.38254602783800296 -0.40575133560034443 0.0 0.0 0.0
-0.38254602783800284 -0.22086305214969287 0.23426064283290904 0.6625891564490795 0.38254602783800284 -0.4057513356003447 0.0 0.0 0.0
0.0 -0.8834522085987724 0.46852128566581835 0.0 0.0 0.0 0.0 0.0 0.0
0.38254602783800307 -0.22086305214969315 0.23426064283290912 0.6625891564490792 -0.38254602783800296 0.40575133560034443 0.0 0.0 0.0
-0.39955629492721617 -0.23068393443263763 -0.24467726152216654 3.8591083978971935e-17 2.228057272357889e-17 2.3632116092343994e-17 0.6524726973924442 0.3767052874784087 0.39955629492721606
-0.19977814746360817 -0.11534196721631886 -0.12233863076108333 -0.34602590164895664 -0.19977814746360797 -0.21189672420660496 0.6524726973924442 0.3767052874784087 0.39955629492721606
0.0 -0.23068393443263785 -0.12233863076108334 0.0 -0.39955629492721617 -0.211896724206605 0.0 0.7534105749568177 0.3995562949272161
0.0 0.23068393443263768 0.12233863076108326 0.0 -0.3995562949272162 -0.21189672420660502 0.0 0.7534105749568178 0.39955629492721617
-0.199778147463608 0.11534196721631884 0.12233863076108324 0.34602590164895664 -0.1997781474636081 -0.211896724206605 -0.6524726973924442 0.3767052874784089 0.3995562949272161
-0.3995562949272161 0.23068393443263774 0.24467726152216654 -3.859108397897193e-17 2.22805727235789e-17 2.3632116092343994e-17 -0.6524726973924441 0.37670528747840887 0.39955629492721606
-0.39955629492721617 -0.23068393443263763 0.24467726152216662 -3.8591083978971935e-17 -2.228057272357889e-17 2.3632116092344003e-17 -0.6524726973924442 -0.3767052874784087 0.39955629492721617
-0.1997781474636082 -0.11534196721631888 0.1223386307610834 -0.3460259016489568 -0.19977814746360806 0.21189672420660513 -0.6524726973924442 -0.3767052874784087 0.39955629492721617
0.0 -0.23068393443263788 0.12233863076108341 0.0 -0.3995562949272163 0.21189672420660516 0.0 -0.7534105749568177 0.3995562949272162
0.0 0.2306839344326376 -0.12233863076108324 0.0 -0.3995562949272163 0.21189672420660516 0.0 -0.7534105749568177 0.3995562949272162
-0.19977814746360792 0.1153419672163188 -0.12233863076108319 0.34602590164895675 -0.19977814746360814 0.21189672420660505 0.6524726973924441 -0.37670528747840887 0.39955629492721606
-0.3995562949272161 0.23068393443263774 -0.24467726152216654 3.859108397897193e-17 -2.22805727235789e-17 2.3632116092343994e-17 0.6524726973924441 -0.37670528747840887 0.39955629492721606
-0.11134044285378089 -0.19284730395996755 -0.1363636363636364 -0.19284730395996755 -0.3340213285613424 -0.23618874648666507 0.36363636363636365 0.6298366572977734 0.44536177141512323
-0.11134044285378081 0.1928473039599675 0.13636363636363633 0.19284730395996758 -0.3340213285613426 -0.2361887464866651 -0.3636363636363637 0.6298366572977737 0.4453617714151233
-0.44536177141512323 9.889017858258314e-17 0.2727272727272727 -4.301519895922435e-17 9.551292858672588e-33 2.634132215859942e-17 -0.7272727272727272 1.6148698540002275e-16 0.44536177141512323
-0.1113404428537809 -0.1928473039599676 0.13636363636363644 -0.1928473039599676 -0.33402132856134253 0.23618874648666516 -0.36363636363636365 -0.6298366572977734 0.44536177141512323
-0.11134044285378074 0.19284730395996735 -0.13636363636363627 0.19284730395996758 -0.33402132856134253 0.23618874648666516 0.3636363636363636 -0.6298366572977734 0.44536177141512323
-0.44536177141512334 9.889017858258316e-17 -0.2727272727272727 4.3015198959224354e-17 -9.55129285867259e-33 2.634132215859942e-17 0.7272727272727273 -1.6148698540002277e-16 0.44536177141512323

View File

@ -0,0 +1,25 @@
3x3:1_Schmid 3x3:2_Schmid 3x3:3_Schmid 3x3:4_Schmid 3x3:5_Schmid 3x3:6_Schmid 3x3:7_Schmid 3x3:8_Schmid 3x3:9_Schmid
-0.3743506488634663 -0.21613144789263322 -0.4584840372976439 -0.21613144789263333 -0.12478354962115536 -0.2647058823529411 0.4075413664867946 0.23529411764705865 0.4991341984846217
0.0 -1.1032987950073291e-16 -1.1702250894369439e-16 0.0 -0.4991341984846217 -0.5294117647058824 0.0 0.47058823529411753 0.4991341984846218
-0.3743506488634663 0.2161314478926334 0.4584840372976439 0.2161314478926334 -0.1247835496211555 -0.2647058823529412 -0.4075413664867947 0.23529411764705888 0.4991341984846218
-0.3743506488634663 -0.21613144789263322 0.4584840372976439 -0.2161314478926334 -0.12478354962115538 0.2647058823529412 -0.4075413664867946 -0.23529411764705865 0.4991341984846217
0.0 -1.4562117094830577e-16 1.5445457619280876e-16 0.0 -0.4991341984846217 0.5294117647058824 0.0 -0.47058823529411753 0.4991341984846218
-0.3743506488634663 0.2161314478926334 -0.4584840372976439 0.21613144789263342 -0.12478354962115551 0.26470588235294124 0.4075413664867947 -0.23529411764705888 0.4991341984846218
-0.06998542122237655 -0.1212183053462653 -0.04285714285714286 -0.12121830534626532 -0.20995626366712955 -0.07423074889580901 0.45714285714285724 0.7917946548886295 0.279941684889506
-0.06998542122237653 0.12121830534626528 0.04285714285714286 0.12121830534626532 -0.20995626366712958 -0.07423074889580904 -0.45714285714285724 0.7917946548886297 0.2799416848895061
-0.27994168488950616 6.2159540823338e-17 0.08571428571428573 -5.407625012016776e-17 1.2007339593759827e-32 1.6557402499691063e-17 -0.9142857142857143 2.0301221021717148e-16 0.27994168488950605
-0.0699854212223766 -0.12121830534626538 0.04285714285714291 -0.12121830534626538 -0.20995626366712963 0.07423074889580909 -0.45714285714285724 -0.7917946548886295 0.2799416848895062
-0.06998542122237648 0.12121830534626521 -0.04285714285714283 0.12121830534626538 -0.20995626366712966 0.07423074889580906 0.45714285714285724 -0.7917946548886297 0.2799416848895061
-0.27994168488950605 6.215954082333798e-17 -0.08571428571428569 5.407625012016776e-17 -1.2007339593759827e-32 1.6557402499691063e-17 0.9142857142857143 -2.0301221021717148e-16 0.27994168488950605
0.3104371234477526 0.17923095678901296 0.19010313741609627 0.17923095678901305 0.1034790411492508 0.1097560975609756 -0.6759222663683424 -0.39024390243902424 -0.41391616459700337
0.0 7.68600963028337e-17 4.07612214737886e-17 0.0 0.4139161645970035 0.21951219512195125 0.0 -0.7804878048780488 -0.4139161645970034
0.31043712344775254 -0.17923095678901305 -0.19010313741609627 -0.17923095678901302 0.10347904114925086 0.1097560975609756 0.6759222663683423 -0.3902439024390244 -0.41391616459700337
0.3104371234477527 0.179230956789013 -0.19010313741609638 0.17923095678901313 0.10347904114925086 -0.10975609756097568 0.6759222663683424 0.3902439024390242 -0.4139161645970035
0.0 1.3539199431344235e-16 -7.180244797305419e-17 0.0 0.4139161645970036 -0.21951219512195136 0.0 0.7804878048780487 -0.41391616459700353
0.3104371234477525 -0.179230956789013 0.19010313741609622 -0.17923095678901313 0.10347904114925092 -0.10975609756097565 -0.6759222663683423 0.3902439024390244 -0.41391616459700337
0.11134044285378089 0.19284730395996755 0.1363636363636364 0.19284730395996755 0.3340213285613424 0.23618874648666507 -0.36363636363636365 -0.6298366572977734 -0.44536177141512323
0.11134044285378081 -0.1928473039599675 -0.13636363636363633 -0.19284730395996758 0.3340213285613426 0.2361887464866651 0.3636363636363637 -0.6298366572977737 -0.4453617714151233
0.44536177141512323 -9.889017858258314e-17 -0.2727272727272727 4.301519895922435e-17 -9.551292858672588e-33 -2.634132215859942e-17 0.7272727272727272 -1.6148698540002275e-16 -0.44536177141512323
0.1113404428537809 0.1928473039599676 -0.13636363636363644 0.1928473039599676 0.33402132856134253 -0.23618874648666516 0.36363636363636365 0.6298366572977734 -0.44536177141512323
0.11134044285378074 -0.19284730395996735 0.13636363636363627 -0.19284730395996758 0.33402132856134253 -0.23618874648666516 -0.3636363636363636 0.6298366572977734 -0.44536177141512323
0.44536177141512334 -9.889017858258316e-17 0.2727272727272727 -4.3015198959224354e-17 9.55129285867259e-33 -2.634132215859942e-17 -0.7272727272727273 1.6148698540002277e-16 -0.44536177141512323

Some files were not shown because too many files have changed in this diff Show More