Merge branch 'development' of git.damask.mpie.de:damask/DAMASK into typehints_orientation_rotation

Daniel Otto de Mentock 2022-01-13 17:23:24 +01:00
commit 4ba9935ccc
57 changed files with 1374 additions and 877 deletions


@@ -43,12 +43,13 @@ jobs:
         pip install pytest
     - name: Install dependencies
+      # https://github.com/actions/virtual-environments/issues/4790
       run: >
         sudo apt-get update &&
-        sudo apt-get install python3-pip python3-pytest python3-pandas python3-scipy
-        python3-h5py python3-vtk7 python3-matplotlib python3-yaml -y
+        sudo apt-get remove mysql* &&
+        sudo apt-get install python3-pandas python3-scipy python3-h5py python3-vtk7 python3-matplotlib python3-yaml -y
     - name: Run unit tests
       run: |
         export PYTHONPATH=${PWD}/python
-        COLUMNS=256 python -m pytest python
+        COLUMNS=256 pytest python


@@ -36,14 +36,17 @@ variables:
 # Names of module files to load
 # ===============================================================================================
 # ++++++++++++ Compiler +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-COMPILER_INTEL: "Compiler/Intel/19.1.2 Libraries/IMKL/2020"
-COMPILER_GNU: "Compiler/GNU/10"
+COMPILER_GNU: "Compiler/GNU/10"
+COMPILER_INTELLLVM: "Compiler/oneAPI/2022.0.1 Libraries/IMKL/2022.0.1"
+COMPILER_INTEL: "Compiler/Intel/2022.0.1 Libraries/IMKL/2022.0.1"
 # ++++++++++++ MPI ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-MPI_INTEL: "MPI/Intel/19.1.2/IntelMPI/2019"
 MPI_GNU: "MPI/GNU/10/OpenMPI/4.1.1"
+MPI_INTELLLVM: "MPI/oneAPI/2022.0.1/IntelMPI/2021.5.0"
+MPI_INTEL: "MPI/Intel/2022.0.1/IntelMPI/2021.5.0"
 # ++++++++++++ PETSc ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-PETSC_INTEL: "Libraries/PETSc/3.16.1/Intel-19.1.2-IntelMPI-2019"
-PETSC_GNU: "Libraries/PETSc/3.16.1/GNU-10-OpenMPI-4.1.1"
+PETSC_GNU: "Libraries/PETSc/3.16.1/GNU-10-OpenMPI-4.1.1"
+PETSC_INTELLLVM: "Libraries/PETSc/3.16.3/oneAPI-2022.0.1-IntelMPI-2021.5.0"
+PETSC_INTEL: "Libraries/PETSc/3.16.2/Intel-2022.0.1-IntelMPI-2021.5.0"
 # ++++++++++++ MSC Marc +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 MSC: "FEM/MSC/2021.3.1"
 IntelMarc: "Compiler/Intel/19.1.2 Libraries/IMKL/2020"

@@ -76,20 +79,6 @@ mypy:
 ###################################################################################################
-test_grid_Intel:
-  stage: compile
-  script:
-    - module load ${COMPILER_INTEL} ${MPI_INTEL} ${PETSC_INTEL}
-    - cd PRIVATE/testing/pytest
-    - pytest -k 'compile and grid' --basetemp ${TESTROOT}/compile_grid_Intel
-
-test_mesh_Intel:
-  stage: compile
-  script:
-    - module load ${COMPILER_INTEL} ${MPI_INTEL} ${PETSC_INTEL}
-    - cd PRIVATE/testing/pytest
-    - pytest -k 'compile and mesh' --basetemp ${TESTROOT}/compile_mesh_Intel
-
 test_grid_GNU:
   stage: compile
   script:

@@ -104,6 +93,27 @@ test_mesh_GNU:
     - cd PRIVATE/testing/pytest
     - pytest -k 'compile and mesh' --basetemp ${TESTROOT}/compile_mesh_GNU

+test_mesh_IntelLLVM:
+  stage: compile
+  script:
+    - module load ${COMPILER_INTELLLVM} ${MPI_INTELLLVM} ${PETSC_INTELLLVM}
+    - cd PRIVATE/testing/pytest
+    - pytest -k 'compile and mesh' --basetemp ${TESTROOT}/compile_mesh_IntelLLVM
+
+test_grid_Intel:
+  stage: compile
+  script:
+    - module load ${COMPILER_INTEL} ${MPI_INTEL} ${PETSC_INTEL}
+    - cd PRIVATE/testing/pytest
+    - pytest -k 'compile and grid' --basetemp ${TESTROOT}/compile_grid_Intel
+
+test_mesh_Intel:
+  stage: compile
+  script:
+    - module load ${COMPILER_INTEL} ${MPI_INTEL} ${PETSC_INTEL}
+    - cd PRIVATE/testing/pytest
+    - pytest -k 'compile and mesh' --basetemp ${TESTROOT}/compile_mesh_Intel
+
 test_Marc:
   stage: compile
   script:


@@ -82,20 +82,18 @@ if (CMAKE_Fortran_COMPILER_ID STREQUAL "Intel")
   include(Compiler-Intel)
 elseif(CMAKE_Fortran_COMPILER_ID STREQUAL "GNU")
   include(Compiler-GNU)
+elseif(CMAKE_Fortran_COMPILER_ID STREQUAL "IntelLLVM")
+  include(Compiler-IntelLLVM)
 else()
   message(FATAL_ERROR "Compiler type(CMAKE_Fortran_COMPILER_ID) not recognized")
 endif()

-file(STRINGS "$ENV{PETSC_DIR}/$ENV{PETSC_ARCH}/lib/petsc/conf/petscvariables" PETSC_EXTERNAL_LIB REGEX "PETSC_WITH_EXTERNAL_LIB = .*$?")
-string(REGEX MATCHALL "-[lLW]([^\" ]+)" PETSC_EXTERNAL_LIB "${PETSC_EXTERNAL_LIB}")
-list(REMOVE_DUPLICATES PETSC_EXTERNAL_LIB)
-string(REPLACE ";" " " PETSC_EXTERNAL_LIB "${PETSC_EXTERNAL_LIB}")
+file(STRINGS "$ENV{PETSC_DIR}/$ENV{PETSC_ARCH}/lib/petsc/conf/petscvariables" PETSC_EXTERNAL_LIB REGEX "PETSC_EXTERNAL_LIB_BASIC = .*$?")
+string(REPLACE "PETSC_EXTERNAL_LIB_BASIC = " "" PETSC_EXTERNAL_LIB "${PETSC_EXTERNAL_LIB}")
 message("PETSC_EXTERNAL_LIB:\n${PETSC_EXTERNAL_LIB}\n")

 file(STRINGS "$ENV{PETSC_DIR}/$ENV{PETSC_ARCH}/lib/petsc/conf/petscvariables" PETSC_INCLUDES REGEX "PETSC_FC_INCLUDES = .*$?")
-string(REGEX MATCHALL "-I([^\" ]+)" PETSC_INCLUDES "${PETSC_INCLUDES}")
-list(REMOVE_DUPLICATES PETSC_INCLUDES)
-string(REPLACE ";" " " PETSC_INCLUDES "${PETSC_INCLUDES}")
+string(REPLACE "PETSC_FC_INCLUDES = " "" PETSC_INCLUDES "${PETSC_INCLUDES}")
 message("PETSC_INCLUDES:\n${PETSC_INCLUDES}\n")

 set(CMAKE_Fortran_FLAGS_${CMAKE_BUILD_TYPE} "${BUILDCMD_PRE} ${OPENMP_FLAGS} ${STANDARD_CHECK} ${OPTIMIZATION_FLAGS} ${COMPILE_FLAGS} ${PRECISION_FLAGS}")

@@ -107,7 +105,7 @@ if(CMAKE_BUILD_TYPE STREQUAL "DEBUG")
 endif()

 set(CMAKE_Fortran_FLAGS_${CMAKE_BUILD_TYPE} "${CMAKE_Fortran_FLAGS_${CMAKE_BUILD_TYPE}} ${PETSC_INCLUDES} ${BUILDCMD_POST}")
-set(CMAKE_Fortran_LINK_EXECUTABLE "${CMAKE_Fortran_LINK_EXECUTABLE} <OBJECTS> -o <TARGET> <LINK_LIBRARIES> ${PETSC_EXTERNAL_LIB} -lz ${BUILDCMD_POST}")
+set(CMAKE_Fortran_LINK_EXECUTABLE "${CMAKE_Fortran_LINK_EXECUTABLE} <OBJECTS> -o <TARGET> <LINK_LIBRARIES> -L${PETSC_LIBRARY_DIRS} -lpetsc ${PETSC_EXTERNAL_LIB} -lz ${BUILDCMD_POST}")

 message("Fortran Compiler Flags:\n${CMAKE_Fortran_FLAGS_${CMAKE_BUILD_TYPE}}\n")
 message("C Compiler Flags:\n${CMAKE_C_FLAGS_${CMAKE_BUILD_TYPE}}\n")


@@ -10,14 +10,12 @@ all: grid mesh
 .PHONY: grid
 grid:
 	@cmake -B build/grid -DDAMASK_SOLVER=grid -DCMAKE_INSTALL_PREFIX=${PWD} -DCMAKE_BUILD_TYPE=${BUILD_TYPE} -DBUILDCMD_POST=${BUILDCMD_POST} -DBUILDCMD_PRE=${BUILDCMD_PRE} -DOPTIMIZATION=${OPTIMIZATION} -DOPENMP=${OPENMP}
-	@cmake --build build/grid --parallel
-	@cmake --install build/grid
+	@cmake --build build/grid --parallel --target install

 .PHONY: mesh
 mesh:
 	@cmake -B build/mesh -DDAMASK_SOLVER=mesh -DCMAKE_INSTALL_PREFIX=${PWD} -DCMAKE_BUILD_TYPE=${BUILD_TYPE} -DBUILDCMD_POST=${BUILDCMD_POST} -DBUILDCMD_PRE=${BUILDCMD_PRE} -DOPTIMIZATION=${OPTIMIZATION} -DOPENMP=${OPENMP}
-	@cmake --build build/mesh --parallel
-	@cmake --install build/mesh
+	@cmake --build build/mesh --parallel --target install

 .PHONY: clean
 clean:

@@ -1 +1 @@
-Subproject commit e6e1f93a36d63348359a81d7c373083a39977694
+Subproject commit b898a8b5552bd9d1c555edc3d8134564dd32fe53


@@ -106,8 +106,9 @@ set (DEBUG_FLAGS "${DEBUG_FLAGS} -fpe-all=0")
 #set (DEBUG_FLAGS "${DEBUG_FLAGS},stderrors")
 # ... warnings about Fortran standard violations are changed to errors

-set (DEBUG_FLAGS "${DEBUG_FLAGS} -debug-parameters all")
+#set (DEBUG_FLAGS "${DEBUG_FLAGS} -debug-parameters all")
 # generate debug information for parameters
+# Disabled due to ICE when compiling phase_damage.f90 (not understandable, there is no parameter in there)

 # Additional options
 # -heap-arrays: Should not be done for OpenMP, but set "ulimit -s unlimited" on shell. Probably it helps also to unlimit other limits


@@ -0,0 +1,121 @@
###################################################################################################
# Intel Compiler
###################################################################################################
if (CMAKE_Fortran_COMPILER_VERSION VERSION_LESS 18.0)
message (FATAL_ERROR "Intel Compiler version: ${CMAKE_Fortran_COMPILER_VERSION} not supported")
endif ()
if (OPENMP)
set (OPENMP_FLAGS "-qopenmp")
endif ()
if (OPTIMIZATION STREQUAL "OFF")
set (OPTIMIZATION_FLAGS "-O0")
elseif (OPTIMIZATION STREQUAL "DEFENSIVE")
set (OPTIMIZATION_FLAGS "-O2")
elseif (OPTIMIZATION STREQUAL "AGGRESSIVE")
set (OPTIMIZATION_FLAGS "-ipo -O3 -fp-model fast=2 -xHost")
# -fast = -ipo, -O3, -no-prec-div, -static, -fp-model fast=2, and -xHost"
endif ()
# -assume std_mod_proc_name (included in -standard-semantics) causes problems if other modules
# (PETSc, HDF5) are not compiled with this option (https://software.intel.com/en-us/forums/intel-fortran-compiler-for-linux-and-mac-os-x/topic/62172)
set (STANDARD_CHECK "-stand f18 -assume nostd_mod_proc_name")
set (LINKER_FLAGS "${LINKER_FLAGS} -shared-intel")
# Link against shared Intel libraries instead of static ones
#------------------------------------------------------------------------------------------------
# Fine tuning compilation options
set (COMPILE_FLAGS "${COMPILE_FLAGS} -fpp")
# preprocessor
set (COMPILE_FLAGS "${COMPILE_FLAGS} -ftz")
# flush underflow to zero, automatically set if -O[1,2,3]
set (COMPILE_FLAGS "${COMPILE_FLAGS} -diag-disable")
# disables warnings ...
set (COMPILE_FLAGS "${COMPILE_FLAGS} 5268")
# ... the text exceeds right hand column allowed on the line (we have only comments there)
set (COMPILE_FLAGS "${COMPILE_FLAGS},7624")
# ... about deprecated forall (has nice syntax and most likely a performance advantage)
set (COMPILE_FLAGS "${COMPILE_FLAGS} -warn")
# enables warnings ...
set (COMPILE_FLAGS "${COMPILE_FLAGS} declarations")
# ... any undeclared names (alternative name: -implicitnone)
set (COMPILE_FLAGS "${COMPILE_FLAGS},general")
# ... warning messages and informational messages are issued by the compiler
set (COMPILE_FLAGS "${COMPILE_FLAGS},usage")
# ... questionable programming practices
set (COMPILE_FLAGS "${COMPILE_FLAGS},interfaces")
# ... checks the interfaces of all SUBROUTINEs called and FUNCTIONs invoked in your compilation against an external set of interface blocks
set (COMPILE_FLAGS "${COMPILE_FLAGS},ignore_loc")
# ... %LOC is stripped from an actual argument
set (COMPILE_FLAGS "${COMPILE_FLAGS},alignments")
# ... data that is not naturally aligned
set (COMPILE_FLAGS "${COMPILE_FLAGS},unused")
# ... declared variables that are never used
# Additional options
# -warn: enables warnings, where
# truncated_source: Determines whether warnings occur when source exceeds the maximum column width in fixed-format files.
# (too many warnings because we have comments beyond character 132)
# uncalled: Determines whether warnings occur when a statement function is never called
# all:
# -name as_is: case sensitive Fortran!
#------------------------------------------------------------------------------------------------
# Runtime debugging
set (DEBUG_FLAGS "${DEBUG_FLAGS} -g")
# Generate symbolic debugging information in the object file
set (DEBUG_FLAGS "${DEBUG_FLAGS} -traceback")
# Generate extra information in the object file to provide source file traceback information when a severe error occurs at run time
set (DEBUG_FLAGS "${DEBUG_FLAGS} -gen-interfaces")
# Generate an interface block for each routine. http://software.intel.com/en-us/blogs/2012/01/05/doctor-fortran-gets-explicit-again/
set (DEBUG_FLAGS "${DEBUG_FLAGS} -fp-stack-check")
# Generate extra code after every function call to ensure that the floating-point (FP) stack is in the expected state
set (DEBUG_FLAGS "${DEBUG_FLAGS} -fp-model strict")
# Trap uninitalized variables
set (DEBUG_FLAGS "${DEBUG_FLAGS} -check" )
# Checks at runtime ...
set (DEBUG_FLAGS "${DEBUG_FLAGS} bounds")
# ... if an array index is too small (<1) or too large!
set (DEBUG_FLAGS "${DEBUG_FLAGS},format")
# ... for the data type of an item being formatted for output.
set (DEBUG_FLAGS "${DEBUG_FLAGS},output_conversion")
# ... for the fit of data items within a designated format descriptor field.
set (DEBUG_FLAGS "${DEBUG_FLAGS},pointers")
# ... for certain disassociated or uninitialized pointers or unallocated allocatable objects.
set (DEBUG_FLAGS "${DEBUG_FLAGS},uninit")
# ... for uninitialized variables.
set (DEBUG_FLAGS "${DEBUG_FLAGS} -ftrapuv")
# ... initializes stack local variables to an unusual value to aid error detection
set (DEBUG_FLAGS "${DEBUG_FLAGS} -fpe-all=0")
# ... capture all floating-point exceptions, sets -ftz automatically
# disable due to compiler bug https://community.intel.com/t5/Intel-Fortran-Compiler/false-positive-stand-f18-and-IEEE-SELECTED-REAL-KIND/m-p/1227336
#set (DEBUG_FLAGS "${DEBUG_FLAGS} -warn")
# enables warnings ...
#set (DEBUG_FLAGS "${DEBUG_FLAGS} errors")
# ... warnings are changed to errors
#set (DEBUG_FLAGS "${DEBUG_FLAGS},stderrors")
# ... warnings about Fortran standard violations are changed to errors
set (DEBUG_FLAGS "${DEBUG_FLAGS} -debug-parameters all")
# generate debug information for parameters
# Additional options
# -heap-arrays: Should not be done for OpenMP, but set "ulimit -s unlimited" on shell. Probably it helps also to unlimit other limits
# -check: Checks at runtime, where
# arg_temp_created: will cause a lot of warnings because we create a bunch of temporary arrays (performance?)
# stack:
#------------------------------------------------------------------------------------------------
# precision settings
set (PRECISION_FLAGS "${PRECISION_FLAGS} -real-size 64")
# set precision for standard real to 32 | 64 | 128 (= 4 | 8 | 16 bytes, type pReal is always 8 bytes)


@@ -1,17 +1,12 @@
 # Tasan et.al. 2015 Acta Materalia
 # Tasan et.al. 2015 International Journal of Plasticity
 # Diehl et.al. 2015 Meccanica
-Martensite:
-  lattice: cI
-  mechanical:
-    elastic: {C_11: 417.4e+9, C_12: 242.4e+9, C_44: 211.1e+9, type: Hooke}
-    plastic:
-      N_sl: [12, 12]
-      a_sl: 2.0
-      dot_gamma_0_sl: 0.001
-      h_0_sl-sl: 563.0e+9
-      h_sl-sl: [1, 1.4, 1, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4]
-      n_sl: 20
-      type: phenopowerlaw
-      xi_0_sl: [405.8e+6, 456.7e+6]
-      xi_inf_sl: [872.9e+6, 971.2e+6]
+N_sl: [12, 12]
+a_sl: 2.0
+dot_gamma_0_sl: 0.001
+h_0_sl-sl: 563.0e+9
+h_sl-sl: [1, 1.4, 1, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4]
+n_sl: 20
+type: phenopowerlaw
+xi_0_sl: [405.8e+6, 456.7e+6]
+xi_inf_sl: [872.9e+6, 971.2e+6]


@@ -0,0 +1,6 @@
references:
- H.M. Ledbetter
physica status solidi (a) 85(1):89-96, 1984
https://doi.org/10.1002/pssa.2210850111
lattice: cF
rho: 7937.0


@@ -0,0 +1,4 @@
references:
- https://en.wikipedia.org/wiki/Silver
lattice: cF
rho: 10490.0


@@ -0,0 +1,7 @@
type: thermalexpansion
references:
- R.H. Bogaard et al.
Thermochimica Acta 218:373-393, 1993
https://doi.org/10.1016/0040-6031(93)80437-F
A_11: 15.0e-6
T_ref: 300.0


@@ -0,0 +1,8 @@
type: Hooke
references:
- H.M. Ledbetter
physica status solidi (a) 85(1):89-96, 1984
https://doi.org/10.1002/pssa.2210850111
C_11: 204.6e+9
C_12: 137.7e+9
C_44: 126.2e+9


@@ -0,0 +1,22 @@
type: Hooke
references:
- J.R. Neighbours and G.A. Alers,
Physical Review 111:707-712, 1958,
https://doi.org/10.1103/PhysRev.111.707
- Y.A. Chang and L. Himmel,
Journal of Applied Physics 37:3567-3572, 1966,
https://doi.org/10.1063/1.1708903
T_ref: 300
C_11: 122.9e+9
C_11,T: -313.5e+5
C_11,T^2: -107.3e+2
C_12: 91.55e+9
C_12,T: -164.1e+5
C_12,T^2: -681.6e+1
C_44: 42.63e+9
C_44,T: -180.5e+5
C_44,T^2: -353.8e+1


@@ -1,8 +1,22 @@
 type: Hooke

 references:
-  - J. Vallin et al.,
-    Journal of Applied Physics 35(6):1825-1826, 1964,
-    https://doi.org/10.1063/1.1713749
+  - G.N. Kamm and G.A. Alers,
+    Journal of Applied Physics 35:327-330, 1964,
+    https://doi.org/10.1063/1.1713309
+  - D. Gerlich and E.S. Fisher,
+    Journal of Physics and Chemistry of Solids 30:1197-1205, 1969
+    https://doi.org/10.1016/0022-3697(69)90377-1

-C_11: 107.3e+9
-C_12: 60.8e+9
-C_44: 28.3e+9
+T_ref: 300
+
+C_11: 106.1e+9
+C_11,T: -359.3e+5
+C_11,T^2: -152.7e+2
+
+C_12: 57.83e+9
+C_12,T: -781.6e+4
+C_12,T^2: -551.3e+1
+
+C_44: 24.31e+9
+C_44,T: -142.9e+5
+C_44,T^2: -404.6e+1


@@ -1,9 +1,19 @@
 type: Hooke

 references:
-  - J.P. Hirth and J. Lothe,
-    Theory of Dislocations, 1982,
-    John Wiley & Sons,
-    page 837
+  - D.J. Dever,
+    Journal of Applied Physics 43(8):3293-3301, 1972,
+    https://doi.org/10.1063/1.1661710

-C_11: 242.e9
-C_12: 146.5e+9
-C_44: 112.e9
+T_ref: 300
+
+C_11: 231.7e+9
+C_11,T: -47.6e+6
+C_11,T^2: -54.4e+3
+
+C_12: 135.8e+9
+C_12,T: -12.9e+6
+C_12,T^2: -7.3e+3
+
+C_44: 116.8e+9
+C_44,T: -19.4e+6
+C_44,T^2: -2.5e+3


@@ -0,0 +1,8 @@
type: Hooke
references:
- S.A. Kim and W.L. Johnson,
Materials Science & Engineering A 452-453:633-639, 2007,
https://doi.org/10.1016/j.msea.2006.11.147
C_11: 268.1e+9
C_12: 111.2e+9
C_44: 79.06e+9


@@ -4,7 +4,8 @@ references:
     International Journal of Plasticity 134:102779, 2020,
     https://doi.org/10.1016/j.ijplas.2020.102779
   - K. Sedighiani et al.,
-    Mechanics of Materials, submitted
+    Mechanics of Materials, 164:104117, 2022,
+    https://doi.org/10.1016/j.mechmat.2021.104117

 output: [rho_dip, rho_mob]
 N_sl: [12, 12]
 b_sl: [2.49e-10, 2.49e-10]


@@ -0,0 +1,9 @@
references:
- B.F. Blackwell et al.
Proceedings of 34th National Heat Transfer Conference 2000
https://www.osti.gov/servlets/purl/760791
- R.H. Bogaard et al.
Thermochimica Acta 218:373-393, 1993
https://doi.org/10.1016/0040-6031(93)80437-F
C_p: 470.0
K_11: 14.34


@@ -67,9 +67,7 @@ os.system(f'xvfb-run -a {executable} -compile {menu_file}')

 print('setting file access rights...')
-files = (glob.glob(str(marc_root/f'marc{marc_version}/tools/*_damask*')) +
-         glob.glob(str(marc_root/f'mentat{marc_version}/bin/kill[4-6]')) +
-         glob.glob(str(marc_root/f'mentat{marc_version}/bin/submit[4-6]')))
-for file in files:
+for file in (glob.glob(str(marc_root/f'marc{marc_version}/tools/*_damask*')) +
+             glob.glob(str(marc_root/f'mentat{marc_version}/bin/kill[4-6]')) +
+             glob.glob(str(marc_root/f'mentat{marc_version}/bin/submit[4-6]'))):
     os.chmod(file , 0o755)


@@ -1,71 +0,0 @@
#!/usr/bin/env python3
import os
import sys
from io import StringIO
from optparse import OptionParser
import damask
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version])
# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------
parser = OptionParser(usage='%prog options [ASCIItable(s)]', description = """
Add displacments resulting from deformation gradient field.
Operates on periodic three-dimensional x,y,z-ordered data sets.
Outputs at cell centers or cell nodes (into separate file).
""", version = scriptID)
parser.add_option('-f',
'--defgrad',
dest = 'f',
metavar = 'string',
help = 'label of deformation gradient [%default]')
parser.add_option('-p',
'--pos', '--position',
dest = 'pos',
metavar = 'string',
help = 'label of coordinates [%default]')
parser.add_option('--nodal',
dest = 'nodal',
action = 'store_true',
help = 'output nodal (instead of cell-centered) displacements')
parser.set_defaults(f = 'f',
pos = 'pos',
)
(options,filenames) = parser.parse_args()
for name in filenames:
damask.util.report(scriptName,name)
table = damask.Table.load(StringIO(''.join(sys.stdin.read())) if name is None else name)
grid,size,origin = damask.grid_filters.cellsSizeOrigin_coordinates0_point(table.get(options.pos))
F = table.get(options.f).reshape(tuple(grid)+(-1,),order='F').reshape(tuple(grid)+(3,3))
if options.nodal:
damask.Table(damask.grid_filters.coordinates0_node(grid,size).reshape(-1,3,order='F'),
{'pos':(3,)})\
.add('avg({}).{}'.format(options.f,options.pos),
damask.grid_filters.displacement_avg_node(size,F).reshape(-1,3,order='F'),
scriptID+' '+' '.join(sys.argv[1:]))\
.add('fluct({}).{}'.format(options.f,options.pos),
damask.grid_filters.displacement_fluct_node(size,F).reshape(-1,3,order='F'),
scriptID+' '+' '.join(sys.argv[1:]))\
.save((sys.stdout if name is None else os.path.splitext(name)[0]+'_nodal.txt'))
else:
table.add('avg({}).{}'.format(options.f,options.pos),
damask.grid_filters.displacement_avg_point(size,F).reshape(-1,3,order='F'),
scriptID+' '+' '.join(sys.argv[1:]))\
.add('fluct({}).{}'.format(options.f,options.pos),
damask.grid_filters.displacement_fluct_point(size,F).reshape(-1,3,order='F'),
scriptID+' '+' '.join(sys.argv[1:]))\
.save((sys.stdout if name is None else name))


@@ -1 +1 @@
-v3.0.0-alpha5-272-g3192a31e1
+v3.0.0-alpha5-379-g731222d09


@@ -8,6 +8,7 @@ with open(_Path(__file__).parent/_Path('VERSION')) as _f:
     version = _re.sub(r'^v','',_f.readline().strip())
     __version__ = version

+from . import _typehints # noqa
 from . import util # noqa
 from . import seeds # noqa
 from . import tensor # noqa
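The new _typehints module is only imported here; as a rough, non-authoritative sketch (the exact definitions live in python/damask/_typehints.py and may differ), the aliases used throughout this commit are plain typing unions:

# Hypothetical sketch of damask/_typehints.py; alias names are taken from this diff, definitions are assumed.
from typing import Sequence, Union, TextIO
from pathlib import Path
import numpy as np

FloatSequence = Union[np.ndarray, Sequence[float]]   # anything acceptable where a float vector is expected
IntSequence   = Union[np.ndarray, Sequence[int]]
FileHandle    = Union[TextIO, str, Path]             # replaces the repeated Union[TextIO, str, Path] annotations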


@@ -3,9 +3,10 @@ import json
 import functools
 import colorsys
 from pathlib import Path
-from typing import Sequence, Union, TextIO
+from typing import Union, TextIO

 import numpy as np
+import scipy.interpolate as interp
 import matplotlib as mpl
 if os.name == 'posix' and 'DISPLAY' not in os.environ:
     mpl.use('Agg')

@@ -13,6 +14,7 @@ import matplotlib.pyplot as plt
 from matplotlib import cm
 from PIL import Image

+from ._typehints import FloatSequence, FileHandle
 from . import util
 from . import Table
@@ -41,7 +43,7 @@ class Colormap(mpl.colors.ListedColormap):
       https://doi.org/10.1016/j.ijplas.2012.09.012
     Matplotlib colormaps overview
-      https://matplotlib.org/tutorials/colors/colormaps.html
+      https://matplotlib.org/stable/tutorials/colors/colormaps.html

     """

@@ -77,8 +79,8 @@ class Colormap(mpl.colors.ListedColormap):
     @staticmethod
-    def from_range(low: Sequence[float],
-                   high: Sequence[float],
+    def from_range(low: FloatSequence,
+                   high: FloatSequence,
                    name: str = 'DAMASK colormap',
                    N: int = 256,
                    model: str = 'rgb') -> 'Colormap':

@@ -128,7 +130,7 @@ class Colormap(mpl.colors.ListedColormap):
         if model.lower() not in toMsh:
             raise ValueError(f'Invalid color model: {model}.')

-        low_high = np.vstack((low,high))
+        low_high = np.vstack((low,high)).astype(float)
         out_of_bounds = np.bool_(False)

         if model.lower() == 'rgb':

@@ -141,7 +143,7 @@ class Colormap(mpl.colors.ListedColormap):
             out_of_bounds = np.any(low_high[:,0]<0)

         if out_of_bounds:
-            raise ValueError(f'{model.upper()} colors {low} | {high} are out of bounds.')
+            raise ValueError(f'{model.upper()} colors {low_high[0]} | {low_high[1]} are out of bounds.')

         low_,high_ = map(toMsh[model.lower()],low_high)
         msh = map(functools.partial(Colormap._interpolate_msh,low=low_,high=high_),np.linspace(0,1,N))
@@ -191,19 +193,50 @@ class Colormap(mpl.colors.ListedColormap):
         return Colormap.from_range(definition['low'],definition['high'],name,N)

+    def at(self,
+           fraction : Union[float,FloatSequence]) -> np.ndarray:
+        """
+        Interpolate color at fraction.
+
+        Parameters
+        ----------
+        fraction : float or sequence of float
+            Fractional coordinate(s) to evaluate Colormap at.
+
+        Returns
+        -------
+        color : numpy.ndarray, shape(...,4)
+            RGBA values of interpolated color(s).
+
+        Examples
+        --------
+        >>> import damask
+        >>> cmap = damask.Colormap.from_predefined('gray')
+        >>> cmap.at(0.5)
+        array([0.5, 0.5, 0.5, 1. ])
+        >>> 'rgb({},{},{})'.format(*cmap.at(0.5))
+        'rgb(0.5,0.5,0.5)'
+
+        """
+        return interp.interp1d(np.linspace(0,1,self.N),
+                               self.colors,
+                               axis=0,
+                               assume_sorted=True)(fraction)
+
     def shade(self,
               field: np.ndarray,
-              bounds: Sequence[float] = None,
+              bounds: FloatSequence = None,
               gap: float = None) -> Image:
         """
         Generate PIL image of 2D field using colormap.

         Parameters
         ----------
-        field : numpy.array, shape (:,:)
+        field : numpy.ndarray, shape (:,:)
             Data to be shaded.
         bounds : sequence of float, len (2), optional
-            Value range (low,high) spanned by colormap.
+            Value range (left,right) spanned by colormap.
         gap : field.dtype, optional
             Transparent value. NaN will always be rendered transparent.
@@ -213,21 +246,20 @@ class Colormap(mpl.colors.ListedColormap):
             RGBA image of shaded data.

         """
-        N = len(self.colors)
         mask = np.logical_not(np.isnan(field) if gap is None else \
                               np.logical_or (np.isnan(field), field == gap))        # mask NaN (and gap if present)

-        lo,hi = (field[mask].min(),field[mask].max()) if bounds is None else \
-                (min(bounds[:2]),max(bounds[:2]))
+        l,r = (field[mask].min(),field[mask].max()) if bounds is None else \
+              np.array(bounds,float)[:2]

-        delta,avg = hi-lo,0.5*(hi+lo)
-        if delta * 1e8 <= avg:                                                       # delta is similar to numerical noise
-            hi,lo = hi+0.5*avg,lo-0.5*avg                                            # extend range to have actual data centered within
+        delta,avg = r-l,0.5*abs(r+l)
+        if abs(delta) * 1e8 <= avg:                                                  # delta is similar to numerical noise
+            l,r = l-0.5*avg*np.sign(delta),r+0.5*avg*np.sign(delta),                 # extend range to have actual data centered within

         return Image.fromarray(
             (np.dstack((
-                        self.colors[(np.round(np.clip((field-lo)/(hi-lo),0.0,1.0)*(N-1))).astype(np.uint16),:3],
+                        self.colors[(np.round(np.clip((field-l)/delta,0.0,1.0)*(self.N-1))).astype(np.uint16),:3],
                         mask.astype(float)
                        )
             )*255
@@ -261,7 +293,7 @@ class Colormap(mpl.colors.ListedColormap):

     def _get_file_handle(self,
-                         fname: Union[TextIO, str, Path, None],
+                         fname: Union[FileHandle, None],
                          suffix: str = '') -> TextIO:
         """
         Provide file handle.

@@ -288,7 +320,7 @@ class Colormap(mpl.colors.ListedColormap):
             return fname

-    def save_paraview(self, fname: Union[TextIO, str, Path] = None):
+    def save_paraview(self, fname: FileHandle = None):
         """
         Save as JSON file for use in Paraview.

@@ -315,7 +347,7 @@ class Colormap(mpl.colors.ListedColormap):
         fhandle.write('\n')

-    def save_ASCII(self, fname: Union[TextIO, str, Path] = None):
+    def save_ASCII(self, fname: FileHandle = None):
         """
         Save as ASCII file.

@@ -330,7 +362,7 @@ class Colormap(mpl.colors.ListedColormap):
         t.save(self._get_file_handle(fname,'.txt'))

-    def save_GOM(self, fname: Union[TextIO, str, Path] = None):
+    def save_GOM(self, fname: FileHandle = None):
         """
         Save as ASCII file for use in GOM Aramis.

@@ -343,14 +375,14 @@ class Colormap(mpl.colors.ListedColormap):
         # ToDo: test in GOM
         GOM_str = '1 1 {name} 9 {name} '.format(name=self.name.replace(" ","_")) \
                 + '0 1 0 3 0 0 -1 9 \\ 0 0 0 255 255 255 0 0 255 ' \
-                + f'30 NO_UNIT 1 1 64 64 64 255 1 0 0 0 0 0 0 3 0 {len(self.colors)}' \
+                + f'30 NO_UNIT 1 1 64 64 64 255 1 0 0 0 0 0 0 3 0 {self.N}' \
                 + ' '.join([f' 0 {c[0]} {c[1]} {c[2]} 255 1' for c in reversed((self.colors*255).astype(int))]) \
                 + '\n'

         self._get_file_handle(fname,'.legend').write(GOM_str)

-    def save_gmsh(self, fname: Union[TextIO, str, Path] = None):
+    def save_gmsh(self, fname: FileHandle = None):
         """
         Save as ASCII file for use in gmsh.
@@ -581,7 +613,7 @@ class Colormap(mpl.colors.ListedColormap):

     @staticmethod
-    def _lab2xyz(lab: np.ndarray, ref_white: np.ndarray = None) -> np.ndarray:
+    def _lab2xyz(lab: np.ndarray, ref_white: np.ndarray = _REF_WHITE) -> np.ndarray:
         """
         CIE Lab to CIE Xyz.

@@ -589,6 +621,8 @@ class Colormap(mpl.colors.ListedColormap):
         ----------
         lab : numpy.ndarray, shape (3)
             CIE lab values.
+        ref_white : numpy.ndarray, shape (3)
+            Reference white, default value is the standard 2° observer for D65.

         Returns
         -------

@@ -607,10 +641,10 @@ class Colormap(mpl.colors.ListedColormap):
                           f_x**3. if f_x**3. > _EPS else (116.*f_x-16.)/_KAPPA,
                           ((lab[0]+16.)/116.)**3 if lab[0]>_KAPPA*_EPS else lab[0]/_KAPPA,
                           f_z**3. if f_z**3. > _EPS else (116.*f_z-16.)/_KAPPA
-                         ])*(ref_white if ref_white is not None else _REF_WHITE)
+                         ])*ref_white

     @staticmethod
-    def _xyz2lab(xyz: np.ndarray, ref_white: np.ndarray = None) -> np.ndarray:
+    def _xyz2lab(xyz: np.ndarray, ref_white: np.ndarray = _REF_WHITE) -> np.ndarray:
         """
         CIE Xyz to CIE Lab.

@@ -618,6 +652,8 @@ class Colormap(mpl.colors.ListedColormap):
         ----------
         xyz : numpy.ndarray, shape (3)
             CIE Xyz values.
+        ref_white : numpy.ndarray, shape (3)
+            Reference white, default value is the standard 2° observer for D65.

         Returns
         -------

@@ -629,7 +665,6 @@ class Colormap(mpl.colors.ListedColormap):
         http://www.brucelindbloom.com/index.html?Eqn_Lab_to_XYZ.html

         """
-        ref_white = ref_white if ref_white is not None else _REF_WHITE
         f = np.where(xyz/ref_white > _EPS,(xyz/ref_white)**(1./3.),(_KAPPA*xyz/ref_white+16.)/116.)

         return np.array([
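For orientation, a hedged usage sketch of the reworked shade() and new at() API; colormap name and field values are invented and not part of the commit:

import numpy as np
import damask

cmap  = damask.Colormap.from_predefined('viridis')       # any predefined colormap
field = np.random.random((64,64))

print(cmap.at([0.0,0.5,1.0]))                            # new in this commit: vectorized interpolation to RGBA
cmap.shade(field,bounds=(0.0,1.0)).save('field.png')     # bounds now taken as (left,right) via np.array(bounds,float)[:2]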


@@ -114,12 +114,13 @@ class Crystal():

     def __repr__(self):
         """Represent."""
-        return '\n'.join([f'Crystal family {self.family}']
-                       + ([] if self.lattice is None else [f'Bravais lattice {self.lattice}']+
-                          list(map(lambda x:f'{x[0]}: {x[1]:.5g}',
-                                   zip(['a','b','c','α','β','γ',],
-                                       self.parameters))))
-                        )
+        family = f'Crystal family: {self.family}'
+        return family if self.lattice is None else \
+               '\n'.join([family,
+                          f'Bravais lattice: {self.lattice}',
+                          'a={:.5g}m, b={:.5g}m, c={:.5g}m'.format(*self.parameters[:3]),
+                          'α={:.5g}°, β={:.5g}°, γ={:.5g}°'.format(*np.degrees(self.parameters[3:]))])

     def __eq__(self,other):
         """
@@ -378,7 +379,7 @@ class Crystal():
 """
 _kinematics = {
 'cF': {
-'slip' :[np.array([
+'slip': [np.array([
 [+0,+1,-1, +1,+1,+1],
 [-1,+0,+1, +1,+1,+1],
 [+1,-1,+0, +1,+1,+1],

@@ -398,7 +399,7 @@ class Crystal():
 [+1,+0,-1, +1,+0,+1],
 [+0,+1,+1, +0,+1,-1],
 [+0,+1,-1, +0,+1,+1]])],
-'twin' :[np.array([
+'twin': [np.array([
 [-2, 1, 1, 1, 1, 1],
 [ 1,-2, 1, 1, 1, 1],
 [ 1, 1,-2, 1, 1, 1],

@@ -413,7 +414,7 @@ class Crystal():
 [-1, 1, 2, -1, 1,-1]])]
 },
 'cI': {
-'slip' :[np.array([
+'slip': [np.array([
 [+1,-1,+1, +0,+1,+1],
 [-1,-1,+1, +0,+1,+1],
 [+1,+1,+1, +0,-1,+1],

@@ -464,7 +465,7 @@ class Crystal():
 [+1,+1,+1, -3,+2,+1],
 [+1,+1,-1, +3,-2,+1],
 [+1,-1,+1, +3,+2,-1]])],
-'twin' :[np.array([
+'twin': [np.array([
 [-1, 1, 1, 2, 1, 1],
 [ 1, 1, 1, -2, 1, 1],
 [ 1, 1,-1, 2,-1, 1],

@@ -479,7 +480,7 @@ class Crystal():
 [ 1, 1, 1, 1, 1,-2]])]
 },
 'hP': {
-'slip' :[np.array([
+'slip': [np.array([
 [+2,-1,-1,+0, +0,+0,+0,+1],
 [-1,+2,-1,+0, +0,+0,+0,+1],
 [-1,-1,+2,+0, +0,+0,+0,+1]]),

@@ -514,7 +515,7 @@ class Crystal():
 [+1,+1,-2,+3, -1,-1,+2,+2],
 [-1,+2,-1,+3, +1,-2,+1,+2],
 [-2,+1,+1,+3, +2,-1,-1,+2]])],
-'twin' :[np.array([
+'twin': [np.array([
 [-1, 0, 1, 1, 1, 0,-1, 2], # shear = (3-(c/a)^2)/(sqrt(3) c/a) <-10.1>{10.2}
 [ 0,-1, 1, 1, 0, 1,-1, 2],
 [ 1,-1, 0, 1, -1, 1, 0, 2],

@@ -542,7 +543,74 @@ class Crystal():
 [-1,-1, 2,-3, -1,-1, 2, 2],
 [ 1,-2, 1,-3, 1,-2, 1, 2],
 [ 2,-1,-1,-3, 2,-1,-1, 2]])]
 },
'tI': {
'slip': [np.array([
[+0,+0,+1, +1,+0,+0],
[+0,+0,+1, +0,+1,+0]]),
np.array([
[+0,+0,+1, +1,+1,+0],
[+0,+0,+1, -1,+1,+0]]),
np.array([
[+0,+1,+0, +1,+0,+0],
[+1,+0,+0, +0,+1,+0]]),
np.array([
[+1,-1,+1, +1,+1,+0],
[+1,-1,-1, +1,+1,+0],
[-1,-1,-1, -1,+1,+0],
[-1,-1,+1, -1,+1,+0]]),
np.array([
[+1,-1,+0, +1,+1,+0],
[+1,+1,+0, +1,-1,+0]]),
np.array([
[+0,+1,+1, +1,+0,+0],
[+0,-1,+1, +1,+0,+0],
[-1,+0,+1, +0,+1,+0],
[+1,+0,+1, +0,+1,+0]]),
np.array([
[+0,+1,+0, +0,+0,+1],
[+1,+0,+0, +0,+0,+1]]),
np.array([
[+1,+1,+0, +0,+0,+1],
[-1,+1,+0, +0,+0,+1]]),
np.array([
[+0,+1,-1, +0,+1,+1],
[+0,-1,-1, +0,-1,+1],
[-1,+0,-1, -1,+0,+1],
[+1,+0,-1, +1,+0,+1]]),
np.array([
[+1,-1,+1, +0,+1,+1],
[+1,+1,-1, +0,+1,+1],
[+1,+1,+1, +0,+1,-1],
[-1,+1,+1, +0,+1,-1],
[+1,-1,-1, +1,+0,+1],
[-1,-1,+1, +1,+0,+1],
[+1,+1,+1, +1,+0,-1],
[+1,-1,+1, +1,+0,-1]]),
np.array([
[+1,+0,+0, +0,+1,+1],
[+1,+0,+0, +0,+1,-1],
[+0,+1,+0, +1,+0,+1],
[+0,+1,+0, +1,+0,-1]]),
np.array([
[+0,+1,-1, +2,+1,+1],
[+0,-1,-1, +2,-1,+1],
[+1,+0,-1, +1,+2,+1],
[-1,+0,-1, -1,+2,+1],
[+0,+1,-1, -2,+1,+1],
[+0,-1,-1, -2,-1,+1],
[-1,+0,-1, -1,-2,+1],
[+1,+0,-1, +1,-2,+1]]),
np.array([
[-1,+1,+1, +2,+1,+1],
[-1,-1,+1, +2,-1,+1],
[+1,-1,+1, +1,+2,+1],
[-1,-1,+1, -1,+2,+1],
[+1,+1,+1, -2,+1,+1],
[+1,-1,+1, -2,-1,+1],
[-1,+1,+1, -1,-2,+1],
[+1,+1,+1, +1,-2,+1]])]
}
 }
 master = _kinematics[self.lattice][mode]
 if self.lattice == 'hP':
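As a quick orientation for the reworked __repr__ and the newly defined tI (tetragonal) slip systems, a minimal sketch with invented lattice parameters; the printed output is shown only schematically:

import damask

c = damask.Crystal(lattice='tI', a=3.2e-10, c=5.1e-10)   # hypothetical tetragonal crystal
print(c)
# Crystal family: tetragonal
# Bravais lattice: tI
# a=3.2e-10m, b=3.2e-10m, c=5.1e-10m
# α=90°, β=90°, γ=90°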


@@ -3,6 +3,9 @@ import copy
 import warnings
 import multiprocessing as mp
 from functools import partial
+import typing
+from typing import Union, Optional, TextIO, List, Sequence
+from pathlib import Path

 import numpy as np
 import pandas as pd

@@ -13,7 +16,8 @@ from . import VTK
 from . import util
 from . import grid_filters
 from . import Rotation
+from . import Table
+from ._typehints import FloatSequence, IntSequence


 class Grid:
     """
@@ -25,30 +29,34 @@
     the physical size.

     """

-    def __init__(self,material,size,origin=[0.0,0.0,0.0],comments=[]):
+    def __init__(self,
+                 material: np.ndarray,
+                 size: FloatSequence,
+                 origin: FloatSequence = np.zeros(3),
+                 comments: Union[str, Sequence[str]] = []):
         """
         New geometry definition for grid solvers.

         Parameters
         ----------
-        material : numpy.ndarray of shape (:,:,:)
+        material : numpy.ndarray, shape (:,:,:)
             Material indices. The shape of the material array defines
             the number of cells.
-        size : list or numpy.ndarray of shape (3)
+        size : sequence of float, len (3)
             Physical size of grid in meter.
-        origin : list or numpy.ndarray of shape (3), optional
-            Coordinates of grid origin in meter.
-        comments : list of str, optional
+        origin : sequence of float, len (3), optional
+            Coordinates of grid origin in meter. Defaults to [0.0,0.0,0.0].
+        comments : (list of) str, optional
             Comments, e.g. history of operations.

         """
         self.material = material
-        self.size = size
-        self.origin = origin
-        self.comments = comments
+        self.size = size           # type: ignore
+        self.origin = origin       # type: ignore
+        self.comments = comments   # type: ignore

-    def __repr__(self):
+    def __repr__(self) -> str:
         """Basic information on grid definition."""
         mat_min = np.nanmin(self.material)
         mat_max = np.nanmax(self.material)
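A minimal usage sketch of the annotated constructor; cell counts, size, and comment are invented:

import numpy as np
import damask

material = np.zeros((8,8,8), dtype=int)                # material: np.ndarray, shape (:,:,:)
grid = damask.Grid(material,
                   size=[1.0e-5,1.0e-5,1.0e-5],        # size: FloatSequence, len (3)
                   comments='example grid')            # a single str is accepted as well
print(grid)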
@@ -62,14 +70,14 @@
                        ])

-    def __copy__(self):
+    def __copy__(self) -> "Grid":
         """Create deep copy."""
         return copy.deepcopy(self)

     copy = __copy__

-    def __eq__(self,other):
+    def __eq__(self, other: object) -> bool:
         """
         Test equality of other.

@@ -79,22 +87,24 @@
             Grid to compare self against.

         """
-        return (np.allclose(other.size,self.size)
+        if not isinstance(other, Grid):
+            return NotImplemented
+        return bool(np.allclose(other.size,self.size)
                     and np.allclose(other.origin,self.origin)
                     and np.all(other.cells == self.cells)
                     and np.all(other.material == self.material))

     @property
-    def material(self):
+    def material(self) -> np.ndarray:
         """Material indices."""
         return self._material

     @material.setter
-    def material(self,material):
+    def material(self, material: np.ndarray):
         if len(material.shape) != 3:
             raise ValueError(f'invalid material shape {material.shape}')
-        elif material.dtype not in np.sctypes['float'] + np.sctypes['int']:
+        elif material.dtype not in np.sctypes['float'] and material.dtype not in np.sctypes['int']:
             raise TypeError(f'invalid material data type {material.dtype}')
         else:
             self._material = np.copy(material)
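A hedged sketch of what the stricter __eq__ buys (values invented): comparing against a foreign type now falls back to NotImplemented instead of failing on attribute access.

import numpy as np
import damask

g = damask.Grid(np.zeros((4,4,4),int), size=[1.0,1.0,1.0])

print(g == g.copy())      # True, returned as a plain bool
print(g == 'not a grid')  # False; NotImplemented lets Python fall back to identity comparison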
@@ -105,59 +115,59 @@

     @property
-    def size(self):
+    def size(self) -> np.ndarray:
         """Physical size of grid in meter."""
         return self._size

     @size.setter
-    def size(self,size):
+    def size(self, size: FloatSequence):
         if len(size) != 3 or any(np.array(size) < 0):
             raise ValueError(f'invalid size {size}')
         else:
             self._size = np.array(size)

     @property
-    def origin(self):
+    def origin(self) -> np.ndarray:
         """Coordinates of grid origin in meter."""
         return self._origin

     @origin.setter
-    def origin(self,origin):
+    def origin(self, origin: FloatSequence):
         if len(origin) != 3:
             raise ValueError(f'invalid origin {origin}')
         else:
             self._origin = np.array(origin)

     @property
-    def comments(self):
+    def comments(self) -> List[str]:
         """Comments, e.g. history of operations."""
         return self._comments

     @comments.setter
-    def comments(self,comments):
+    def comments(self, comments: Union[str, Sequence[str]]):
         self._comments = [str(c) for c in comments] if isinstance(comments,list) else [str(comments)]

     @property
-    def cells(self):
+    def cells(self) -> np.ndarray:
         """Number of cells in x,y,z direction."""
         return np.asarray(self.material.shape)

     @property
-    def N_materials(self):
+    def N_materials(self) -> int:
         """Number of (unique) material indices within grid."""
         return np.unique(self.material).size

     @staticmethod
-    def load(fname):
+    def load(fname: Union[str, Path]) -> "Grid":
         """
         Load from VTK image data file.

         Parameters
         ----------
-        fname : str or or pathlib.Path
+        fname : str or pathlib.Path
             Grid file to read. Valid extension is .vti, which will be appended
             if not given.
@@ -178,8 +188,9 @@
                     comments=comments)

+    @typing.no_type_check
     @staticmethod
-    def load_ASCII(fname):
+    def load_ASCII(fname)-> "Grid":
         """
         Load from geom file.

@@ -197,16 +208,18 @@
             Grid-based geometry from file.

         """
-        warnings.warn('Support for ASCII-based geom format will be removed in DAMASK 3.1.0', DeprecationWarning,2)
-        try:
+        warnings.warn('Support for ASCII-based geom format will be removed in DAMASK 3.0.0', DeprecationWarning,2)
+        if isinstance(fname, (str, Path)):
             f = open(fname)
-        except TypeError:
+        elif isinstance(fname, TextIO):
             f = fname
+        else:
+            raise TypeError

         f.seek(0)
         try:
-            header_length,keyword = f.readline().split()[:2]
-            header_length = int(header_length)
+            header_length_,keyword = f.readline().split()[:2]
+            header_length = int(header_length_)
         except ValueError:
             header_length,keyword = (-1, 'invalid')
         if not keyword.startswith('head') or header_length < 3:
@@ -226,19 +239,19 @@
             else:
                 comments.append(line.strip())

-        material = np.empty(cells.prod())                                          # initialize as flat array
+        material = np.empty(int(cells.prod()))                                     # initialize as flat array
         i = 0
         for line in content[header_length:]:
             items = line.split('#')[0].split()
             if len(items) == 3:
                 if items[1].lower() == 'of':
-                    items = np.ones(int(items[0]))*float(items[2])
+                    material_entry = np.ones(int(items[0]))*float(items[2])
                 elif items[1].lower() == 'to':
-                    items = np.linspace(int(items[0]),int(items[2]),
+                    material_entry = np.linspace(int(items[0]),int(items[2]),
                                         abs(int(items[2])-int(items[0]))+1,dtype=float)
-                else: items = list(map(float,items))
-            else:     items = list(map(float,items))
-            material[i:i+len(items)] = items
+                else: material_entry = list(map(float, items))
+            else:     material_entry = list(map(float, items))
+            material[i:i+len(material_entry)] = material_entry
             i += len(items)

         if i != cells.prod():
@@ -251,13 +264,13 @@

     @staticmethod
-    def load_Neper(fname):
+    def load_Neper(fname: Union[str, Path]) -> "Grid":
         """
         Load from Neper VTK file.

         Parameters
         ----------
-        fname : str, pathlib.Path, or file handle
+        fname : str or pathlib.Path
             Geometry file to read.

         Returns

@@ -276,10 +289,10 @@

     @staticmethod
-    def load_DREAM3D(fname,
-                     feature_IDs=None,cell_data=None,
-                     phases='Phases',Euler_angles='EulerAngles',
-                     base_group=None):
+    def load_DREAM3D(fname: Union[str, Path],
+                     feature_IDs: str = None, cell_data: str = None,
+                     phases: str = 'Phases', Euler_angles: str = 'EulerAngles',
+                     base_group: str = None) -> "Grid":
         """
         Load DREAM.3D (HDF5) file.
@@ -290,24 +303,24 @@
         Parameters
         ----------
-        fname : str
+        fname : str or pathlib.Path
             Filename of the DREAM.3D (HDF5) file.
-        feature_IDs : str
+        feature_IDs : str, optional
             Name of the dataset containing the mapping between cells and
             grain-wise data. Defaults to 'None', in which case cell-wise
             data is used.
-        cell_data : str
+        cell_data : str, optional
             Name of the group (folder) containing cell-wise data. Defaults to
             None in which case it is automatically detected.
-        phases : str
+        phases : str, optional
             Name of the dataset containing the phase ID. It is not used for
             grain-wise data, i.e. when feature_IDs is not None.
             Defaults to 'Phases'.
-        Euler_angles : str
+        Euler_angles : str, optional
             Name of the dataset containing the crystallographic orientation as
             Euler angles in radians. It is not used for grain-wise data, i.e.
             when feature_IDs is not None. Defaults to 'EulerAngles'.
-        base_group : str
+        base_group : str, optional
             Path to the group (folder) that contains geometry (_SIMPL_GEOMETRY),
             and grain- or cell-wise data. Defaults to None, in which case
             it is set as the path that contains _SIMPL_GEOMETRY/SPACING.
@@ -339,7 +352,9 @@

     @staticmethod
-    def from_table(table,coordinates,labels):
+    def from_table(table: Table,
+                   coordinates: str,
+                   labels: Union[str, Sequence[str]]) -> "Grid":
         """
         Create grid from ASCII table.

@@ -350,7 +365,7 @@
         coordinates : str
             Label of the vector column containing the spatial coordinates.
             Need to be ordered (1./x fast, 3./z slow).
-        labels : str or list of str
+        labels : (list of) str
             Label(s) of the columns containing the material definition.
             Each unique combination of values results in one material ID.
@@ -372,28 +387,33 @@

     @staticmethod
-    def _find_closest_seed(seeds, weights, point):
+    def _find_closest_seed(seeds: np.ndarray, weights: np.ndarray, point: np.ndarray) -> np.integer:
         return np.argmin(np.sum((np.broadcast_to(point,(len(seeds),3))-seeds)**2,axis=1) - weights)

     @staticmethod
-    def from_Laguerre_tessellation(cells,size,seeds,weights,material=None,periodic=True):
+    def from_Laguerre_tessellation(cells: IntSequence,
+                                   size: FloatSequence,
+                                   seeds: np.ndarray,
+                                   weights: FloatSequence,
+                                   material: IntSequence = None,
+                                   periodic: bool = True):
         """
         Create grid from Laguerre tessellation.

         Parameters
         ----------
-        cells : int numpy.ndarray of shape (3)
+        cells : sequence of int, len (3)
             Number of cells in x,y,z direction.
-        size : list or numpy.ndarray of shape (3)
+        size : sequence of float, len (3)
             Physical size of the grid in meter.
-        seeds : numpy.ndarray of shape (:,3)
+        seeds : numpy.ndarray, shape (:,3)
             Position of the seed points in meter. All points need to lay within the box.
-        weights : numpy.ndarray of shape (seeds.shape[0])
+        weights : sequence of float, len (seeds.shape[0])
             Weights of the seeds. Setting all weights to 1.0 gives a standard Voronoi tessellation.
-        material : numpy.ndarray of shape (seeds.shape[0]), optional
+        material : sequence of int, len (seeds.shape[0]), optional
             Material ID of the seeds.
             Defaults to None, in which case materials are consecutively numbered.
-        periodic : Boolean, optional
+        periodic : bool, optional
             Assume grid to be periodic. Defaults to True.

         Returns
@@ -421,29 +441,33 @@
         if periodic: material_ %= len(weights)

-        return Grid(material = material_ if material is None else material[material_],
+        return Grid(material = material_ if material is None else np.array(material)[material_],
                     size     = size,
                     comments = util.execution_stamp('Grid','from_Laguerre_tessellation'),
                    )

     @staticmethod
-    def from_Voronoi_tessellation(cells,size,seeds,material=None,periodic=True):
+    def from_Voronoi_tessellation(cells: IntSequence,
+                                  size: FloatSequence,
+                                  seeds: np.ndarray,
+                                  material: IntSequence = None,
+                                  periodic: bool = True) -> "Grid":
         """
         Create grid from Voronoi tessellation.

         Parameters
         ----------
-        cells : int numpy.ndarray of shape (3)
+        cells : sequence of int, len (3)
             Number of cells in x,y,z direction.
-        size : list or numpy.ndarray of shape (3)
+        size : sequence of float, len (3)
             Physical size of the grid in meter.
-        seeds : numpy.ndarray of shape (:,3)
+        seeds : numpy.ndarray, shape (:,3)
             Position of the seed points in meter. All points need to lay within the box.
-        material : numpy.ndarray of shape (seeds.shape[0]), optional
+        material : sequence of int, len (seeds.shape[0]), optional
             Material ID of the seeds.
             Defaults to None, in which case materials are consecutively numbered.
-        periodic : Boolean, optional
+        periodic : bool, optional
             Assume grid to be periodic. Defaults to True.

         Returns
@@ -460,7 +484,7 @@
         except TypeError:
             material_ = tree.query(coords, n_jobs = int(os.environ.get('OMP_NUM_THREADS',4)))[1] # scipy <1.6

-        return Grid(material = (material_ if material is None else material[material_]).reshape(cells),
+        return Grid(material = (material_ if material is None else np.array(material)[material_]).reshape(cells),
                     size     = size,
                     comments = util.execution_stamp('Grid','from_Voronoi_tessellation'),
                    )
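A hedged sketch of calling the annotated tessellation API with plain Python sequences (all numbers invented); the np.array(material) wrapping above is what makes a list-valued material argument work:

import numpy as np
import damask

cells = [16,16,16]                        # IntSequence
size  = [1.0e-5,1.0e-5,1.0e-5]            # FloatSequence
seeds = np.random.random((10,3))*1.0e-5   # numpy.ndarray, shape (:,3)

grid = damask.Grid.from_Voronoi_tessellation(cells, size, seeds,
                                             material=list(range(10,20)))  # plain list now accepted
print(grid.N_materials)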
@@ -509,15 +533,20 @@

     @staticmethod
-    def from_minimal_surface(cells,size,surface,threshold=0.0,periods=1,materials=(0,1)):
+    def from_minimal_surface(cells: IntSequence,
+                             size: FloatSequence,
+                             surface: str,
+                             threshold: float = 0.0,
+                             periods: int = 1,
+                             materials: IntSequence = (0,1)) -> "Grid":
         """
         Create grid from definition of triply periodic minimal surface.

         Parameters
         ----------
-        cells : int numpy.ndarray of shape (3)
+        cells : sequence of int, len (3)
             Number of cells in x,y,z direction.
-        size : list or numpy.ndarray of shape (3)
+        size : sequence of float, len (3)
             Physical size of the grid in meter.
         surface : str
             Type of the minimal surface. See notes for details.
@@ -525,7 +554,7 @@
             Threshold of the minimal surface. Defaults to 0.0.
         periods : integer, optional.
             Number of periods per unit cell. Defaults to 1.
-        materials : (int, int), optional
+        materials : sequence of int, len (2)
             Material IDs. Defaults to (0,1).

         Returns
@ -566,22 +595,21 @@ class Grid:
>>> import numpy as np >>> import numpy as np
>>> import damask >>> import damask
>>> damask.Grid.from_minimal_surface(np.array([64]*3,int),np.ones(3), >>> damask.Grid.from_minimal_surface([64]*3,np.ones(3)*1.e-4,'Gyroid')
... 'Gyroid') cells : 64 x 64 x 64
cells a b c: 64 x 64 x 64 size : 0.0001 x 0.0001 x 0.0001 /
size x y z: 1.0 x 1.0 x 1.0 origin: 0.0 0.0 0.0 / m
origin x y z: 0.0 0.0 0.0
# materials: 2 # materials: 2
Minimal surface of 'Neovius' type. non-default material IDs. Minimal surface of 'Neovius' type. non-default material IDs.
>>> import numpy as np >>> import numpy as np
>>> import damask >>> import damask
>>> damask.Grid.from_minimal_surface(np.array([80]*3,int),np.ones(3), >>> damask.Grid.from_minimal_surface([80]*3,np.ones(3)*5.e-4,
... 'Neovius',materials=(1,5)) ... 'Neovius',materials=(1,5))
cells a b c: 80 x 80 x 80 cells : 80 x 80 x 80
size x y z: 1.0 x 1.0 x 1.0 size : 0.0005 x 0.0005 x 0.0005 / m³
origin x y z: 0.0 0.0 0.0 origin: 0.0 0.0 0.0 / m
# materials: 2 (min: 1, max: 5) # materials: 2 (min: 1, max: 5)
""" """
@ -595,7 +623,7 @@ class Grid:
) )
def save(self,fname,compress=True): def save(self, fname: Union[str, Path], compress: bool = True):
""" """
Save as VTK image data file. Save as VTK image data file.
@ -611,10 +639,10 @@ class Grid:
v.add(self.material.flatten(order='F'),'material') v.add(self.material.flatten(order='F'),'material')
v.add_comments(self.comments) v.add_comments(self.comments)
v.save(fname if str(fname).endswith('.vti') else str(fname)+'.vti',parallel=False,compress=compress) v.save(fname,parallel=False,compress=compress)
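The simplified call assumes that damask.VTK.save takes care of the file suffix itself; a minimal usage sketch with an arbitrary file name:

import numpy as np
import damask

g = damask.Grid(np.zeros([16]*3,int),np.ones(3)*1e-5)
g.save('my_geom')   # expected to be written as my_geom.vti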
def save_ASCII(self,fname): def save_ASCII(self, fname: Union[str, TextIO]):
""" """
Save as geom file. Save as geom file.
@ -629,7 +657,7 @@ class Grid:
Compress geometry with 'x of y' and 'a to b'. Compress geometry with 'x of y' and 'a to b'.
""" """
warnings.warn('Support for ASCII-based geom format will be removed in DAMASK 3.1.0', DeprecationWarning,2) warnings.warn('Support for ASCII-based geom format will be removed in DAMASK 3.0.0', DeprecationWarning,2)
header = [f'{len(self.comments)+4} header'] + self.comments \ header = [f'{len(self.comments)+4} header'] + self.comments \
+ ['grid a {} b {} c {}'.format(*self.cells), + ['grid a {} b {} c {}'.format(*self.cells),
'size x {} y {} z {}'.format(*self.size), 'size x {} y {} z {}'.format(*self.size),
@ -644,26 +672,33 @@ class Grid:
header='\n'.join(header), fmt=format_string, comments='') header='\n'.join(header), fmt=format_string, comments='')
def show(self): def show(self) -> None:
"""Show on screen.""" """Show on screen."""
VTK.from_rectilinear_grid(self.cells,self.size,self.origin).show() VTK.from_rectilinear_grid(self.cells,self.size,self.origin).show()
def add_primitive(self,dimension,center,exponent, def add_primitive(self,
fill=None,R=Rotation(),inverse=False,periodic=True): dimension: Union[FloatSequence, IntSequence],
center: Union[FloatSequence, IntSequence],
exponent: Union[FloatSequence, float],
fill: int = None,
R: Rotation = Rotation(),
inverse: bool = False,
periodic: bool = True) -> "Grid":
""" """
Insert a primitive geometric object at a given position. Insert a primitive geometric object at a given position.
Parameters Parameters
---------- ----------
dimension : int or float numpy.ndarray of shape (3) dimension : sequence of int or float, len (3)
Dimension (diameter/side length) of the primitive. If given as Dimension (diameter/side length) of the primitive.
integers, cell centers are addressed. If given as integers, cell centers are addressed.
If given as floats, coordinates are addressed. If given as floats, physical coordinates are addressed.
center : int or float numpy.ndarray of shape (3) center : sequence of int or float, len (3)
Center of the primitive. If given as integers, cell centers are addressed. Center of the primitive.
If given as floats, coordinates in space are addressed. If given as integers, cell centers are addressed.
exponent : numpy.ndarray of shape (3) or float If given as floats, physical coordinates are addressed.
exponent : float or sequence of float, len (3)
Exponents for the three axes. Exponents for the three axes.
0 gives octahedron (ǀxǀ^(2^0) + ǀyǀ^(2^0) + ǀzǀ^(2^0) < 1) 0 gives octahedron (ǀxǀ^(2^0) + ǀyǀ^(2^0) + ǀzǀ^(2^0) < 1)
1 gives sphere (ǀxǀ^(2^1) + ǀyǀ^(2^1) + ǀzǀ^(2^1) < 1) 1 gives sphere (ǀxǀ^(2^1) + ǀyǀ^(2^1) + ǀzǀ^(2^1) < 1)
@ -671,10 +706,10 @@ class Grid:
Fill value for primitive. Defaults to material.max()+1. Fill value for primitive. Defaults to material.max()+1.
R : damask.Rotation, optional R : damask.Rotation, optional
Rotation of primitive. Defaults to no rotation. Rotation of primitive. Defaults to no rotation.
inverse : Boolean, optional inverse : bool, optional
Retain original materials within primitive and fill outside. Retain original materials within primitive and fill outside.
Defaults to False. Defaults to False.
periodic : Boolean, optional periodic : bool, optional
Assume grid to be periodic. Defaults to True. Assume grid to be periodic. Defaults to True.
Returns Returns
@ -690,9 +725,9 @@ class Grid:
>>> import damask >>> import damask
>>> g = damask.Grid(np.zeros([64]*3,int), np.ones(3)*1e-4) >>> g = damask.Grid(np.zeros([64]*3,int), np.ones(3)*1e-4)
>>> g.add_primitive(np.ones(3)*5e-5,np.ones(3)*5e-5,1) >>> g.add_primitive(np.ones(3)*5e-5,np.ones(3)*5e-5,1)
cells a b c: 64 x 64 x 64 cells : 64 x 64 x 64
size x y z: 0.0001 x 0.0001 x 0.0001 size : 0.0001 x 0.0001 x 0.0001 / m³
origin x y z: 0.0 0.0 0.0 origin: 0.0 0.0 0.0 / m
# materials: 2 # materials: 2
Add a cube at the origin. Add a cube at the origin.
@ -701,9 +736,9 @@ class Grid:
>>> import damask >>> import damask
>>> g = damask.Grid(np.zeros([64]*3,int), np.ones(3)*1e-4) >>> g = damask.Grid(np.zeros([64]*3,int), np.ones(3)*1e-4)
>>> g.add_primitive(np.ones(3,int)*32,np.zeros(3),np.inf) >>> g.add_primitive(np.ones(3,int)*32,np.zeros(3),np.inf)
cells a b c: 64 x 64 x 64 cells : 64 x 64 x 64
size x y z: 0.0001 x 0.0001 x 0.0001 size : 0.0001 x 0.0001 x 0.0001 / m³
origin x y z: 0.0 0.0 0.0 origin: 0.0 0.0 0.0 / m
# materials: 2 # materials: 2
""" """
@ -734,13 +769,13 @@ class Grid:
) )
def mirror(self,directions,reflect=False): def mirror(self, directions: Sequence[str], reflect: bool = False) -> "Grid":
""" """
Mirror grid along given directions. Mirror grid along given directions.
Parameters Parameters
---------- ----------
directions : iterable containing str directions : (sequence of) str
Direction(s) along which the grid is mirrored. Direction(s) along which the grid is mirrored.
Valid entries are 'x', 'y', 'z'. Valid entries are 'x', 'y', 'z'.
reflect : bool, optional reflect : bool, optional
@ -759,9 +794,9 @@ class Grid:
>>> import damask >>> import damask
>>> g = damask.Grid(np.zeros([32]*3,int), np.ones(3)*1e-4) >>> g = damask.Grid(np.zeros([32]*3,int), np.ones(3)*1e-4)
>>> g.mirror('xy',True) >>> g.mirror('xy',True)
cells a b c: 64 x 64 x 32 cells : 64 x 64 x 32
size x y z: 0.0002 x 0.0002 x 0.0001 size : 0.0002 x 0.0002 x 0.0001 / m³
origin x y z: 0.0 0.0 0.0 origin: 0.0 0.0 0.0 / m
# materials: 1 # materials: 1
""" """
@ -769,7 +804,7 @@ class Grid:
if not set(directions).issubset(valid): if not set(directions).issubset(valid):
raise ValueError(f'invalid direction {set(directions).difference(valid)} specified') raise ValueError(f'invalid direction {set(directions).difference(valid)} specified')
limits = [None,None] if reflect else [-2,0] limits: Sequence[Optional[int]] = [None,None] if reflect else [-2,0]
mat = self.material.copy() mat = self.material.copy()
if 'x' in directions: if 'x' in directions:
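What the limits slice does can be seen on a single row of indices; this standalone NumPy illustration is not the class code itself:

import numpy as np
a = np.arange(4)                               # one row of material indices: [0 1 2 3]
print(np.concatenate([a,a[-2:0:-1]]))          # limits=[-2,0]      (reflect=False) -> [0 1 2 3 2 1]
print(np.concatenate([a,a[None:None:-1]]))     # limits=[None,None] (reflect=True)  -> [0 1 2 3 3 2 1 0]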
@ -786,13 +821,13 @@ class Grid:
) )
def flip(self,directions): def flip(self, directions: Sequence[str]) -> "Grid":
""" """
Flip grid along given directions. Flip grid along given directions.
Parameters Parameters
---------- ----------
directions : iterable containing str directions : (sequence of) str
Direction(s) along which the grid is flipped. Direction(s) along which the grid is flipped.
Valid entries are 'x', 'y', 'z'. Valid entries are 'x', 'y', 'z'.
@ -815,15 +850,15 @@ class Grid:
) )
def scale(self,cells,periodic=True): def scale(self, cells: IntSequence, periodic: bool = True) -> "Grid":
""" """
Scale grid to new cells. Scale grid to new cells.
Parameters Parameters
---------- ----------
cells : numpy.ndarray of shape (3) cells : sequence of int, len (3)
Number of cells in x,y,z direction. Number of cells in x,y,z direction.
periodic : Boolean, optional periodic : bool, optional
Assume grid to be periodic. Defaults to True. Assume grid to be periodic. Defaults to True.
Returns Returns
@ -839,9 +874,9 @@ class Grid:
>>> import damask >>> import damask
>>> g = damask.Grid(np.zeros([32]*3,int),np.ones(3)*1e-4) >>> g = damask.Grid(np.zeros([32]*3,int),np.ones(3)*1e-4)
>>> g.scale(g.cells*2) >>> g.scale(g.cells*2)
cells a b c: 64 x 64 x 64 cells : 64 x 64 x 64
size x y z: 0.0001 x 0.0001 x 0.0001 size : 0.0001 x 0.0001 x 0.0001 / m³
origin x y z: 0.0 0.0 0.0 origin: 0.0 0.0 0.0 / m
# materials: 1 # materials: 1
""" """
@ -859,7 +894,10 @@ class Grid:
) )
def clean(self,stencil=3,selection=None,periodic=True): def clean(self,
stencil: int = 3,
selection: IntSequence = None,
periodic: bool = True) -> "Grid":
""" """
Smooth grid by selecting most frequent material index within given stencil at each location. Smooth grid by selecting most frequent material index within given stencil at each location.
@ -867,9 +905,9 @@ class Grid:
---------- ----------
stencil : int, optional stencil : int, optional
Size of smoothing stencil. Size of smoothing stencil.
selection : list, optional selection : sequence of int, optional
Field values that can be altered. Defaults to all. Field values that can be altered. Defaults to all.
periodic : Boolean, optional periodic : bool, optional
Assume grid to be periodic. Defaults to True. Assume grid to be periodic. Defaults to True.
Returns Returns
@ -878,7 +916,7 @@ class Grid:
Updated grid-based geometry. Updated grid-based geometry.
""" """
def mostFrequent(arr,selection=None): def mostFrequent(arr: np.ndarray, selection = None):
me = arr[arr.size//2] me = arr[arr.size//2]
if selection is None or me in selection: if selection is None or me in selection:
unique, inverse = np.unique(arr, return_inverse=True) unique, inverse = np.unique(arr, return_inverse=True)
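A sketch of how such a stencil function is typically applied to the whole grid; scipy.ndimage.generic_filter with mode='wrap' stands in for the periodic case, and the selection argument is omitted for brevity:

import numpy as np
from scipy import ndimage

def mostFrequent(arr):
    unique,inverse = np.unique(arr,return_inverse=True)
    return unique[np.argmax(np.bincount(inverse))]

m = np.random.default_rng(1).integers(0,3,(8,8,8))                     # toy material field
smoothed = ndimage.generic_filter(m,mostFrequent,size=3,mode='wrap')   # 3x3x3 stencil, periodic wrap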
@ -899,7 +937,7 @@ class Grid:
) )
def renumber(self): def renumber(self) -> "Grid":
""" """
Renumber sorted material indices as 0,...,N-1. Renumber sorted material indices as 0,...,N-1.
@ -918,7 +956,7 @@ class Grid:
) )
def rotate(self,R,fill=None): def rotate(self, R: Rotation, fill: int = None) -> "Grid":
""" """
Rotate grid (pad if required). Rotate grid (pad if required).
@ -926,7 +964,7 @@ class Grid:
---------- ----------
R : damask.Rotation R : damask.Rotation
Rotation to apply to the grid. Rotation to apply to the grid.
fill : int or float, optional fill : int, optional
Material index to fill the corners. Defaults to material.max() + 1. Material index to fill the corners. Defaults to material.max() + 1.
Returns Returns
@ -956,17 +994,20 @@ class Grid:
) )
def canvas(self,cells=None,offset=None,fill=None): def canvas(self,
cells: IntSequence = None,
offset: IntSequence = None,
fill: int = None) -> "Grid":
""" """
Crop or enlarge/pad grid. Crop or enlarge/pad grid.
Parameters Parameters
---------- ----------
cells : numpy.ndarray of shape (3) cells : sequence of int, len (3), optional
Number of cells x,y,z direction. Number of cells x,y,z direction.
offset : numpy.ndarray of shape (3) offset : sequence of int, len (3), optional
Offset (measured in cells) from old to new grid [0,0,0]. Offset (measured in cells) from old to new grid [0,0,0].
fill : int or float, optional fill : int, optional
Material index to fill the background. Defaults to material.max() + 1. Material index to fill the background. Defaults to material.max() + 1.
Returns Returns
@ -981,42 +1022,43 @@ class Grid:
>>> import numpy as np >>> import numpy as np
>>> import damask >>> import damask
>>> g = damask.Grid(np.zeros([32]*3,int),np.ones(3)*1e-4) >>> g = damask.Grid(np.zeros([32]*3,int),np.ones(3)*1e-4)
>>> g.canvas(np.array([32,32,16],int)) >>> g.canvas([32,32,16])
cells a b c: 32 x 32 x 16 cells : 32 x 32 x 16
size x y z: 0.0001 x 0.0001 x 5e-05 size : 0.0001 x 0.0001 x 5e-05 / m³
origin x y z: 0.0 0.0 0.0 origin: 0.0 0.0 0.0 / m
# materials: 1 # materials: 1
""" """
if offset is None: offset = 0 offset_ = np.array(offset,int) if offset is not None else np.zeros(3,int)
cells_ = np.array(cells,int) if cells is not None else self.cells
if fill is None: fill = np.nanmax(self.material) + 1 if fill is None: fill = np.nanmax(self.material) + 1
dtype = float if int(fill) != fill or self.material.dtype in np.sctypes['float'] else int dtype = float if int(fill) != fill or self.material.dtype in np.sctypes['float'] else int
canvas = np.full(self.cells if cells is None else cells,fill,dtype) canvas = np.full(cells_,fill,dtype)
LL = np.clip( offset, 0,np.minimum(self.cells, cells+offset)) LL = np.clip( offset_, 0,np.minimum(self.cells, cells_+offset_))
UR = np.clip( offset+cells, 0,np.minimum(self.cells, cells+offset)) UR = np.clip( offset_+cells_, 0,np.minimum(self.cells, cells_+offset_))
ll = np.clip(-offset, 0,np.minimum( cells,self.cells-offset)) ll = np.clip(-offset_, 0,np.minimum( cells_,self.cells-offset_))
ur = np.clip(-offset+self.cells,0,np.minimum( cells,self.cells-offset)) ur = np.clip(-offset_+self.cells,0,np.minimum( cells_,self.cells-offset_))
canvas[ll[0]:ur[0],ll[1]:ur[1],ll[2]:ur[2]] = self.material[LL[0]:UR[0],LL[1]:UR[1],LL[2]:UR[2]] canvas[ll[0]:ur[0],ll[1]:ur[1],ll[2]:ur[2]] = self.material[LL[0]:UR[0],LL[1]:UR[1],LL[2]:UR[2]]
return Grid(material = canvas, return Grid(material = canvas,
size = self.size/self.cells*np.asarray(canvas.shape), size = self.size/self.cells*np.asarray(canvas.shape),
origin = self.origin+offset*self.size/self.cells, origin = self.origin+offset_*self.size/self.cells,
comments = self.comments+[util.execution_stamp('Grid','canvas')], comments = self.comments+[util.execution_stamp('Grid','canvas')],
) )
def substitute(self,from_material,to_material): def substitute(self, from_material: IntSequence, to_material: IntSequence) -> "Grid":
""" """
Substitute material indices. Substitute material indices.
Parameters Parameters
---------- ----------
from_material : iterable of ints from_material : sequence of int
Material indices to be substituted. Material indices to be substituted.
to_material : iterable of ints to_material : sequence of int
New material indices. New material indices.
Returns Returns
@ -1025,7 +1067,7 @@ class Grid:
Updated grid-based geometry. Updated grid-based geometry.
""" """
def mp(entry,mapper): def mp(entry, mapper):
return mapper[entry] if entry in mapper else entry return mapper[entry] if entry in mapper else entry
mp = np.vectorize(mp) mp = np.vectorize(mp)
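Usage sketch with arbitrary values; indices that do not appear in from_material are left untouched:

import numpy as np
import damask

g = damask.Grid(np.arange(8).reshape(2,2,2)%3, np.ones(3)*1e-5)   # materials 0, 1, 2
print(np.unique(g.substitute([0,1],[10,11]).material))            # -> [ 2 10 11]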
@ -1038,7 +1080,7 @@ class Grid:
) )
def sort(self): def sort(self) -> "Grid":
""" """
Sort material indices such that min(material) is located at (0,0,0). Sort material indices such that min(material) is located at (0,0,0).
@ -1060,7 +1102,11 @@ class Grid:
) )
def vicinity_offset(self,vicinity=1,offset=None,trigger=[],periodic=True): def vicinity_offset(self,
vicinity: int = 1,
offset: int = None,
trigger: IntSequence = [],
periodic: bool = True) -> "Grid":
""" """
Offset material index of points in the vicinity of triggering points. Offset material index of points in the vicinity of triggering points.
@ -1076,10 +1122,10 @@ class Grid:
offset : int, optional offset : int, optional
Offset (positive or negative) to tag material indices, Offset (positive or negative) to tag material indices,
defaults to material.max()+1. defaults to material.max()+1.
trigger : list of ints, optional trigger : sequence of int, optional
List of material indices that trigger a change. List of material indices that trigger a change.
Defaults to [], meaning that any different neighbor triggers a change. Defaults to [], meaning that any different neighbor triggers a change.
periodic : Boolean, optional periodic : bool, optional
Assume grid to be periodic. Defaults to True. Assume grid to be periodic. Defaults to True.
Returns Returns
@ -1088,8 +1134,7 @@ class Grid:
Updated grid-based geometry. Updated grid-based geometry.
""" """
def tainted_neighborhood(stencil,trigger): def tainted_neighborhood(stencil: np.ndarray, trigger):
me = stencil[stencil.shape[0]//2] me = stencil[stencil.shape[0]//2]
return np.any(stencil != me if len(trigger) == 0 else return np.any(stencil != me if len(trigger) == 0 else
np.in1d(stencil,np.array(list(set(trigger) - {me})))) np.in1d(stencil,np.array(list(set(trigger) - {me}))))
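The trigger logic in isolation, on a flattened one-dimensional stencil (values are arbitrary):

import numpy as np
stencil = np.array([1,1,2,1,3,1,1])                           # flattened neighbourhood, centre value 1
me = stencil[stencil.shape[0]//2]
print(np.any(np.in1d(stencil,np.array(list({2} - {me})))))    # trigger=[2] -> True, a neighbour is 2
print(np.any(np.in1d(stencil,np.array(list({1} - {me})))))    # trigger=[1] -> False, the centre's own value is removed from the trigger set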
@ -1108,15 +1153,15 @@ class Grid:
) )
def get_grain_boundaries(self,periodic=True,directions='xyz'): def get_grain_boundaries(self, periodic: bool = True, directions: Sequence[str] = 'xyz'):
""" """
Create VTK unstructured grid containing grain boundaries. Create VTK unstructured grid containing grain boundaries.
Parameters Parameters
---------- ----------
periodic : Boolean, optional periodic : bool, optional
Assume grid to be periodic. Defaults to True. Assume grid to be periodic. Defaults to True.
directions : iterable containing str, optional directions : (sequence of) str, optional
Direction(s) along which the boundaries are determined. Direction(s) along which the boundaries are determined.
Valid entries are 'x', 'y', 'z'. Defaults to 'xyz'. Valid entries are 'x', 'y', 'z'. Defaults to 'xyz'.
@ -412,8 +412,8 @@ class Orientation(Rotation,Crystal):
Returns Returns
------- -------
in : numpy.ndarray of quaternion.shape in : numpy.ndarray of bool, quaternion.shape
Boolean array indicating whether Rodrigues-Frank vector falls into fundamental zone. Whether Rodrigues-Frank vector falls into fundamental zone.
Notes Notes
----- -----
@ -456,8 +456,8 @@ class Orientation(Rotation,Crystal):
Returns Returns
------- -------
in : numpy.ndarray of quaternion.shape in : numpy.ndarray of bool, quaternion.shape
Boolean array indicating whether Rodrigues-Frank vector falls into disorientation FZ. Whether Rodrigues-Frank vector falls into disorientation FZ.
References References
---------- ----------
@ -532,6 +532,17 @@ class Orientation(Rotation,Crystal):
[ 0.07359167 -0.36505797 0.92807163]] [ 0.07359167 -0.36505797 0.92807163]]
Bunge Eulers / deg: (11.40, 21.86, 0.60) Bunge Eulers / deg: (11.40, 21.86, 0.60)
Plot a sample from the Mackenzie distribution.
>>> import matplotlib.pyplot as plt
>>> import damask
>>> N = 10000
>>> a = damask.Orientation.from_random(shape=N,family='cubic')
>>> b = damask.Orientation.from_random(shape=N,family='cubic')
>>> d = a.disorientation(b).as_axis_angle(degrees=True,pair=True)[1]
>>> plt.hist(d,25)
>>> plt.show()
""" """
if self.family != other.family: if self.family != other.family:
raise NotImplementedError('disorientation between different crystal families') raise NotImplementedError('disorientation between different crystal families')
@ -660,8 +671,8 @@ class Orientation(Rotation,Crystal):
Returns Returns
------- -------
in : numpy.ndarray of shape (...) in : numpy.ndarray of bool, shape (...)
Boolean array indicating whether vector falls into SST. Whether vector falls into SST.
""" """
if not isinstance(vector,np.ndarray) or vector.shape[-1] != 3: if not isinstance(vector,np.ndarray) or vector.shape[-1] != 3:
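A short usage sketch for the boolean return (values arbitrary):

import numpy as np
import damask

o = damask.Orientation.from_random(family='cubic')
print(o.in_SST(np.array([0.,0.,1.])))   # single bool; an array of vectors yields an array of bool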
@ -4,6 +4,7 @@ import fnmatch
import os import os
import copy import copy
import datetime import datetime
import warnings
import xml.etree.ElementTree as ET import xml.etree.ElementTree as ET
import xml.dom.minidom import xml.dom.minidom
from pathlib import Path from pathlib import Path
@ -27,6 +28,20 @@ h5py3 = h5py.__version__[0] == '3'
chunk_size = 1024**2//8 # for compression in HDF5 chunk_size = 1024**2//8 # for compression in HDF5
def _view_transition(what,datasets,increments,times,phases,homogenizations,fields):
if (datasets is not None and what is None) or (what is not None and datasets is None):
raise ValueError('"what" and "datasets" need to be used as a pair')
if datasets is not None or what is not None:
warnings.warn('Arguments "what" and "datasets" will be removed in DAMASK v3.0.0-alpha7', DeprecationWarning,2)
return what,datasets
if sum(1 for _ in filter(None.__ne__, [increments,times,phases,homogenizations,fields])) > 1:
raise ValueError('Only one out of "increments", "times", "phases", "homogenizations", and "fields" can be used')
else:
if increments is not None: return "increments", increments
if times is not None: return "times", times
if phases is not None: return "phases", phases
if homogenizations is not None: return "homogenizations", homogenizations
if fields is not None: return "fields", fields
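Both interfaces are routed through this helper to the same (what, datasets) pair; an illustrative comparison with a hypothetical result file:

import damask

r = damask.Result('my_file.hdf5')   # hypothetical file
r_new = r.view(increments=0)        # keyword interface
r_old = r.view('increments',0)      # deprecated pair, still accepted but emits a DeprecationWarning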
def _read(dataset): def _read(dataset):
"""Read a dataset and its metadata into a numpy.ndarray.""" """Read a dataset and its metadata into a numpy.ndarray."""
@ -79,7 +94,7 @@ class Result:
>>> r.add_Cauchy() >>> r.add_Cauchy()
>>> r.add_equivalent_Mises('sigma') >>> r.add_equivalent_Mises('sigma')
>>> r.export_VTK() >>> r.export_VTK()
>>> r_last = r.view('increments',-1) >>> r_last = r.view(increments=-1)
>>> sigma_vM_last = r_last.get('sigma_vM') >>> sigma_vM_last = r_last.get('sigma_vM')
""" """
@ -141,7 +156,7 @@ class Result:
self.fname = Path(fname).absolute() self.fname = Path(fname).absolute()
self._allow_modification = False self._protected = True
def __copy__(self): def __copy__(self):
@ -155,10 +170,10 @@ class Result:
"""Show summary of file content.""" """Show summary of file content."""
visible_increments = self.visible['increments'] visible_increments = self.visible['increments']
first = self.view('increments',visible_increments[0:1]).list_data() first = self.view(increments=visible_increments[0:1]).list_data()
last = '' if len(visible_increments) < 2 else \ last = '' if len(visible_increments) < 2 else \
self.view('increments',visible_increments[-1:]).list_data() self.view(increments=visible_increments[-1:]).list_data()
in_between = '' if len(visible_increments) < 3 else \ in_between = '' if len(visible_increments) < 3 else \
''.join([f'\n{inc}\n ...\n' for inc in visible_increments[1:-1]]) ''.join([f'\n{inc}\n ...\n' for inc in visible_increments[1:-1]])
@ -231,36 +246,6 @@ class Result:
return dup return dup
def modification_enable(self):
"""
Allow modification of existing data.
Returns
-------
modified_view : damask.Result
View without write-protection of existing data.
"""
print(util.warn('Warning: Modification of existing datasets allowed!'))
dup = self.copy()
dup._allow_modification = True
return dup
def modification_disable(self):
"""
Prevent modification of existing data (default case).
Returns
-------
modified_view : damask.Result
View with write-protection of existing data.
"""
dup = self.copy()
dup._allow_modification = False
return dup
def increments_in_range(self,start,end): def increments_in_range(self,start,end):
""" """
Get all increments within a given range. Get all increments within a given range.
@ -285,7 +270,6 @@ class Result:
selected.append(self.increments[i]) selected.append(self.increments[i])
return selected return selected
def times_in_range(self,start,end): def times_in_range(self,start,end):
""" """
Get all increments within a given time range. Get all increments within a given time range.
@ -310,17 +294,38 @@ class Result:
return selected return selected
def view(self,what,datasets): def view(self,what=None,datasets=None,*,
increments=None,
times=None,
phases=None,
homogenizations=None,
fields=None,
protected=None):
""" """
Set view. Set view.
Wildcard matching with '?' and '*' is supported.
True is equivalent to '*', False is equivalent to [].
Parameters Parameters
---------- ----------
what : {'increments', 'times', 'phases', 'homogenizations', 'fields'} what : {'increments', 'times', 'phases', 'homogenizations', 'fields'}
Attribute to change. Attribute to change. DEPRECATED.
datasets : (list of) int (for increments), (list of) float (for times), (list of) str, or bool datasets : (list of) int (for increments), (list of) float (for times), (list of) str, or bool
Name of datasets; supports '?' and '*' wildcards. Name of datasets; supports '?' and '*' wildcards. DEPRECATED.
True is equivalent to '*', False is equivalent to []. True is equivalent to '*', False is equivalent to [].
increments : (list of) int, (list of) str, or bool, optional
Number(s) of increments to select.
times : (list of) float, (list of) str, or bool, optional
Simulation time(s) of increments to select.
phases : (list of) str, or bool, optional
Name(s) of phases to select.
homogenizations : (list of) str, or bool, optional
Name(s) of homogenizations to select.
fields : (list of) str, or bool, optional
Name(s) of fields to select.
protected : bool, optional
Protection status of existing data.
Returns Returns
------- -------
@ -333,29 +338,61 @@ class Result:
>>> import damask >>> import damask
>>> r = damask.Result('my_file.hdf5') >>> r = damask.Result('my_file.hdf5')
>>> r_first = r.view('increment',0) >>> r_first = r.view(increments=0)
Get a view that shows all results between simulation times of 10 to 40: Get a view that shows all results between simulation times of 10 to 40:
>>> import damask >>> import damask
>>> r = damask.Result('my_file.hdf5') >>> r = damask.Result('my_file.hdf5')
>>> r_t10to40 = r.view('times',r.times_in_range(10.0,40.0)) >>> r_t10to40 = r.view(times=r.times_in_range(10.0,40.0))
""" """
return self._manage_view('set',what,datasets) v = _view_transition(what,datasets,increments,times,phases,homogenizations,fields)
if protected is not None:
if v is None:
dup = self.copy()
else:
what_,datasets_ = v
dup = self._manage_view('set',what_,datasets_)
if not protected:
print(util.warn('Warning: Modification of existing datasets allowed!'))
dup._protected = protected
else:
what_,datasets_ = v
dup = self._manage_view('set',what_,datasets_)
return dup
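The protected keyword replaces the removed modification_enable/modification_disable methods; a sketch with a hypothetical result file:

import damask

r    = damask.Result('my_file.hdf5')   # hypothetical file
r_rw = r.view(protected=False)         # prints a warning; existing datasets become writable
r_rw.rename('F','def_grad')            # would raise PermissionError on the default, protected view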
def view_more(self,what,datasets): def view_more(self,what=None,datasets=None,*,
increments=None,
times=None,
phases=None,
homogenizations=None,
fields=None):
""" """
Add to view. Add to view.
Wildcard matching with '?' and '*' is supported.
True is equivalent to '*', False is equivalent to [].
Parameters Parameters
---------- ----------
what : {'increments', 'times', 'phases', 'homogenizations', 'fields'} what : {'increments', 'times', 'phases', 'homogenizations', 'fields'}
Attribute to change. Attribute to change. DEPRECATED.
datasets : (list of) int (for increments), (list of) float (for times), (list of) str, or bool datasets : (list of) int (for increments), (list of) float (for times), (list of) str, or bool
Name of datasets; supports '?' and '*' wildcards. Name of datasets; supports '?' and '*' wildcards. DEPRECATED.
True is equivalent to '*', False is equivalent to []. True is equivalent to '*', False is equivalent to [].
increments : (list of) int, (list of) str, or bool, optional
Number(s) of increments to select.
times : (list of) float, (list of) str, or bool, optional
Simulation time(s) of increments to select.
phases : (list of) str, or bool, optional
Name(s) of phases to select.
homogenizations : (list of) str, or bool, optional
Name(s) of homogenizations to select.
fields : (list of) str, or bool, optional
Name(s) of fields to select.
Returns Returns
------- -------
@ -367,25 +404,44 @@ class Result:
Get a view that shows only results from first and last increment: Get a view that shows only results from first and last increment:
>>> import damask >>> import damask
>>> r_empty = damask.Result('my_file.hdf5').view('increments',False) >>> r_empty = damask.Result('my_file.hdf5').view(increments=False)
>>> r_first = r_empty.view_more('increments',0) >>> r_first = r_empty.view_more(increments=0)
>>> r_first_and_last = r.first.view_more('increments',-1) >>> r_first_and_last = r.first.view_more(increments=-1)
""" """
return self._manage_view('add',what,datasets) what_, datasets_ = _view_transition(what,datasets,increments,times,phases,homogenizations,fields)
return self._manage_view('add',what_,datasets_)
def view_less(self,what,datasets): def view_less(self,what=None,datasets=None,*,
increments=None,
times=None,
phases=None,
homogenizations=None,
fields=None):
""" """
Remove from view. Remove from view.
Wildcard matching with '?' and '*' is supported.
True is equivalent to '*', False is equivalent to [].
Parameters Parameters
---------- ----------
what : {'increments', 'times', 'phases', 'homogenizations', 'fields'} what : {'increments', 'times', 'phases', 'homogenizations', 'fields'}
Attribute to change. Attribute to change. DEPRECATED.
datasets : (list of) int (for increments), (list of) float (for times), (list of) str, or bool datasets : (list of) int (for increments), (list of) float (for times), (list of) str, or bool
Name of datasets; supports '?' and '*' wildcards. Name of datasets; supports '?' and '*' wildcards. DEPRECATED.
True is equivalent to '*', False is equivalent to []. True is equivalent to '*', False is equivalent to [].
increments : (list of) int, (list of) str, or bool, optional
Number(s) of increments to select.
times : (list of) float, (list of) str, or bool, optional
Simulation time(s) of increments to select.
phases : (list of) str, or bool, optional
Name(s) of phases to select.
homogenizations : (list of) str, or bool, optional
Name(s) of homogenizations to select.
fields : (list of) str, or bool, optional
Name(s) of fields to select.
Returns Returns
------- -------
@ -398,10 +454,11 @@ class Result:
>>> import damask >>> import damask
>>> r_all = damask.Result('my_file.hdf5') >>> r_all = damask.Result('my_file.hdf5')
>>> r_deformed = r_all.view_less('increments',0) >>> r_deformed = r_all.view_less(increments=0)
""" """
return self._manage_view('del',what,datasets) what_, datasets_ = _view_transition(what,datasets,increments,times,phases,homogenizations,fields)
return self._manage_view('del',what_,datasets_)
def rename(self,name_src,name_dst): def rename(self,name_src,name_dst):
@ -424,11 +481,11 @@ class Result:
>>> import damask >>> import damask
>>> r = damask.Result('my_file.hdf5') >>> r = damask.Result('my_file.hdf5')
>>> r_unprotected = r.modification_enable() >>> r_unprotected = r.view(protected=False)
>>> r_unprotected.rename('F','def_grad') >>> r_unprotected.rename('F','def_grad')
""" """
if not self._allow_modification: if self._protected:
raise PermissionError('Renaming datasets not permitted') raise PermissionError('Renaming datasets not permitted')
with h5py.File(self.fname,'a') as f: with h5py.File(self.fname,'a') as f:
@ -463,11 +520,11 @@ class Result:
>>> import damask >>> import damask
>>> r = damask.Result('my_file.hdf5') >>> r = damask.Result('my_file.hdf5')
>>> r_unprotected = r.modification_enable() >>> r_unprotected = r.view(protected=False)
>>> r_unprotected.remove('F') >>> r_unprotected.remove('F')
""" """
if not self._allow_modification: if self._protected:
raise PermissionError('Removing datasets not permitted') raise PermissionError('Removing datasets not permitted')
with h5py.File(self.fname,'a') as f: with h5py.File(self.fname,'a') as f:
@ -1358,7 +1415,7 @@ class Result:
lock.acquire() lock.acquire()
with h5py.File(self.fname, 'a') as f: with h5py.File(self.fname, 'a') as f:
try: try:
if self._allow_modification and '/'.join([group,result['label']]) in f: if not self._protected and '/'.join([group,result['label']]) in f:
dataset = f['/'.join([group,result['label']])] dataset = f['/'.join([group,result['label']])]
dataset[...] = result['data'] dataset[...] = result['data']
dataset.attrs['overwritten'] = True dataset.attrs['overwritten'] = True
@ -1760,7 +1817,7 @@ class Result:
output : (list of) str, optional output : (list of) str, optional
Names of the datasets to export to the file. Names of the datasets to export to the file.
Defaults to '*', in which case all datasets are exported. Defaults to '*', in which case all datasets are exported.
overwrite : boolean, optional overwrite : bool, optional
Overwrite existing configuration files. Overwrite existing configuration files.
Defaults to False. Defaults to False.
@ -678,7 +678,7 @@ class Rotation:
---------- ----------
q : numpy.ndarray of shape (...,4) q : numpy.ndarray of shape (...,4)
Unit quaternion (q_0, q_1, q_2, q_3) in positive real hemisphere, i.e. ǀqǀ = 1, q_0 ≥ 0. Unit quaternion (q_0, q_1, q_2, q_3) in positive real hemisphere, i.e. ǀqǀ = 1, q_0 ≥ 0.
accept_homomorph : boolean, optional accept_homomorph : bool, optional
Allow homomorphic variants, i.e. q_0 < 0 (negative real hemisphere). Allow homomorphic variants, i.e. q_0 < 0 (negative real hemisphere).
Defaults to False. Defaults to False.
P : int {-1,1}, optional P : int {-1,1}, optional
@ -713,7 +713,7 @@ class Rotation:
phi : numpy.ndarray of shape (...,3) phi : numpy.ndarray of shape (...,3)
Euler angles (φ_1 ∈ [0,2π], ϕ ∈ [0,π], φ_2 ∈ [0,2π]) Euler angles (φ_1 ∈ [0,2π], ϕ ∈ [0,π], φ_2 ∈ [0,2π])
or (φ_1 ∈ [0,360], ϕ ∈ [0,180], φ_2 ∈ [0,360]) if degrees == True. or (φ_1 ∈ [0,360], ϕ ∈ [0,180], φ_2 ∈ [0,360]) if degrees == True.
degrees : boolean, optional degrees : bool, optional
Euler angles are given in degrees. Defaults to False. Euler angles are given in degrees. Defaults to False.
Notes Notes
@ -744,9 +744,9 @@ class Rotation:
axis_angle : numpy.ndarray of shape (...,4) axis_angle : numpy.ndarray of shape (...,4)
Axis and angle (n_1, n_2, n_3, ω) with ǀnǀ = 1 and ω ∈ [0,π] Axis and angle (n_1, n_2, n_3, ω) with ǀnǀ = 1 and ω ∈ [0,π]
or ω ∈ [0,180] if degrees == True. or ω ∈ [0,180] if degrees == True.
degrees : boolean, optional degrees : bool, optional
Angle ω is given in degrees. Defaults to False. Angle ω is given in degrees. Defaults to False.
normalize: boolean, optional normalize: bool, optional
Allow ǀnǀ ≠ 1. Defaults to False. Allow ǀnǀ ≠ 1. Defaults to False.
P : int {-1,1}, optional P : int {-1,1}, optional
Sign convention. Defaults to -1. Sign convention. Defaults to -1.
@ -780,9 +780,9 @@ class Rotation:
---------- ----------
basis : numpy.ndarray of shape (...,3,3) basis : numpy.ndarray of shape (...,3,3)
Three three-dimensional lattice basis vectors. Three three-dimensional lattice basis vectors.
orthonormal : boolean, optional orthonormal : bool, optional
Basis is strictly orthonormal, i.e. is free of stretch components. Defaults to True. Basis is strictly orthonormal, i.e. is free of stretch components. Defaults to True.
reciprocal : boolean, optional reciprocal : bool, optional
Basis vectors are given in reciprocal (instead of real) space. Defaults to False. Basis vectors are given in reciprocal (instead of real) space. Defaults to False.
""" """
@ -858,7 +858,7 @@ class Rotation:
---------- ----------
rho : numpy.ndarray of shape (...,4) rho : numpy.ndarray of shape (...,4)
Rodrigues-Frank vector (n_1, n_2, n_3, tan(ω/2)) with ǀnǀ = 1 and ω ∈ [0,π]. Rodrigues-Frank vector (n_1, n_2, n_3, tan(ω/2)) with ǀnǀ = 1 and ω ∈ [0,π].
normalize : boolean, optional normalize : bool, optional
Allow ǀnǀ ≠ 1. Defaults to False. Allow ǀnǀ ≠ 1. Defaults to False.
P : int {-1,1}, optional P : int {-1,1}, optional
Sign convention. Defaults to -1. Sign convention. Defaults to -1.
@ -983,9 +983,9 @@ class Rotation:
N : integer, optional N : integer, optional
Number of discrete orientations to be sampled from the given ODF. Number of discrete orientations to be sampled from the given ODF.
Defaults to 500. Defaults to 500.
degrees : boolean, optional degrees : bool, optional
Euler space grid coordinates are in degrees. Defaults to True. Euler space grid coordinates are in degrees. Defaults to True.
fractions : boolean, optional fractions : bool, optional
ODF values correspond to volume fractions, not probability densities. ODF values correspond to volume fractions, not probability densities.
Defaults to True. Defaults to True.
rng_seed: {None, int, array_like[ints], SeedSequence, BitGenerator, Generator}, optional rng_seed: {None, int, array_like[ints], SeedSequence, BitGenerator, Generator}, optional
@ -1039,7 +1039,7 @@ class Rotation:
Standard deviation of (Gaussian) misorientation distribution. Standard deviation of (Gaussian) misorientation distribution.
N : int, optional N : int, optional
Number of samples. Defaults to 500. Number of samples. Defaults to 500.
degrees : boolean, optional degrees : bool, optional
sigma is given in degrees. Defaults to True. sigma is given in degrees. Defaults to True.
rng_seed : {None, int, array_like[ints], SeedSequence, BitGenerator, Generator}, optional rng_seed : {None, int, array_like[ints], SeedSequence, BitGenerator, Generator}, optional
A seed to initialize the BitGenerator. A seed to initialize the BitGenerator.
@ -1078,7 +1078,7 @@ class Rotation:
Defaults to 0. Defaults to 0.
N : int, optional N : int, optional
Number of samples. Defaults to 500. Number of samples. Defaults to 500.
degrees : boolean, optional degrees : bool, optional
sigma, alpha, and beta are given in degrees. sigma, alpha, and beta are given in degrees.
rng_seed : {None, int, array_like[ints], SeedSequence, BitGenerator, Generator}, optional rng_seed : {None, int, array_like[ints], SeedSequence, BitGenerator, Generator}, optional
A seed to initialize the BitGenerator. A seed to initialize the BitGenerator.
@ -0,0 +1,11 @@
"""Functionality for typehints."""
from typing import Sequence, Union, TextIO
from pathlib import Path
import numpy as np
FloatSequence = Union[np.ndarray,Sequence[float]]
IntSequence = Union[np.ndarray,Sequence[int]]
FileHandle = Union[TextIO, str, Path]
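These aliases let public functions accept plain Python sequences as well as ndarrays; cell_volume below is a hypothetical helper (note the import from a private module):

import numpy as np
from damask._typehints import FloatSequence, IntSequence

def cell_volume(size: FloatSequence, cells: IntSequence) -> float:
    # hypothetical helper: volume of one cell, accepting lists, tuples, or ndarrays
    return float(np.prod(np.asarray(size,float)/np.asarray(cells,int)))

print(cell_volume([1e-4]*3, np.array([32]*3)))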
@ -28,8 +28,8 @@ class VTK:
---------- ----------
vtk_data : subclass of vtk.vtkDataSet vtk_data : subclass of vtk.vtkDataSet
Description of geometry and topology, optionally with attached data. Description of geometry and topology, optionally with attached data.
Valid types are vtk.vtkRectilinearGrid, vtk.vtkUnstructuredGrid, Valid types are vtk.vtkImageData, vtk.vtkUnstructuredGrid,
or vtk.vtkPolyData. vtk.vtkPolyData, and vtk.vtkRectilinearGrid.
""" """
self.vtk_data = vtk_data self.vtk_data = vtk_data
@ -242,7 +242,7 @@ class VTK:
---------- ----------
fname : str or pathlib.Path fname : str or pathlib.Path
Filename for writing. Filename for writing.
parallel : boolean, optional parallel : bool, optional
Write data in parallel background process. Defaults to True. Write data in parallel background process. Defaults to True.
compress : bool, optional compress : bool, optional
Compress with zlib algorithm. Defaults to True. Compress with zlib algorithm. Defaults to True.
@ -419,7 +419,7 @@ class VTK:
return writer.GetOutputString() return writer.GetOutputString()
def show(self): def show(self) -> None:
""" """
Render. Render.
@ -12,21 +12,23 @@ the following operations are required for tensorial data:
""" """
from typing import Sequence, Tuple, Union from typing import Tuple as _Tuple
from scipy import spatial as _spatial from scipy import spatial as _spatial
import numpy as _np import numpy as _np
from ._typehints import FloatSequence as _FloatSequence, IntSequence as _IntSequence
def _ks(size: _np.ndarray, cells: Union[_np.ndarray,Sequence[int]], first_order: bool = False) -> _np.ndarray:
def _ks(size: _FloatSequence, cells: _IntSequence, first_order: bool = False) -> _np.ndarray:
""" """
Get wave numbers operator. Get wave numbers operator.
Parameters Parameters
---------- ----------
size : numpy.ndarray of shape (3) size : sequence of float, len (3)
Physical size of the periodic field. Physical size of the periodic field.
cells : numpy.ndarray of shape (3) cells : sequence of int, len (3)
Number of cells. Number of cells.
first_order : bool, optional first_order : bool, optional
Correction for first order derivatives, defaults to False. Correction for first order derivatives, defaults to False.
@ -45,20 +47,20 @@ def _ks(size: _np.ndarray, cells: Union[_np.ndarray,Sequence[int]], first_order:
return _np.stack(_np.meshgrid(k_sk,k_sj,k_si,indexing = 'ij'), axis=-1) return _np.stack(_np.meshgrid(k_sk,k_sj,k_si,indexing = 'ij'), axis=-1)
def curl(size: _np.ndarray, f: _np.ndarray) -> _np.ndarray: def curl(size: _FloatSequence, f: _np.ndarray) -> _np.ndarray:
u""" u"""
Calculate curl of a vector or tensor field in Fourier space. Calculate curl of a vector or tensor field in Fourier space.
Parameters Parameters
---------- ----------
size : numpy.ndarray of shape (3) size : sequence of float, len (3)
Physical size of the periodic field. Physical size of the periodic field.
f : numpy.ndarray of shape (:,:,:,3) or (:,:,:,3,3) f : numpy.ndarray, shape (:,:,:,3) or (:,:,:,3,3)
Periodic field of which the curl is calculated. Periodic field of which the curl is calculated.
Returns Returns
------- -------
∇ × f : numpy.ndarray ∇ × f : numpy.ndarray, shape (:,:,:,3) or (:,:,:,3,3)
Curl of f. Curl of f.
""" """
@ -76,20 +78,20 @@ def curl(size: _np.ndarray, f: _np.ndarray) -> _np.ndarray:
return _np.fft.irfftn(curl_,axes=(0,1,2),s=f.shape[:3]) return _np.fft.irfftn(curl_,axes=(0,1,2),s=f.shape[:3])
def divergence(size: _np.ndarray, f: _np.ndarray) -> _np.ndarray: def divergence(size: _FloatSequence, f: _np.ndarray) -> _np.ndarray:
u""" u"""
Calculate divergence of a vector or tensor field in Fourier space. Calculate divergence of a vector or tensor field in Fourier space.
Parameters Parameters
---------- ----------
size : numpy.ndarray of shape (3) size : sequence of float, len (3)
Physical size of the periodic field. Physical size of the periodic field.
f : numpy.ndarray of shape (:,:,:,3) or (:,:,:,3,3) f : numpy.ndarray, shape (:,:,:,3) or (:,:,:,3,3)
Periodic field of which the divergence is calculated. Periodic field of which the divergence is calculated.
Returns Returns
------- -------
∇ · f : numpy.ndarray ∇ · f : numpy.ndarray, shape (:,:,:,1) or (:,:,:,3)
Divergence of f. Divergence of f.
""" """
@ -103,20 +105,20 @@ def divergence(size: _np.ndarray, f: _np.ndarray) -> _np.ndarray:
return _np.fft.irfftn(div_,axes=(0,1,2),s=f.shape[:3]) return _np.fft.irfftn(div_,axes=(0,1,2),s=f.shape[:3])
def gradient(size: _np.ndarray, f: _np.ndarray) -> _np.ndarray: def gradient(size: _FloatSequence, f: _np.ndarray) -> _np.ndarray:
u""" u"""
Calculate gradient of a scalar or vector fieldin Fourier space. Calculate gradient of a scalar or vector field in Fourier space.
Parameters Parameters
---------- ----------
size : numpy.ndarray of shape (3) size : sequence of float, len (3)
Physical size of the periodic field. Physical size of the periodic field.
f : numpy.ndarray of shape (:,:,:,1) or (:,:,:,3) f : numpy.ndarray, shape (:,:,:,1) or (:,:,:,3)
Periodic field of which the gradient is calculated. Periodic field of which the gradient is calculated.
Returns Returns
------- -------
∇ f : numpy.ndarray ∇ f : numpy.ndarray, shape (:,:,:,3) or (:,:,:,3,3)
Gradient of f. Gradient of f.
""" """
@ -130,29 +132,30 @@ def gradient(size: _np.ndarray, f: _np.ndarray) -> _np.ndarray:
return _np.fft.irfftn(grad_,axes=(0,1,2),s=f.shape[:3]) return _np.fft.irfftn(grad_,axes=(0,1,2),s=f.shape[:3])
def coordinates0_point(cells: Union[ _np.ndarray,Sequence[int]], def coordinates0_point(cells: _IntSequence,
size: _np.ndarray, size: _FloatSequence,
origin: _np.ndarray = _np.zeros(3)) -> _np.ndarray: origin: _FloatSequence = _np.zeros(3)) -> _np.ndarray:
""" """
Cell center positions (undeformed). Cell center positions (undeformed).
Parameters Parameters
---------- ----------
cells : numpy.ndarray of shape (3) cells : sequence of int, len (3)
Number of cells. Number of cells.
size : numpy.ndarray of shape (3) size : sequence of float, len (3)
Physical size of the periodic field. Physical size of the periodic field.
origin : numpy.ndarray, optional origin : sequence of float, len (3), optional
Physical origin of the periodic field. Defaults to [0.0,0.0,0.0]. Physical origin of the periodic field. Defaults to [0.0,0.0,0.0].
Returns Returns
------- -------
x_p_0 : numpy.ndarray x_p_0 : numpy.ndarray, shape (:,:,:,3)
Undeformed cell center coordinates. Undeformed cell center coordinates.
""" """
start = origin + size/_np.array(cells)*.5 size_ = _np.array(size,float)
end = origin + size - size/_np.array(cells)*.5 start = origin + size_/_np.array(cells,int)*.5
end = origin + size_ - size_/_np.array(cells,int)*.5
return _np.stack(_np.meshgrid(_np.linspace(start[0],end[0],cells[0]), return _np.stack(_np.meshgrid(_np.linspace(start[0],end[0],cells[0]),
_np.linspace(start[1],end[1],cells[1]), _np.linspace(start[1],end[1],cells[1]),
@ -160,24 +163,24 @@ def coordinates0_point(cells: Union[ _np.ndarray,Sequence[int]],
axis = -1) axis = -1)
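A small check of the documented conventions (shape of the result and position of the first cell centre):

import numpy as np
from damask import grid_filters

x = grid_filters.coordinates0_point([2,2,2],np.ones(3))   # 2 x 2 x 2 cells in a unit cube
print(x.shape)                                            # (2, 2, 2, 3)
print(x[0,0,0])                                           # [0.25 0.25 0.25]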
def displacement_fluct_point(size: _np.ndarray, F: _np.ndarray) -> _np.ndarray: def displacement_fluct_point(size: _FloatSequence, F: _np.ndarray) -> _np.ndarray:
""" """
Cell center displacement field from fluctuation part of the deformation gradient field. Cell center displacement field from fluctuation part of the deformation gradient field.
Parameters Parameters
---------- ----------
size : numpy.ndarray of shape (3) size : sequence of float, len (3)
Physical size of the periodic field. Physical size of the periodic field.
F : numpy.ndarray F : numpy.ndarray, shape (:,:,:,3,3)
Deformation gradient field. Deformation gradient field.
Returns Returns
------- -------
u_p_fluct : numpy.ndarray u_p_fluct : numpy.ndarray, shape (:,:,:,3)
Fluctuating part of the cell center displacements. Fluctuating part of the cell center displacements.
""" """
integrator = 0.5j*size/_np.pi integrator = 0.5j*_np.array(size,float)/_np.pi
k_s = _ks(size,F.shape[:3],False) k_s = _ks(size,F.shape[:3],False)
k_s_squared = _np.einsum('...l,...l',k_s,k_s) k_s_squared = _np.einsum('...l,...l',k_s,k_s)
@ -192,20 +195,20 @@ def displacement_fluct_point(size: _np.ndarray, F: _np.ndarray) -> _np.ndarray:
return _np.fft.irfftn(displacement,axes=(0,1,2),s=F.shape[:3]) return _np.fft.irfftn(displacement,axes=(0,1,2),s=F.shape[:3])
def displacement_avg_point(size: _np.ndarray, F: _np.ndarray) -> _np.ndarray: def displacement_avg_point(size: _FloatSequence, F: _np.ndarray) -> _np.ndarray:
""" """
Cell center displacement field from average part of the deformation gradient field. Cell center displacement field from average part of the deformation gradient field.
Parameters Parameters
---------- ----------
size : numpy.ndarray of shape (3) size : sequence of float, len (3)
Physical size of the periodic field. Physical size of the periodic field.
F : numpy.ndarray F : numpy.ndarray, shape (:,:,:,3,3)
Deformation gradient field. Deformation gradient field.
Returns Returns
------- -------
u_p_avg : numpy.ndarray u_p_avg : numpy.ndarray, shape (:,:,:,3)
Average part of the cell center displacements. Average part of the cell center displacements.
""" """
@ -213,42 +216,42 @@ def displacement_avg_point(size: _np.ndarray, F: _np.ndarray) -> _np.ndarray:
return _np.einsum('ml,ijkl->ijkm',F_avg - _np.eye(3),coordinates0_point(F.shape[:3],size)) return _np.einsum('ml,ijkl->ijkm',F_avg - _np.eye(3),coordinates0_point(F.shape[:3],size))
def displacement_point(size: _np.ndarray, F: _np.ndarray) -> _np.ndarray: def displacement_point(size: _FloatSequence, F: _np.ndarray) -> _np.ndarray:
""" """
Cell center displacement field from deformation gradient field. Cell center displacement field from deformation gradient field.
Parameters Parameters
---------- ----------
size : numpy.ndarray of shape (3) size : sequence of float, len (3)
Physical size of the periodic field. Physical size of the periodic field.
F : numpy.ndarray F : numpy.ndarray, shape (:,:,:,3,3)
Deformation gradient field. Deformation gradient field.
Returns Returns
------- -------
u_p : numpy.ndarray u_p : numpy.ndarray, shape (:,:,:,3)
Cell center displacements. Cell center displacements.
""" """
return displacement_avg_point(size,F) + displacement_fluct_point(size,F) return displacement_avg_point(size,F) + displacement_fluct_point(size,F)
def coordinates_point(size: _np.ndarray, F: _np.ndarray, origin: _np.ndarray = _np.zeros(3)) -> _np.ndarray: def coordinates_point(size: _FloatSequence, F: _np.ndarray, origin: _FloatSequence = _np.zeros(3)) -> _np.ndarray:
""" """
Cell center positions. Cell center positions.
Parameters Parameters
---------- ----------
size : numpy.ndarray of shape (3) size : sequence of float, len (3)
Physical size of the periodic field. Physical size of the periodic field.
F : numpy.ndarray F : numpy.ndarray, shape (:,:,:,3,3)
Deformation gradient field. Deformation gradient field.
origin : numpy.ndarray of shape (3), optional origin : sequence of float, len (3), optional
Physical origin of the periodic field. Defaults to [0.0,0.0,0.0]. Physical origin of the periodic field. Defaults to [0.0,0.0,0.0].
Returns Returns
------- -------
x_p : numpy.ndarray x_p : numpy.ndarray, shape (:,:,:,3)
Cell center coordinates. Cell center coordinates.
""" """
@ -256,14 +259,14 @@ def coordinates_point(size: _np.ndarray, F: _np.ndarray, origin: _np.ndarray = _
def cellsSizeOrigin_coordinates0_point(coordinates0: _np.ndarray, def cellsSizeOrigin_coordinates0_point(coordinates0: _np.ndarray,
ordered: bool = True) -> Tuple[_np.ndarray,_np.ndarray,_np.ndarray]: ordered: bool = True) -> _Tuple[_np.ndarray,_np.ndarray,_np.ndarray]:
""" """
Return grid 'DNA', i.e. cells, size, and origin from 1D array of point positions. Return grid 'DNA', i.e. cells, size, and origin from 1D array of point positions.
Parameters Parameters
---------- ----------
coordinates0 : numpy.ndarray of shape (:,3) coordinates0 : numpy.ndarray, shape (:,3)
Undeformed cell coordinates. Undeformed cell center coordinates.
ordered : bool, optional ordered : bool, optional
Expect coordinates0 data to be ordered (x fast, z slow). Expect coordinates0 data to be ordered (x fast, z slow).
Defaults to True. Defaults to True.
@ -277,7 +280,7 @@ def cellsSizeOrigin_coordinates0_point(coordinates0: _np.ndarray,
coords = [_np.unique(coordinates0[:,i]) for i in range(3)] coords = [_np.unique(coordinates0[:,i]) for i in range(3)]
mincorner = _np.array(list(map(min,coords))) mincorner = _np.array(list(map(min,coords)))
maxcorner = _np.array(list(map(max,coords))) maxcorner = _np.array(list(map(max,coords)))
cells = _np.array(list(map(len,coords)),'i') cells = _np.array(list(map(len,coords)),int)
size = cells/_np.maximum(cells-1,1) * (maxcorner-mincorner) size = cells/_np.maximum(cells-1,1) * (maxcorner-mincorner)
delta = size/cells delta = size/cells
origin = mincorner - delta*.5 origin = mincorner - delta*.5
@ -305,24 +308,24 @@ def cellsSizeOrigin_coordinates0_point(coordinates0: _np.ndarray,
return (cells,size,origin) return (cells,size,origin)
def coordinates0_node(cells: Union[_np.ndarray,Sequence[int]], def coordinates0_node(cells: _IntSequence,
size: _np.ndarray, size: _FloatSequence,
origin: _np.ndarray = _np.zeros(3)) -> _np.ndarray: origin: _FloatSequence = _np.zeros(3)) -> _np.ndarray:
""" """
Nodal positions (undeformed). Nodal positions (undeformed).
Parameters Parameters
---------- ----------
cells : numpy.ndarray of shape (3) cells : sequence of int, len (3)
Number of cells. Number of cells.
size : numpy.ndarray of shape (3) size : sequence of float, len (3)
Physical size of the periodic field. Physical size of the periodic field.
origin : numpy.ndarray of shape (3), optional origin : sequence of float, len (3), optional
Physical origin of the periodic field. Defaults to [0.0,0.0,0.0]. Physical origin of the periodic field. Defaults to [0.0,0.0,0.0].
Returns Returns
------- -------
x_n_0 : numpy.ndarray x_n_0 : numpy.ndarray, shape (:,:,:,3)
Undeformed nodal coordinates. Undeformed nodal coordinates.
""" """
@ -332,40 +335,40 @@ def coordinates0_node(cells: Union[_np.ndarray,Sequence[int]],
axis = -1) axis = -1)
def displacement_fluct_node(size: _np.ndarray, F: _np.ndarray) -> _np.ndarray: def displacement_fluct_node(size: _FloatSequence, F: _np.ndarray) -> _np.ndarray:
""" """
Nodal displacement field from fluctuation part of the deformation gradient field. Nodal displacement field from fluctuation part of the deformation gradient field.
Parameters Parameters
---------- ----------
size : numpy.ndarray of shape (3) size : sequence of float, len (3)
Physical size of the periodic field. Physical size of the periodic field.
F : numpy.ndarray F : numpy.ndarray, shape (:,:,:,3,3)
Deformation gradient field. Deformation gradient field.
Returns Returns
------- -------
u_n_fluct : numpy.ndarray u_n_fluct : numpy.ndarray, shape (:,:,:,3)
Fluctuating part of the nodal displacements. Fluctuating part of the nodal displacements.
""" """
return point_to_node(displacement_fluct_point(size,F)) return point_to_node(displacement_fluct_point(size,F))
def displacement_avg_node(size: _np.ndarray, F: _np.ndarray) -> _np.ndarray: def displacement_avg_node(size: _FloatSequence, F: _np.ndarray) -> _np.ndarray:
""" """
Nodal displacement field from average part of the deformation gradient field. Nodal displacement field from average part of the deformation gradient field.
Parameters Parameters
---------- ----------
size : numpy.ndarray of shape (3) size : sequence of float, len (3)
Physical size of the periodic field. Physical size of the periodic field.
F : numpy.ndarray F : numpy.ndarray, shape (:,:,:,3,3)
Deformation gradient field. Deformation gradient field.
Returns Returns
------- -------
u_n_avg : numpy.ndarray u_n_avg : numpy.ndarray, shape (:,:,:,3)
Average part of the nodal displacements. Average part of the nodal displacements.
""" """
@ -373,42 +376,42 @@ def displacement_avg_node(size: _np.ndarray, F: _np.ndarray) -> _np.ndarray:
return _np.einsum('ml,ijkl->ijkm',F_avg - _np.eye(3),coordinates0_node(F.shape[:3],size)) return _np.einsum('ml,ijkl->ijkm',F_avg - _np.eye(3),coordinates0_node(F.shape[:3],size))
def displacement_node(size: _np.ndarray, F: _np.ndarray) -> _np.ndarray: def displacement_node(size: _FloatSequence, F: _np.ndarray) -> _np.ndarray:
""" """
Nodal displacement field from deformation gradient field. Nodal displacement field from deformation gradient field.
Parameters Parameters
---------- ----------
size : numpy.ndarray of shape (3) size : sequence of float, len (3)
Physical size of the periodic field. Physical size of the periodic field.
F : numpy.ndarray F : numpy.ndarray, shape (:,:,:,3,3)
Deformation gradient field. Deformation gradient field.
Returns Returns
------- -------
u_p : numpy.ndarray u_p : numpy.ndarray, shape (:,:,:,3)
Nodal displacements. Nodal displacements.
""" """
return displacement_avg_node(size,F) + displacement_fluct_node(size,F) return displacement_avg_node(size,F) + displacement_fluct_node(size,F)
def coordinates_node(size: _np.ndarray, F: _np.ndarray, origin: _np.ndarray = _np.zeros(3)) -> _np.ndarray: def coordinates_node(size: _FloatSequence, F: _np.ndarray, origin: _FloatSequence = _np.zeros(3)) -> _np.ndarray:
""" """
Nodal positions. Nodal positions.
Parameters Parameters
---------- ----------
size : numpy.ndarray of shape (3) size : sequence of float, len (3)
Physical size of the periodic field. Physical size of the periodic field.
F : numpy.ndarray F : numpy.ndarray, shape (:,:,:,3,3)
Deformation gradient field. Deformation gradient field.
origin : numpy.ndarray of shape (3), optional origin : sequence of float, len (3), optional
Physical origin of the periodic field. Defaults to [0.0,0.0,0.0]. Physical origin of the periodic field. Defaults to [0.0,0.0,0.0].
Returns Returns
------- -------
x_n : numpy.ndarray x_n : numpy.ndarray, shape (:,:,:,3)
Nodal coordinates. Nodal coordinates.
""" """
@ -416,13 +419,13 @@ def coordinates_node(size: _np.ndarray, F: _np.ndarray, origin: _np.ndarray = _n
def cellsSizeOrigin_coordinates0_node(coordinates0: _np.ndarray, def cellsSizeOrigin_coordinates0_node(coordinates0: _np.ndarray,
ordered: bool = True) -> Tuple[_np.ndarray,_np.ndarray,_np.ndarray]: ordered: bool = True) -> _Tuple[_np.ndarray,_np.ndarray,_np.ndarray]:
""" """
Return grid 'DNA', i.e. cells, size, and origin from 1D array of nodal positions. Return grid 'DNA', i.e. cells, size, and origin from 1D array of nodal positions.
Parameters Parameters
---------- ----------
coordinates0 : numpy.ndarray of shape (:,3) coordinates0 : numpy.ndarray, shape (:,3)
Undeformed nodal coordinates. Undeformed nodal coordinates.
ordered : bool, optional ordered : bool, optional
Expect coordinates0 data to be ordered (x fast, z slow). Expect coordinates0 data to be ordered (x fast, z slow).
@ -437,7 +440,7 @@ def cellsSizeOrigin_coordinates0_node(coordinates0: _np.ndarray,
coords = [_np.unique(coordinates0[:,i]) for i in range(3)] coords = [_np.unique(coordinates0[:,i]) for i in range(3)]
mincorner = _np.array(list(map(min,coords))) mincorner = _np.array(list(map(min,coords)))
maxcorner = _np.array(list(map(max,coords))) maxcorner = _np.array(list(map(max,coords)))
cells = _np.array(list(map(len,coords)),'i') - 1 cells = _np.array(list(map(len,coords)),int) - 1
size = maxcorner-mincorner size = maxcorner-mincorner
origin = mincorner origin = mincorner
@ -463,12 +466,12 @@ def point_to_node(cell_data: _np.ndarray) -> _np.ndarray:
Parameters Parameters
---------- ----------
cell_data : numpy.ndarray of shape (:,:,:,...) cell_data : numpy.ndarray, shape (:,:,:,...)
Data defined on the cell centers of a periodic grid. Data defined on the cell centers of a periodic grid.
Returns Returns
------- -------
node_data : numpy.ndarray of shape (:,:,:,...) node_data : numpy.ndarray, shape (:,:,:,...)
Data defined on the nodes of a periodic grid. Data defined on the nodes of a periodic grid.
""" """
@ -485,12 +488,12 @@ def node_to_point(node_data: _np.ndarray) -> _np.ndarray:
Parameters Parameters
---------- ----------
node_data : numpy.ndarray of shape (:,:,:,...) node_data : numpy.ndarray, shape (:,:,:,...)
Data defined on the nodes of a periodic grid. Data defined on the nodes of a periodic grid.
Returns Returns
------- -------
cell_data : numpy.ndarray of shape (:,:,:,...) cell_data : numpy.ndarray, shape (:,:,:,...)
Data defined on the cell centers of a periodic grid. Data defined on the cell centers of a periodic grid.
""" """
@ -507,7 +510,7 @@ def coordinates0_valid(coordinates0: _np.ndarray) -> bool:
Parameters Parameters
---------- ----------
coordinates0 : numpy.ndarray coordinates0 : numpy.ndarray, shape (:,3)
Array of undeformed cell coordinates. Array of undeformed cell coordinates.
Returns Returns
@ -523,17 +526,17 @@ def coordinates0_valid(coordinates0: _np.ndarray) -> bool:
return False return False
def regrid(size: _np.ndarray, F: _np.ndarray, cells: Union[_np.ndarray,Sequence[int]]) -> _np.ndarray: def regrid(size: _FloatSequence, F: _np.ndarray, cells: _IntSequence) -> _np.ndarray:
""" """
Return mapping from coordinates in deformed configuration to a regular grid. Return mapping from coordinates in deformed configuration to a regular grid.
Parameters Parameters
---------- ----------
size : numpy.ndarray of shape (3) size : sequence of float, len (3)
Physical size. Physical size.
F : numpy.ndarray of shape (:,:,:,3,3) F : numpy.ndarray, shape (:,:,:,3,3)
Deformation gradient field. Deformation gradient field.
cells : numpy.ndarray of shape (3) cells : sequence of int, len (3)
Cell count along x,y,z of remapping grid. Cell count along x,y,z of remapping grid.
""" """

View File

@ -5,7 +5,7 @@ All routines operate on numpy.ndarrays of shape (...,3,3).
""" """
from typing import Sequence from typing import Sequence as _Sequence
import numpy as _np import numpy as _np
@ -243,7 +243,7 @@ def stretch_right(T: _np.ndarray) -> _np.ndarray:
return _polar_decomposition(T,'U')[0] return _polar_decomposition(T,'U')[0]
def _polar_decomposition(T: _np.ndarray, requested: Sequence[str]) -> tuple: def _polar_decomposition(T: _np.ndarray, requested: _Sequence[str]) -> tuple:
""" """
Perform singular value decomposition. Perform singular value decomposition.

View File

@ -1,25 +1,27 @@
"""Functionality for generation of seed points for Voronoi or Laguerre tessellation.""" """Functionality for generation of seed points for Voronoi or Laguerre tessellation."""
from typing import Sequence,Tuple from typing import Tuple as _Tuple
from scipy import spatial as _spatial from scipy import spatial as _spatial
import numpy as _np import numpy as _np
from ._typehints import FloatSequence as _FloatSequence, IntSequence as _IntSequence
from . import util as _util from . import util as _util
from . import grid_filters as _grid_filters from . import grid_filters as _grid_filters
def from_random(size: _np.ndarray, N_seeds: int, cells: _np.ndarray = None, rng_seed=None) -> _np.ndarray: def from_random(size: _FloatSequence, N_seeds: int, cells: _IntSequence = None,
rng_seed=None) -> _np.ndarray:
""" """
Place seeds randomly in space. Place seeds randomly in space.
Parameters Parameters
---------- ----------
size : numpy.ndarray of shape (3) size : sequence of float, len (3)
Physical size of the seeding domain. Physical size of the seeding domain.
N_seeds : int N_seeds : int
Number of seeds. Number of seeds.
cells : numpy.ndarray of shape (3), optional. cells : sequence of int, len (3), optional.
If given, ensures that each seed results in a grain when a standard Voronoi If given, ensures that each seed results in a grain when a standard Voronoi
tessellation is performed using the given grid resolution (i.e. size/cells). tessellation is performed using the given grid resolution (i.e. size/cells).
rng_seed : {None, int, array_like[ints], SeedSequence, BitGenerator, Generator}, optional rng_seed : {None, int, array_like[ints], SeedSequence, BitGenerator, Generator}, optional
@ -28,29 +30,30 @@ def from_random(size: _np.ndarray, N_seeds: int, cells: _np.ndarray = None, rng_
Returns Returns
------- -------
coords : numpy.ndarray of shape (N_seeds,3) coords : numpy.ndarray, shape (N_seeds,3)
Seed coordinates in 3D space. Seed coordinates in 3D space.
""" """
size_ = _np.array(size,float)
rng = _np.random.default_rng(rng_seed) rng = _np.random.default_rng(rng_seed)
if cells is None: if cells is None:
coords = rng.random((N_seeds,3)) * size coords = rng.random((N_seeds,3)) * size_
else: else:
grid_coords = _grid_filters.coordinates0_point(cells,size).reshape(-1,3,order='F') grid_coords = _grid_filters.coordinates0_point(cells,size).reshape(-1,3,order='F')
coords = grid_coords[rng.choice(_np.prod(cells),N_seeds, replace=False)] \ coords = grid_coords[rng.choice(_np.prod(cells),N_seeds, replace=False)] \
+ _np.broadcast_to(size/cells,(N_seeds,3))*(rng.random((N_seeds,3))*.5-.25) # wobble without leaving cells + _np.broadcast_to(size_/_np.array(cells,int),(N_seeds,3))*(rng.random((N_seeds,3))*.5-.25) # wobble w/o leaving grid
return coords return coords
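A short usage sketch of from_random with the relaxed sequence-typed arguments; parameter names are taken from the signature above, and the import is assumed to point at this updated damask package:

    import numpy as np
    from damask import seeds

    size, cells, N_seeds = [1.0, 1.0, 1.0], [16, 16, 16], 20   # plain sequences are accepted now

    # passing cells keeps each seed inside its own cell of the prospective grid
    coords = seeds.from_random(size, N_seeds, cells, rng_seed=0)
    assert coords.shape == (N_seeds, 3) and (coords < np.array(size)).all()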
def from_Poisson_disc(size: _np.ndarray, N_seeds: int, N_candidates: int, distance: float, def from_Poisson_disc(size: _FloatSequence, N_seeds: int, N_candidates: int, distance: float,
periodic: bool = True, rng_seed=None) -> _np.ndarray: periodic: bool = True, rng_seed=None) -> _np.ndarray:
""" """
Place seeds according to a Poisson disc distribution. Place seeds according to a Poisson disc distribution.
Parameters Parameters
---------- ----------
size : numpy.ndarray of shape (3) size : sequence of float, len (3)
Physical size of the seeding domain. Physical size of the seeding domain.
N_seeds : int N_seeds : int
Number of seeds. Number of seeds.
@ -58,7 +61,7 @@ def from_Poisson_disc(size: _np.ndarray, N_seeds: int, N_candidates: int, distan
Number of candidates to consider for finding best candidate. Number of candidates to consider for finding best candidate.
distance : float distance : float
Minimum acceptable distance to other seeds. Minimum acceptable distance to other seeds.
periodic : boolean, optional periodic : bool, optional
Calculate minimum distance for periodically repeated grid. Calculate minimum distance for periodically repeated grid.
rng_seed : {None, int, array_like[ints], SeedSequence, BitGenerator, Generator}, optional rng_seed : {None, int, array_like[ints], SeedSequence, BitGenerator, Generator}, optional
A seed to initialize the BitGenerator. Defaults to None. A seed to initialize the BitGenerator. Defaults to None.
@ -66,13 +69,13 @@ def from_Poisson_disc(size: _np.ndarray, N_seeds: int, N_candidates: int, distan
Returns Returns
------- -------
coords : numpy.ndarray of shape (N_seeds,3) coords : numpy.ndarray, shape (N_seeds,3)
Seed coordinates in 3D space. Seed coordinates in 3D space.
""" """
rng = _np.random.default_rng(rng_seed) rng = _np.random.default_rng(rng_seed)
coords = _np.empty((N_seeds,3)) coords = _np.empty((N_seeds,3))
coords[0] = rng.random(3) * size coords[0] = rng.random(3) * _np.array(size,float)
s = 1 s = 1
i = 0 i = 0
@ -96,8 +99,8 @@ def from_Poisson_disc(size: _np.ndarray, N_seeds: int, N_candidates: int, distan
return coords return coords
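For comparison, a hedged call sketch for from_Poisson_disc using the parameter names from the signature above (values chosen loosely so the distance criterion is easy to satisfy):

    from damask import seeds

    coords = seeds.from_Poisson_disc(size=[1.0, 1.0, 1.0], N_seeds=20,
                                     N_candidates=10, distance=0.1,
                                     periodic=True, rng_seed=0)   # -> shape (20, 3)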
def from_grid(grid, selection: Sequence[int] = None, def from_grid(grid, selection: _IntSequence = None, invert_selection: bool = False,
invert: bool = False, average: bool = False, periodic: bool = True) -> Tuple[_np.ndarray, _np.ndarray]: average: bool = False, periodic: bool = True) -> _Tuple[_np.ndarray, _np.ndarray]:
""" """
Create seeds from grid description. Create seeds from grid description.
@ -105,24 +108,24 @@ def from_grid(grid, selection: Sequence[int] = None,
---------- ----------
grid : damask.Grid grid : damask.Grid
Grid from which the material IDs are used as seeds. Grid from which the material IDs are used as seeds.
selection : iterable of integers, optional selection : sequence of int, optional
Material IDs to consider. Material IDs to consider.
invert : boolean, false invert_selection : bool, optional
Consider all material IDs except those in selection. Defaults to False. Consider all material IDs except those in selection. Defaults to False.
average : boolean, optional average : bool, optional
Seed corresponds to center of gravity of material ID cloud. Seed corresponds to center of gravity of material ID cloud.
periodic : boolean, optional periodic : bool, optional
Center of gravity accounts for periodic boundaries. Center of gravity accounts for periodic boundaries.
Returns Returns
------- -------
coords, materials : numpy.ndarray of shape (:,3), numpy.ndarray of shape (:) coords, materials : numpy.ndarray, shape (:,3); numpy.ndarray, shape (:)
Seed coordinates in 3D space, material IDs. Seed coordinates in 3D space, material IDs.
""" """
material = grid.material.reshape((-1,1),order='F') material = grid.material.reshape((-1,1),order='F')
mask = _np.full(grid.cells.prod(),True,dtype=bool) if selection is None else \ mask = _np.full(grid.cells.prod(),True,dtype=bool) if selection is None else \
_np.isin(material,selection,invert=invert).flatten() _np.isin(material,selection,invert=invert_selection).flatten()
coords = _grid_filters.coordinates0_point(grid.cells,grid.size).reshape(-1,3,order='F') coords = _grid_filters.coordinates0_point(grid.cells,grid.size).reshape(-1,3,order='F')
if not average: if not average:
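The keyword rename from invert to invert_selection is exercised in the seeds test further down; a sketch of the new call, assuming the updated damask package:

    import numpy as np
    from damask import Grid, seeds

    size, cells = np.ones(3), np.array([16, 16, 16])
    g = Grid.from_Voronoi_tessellation(cells, size, seeds.from_random(size, 10, cells, rng_seed=0))

    # exclude material ID 1 instead of selecting it
    coords, material = seeds.from_grid(g, selection=[1], invert_selection=True)
    assert 1 not in material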

View File

@ -18,11 +18,11 @@ from . import version
__all__=[ __all__=[
'srepr', 'srepr',
'emph', 'deemph', 'warn', 'strikeout', 'emph', 'deemph', 'warn', 'strikeout',
'run', 'run',
'natural_sort', 'natural_sort',
'show_progress', 'show_progress',
'scale_to_coprime', 'scale_to_coprime',
'project_stereographic', 'project_equal_angle', 'project_equal_area',
'hybrid_IA', 'hybrid_IA',
'execution_stamp', 'execution_stamp',
'shapeshifter', 'shapeblender', 'shapeshifter', 'shapeblender',
@ -267,13 +267,13 @@ def scale_to_coprime(v):
return m return m
def project_stereographic(vector,direction='z',normalize=True,keepdims=False): def project_equal_angle(vector,direction='z',normalize=True,keepdims=False):
""" """
Apply stereographic projection to vector. Apply equal-angle projection to vector.
Parameters Parameters
---------- ----------
vector : numpy.ndarray of shape (...,3) vector : numpy.ndarray, shape (...,3)
Vector coordinates to be projected. Vector coordinates to be projected.
direction : str direction : str
Projection direction 'x', 'y', or 'z'. Projection direction 'x', 'y', or 'z'.
@ -281,32 +281,74 @@ def project_stereographic(vector,direction='z',normalize=True,keepdims=False):
normalize : bool normalize : bool
Ensure unit length of input vector. Defaults to True. Ensure unit length of input vector. Defaults to True.
keepdims : bool keepdims : bool
Maintain three-dimensional output coordinates. Maintain three-dimensional output coordinates. Defaults to False.
Default two-dimensional output uses right-handed frame spanned by Two-dimensional output uses right-handed frame spanned by
the next and next-next axis relative to the projection direction, the next and next-next axis relative to the projection direction,
e.g. x-y when projecting along z and z-x when projecting along y. e.g. x-y when projecting along z and z-x when projecting along y.
Returns Returns
------- -------
coordinates : numpy.ndarray of shape (...,2 | 3) coordinates : numpy.ndarray, shape (...,2 | 3)
Projected coordinates. Projected coordinates.
Examples Examples
-------- --------
>>> import damask >>> import damask
>>> import numpy as np >>> import numpy as np
>>> project_stereographic(np.ones(3)) >>> project_equal_angle(np.ones(3))
[0.3660254, 0.3660254] [0.3660254, 0.3660254]
>>> project_stereographic(np.ones(3),direction='x',normalize=False,keepdims=True) >>> project_equal_angle(np.ones(3),direction='x',normalize=False,keepdims=True)
[0, 0.5, 0.5] [0, 0.5, 0.5]
>>> project_stereographic([0,1,1],direction='y',normalize=True,keepdims=False) >>> project_equal_angle([0,1,1],direction='y',normalize=True,keepdims=False)
[0.41421356, 0] [0.41421356, 0]
""" """
shift = 'zyx'.index(direction) shift = 'zyx'.index(direction)
v_ = np.roll(vector/np.linalg.norm(vector,axis=-1,keepdims=True) if normalize else vector, v = np.roll(vector/np.linalg.norm(vector,axis=-1,keepdims=True) if normalize else vector,
shift,axis=-1) shift,axis=-1)
return np.roll(np.block([v_[...,:2]/(1+np.abs(v_[...,2:3])),np.zeros_like(v_[...,2:3])]), return np.roll(np.block([v[...,:2]/(1.0+np.abs(v[...,2:3])),np.zeros_like(v[...,2:3])]),
-shift if keepdims else 0,axis=-1)[...,:3 if keepdims else 2]
def project_equal_area(vector,direction='z',normalize=True,keepdims=False):
"""
Apply equal-area projection to vector.
Parameters
----------
vector : numpy.ndarray, shape (...,3)
Vector coordinates to be projected.
direction : str
Projection direction 'x', 'y', or 'z'.
Defaults to 'z'.
normalize : bool
Ensure unit length of input vector. Defaults to True.
keepdims : bool
Maintain three-dimensional output coordinates. Defaults to False.
Two-dimensional output uses right-handed frame spanned by
the next and next-next axis relative to the projection direction,
e.g. x-y when projecting along z and z-x when projecting along y.
Returns
-------
coordinates : numpy.ndarray, shape (...,2 | 3)
Projected coordinates.
Examples
--------
>>> import damask
>>> import numpy as np
>>> project_equal_area(np.ones(3))
[0.45970084, 0.45970084]
>>> project_equal_area(np.ones(3),direction='x',normalize=False,keepdims=True)
[0.0, 0.70710678, 0.70710678]
>>> project_equal_area([0,1,1],direction='y',normalize=True,keepdims=False)
[0.5411961, 0.0]
"""
shift = 'zyx'.index(direction)
v = np.roll(vector/np.linalg.norm(vector,axis=-1,keepdims=True) if normalize else vector,
shift,axis=-1)
return np.roll(np.block([v[...,:2]/np.sqrt(1.0+np.abs(v[...,2:3])),np.zeros_like(v[...,2:3])]),
-shift if keepdims else 0,axis=-1)[...,:3 if keepdims else 2] -shift if keepdims else 0,axis=-1)[...,:3 if keepdims else 2]
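The two projections differ only in the denominator applied to the in-plane components: equal-angle (the former project_stereographic) divides by 1+|z|, equal-area by sqrt(1+|z|). A self-contained numpy sketch for the 'z' direction only, reproducing the docstring values:

    import numpy as np

    def _project(v, kind='angle'):
        v = np.asarray(v, float)
        v = v / np.linalg.norm(v)                                   # normalize=True case
        d = 1.0 + abs(v[2]) if kind == 'angle' else np.sqrt(1.0 + abs(v[2]))
        return v[:2] / d

    print(_project(np.ones(3), 'angle'))   # ~ [0.3660254, 0.3660254]
    print(_project(np.ones(3), 'area'))    # ~ [0.45970084, 0.45970084]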

View File

@ -139,6 +139,16 @@ class TestColormap:
c += c c += c
assert (np.allclose(c.colors[:len(c.colors)//2],c.colors[len(c.colors)//2:])) assert (np.allclose(c.colors[:len(c.colors)//2],c.colors[len(c.colors)//2:]))
@pytest.mark.parametrize('N,cmap,at,result',[
(8,'gray',0.5,[0.5,0.5,0.5]),
(17,'gray',0.5,[0.5,0.5,0.5]),
(17,'gray',[0.5,0.75],[[0.5,0.5,0.5],[0.75,0.75,0.75]]),
])
def test_at_value(self, N, cmap, at, result):
assert np.allclose(Colormap.from_predefined(cmap,N=N).at(at)[...,:3],
result,
rtol=0.005)
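A one-off interactive version of the new test, assuming Colormap.at behaves as exercised here:

    import numpy as np
    from damask import Colormap

    c = Colormap.from_predefined('gray', N=17)
    assert np.allclose(c.at([0.5, 0.75])[..., :3],
                       [[0.5, 0.5, 0.5], [0.75, 0.75, 0.75]], rtol=0.005)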
@pytest.mark.parametrize('bounds',[None,[2,10]]) @pytest.mark.parametrize('bounds',[None,[2,10]])
def test_shade(self,ref_path,update,bounds): def test_shade(self,ref_path,update,bounds):
data = np.add(*np.indices((10, 11))) data = np.add(*np.indices((10, 11)))

View File

@ -79,3 +79,23 @@ class TestCrystal:
a=a,b=b,c=c, a=a,b=b,c=c,
alpha=alpha,beta=beta,gamma=gamma) alpha=alpha,beta=beta,gamma=gamma)
assert np.allclose(points,c.lattice_points) assert np.allclose(points,c.lattice_points)
@pytest.mark.parametrize('crystal,length',
[(Crystal(lattice='cF'),[12,6]),
(Crystal(lattice='cI'),[12,12,24]),
(Crystal(lattice='hP'),[3,3,6,12,6]),
(Crystal(lattice='tI',c=1.2),[2,2,2,4,2,4,2,2,4,8,4,8,8])
])
def test_N_slip(self,crystal,length):
assert [len(s) for s in crystal.kinematics('slip')['direction']] == length
assert [len(s) for s in crystal.kinematics('slip')['plane']] == length
@pytest.mark.parametrize('crystal,length',
[(Crystal(lattice='cF'),[12]),
(Crystal(lattice='cI'),[12]),
(Crystal(lattice='hP'),[6,6,6,6]),
])
def test_N_twin(self,crystal,length):
assert [len(s) for s in crystal.kinematics('twin')['direction']] == length
assert [len(s) for s in crystal.kinematics('twin')['plane']] == length
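The same counts can be checked interactively; a sketch mirroring the fcc case from the parametrization above:

    from damask import Crystal

    kin = Crystal(lattice='cF').kinematics('slip')
    assert [len(d) for d in kin['direction']] == [12, 6]   # two slip families for cF, as asserted above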

View File

@ -237,12 +237,27 @@ class TestGrid:
modified) modified)
def test_canvas(self,default): def test_canvas_extend(self,default):
cells = default.cells cells = default.cells
grid_add = np.random.randint(0,30,(3)) cells_add = np.random.randint(0,30,(3))
modified = default.canvas(cells + grid_add) modified = default.canvas(cells + cells_add)
assert np.all(modified.material[:cells[0],:cells[1],:cells[2]] == default.material) assert np.all(modified.material[:cells[0],:cells[1],:cells[2]] == default.material)
@pytest.mark.parametrize('sign',[+1,-1])
@pytest.mark.parametrize('extra_offset',[0,-1])
def test_canvas_move_out(self,sign,extra_offset):
g = Grid(np.zeros(np.random.randint(3,30,(3)),int),np.ones(3))
o = sign*np.ones(3)*g.cells.min() +extra_offset*sign
if extra_offset == 0:
assert np.all(g.canvas(offset=o).material == 1)
else:
assert np.all(np.unique(g.canvas(offset=o).material) == (0,1))
def test_canvas_cells(self,default):
g = Grid(np.zeros(np.random.randint(3,30,(3)),int),np.ones(3))
cells = np.random.randint(1,30,(3))
offset = np.random.randint(-30,30,(3))
assert np.all(g.canvas(cells,offset).cells == cells)
@pytest.mark.parametrize('center1,center2',[(np.random.random(3)*.5,np.random.random()*8), @pytest.mark.parametrize('center1,center2',[(np.random.random(3)*.5,np.random.random()*8),
(np.random.randint(4,8,(3)),np.random.randint(9,12,(3)))]) (np.random.randint(4,8,(3)),np.random.randint(9,12,(3)))])

View File

@ -25,7 +25,7 @@ def default(tmp_path,ref_path):
fname = '12grains6x7x8_tensionY.hdf5' fname = '12grains6x7x8_tensionY.hdf5'
shutil.copy(ref_path/fname,tmp_path) shutil.copy(ref_path/fname,tmp_path)
f = Result(tmp_path/fname) f = Result(tmp_path/fname)
return f.view('times',20.0) return f.view(times=20.0)
@pytest.fixture @pytest.fixture
def single_phase(tmp_path,ref_path): def single_phase(tmp_path,ref_path):
@ -58,14 +58,14 @@ class TestResult:
def test_view_all(self,default): def test_view_all(self,default):
a = default.view('increments',True).get('F') a = default.view(increments=True).get('F')
assert dict_equal(a,default.view('increments','*').get('F')) assert dict_equal(a,default.view(increments='*').get('F'))
assert dict_equal(a,default.view('increments',default.increments_in_range(0,np.iinfo(int).max)).get('F')) assert dict_equal(a,default.view(increments=default.increments_in_range(0,np.iinfo(int).max)).get('F'))
assert dict_equal(a,default.view('times',True).get('F')) assert dict_equal(a,default.view(times=True).get('F'))
assert dict_equal(a,default.view('times','*').get('F')) assert dict_equal(a,default.view(times='*').get('F'))
assert dict_equal(a,default.view('times',default.times_in_range(0.0,np.inf)).get('F')) assert dict_equal(a,default.view(times=default.times_in_range(0.0,np.inf)).get('F'))
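Result.view changes from positional ('what', value) pairs to keyword arguments, and protected=False/True replaces modification_enable/disable. A sketch of the new calls (the HDF5 file name is the reference file used by these tests and must exist locally):

    from damask import Result

    r = Result('12grains6x7x8_tensionY.hdf5')
    r_last     = r.view(increments=-1)           # formerly r.view('increments',-1)
    r_writable = r_last.view(protected=False)    # formerly r_last.modification_enable()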
@pytest.mark.parametrize('what',['increments','times','phases','fields']) # ToDo: discuss homogenizations @pytest.mark.parametrize('what',['increments','times','phases','fields']) # ToDo: discuss homogenizations
def test_view_none(self,default,what): def test_view_none(self,default,what):
@ -314,7 +314,7 @@ class TestResult:
@pytest.mark.parametrize('overwrite',['off','on']) @pytest.mark.parametrize('overwrite',['off','on'])
def test_add_overwrite(self,default,overwrite): def test_add_overwrite(self,default,overwrite):
last = default.view('increments',-1) last = default.view(increments=-1)
last.add_stress_Cauchy() last.add_stress_Cauchy()
@ -322,9 +322,9 @@ class TestResult:
created_first = datetime.strptime(created_first,'%Y-%m-%d %H:%M:%S%z') created_first = datetime.strptime(created_first,'%Y-%m-%d %H:%M:%S%z')
if overwrite == 'on': if overwrite == 'on':
last = last.modification_enable() last = last.view(protected=False)
else: else:
last = last.modification_disable() last = last.view(protected=True)
time.sleep(2.) time.sleep(2.)
try: try:
@ -344,10 +344,10 @@ class TestResult:
def test_rename(self,default,allowed): def test_rename(self,default,allowed):
if allowed == 'on': if allowed == 'on':
F = default.place('F') F = default.place('F')
default = default.modification_enable() default = default.view(protected=False)
default.rename('F','new_name') default.rename('F','new_name')
assert np.all(F == default.place('new_name')) assert np.all(F == default.place('new_name'))
default = default.modification_disable() default = default.view(protected=True)
with pytest.raises(PermissionError): with pytest.raises(PermissionError):
default.rename('P','another_new_name') default.rename('P','another_new_name')
@ -355,7 +355,7 @@ class TestResult:
@pytest.mark.parametrize('allowed',['off','on']) @pytest.mark.parametrize('allowed',['off','on'])
def test_remove(self,default,allowed): def test_remove(self,default,allowed):
if allowed == 'on': if allowed == 'on':
unsafe = default.modification_enable() unsafe = default.view(protected=False)
unsafe.remove('F') unsafe.remove('F')
assert unsafe.get('F') is None assert unsafe.get('F') is None
else: else:
@ -377,7 +377,7 @@ class TestResult:
@pytest.mark.parametrize('inc',[4,0],ids=range(2)) @pytest.mark.parametrize('inc',[4,0],ids=range(2))
@pytest.mark.xfail(int(vtk.vtkVersion.GetVTKVersion().split('.')[0])<9, reason='missing "Direction" attribute') @pytest.mark.xfail(int(vtk.vtkVersion.GetVTKVersion().split('.')[0])<9, reason='missing "Direction" attribute')
def test_vtk(self,request,tmp_path,ref_path,update,patch_execution_stamp,patch_datetime_now,output,fname,inc): def test_vtk(self,request,tmp_path,ref_path,update,patch_execution_stamp,patch_datetime_now,output,fname,inc):
result = Result(ref_path/fname).view('increments',inc) result = Result(ref_path/fname).view(increments=inc)
os.chdir(tmp_path) os.chdir(tmp_path)
result.export_VTK(output,parallel=False) result.export_VTK(output,parallel=False)
fname = fname.split('.')[0]+f'_inc{(inc if type(inc) == int else inc[0]):0>2}.vti' fname = fname.split('.')[0]+f'_inc{(inc if type(inc) == int else inc[0]):0>2}.vti'
@ -400,7 +400,7 @@ class TestResult:
result.export_VTK(output,mode) result.export_VTK(output,mode)
def test_marc_coordinates(self,ref_path): def test_marc_coordinates(self,ref_path):
result = Result(ref_path/'check_compile_job1.hdf5').view('increments',-1) result = Result(ref_path/'check_compile_job1.hdf5').view(increments=-1)
c_n = result.coordinates0_node + result.get('u_n') c_n = result.coordinates0_node + result.get('u_n')
c_p = result.coordinates0_point + result.get('u_p') c_p = result.coordinates0_point + result.get('u_p')
assert len(c_n) > len(c_p) assert len(c_n) > len(c_p)
@ -440,7 +440,7 @@ class TestResult:
dim_xdmf = reader_xdmf.GetOutput().GetDimensions() dim_xdmf = reader_xdmf.GetOutput().GetDimensions()
bounds_xdmf = reader_xdmf.GetOutput().GetBounds() bounds_xdmf = reader_xdmf.GetOutput().GetBounds()
single_phase.view('increments',0).export_VTK(parallel=False) single_phase.view(increments=0).export_VTK(parallel=False)
fname = os.path.splitext(os.path.basename(single_phase.fname))[0]+'_inc00.vti' fname = os.path.splitext(os.path.basename(single_phase.fname))[0]+'_inc00.vti'
reader_vti = vtk.vtkXMLImageDataReader() reader_vti = vtk.vtkXMLImageDataReader()
reader_vti.SetFileName(fname) reader_vti.SetFileName(fname)

View File

@ -20,7 +20,7 @@ def default():
"""Simple VTK.""" """Simple VTK."""
cells = np.array([5,6,7],int) cells = np.array([5,6,7],int)
size = np.array([.6,1.,.5]) size = np.array([.6,1.,.5])
return VTK.from_rectilinear_grid(cells,size) return VTK.from_image_data(cells,size)
class TestVTK: class TestVTK:
@ -116,7 +116,7 @@ class TestVTK:
def test_add_extension(self,tmp_path,default): def test_add_extension(self,tmp_path,default):
default.save(tmp_path/'default.txt',parallel=False) default.save(tmp_path/'default.txt',parallel=False)
assert os.path.isfile(tmp_path/'default.txt.vtr') assert os.path.isfile(tmp_path/'default.txt.vti')
def test_invalid_get(self,default): def test_invalid_get(self,default):
@ -160,7 +160,7 @@ class TestVTK:
def test_comments(self,tmp_path,default): def test_comments(self,tmp_path,default):
default.add_comments(['this is a comment']) default.add_comments(['this is a comment'])
default.save(tmp_path/'with_comments',parallel=False) default.save(tmp_path/'with_comments',parallel=False)
new = VTK.load(tmp_path/'with_comments.vtr') new = VTK.load(tmp_path/'with_comments.vti')
assert new.get_comments() == ['this is a comment'] assert new.get_comments() == ['this is a comment']
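The default VTK fixture now produces image data, so saved files carry the .vti extension instead of .vtr. A sketch, assuming the updated damask package:

    import numpy as np
    from damask import VTK

    v = VTK.from_image_data(np.array([5, 6, 7], int), np.array([.6, 1., .5]))
    v.add_comments(['this is a comment'])
    v.save('with_comments', parallel=False)      # writes with_comments.vti
    assert VTK.load('with_comments.vti').get_comments() == ['this is a comment']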
@pytest.mark.xfail(int(vtk.vtkVersion.GetVTKVersion().split('.')[0])<8, reason='missing METADATA') @pytest.mark.xfail(int(vtk.vtkVersion.GetVTKVersion().split('.')[0])<8, reason='missing METADATA')

View File

@ -2,6 +2,8 @@ import pytest
import numpy as np import numpy as np
from damask import grid_filters from damask import grid_filters
from damask import Grid
from damask import seeds
class TestGridFilters: class TestGridFilters:
@ -139,12 +141,19 @@ class TestGridFilters:
else: else:
function(unordered,mode) function(unordered,mode)
def test_regrid(self): def test_regrid_identity(self):
size = np.random.random(3) size = np.random.random(3)
cells = np.random.randint(8,32,(3)) cells = np.random.randint(8,32,(3))
F = np.broadcast_to(np.eye(3), tuple(cells)+(3,3)) F = np.broadcast_to(np.eye(3), tuple(cells)+(3,3))
assert all(grid_filters.regrid(size,F,cells) == np.arange(cells.prod())) assert all(grid_filters.regrid(size,F,cells) == np.arange(cells.prod()))
def test_regrid_double_cells(self):
size = np.random.random(3)
cells = np.random.randint(8,32,(3))
g = Grid.from_Voronoi_tessellation(cells,size,seeds.from_random(size,10))
F = np.broadcast_to(np.eye(3), tuple(cells)+(3,3))
assert all(g.scale(cells*2).material.flatten() ==
g.material.flatten()[grid_filters.regrid(size,F,cells*2)])
@pytest.mark.parametrize('differential_operator',[grid_filters.curl, @pytest.mark.parametrize('differential_operator',[grid_filters.curl,
grid_filters.divergence, grid_filters.divergence,

View File

@ -67,5 +67,5 @@ class TestSeeds:
coords = seeds.from_random(size,N_seeds,cells) coords = seeds.from_random(size,N_seeds,cells)
grid = Grid.from_Voronoi_tessellation(cells,size,coords) grid = Grid.from_Voronoi_tessellation(cells,size,coords)
selection=np.random.randint(N_seeds)+1 selection=np.random.randint(N_seeds)+1
coords,material = seeds.from_grid(grid,average=average,periodic=periodic,invert=invert,selection=[selection]) coords,material = seeds.from_grid(grid,average=average,periodic=periodic,invert_selection=invert,selection=[selection])
assert selection not in material if invert else (selection==material).all() assert selection not in material if invert else (selection==material).all()

View File

@ -59,9 +59,22 @@ class TestUtil:
([1,1,0],'x',False,False,[0.5,0]), ([1,1,0],'x',False,False,[0.5,0]),
([1,1,1],'y',True, True, [0.3660254, 0,0.3660254]), ([1,1,1],'y',True, True, [0.3660254, 0,0.3660254]),
]) ])
def test_project_stereographic(self,point,direction,normalize,keepdims,answer): def test_project_equal_angle(self,point,direction,normalize,keepdims,answer):
assert np.allclose(util.project_stereographic(np.array(point),direction=direction, assert np.allclose(util.project_equal_angle(np.array(point),direction=direction,
normalize=normalize,keepdims=keepdims),answer) normalize=normalize,keepdims=keepdims),answer)
@pytest.mark.parametrize('point,direction,normalize,keepdims,answer',
[
([1,0,0],'z',False,True, [1,0,0]),
([1,0,0],'z',True, False,[1,0]),
([0,1,1],'z',False,True, [0,0.70710678,0]),
([0,1,1],'y',True, False,[0.5411961,0]),
([1,1,0],'x',False,False,[0.70710678,0]),
([1,1,1],'y',True, True, [0.45970084,0,0.45970084]),
])
def test_project_equal_area(self,point,direction,normalize,keepdims,answer):
assert np.allclose(util.project_equal_area(np.array(point),direction=direction,
normalize=normalize,keepdims=keepdims),answer)
@pytest.mark.parametrize('fro,to,mode,answer', @pytest.mark.parametrize('fro,to,mode,answer',
[ [

View File

@ -6,7 +6,7 @@
module LAPACK_interface module LAPACK_interface
interface interface
subroutine dgeev(jobvl,jobvr,n,a,lda,wr,wi,vl,ldvl,vr,ldvr,work,lwork,info) pure subroutine dgeev(jobvl,jobvr,n,a,lda,wr,wi,vl,ldvl,vr,ldvr,work,lwork,info)
use prec use prec
character, intent(in) :: jobvl,jobvr character, intent(in) :: jobvl,jobvr
integer, intent(in) :: n,lda,ldvl,ldvr,lwork integer, intent(in) :: n,lda,ldvl,ldvr,lwork
@ -18,16 +18,16 @@ module LAPACK_interface
integer, intent(out) :: info integer, intent(out) :: info
end subroutine dgeev end subroutine dgeev
subroutine dgesv(n,nrhs,a,lda,ipiv,b,ldb,info) pure subroutine dgesv(n,nrhs,a,lda,ipiv,b,ldb,info)
use prec use prec
integer, intent(in) :: n,nrhs,lda,ldb integer, intent(in) :: n,nrhs,lda,ldb
real(pReal), intent(inout), dimension(lda,n) :: a real(pReal), intent(inout), dimension(lda,n) :: a
integer, intent(out), dimension(n) :: ipiv integer, intent(out), dimension(n) :: ipiv
real(pReal), intent(out), dimension(ldb,nrhs) :: b real(pReal), intent(inout), dimension(ldb,nrhs) :: b
integer, intent(out) :: info integer, intent(out) :: info
end subroutine dgesv end subroutine dgesv
subroutine dgetrf(m,n,a,lda,ipiv,info) pure subroutine dgetrf(m,n,a,lda,ipiv,info)
use prec use prec
integer, intent(in) :: m,n,lda integer, intent(in) :: m,n,lda
real(pReal), intent(inout), dimension(lda,n) :: a real(pReal), intent(inout), dimension(lda,n) :: a
@ -35,16 +35,16 @@ module LAPACK_interface
integer, intent(out) :: info integer, intent(out) :: info
end subroutine dgetrf end subroutine dgetrf
subroutine dgetri(n,a,lda,ipiv,work,lwork,info) pure subroutine dgetri(n,a,lda,ipiv,work,lwork,info)
use prec use prec
integer, intent(in) :: n,lda,lwork integer, intent(in) :: n,lda,lwork
real(pReal), intent(inout), dimension(lda,n) :: a real(pReal), intent(inout), dimension(lda,n) :: a
integer, intent(out), dimension(n) :: ipiv integer, intent(in), dimension(n) :: ipiv
real(pReal), intent(out), dimension(max(1,lwork)) :: work real(pReal), intent(out), dimension(max(1,lwork)) :: work
integer, intent(out) :: info integer, intent(out) :: info
end subroutine dgetri end subroutine dgetri
subroutine dsyev(jobz,uplo,n,a,lda,w,work,lwork,info) pure subroutine dsyev(jobz,uplo,n,a,lda,w,work,lwork,info)
use prec use prec
character, intent(in) :: jobz,uplo character, intent(in) :: jobz,uplo
integer, intent(in) :: n,lda,lwork integer, intent(in) :: n,lda,lwork

View File

@ -9,7 +9,8 @@ module constants
public public
real(pReal), parameter :: & real(pReal), parameter :: &
T_ROOM = 300.0_pReal, & !< Room temperature in K T_ROOM = 300.0_pReal, & !< Room temperature in K. ToDo: IUPAC: 298.15
K_B = 1.38e-23_pReal !< Boltzmann constant in J/Kelvin K_B = 1.38e-23_pReal, & !< Boltzmann constant in J/Kelvin
N_A = 6.02214076e23_pReal !< Avogadro constant in 1/mol
end module constants end module constants

View File

@ -2070,7 +2070,7 @@ end function getlabels
!> @brief Equivalent Poisson's ratio (ν) !> @brief Equivalent Poisson's ratio (ν)
!> @details https://doi.org/10.1143/JPSJ.20.635 !> @details https://doi.org/10.1143/JPSJ.20.635
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
function lattice_equivalent_nu(C,assumption) result(nu) pure function lattice_equivalent_nu(C,assumption) result(nu)
real(pReal), dimension(6,6), intent(in) :: C !< Stiffness tensor (Voigt notation) real(pReal), dimension(6,6), intent(in) :: C !< Stiffness tensor (Voigt notation)
character(len=5), intent(in) :: assumption !< Assumption ('Voigt' = isostrain, 'Reuss' = isostress) character(len=5), intent(in) :: assumption !< Assumption ('Voigt' = isostrain, 'Reuss' = isostress)
@ -2103,7 +2103,7 @@ end function lattice_equivalent_nu
!> @brief Equivalent shear modulus (μ) !> @brief Equivalent shear modulus (μ)
!> @details https://doi.org/10.1143/JPSJ.20.635 !> @details https://doi.org/10.1143/JPSJ.20.635
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
function lattice_equivalent_mu(C,assumption) result(mu) pure function lattice_equivalent_mu(C,assumption) result(mu)
real(pReal), dimension(6,6), intent(in) :: C !< Stiffness tensor (Voigt notation) real(pReal), dimension(6,6), intent(in) :: C !< Stiffness tensor (Voigt notation)
character(len=5), intent(in) :: assumption !< Assumption ('Voigt' = isostrain, 'Reuss' = isostress) character(len=5), intent(in) :: assumption !< Assumption ('Voigt' = isostrain, 'Reuss' = isostress)

View File

@ -512,7 +512,7 @@ end subroutine math_invert33
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
!> @brief Inversion of symmetrized 3x3x3x3 matrix !> @brief Inversion of symmetrized 3x3x3x3 matrix
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
function math_invSym3333(A) pure function math_invSym3333(A)
real(pReal),dimension(3,3,3,3) :: math_invSym3333 real(pReal),dimension(3,3,3,3) :: math_invSym3333
@ -538,7 +538,7 @@ end function math_invSym3333
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
!> @brief invert quadratic matrix of arbitrary dimension !> @brief invert quadratic matrix of arbitrary dimension
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
subroutine math_invert(InvA, error, A) pure subroutine math_invert(InvA, error, A)
real(pReal), dimension(:,:), intent(in) :: A real(pReal), dimension(:,:), intent(in) :: A
real(pReal), dimension(size(A,1),size(A,1)), intent(out) :: invA real(pReal), dimension(size(A,1),size(A,1)), intent(out) :: invA
@ -895,7 +895,7 @@ pure function math_33toVoigt6_stress(sigma) result(sigma_tilde)
sigma_tilde = [sigma(1,1), sigma(2,2), sigma(3,3), & sigma_tilde = [sigma(1,1), sigma(2,2), sigma(3,3), &
sigma(3,2), sigma(3,1), sigma(1,2)] sigma(3,2), sigma(3,1), sigma(1,2)]
end function math_33toVoigt6_stress end function math_33toVoigt6_stress
@ -910,7 +910,7 @@ pure function math_33toVoigt6_strain(epsilon) result(epsilon_tilde)
epsilon_tilde = [ epsilon(1,1), epsilon(2,2), epsilon(3,3), & epsilon_tilde = [ epsilon(1,1), epsilon(2,2), epsilon(3,3), &
2.0_pReal*epsilon(3,2), 2.0_pReal*epsilon(3,1), 2.0_pReal*epsilon(1,2)] 2.0_pReal*epsilon(3,2), 2.0_pReal*epsilon(3,1), 2.0_pReal*epsilon(1,2)]
end function math_33toVoigt6_strain end function math_33toVoigt6_strain
@ -961,45 +961,42 @@ pure function math_3333toVoigt66(m3333)
end function math_3333toVoigt66 end function math_3333toVoigt66
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
!> @brief draw a random sample from Gauss variable !> @brief Draw a sample from a normal distribution.
!> @details https://en.wikipedia.org/wiki/Box%E2%80%93Muller_transform
!> https://masuday.github.io/fortran_tutorial/random.html
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
real(pReal) function math_sampleGaussVar(mu, sigma, width) impure elemental subroutine math_normal(x,mu,sigma)
real(pReal), intent(in) :: mu, & !< mean real(pReal), intent(out) :: x
sigma !< standard deviation real(pReal), intent(in), optional :: mu, sigma
real(pReal), intent(in), optional :: width !< cut off as multiples of standard deviation
real(pReal), dimension(2) :: rnd ! random numbers real(pReal) :: sigma_, mu_
real(pReal) :: scatter, & ! normalized scatter around mean real(pReal), dimension(2) :: rnd
width_
if (abs(sigma) < tol_math_check) then
math_sampleGaussVar = mu if (present(mu)) then
mu_ = mu
else else
if (present(width)) then mu_ = 0.0_pReal
width_ = width end if
else
width_ = 3.0_pReal ! use +-3*sigma as default scatter
endif
do if (present(sigma)) then
call random_number(rnd) sigma_ = sigma
scatter = width_ * (2.0_pReal * rnd(1) - 1.0_pReal) else
if (rnd(2) <= exp(-0.5_pReal * scatter**2)) exit ! test if scattered value is drawn sigma_ = 1.0_pReal
enddo end if
math_sampleGaussVar = scatter * sigma call random_number(rnd)
endif x = mu_ + sigma_ * sqrt(-2.0_pReal*log(1.0_pReal-rnd(1)))*cos(2.0_pReal*PI*(1.0_pReal - rnd(2)))
end function math_sampleGaussVar end subroutine math_normal
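math_normal now draws from N(mu, sigma) via the Box-Muller transform instead of the old rejection sampling. A numpy transcription of the same expression (not part of the commit, only an illustration):

    import numpy as np

    def normal(rng, mu=0.0, sigma=1.0, n=1):
        r1, r2 = rng.random(n), rng.random(n)
        # x = mu + sigma*sqrt(-2*ln(1-r1))*cos(2*pi*(1-r2)), as in math_normal
        return mu + sigma*np.sqrt(-2.0*np.log(1.0 - r1))*np.cos(2.0*np.pi*(1.0 - r2))

    x = normal(np.random.default_rng(0), mu=2.0, sigma=3.0, n=1_000_000)
    print(x.mean(), x.std())   # ~2.0, ~3.0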
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
!> @brief eigenvalues and eigenvectors of symmetric matrix !> @brief eigenvalues and eigenvectors of symmetric matrix
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
subroutine math_eigh(w,v,error,m) pure subroutine math_eigh(w,v,error,m)
real(pReal), dimension(:,:), intent(in) :: m !< quadratic matrix to compute eigenvectors and values of real(pReal), dimension(:,:), intent(in) :: m !< quadratic matrix to compute eigenvectors and values of
real(pReal), dimension(size(m,1)), intent(out) :: w !< eigenvalues real(pReal), dimension(size(m,1)), intent(out) :: w !< eigenvalues
@ -1024,7 +1021,7 @@ end subroutine math_eigh
!> @author Martin Diehl, Max-Planck-Institut für Eisenforschung GmbH !> @author Martin Diehl, Max-Planck-Institut für Eisenforschung GmbH
!> @details See http://arxiv.org/abs/physics/0610206 (DSYEVH3) !> @details See http://arxiv.org/abs/physics/0610206 (DSYEVH3)
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
subroutine math_eigh33(w,v,m) pure subroutine math_eigh33(w,v,m)
real(pReal), dimension(3,3),intent(in) :: m !< 3x3 matrix to compute eigenvectors and values of real(pReal), dimension(3,3),intent(in) :: m !< 3x3 matrix to compute eigenvectors and values of
real(pReal), dimension(3), intent(out) :: w !< eigenvalues real(pReal), dimension(3), intent(out) :: w !< eigenvalues
@ -1117,7 +1114,7 @@ end function math_rotationalPart
!> @brief Eigenvalues of symmetric matrix !> @brief Eigenvalues of symmetric matrix
! will return NaN on error ! will return NaN on error
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
function math_eigvalsh(m) pure function math_eigvalsh(m)
real(pReal), dimension(:,:), intent(in) :: m !< symmetric matrix to compute eigenvalues of real(pReal), dimension(:,:), intent(in) :: m !< symmetric matrix to compute eigenvalues of
real(pReal), dimension(size(m,1)) :: math_eigvalsh real(pReal), dimension(size(m,1)) :: math_eigvalsh
@ -1140,7 +1137,7 @@ end function math_eigvalsh
!> but apparently more stable solution and has general LAPACK powered version for arbitrary sized !> matrices as fallback
!> matrices as fallback !> matrices as fallback
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
function math_eigvalsh33(m) pure function math_eigvalsh33(m)
real(pReal), intent(in), dimension(3,3) :: m !< 3x3 symmetric matrix to compute eigenvalues of real(pReal), intent(in), dimension(3,3) :: m !< 3x3 symmetric matrix to compute eigenvalues of
real(pReal), dimension(3) :: math_eigvalsh33,I real(pReal), dimension(3) :: math_eigvalsh33,I
@ -1434,6 +1431,28 @@ subroutine selfTest
if (dNeq0(math_LeviCivita(ijk(1),ijk(2),ijk(3)))) & if (dNeq0(math_LeviCivita(ijk(1),ijk(2),ijk(3)))) &
error stop 'math_LeviCivita' error stop 'math_LeviCivita'
normal_distribution: block
integer, parameter :: N = 1000000
real(pReal), dimension(:), allocatable :: r
real(pReal) :: mu, sigma
allocate(r(N))
call random_number(mu)
call random_number(sigma)
sigma = 1.0_pReal + sigma*5.0_pReal
mu = (mu-0.5_pReal)*10_pReal
call math_normal(r,mu,sigma)
if (abs(mu -sum(r)/real(N,pReal))>5.0e-2_pReal) &
error stop 'math_normal(mu)'
mu = sum(r)/real(N,pReal)
if (abs(sigma**2 -1.0_pReal/real(N-1,pReal) * sum((r-mu)**2))/sigma > 5.0e-2_pReal) &
error stop 'math_normal(sigma)'
end block normal_distribution
end subroutine selfTest end subroutine selfTest
end module math end module math

View File

@ -155,7 +155,7 @@ module phase
real(pReal), dimension(3,3) :: P real(pReal), dimension(3,3) :: P
end function phase_P end function phase_P
module function thermal_T(ph,en) result(T) pure module function thermal_T(ph,en) result(T)
integer, intent(in) :: ph,en integer, intent(in) :: ph,en
real(pReal) :: T real(pReal) :: T
end function thermal_T end function thermal_T

View File

@ -168,17 +168,17 @@ submodule(phase) mechanical
integer, intent(in) :: ph,en integer, intent(in) :: ph,en
end function plastic_dislotwin_homogenizedC end function plastic_dislotwin_homogenizedC
module function elastic_C66(ph,en) result(C66) pure module function elastic_C66(ph,en) result(C66)
real(pReal), dimension(6,6) :: C66 real(pReal), dimension(6,6) :: C66
integer, intent(in) :: ph, en integer, intent(in) :: ph, en
end function elastic_C66 end function elastic_C66
module function elastic_mu(ph,en) result(mu) pure module function elastic_mu(ph,en) result(mu)
real(pReal) :: mu real(pReal) :: mu
integer, intent(in) :: ph, en integer, intent(in) :: ph, en
end function elastic_mu end function elastic_mu
module function elastic_nu(ph,en) result(nu) pure module function elastic_nu(ph,en) result(nu)
real(pReal) :: nu real(pReal) :: nu
integer, intent(in) :: ph, en integer, intent(in) :: ph, en
end function elastic_nu end function elastic_nu

View File

@ -30,7 +30,7 @@ module subroutine elastic_init(phases)
phase, & phase, &
mech, & mech, &
elastic elastic
logical :: thermal_active
print'(/,1x,a)', '<<<+- phase:mechanical:elastic init -+>>>' print'(/,1x,a)', '<<<+- phase:mechanical:elastic init -+>>>'
print'(/,1x,a)', '<<<+- phase:mechanical:elastic:Hooke init -+>>>' print'(/,1x,a)', '<<<+- phase:mechanical:elastic:Hooke init -+>>>'
@ -86,7 +86,7 @@ end subroutine elastic_init
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
!> @brief return 6x6 elasticity tensor !> @brief return 6x6 elasticity tensor
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
module function elastic_C66(ph,en) result(C66) pure module function elastic_C66(ph,en) result(C66)
integer, intent(in) :: & integer, intent(in) :: &
ph, & ph, &
@ -140,7 +140,7 @@ end function elastic_C66
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
!> @brief return shear modulus !> @brief return shear modulus
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
module function elastic_mu(ph,en) result(mu) pure module function elastic_mu(ph,en) result(mu)
integer, intent(in) :: & integer, intent(in) :: &
ph, & ph, &
@ -157,7 +157,7 @@ end function elastic_mu
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
!> @brief return Poisson ratio !> @brief return Poisson ratio
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
module function elastic_nu(ph,en) result(nu) pure module function elastic_nu(ph,en) result(nu)
integer, intent(in) :: & integer, intent(in) :: &
ph, & ph, &

View File

@ -73,47 +73,37 @@ submodule(phase:mechanical) plastic
en en
end subroutine kinehardening_LpAndItsTangent end subroutine kinehardening_LpAndItsTangent
module subroutine dislotwin_LpAndItsTangent(Lp,dLp_dMp,Mp,T,ph,en) module subroutine dislotwin_LpAndItsTangent(Lp,dLp_dMp,Mp,ph,en)
real(pReal), dimension(3,3), intent(out) :: & real(pReal), dimension(3,3), intent(out) :: &
Lp Lp
real(pReal), dimension(3,3,3,3), intent(out) :: & real(pReal), dimension(3,3,3,3), intent(out) :: &
dLp_dMp dLp_dMp
real(pReal), dimension(3,3), intent(in) :: & real(pReal), dimension(3,3), intent(in) :: &
Mp Mp
real(pReal), intent(in) :: &
T
integer, intent(in) :: & integer, intent(in) :: &
ph, & ph, &
en en
end subroutine dislotwin_LpAndItsTangent end subroutine dislotwin_LpAndItsTangent
pure module subroutine dislotungsten_LpAndItsTangent(Lp,dLp_dMp,Mp,T,ph,en) pure module subroutine dislotungsten_LpAndItsTangent(Lp,dLp_dMp,Mp,ph,en)
real(pReal), dimension(3,3), intent(out) :: & real(pReal), dimension(3,3), intent(out) :: &
Lp Lp
real(pReal), dimension(3,3,3,3), intent(out) :: & real(pReal), dimension(3,3,3,3), intent(out) :: &
dLp_dMp dLp_dMp
real(pReal), dimension(3,3), intent(in) :: & real(pReal), dimension(3,3), intent(in) :: &
Mp Mp
real(pReal), intent(in) :: &
T
integer, intent(in) :: & integer, intent(in) :: &
ph, & ph, &
en en
end subroutine dislotungsten_LpAndItsTangent end subroutine dislotungsten_LpAndItsTangent
module subroutine nonlocal_LpAndItsTangent(Lp,dLp_dMp, & module subroutine nonlocal_LpAndItsTangent(Lp,dLp_dMp,Mp,ph,en)
Mp,Temperature,ph,en)
real(pReal), dimension(3,3), intent(out) :: & real(pReal), dimension(3,3), intent(out) :: &
Lp Lp
real(pReal), dimension(3,3,3,3), intent(out) :: & real(pReal), dimension(3,3,3,3), intent(out) :: &
dLp_dMp dLp_dMp
real(pReal), dimension(3,3), intent(in) :: & real(pReal), dimension(3,3), intent(in) :: &
Mp !< Mandel stress Mp !< Mandel stress
real(pReal), intent(in) :: &
Temperature
integer, intent(in) :: & integer, intent(in) :: &
ph, & ph, &
en en
@ -282,13 +272,13 @@ module subroutine plastic_LpAndItsTangents(Lp, dLp_dS, dLp_dFi, &
call kinehardening_LpAndItsTangent(Lp,dLp_dMp,Mp,ph,en) call kinehardening_LpAndItsTangent(Lp,dLp_dMp,Mp,ph,en)
case (PLASTIC_NONLOCAL_ID) plasticType case (PLASTIC_NONLOCAL_ID) plasticType
call nonlocal_LpAndItsTangent(Lp,dLp_dMp,Mp, thermal_T(ph,en),ph,en) call nonlocal_LpAndItsTangent(Lp,dLp_dMp,Mp,ph,en)
case (PLASTIC_DISLOTWIN_ID) plasticType case (PLASTIC_DISLOTWIN_ID) plasticType
call dislotwin_LpAndItsTangent(Lp,dLp_dMp,Mp, thermal_T(ph,en),ph,en) call dislotwin_LpAndItsTangent(Lp,dLp_dMp,Mp,ph,en)
case (PLASTIC_DISLOTUNGSTEN_ID) plasticType case (PLASTIC_DISLOTUNGSTEN_ID) plasticType
call dislotungsten_LpAndItsTangent(Lp,dLp_dMp,Mp, thermal_T(ph,en),ph,en) call dislotungsten_LpAndItsTangent(Lp,dLp_dMp,Mp,ph,en)
end select plasticType end select plasticType

View File

@ -257,27 +257,27 @@ end function plastic_dislotungsten_init
!> @brief Calculate plastic velocity gradient and its tangent. !> @brief Calculate plastic velocity gradient and its tangent.
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
pure module subroutine dislotungsten_LpAndItsTangent(Lp,dLp_dMp, & pure module subroutine dislotungsten_LpAndItsTangent(Lp,dLp_dMp, &
Mp,T,ph,en) Mp,ph,en)
real(pReal), dimension(3,3), intent(out) :: & real(pReal), dimension(3,3), intent(out) :: &
Lp !< plastic velocity gradient Lp !< plastic velocity gradient
real(pReal), dimension(3,3,3,3), intent(out) :: & real(pReal), dimension(3,3,3,3), intent(out) :: &
dLp_dMp !< derivative of Lp with respect to the Mandel stress dLp_dMp !< derivative of Lp with respect to the Mandel stress
real(pReal), dimension(3,3), intent(in) :: & real(pReal), dimension(3,3), intent(in) :: &
Mp !< Mandel stress Mp !< Mandel stress
real(pReal), intent(in) :: &
T !< temperature
integer, intent(in) :: & integer, intent(in) :: &
ph, & ph, &
en en
integer :: & integer :: &
i,k,l,m,n i,k,l,m,n
real(pReal) :: &
T !< temperature
real(pReal), dimension(param(ph)%sum_N_sl) :: & real(pReal), dimension(param(ph)%sum_N_sl) :: &
dot_gamma_pos,dot_gamma_neg, & dot_gamma_pos,dot_gamma_neg, &
ddot_gamma_dtau_pos,ddot_gamma_dtau_neg ddot_gamma_dtau_pos,ddot_gamma_dtau_neg
T = thermal_T(ph,en)
Lp = 0.0_pReal Lp = 0.0_pReal
dLp_dMp = 0.0_pReal dLp_dMp = 0.0_pReal

View File

@ -476,18 +476,18 @@ module function plastic_dislotwin_homogenizedC(ph,en) result(homogenizedC)
C66_tw, & C66_tw, &
C66_tr C66_tr
integer :: i integer :: i
real(pReal) :: f_unrotated real(pReal) :: f_matrix
C = elastic_C66(ph,en) C = elastic_C66(ph,en)
associate(prm => param(ph), stt => state(ph)) associate(prm => param(ph), stt => state(ph))
f_unrotated = 1.0_pReal & f_matrix = 1.0_pReal &
- sum(stt%f_tw(1:prm%sum_N_tw,en)) & - sum(stt%f_tw(1:prm%sum_N_tw,en)) &
- sum(stt%f_tr(1:prm%sum_N_tr,en)) - sum(stt%f_tr(1:prm%sum_N_tr,en))
homogenizedC = f_unrotated * C homogenizedC = f_matrix * C
twinActive: if (prm%sum_N_tw > 0) then twinActive: if (prm%sum_N_tw > 0) then
C66_tw = lattice_C66_twin(prm%N_tw,C,phase_lattice(ph),phase_cOverA(ph)) C66_tw = lattice_C66_twin(prm%N_tw,C,phase_lattice(ph),phase_cOverA(ph))
@ -513,20 +513,20 @@ end function plastic_dislotwin_homogenizedC
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
!> @brief Calculate plastic velocity gradient and its tangent. !> @brief Calculate plastic velocity gradient and its tangent.
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
module subroutine dislotwin_LpAndItsTangent(Lp,dLp_dMp,Mp,T,ph,en) module subroutine dislotwin_LpAndItsTangent(Lp,dLp_dMp,Mp,ph,en)
real(pReal), dimension(3,3), intent(out) :: Lp real(pReal), dimension(3,3), intent(out) :: Lp
real(pReal), dimension(3,3,3,3), intent(out) :: dLp_dMp real(pReal), dimension(3,3,3,3), intent(out) :: dLp_dMp
real(pReal), dimension(3,3), intent(in) :: Mp real(pReal), dimension(3,3), intent(in) :: Mp
integer, intent(in) :: ph,en integer, intent(in) :: ph,en
real(pReal), intent(in) :: T
integer :: i,k,l,m,n integer :: i,k,l,m,n
real(pReal) :: & real(pReal) :: &
f_unrotated,StressRatio_p,& f_matrix,StressRatio_p,&
E_kB_T, & E_kB_T, &
ddot_gamma_dtau, & ddot_gamma_dtau, &
tau tau, &
T
real(pReal), dimension(param(ph)%sum_N_sl) :: & real(pReal), dimension(param(ph)%sum_N_sl) :: &
dot_gamma_sl,ddot_gamma_dtau_sl dot_gamma_sl,ddot_gamma_dtau_sl
real(pReal), dimension(param(ph)%sum_N_tw) :: & real(pReal), dimension(param(ph)%sum_N_tw) :: &
@ -556,69 +556,71 @@ module subroutine dislotwin_LpAndItsTangent(Lp,dLp_dMp,Mp,T,ph,en)
0, 1, 1 & 0, 1, 1 &
],pReal),[ 3,6]) ],pReal),[ 3,6])
associate(prm => param(ph), stt => state(ph))
f_unrotated = 1.0_pReal &
- sum(stt%f_tw(1:prm%sum_N_tw,en)) &
- sum(stt%f_tr(1:prm%sum_N_tr,en))
T = thermal_T(ph,en)
Lp = 0.0_pReal Lp = 0.0_pReal
dLp_dMp = 0.0_pReal dLp_dMp = 0.0_pReal
call kinetics_sl(Mp,T,ph,en,dot_gamma_sl,ddot_gamma_dtau_sl) associate(prm => param(ph), stt => state(ph))
slipContribution: do i = 1, prm%sum_N_sl
Lp = Lp + dot_gamma_sl(i)*prm%P_sl(1:3,1:3,i)
forall (k=1:3,l=1:3,m=1:3,n=1:3) &
dLp_dMp(k,l,m,n) = dLp_dMp(k,l,m,n) &
+ ddot_gamma_dtau_sl(i) * prm%P_sl(k,l,i) * prm%P_sl(m,n,i)
end do slipContribution
call kinetics_tw(Mp,T,dot_gamma_sl,ph,en,dot_gamma_tw,ddot_gamma_dtau_tw) f_matrix = 1.0_pReal &
twinContibution: do i = 1, prm%sum_N_tw - sum(stt%f_tw(1:prm%sum_N_tw,en)) &
Lp = Lp + dot_gamma_tw(i)*prm%P_tw(1:3,1:3,i) - sum(stt%f_tr(1:prm%sum_N_tr,en))
forall (k=1:3,l=1:3,m=1:3,n=1:3) &
dLp_dMp(k,l,m,n) = dLp_dMp(k,l,m,n) &
+ ddot_gamma_dtau_tw(i)* prm%P_tw(k,l,i)*prm%P_tw(m,n,i)
end do twinContibution
call kinetics_tr(Mp,T,dot_gamma_sl,ph,en,dot_gamma_tr,ddot_gamma_dtau_tr) call kinetics_sl(Mp,T,ph,en,dot_gamma_sl,ddot_gamma_dtau_sl)
transContibution: do i = 1, prm%sum_N_tr slipContribution: do i = 1, prm%sum_N_sl
Lp = Lp + dot_gamma_tr(i)*prm%P_tr(1:3,1:3,i) Lp = Lp + dot_gamma_sl(i)*prm%P_sl(1:3,1:3,i)
forall (k=1:3,l=1:3,m=1:3,n=1:3) & forall (k=1:3,l=1:3,m=1:3,n=1:3) &
dLp_dMp(k,l,m,n) = dLp_dMp(k,l,m,n) & dLp_dMp(k,l,m,n) = dLp_dMp(k,l,m,n) &
+ ddot_gamma_dtau_tr(i)* prm%P_tr(k,l,i)*prm%P_tr(m,n,i) + ddot_gamma_dtau_sl(i) * prm%P_sl(k,l,i) * prm%P_sl(m,n,i)
end do transContibution end do slipContribution
Lp = Lp * f_unrotated call kinetics_tw(Mp,T,dot_gamma_sl,ph,en,dot_gamma_tw,ddot_gamma_dtau_tw)
dLp_dMp = dLp_dMp * f_unrotated twinContibution: do i = 1, prm%sum_N_tw
Lp = Lp + dot_gamma_tw(i)*prm%P_tw(1:3,1:3,i)
forall (k=1:3,l=1:3,m=1:3,n=1:3) &
dLp_dMp(k,l,m,n) = dLp_dMp(k,l,m,n) &
+ ddot_gamma_dtau_tw(i)* prm%P_tw(k,l,i)*prm%P_tw(m,n,i)
end do twinContibution
shearBandingContribution: if (dNeq0(prm%v_sb)) then call kinetics_tr(Mp,T,dot_gamma_sl,ph,en,dot_gamma_tr,ddot_gamma_dtau_tr)
transContibution: do i = 1, prm%sum_N_tr
Lp = Lp + dot_gamma_tr(i)*prm%P_tr(1:3,1:3,i)
forall (k=1:3,l=1:3,m=1:3,n=1:3) &
dLp_dMp(k,l,m,n) = dLp_dMp(k,l,m,n) &
+ ddot_gamma_dtau_tr(i)* prm%P_tr(k,l,i)*prm%P_tr(m,n,i)
end do transContibution
E_kB_T = prm%E_sb/(K_B*T) Lp = Lp * f_matrix
call math_eigh33(eigValues,eigVectors,Mp) ! is Mp symmetric by design? dLp_dMp = dLp_dMp * f_matrix
do i = 1,6 shearBandingContribution: if (dNeq0(prm%v_sb)) then
P_sb = 0.5_pReal * math_outer(matmul(eigVectors,sb_sComposition(1:3,i)),&
matmul(eigVectors,sb_mComposition(1:3,i)))
tau = math_tensordot(Mp,P_sb)
significantShearBandStress: if (abs(tau) > tol_math_check) then E_kB_T = prm%E_sb/(K_B*T)
StressRatio_p = (abs(tau)/prm%xi_sb)**prm%p_sb call math_eigh33(eigValues,eigVectors,Mp) ! is Mp symmetric by design?
dot_gamma_sb = sign(prm%v_sb*exp(-E_kB_T*(1-StressRatio_p)**prm%q_sb), tau)
ddot_gamma_dtau = abs(dot_gamma_sb)*E_kB_T*prm%p_sb*prm%q_sb/prm%xi_sb &
* (abs(tau)/prm%xi_sb)**(prm%p_sb-1.0_pReal) &
* (1.0_pReal-StressRatio_p)**(prm%q_sb-1.0_pReal)
Lp = Lp + dot_gamma_sb * P_sb do i = 1,6
forall (k=1:3,l=1:3,m=1:3,n=1:3) & P_sb = 0.5_pReal * math_outer(matmul(eigVectors,sb_sComposition(1:3,i)),&
dLp_dMp(k,l,m,n) = dLp_dMp(k,l,m,n) & matmul(eigVectors,sb_mComposition(1:3,i)))
+ ddot_gamma_dtau * P_sb(k,l) * P_sb(m,n) tau = math_tensordot(Mp,P_sb)
end if significantShearBandStress
end do
end if shearBandingContribution significantShearBandStress: if (abs(tau) > tol_math_check) then
StressRatio_p = (abs(tau)/prm%xi_sb)**prm%p_sb
dot_gamma_sb = sign(prm%v_sb*exp(-E_kB_T*(1-StressRatio_p)**prm%q_sb), tau)
ddot_gamma_dtau = abs(dot_gamma_sb)*E_kB_T*prm%p_sb*prm%q_sb/prm%xi_sb &
* (abs(tau)/prm%xi_sb)**(prm%p_sb-1.0_pReal) &
* (1.0_pReal-StressRatio_p)**(prm%q_sb-1.0_pReal)
end associate Lp = Lp + dot_gamma_sb * P_sb
forall (k=1:3,l=1:3,m=1:3,n=1:3) &
dLp_dMp(k,l,m,n) = dLp_dMp(k,l,m,n) &
+ ddot_gamma_dtau * P_sb(k,l) * P_sb(m,n)
end if significantShearBandStress
end do
end if shearBandingContribution
end associate
end subroutine dislotwin_LpAndItsTangent end subroutine dislotwin_LpAndItsTangent
@ -638,7 +640,7 @@ module subroutine dislotwin_dotState(Mp,T,ph,en)
integer :: i integer :: i
real(pReal) :: & real(pReal) :: &
f_unrotated, & f_matrix, &
d_hat, & d_hat, &
v_cl, & !< climb velocity v_cl, & !< climb velocity
tau, & tau, &
@ -661,9 +663,9 @@ module subroutine dislotwin_dotState(Mp,T,ph,en)
mu = elastic_mu(ph,en) mu = elastic_mu(ph,en)
nu = elastic_nu(ph,en) nu = elastic_nu(ph,en)
f_unrotated = 1.0_pReal & f_matrix = 1.0_pReal &
- sum(stt%f_tw(1:prm%sum_N_tw,en)) & - sum(stt%f_tw(1:prm%sum_N_tw,en)) &
- sum(stt%f_tr(1:prm%sum_N_tr,en)) - sum(stt%f_tr(1:prm%sum_N_tr,en))
call kinetics_sl(Mp,T,ph,en,dot_gamma_sl) call kinetics_sl(Mp,T,ph,en,dot_gamma_sl)
dot%gamma_sl(:,en) = abs(dot_gamma_sl) dot%gamma_sl(:,en) = abs(dot_gamma_sl)
@ -709,10 +711,10 @@ module subroutine dislotwin_dotState(Mp,T,ph,en)
- dot_rho_dip_climb - dot_rho_dip_climb
call kinetics_tw(Mp,T,dot_gamma_sl,ph,en,dot_gamma_tw) call kinetics_tw(Mp,T,dot_gamma_sl,ph,en,dot_gamma_tw)
dot%f_tw(:,en) = f_unrotated*dot_gamma_tw/prm%gamma_char dot%f_tw(:,en) = f_matrix*dot_gamma_tw/prm%gamma_char
call kinetics_tr(Mp,T,dot_gamma_sl,ph,en,dot_gamma_tr) call kinetics_tr(Mp,T,dot_gamma_sl,ph,en,dot_gamma_tr)
dot%f_tr(:,en) = f_unrotated*dot_gamma_tr dot%f_tr(:,en) = f_matrix*dot_gamma_tr
end associate end associate

View File

@@ -86,6 +86,8 @@ module function plastic_kinehardening_init() result(myPlasticity)
  print'(/,1x,a)', '<<<+- phase:mechanical:plastic:kinehardening init -+>>>'
  print'(/,a,i0)', ' # phases: ',count(myPlasticity); flush(IO_STDOUT)

+  print'(/,1x,a)', 'J.A. Wollmershauser et al., International Journal of Fatigue 36:181–193, 2012'
+  print'( 1x,a)', 'https://doi.org/10.1016/j.ijfatigue.2011.07.008'

  phases => config_material%get('phase')
  allocate(param(phases%length))
View File
@@ -741,7 +741,7 @@ end subroutine nonlocal_dependentState
!> @brief calculates plastic velocity gradient and its tangent
!--------------------------------------------------------------------------------------------------
module subroutine nonlocal_LpAndItsTangent(Lp,dLp_dMp, &
-                                           Mp,Temperature,ph,en)
+                                           Mp,ph,en)
  real(pReal), dimension(3,3), intent(out) :: &
    Lp                                                                        !< plastic velocity gradient
  real(pReal), dimension(3,3,3,3), intent(out) :: &
@@ -749,9 +749,6 @@ module subroutine nonlocal_LpAndItsTangent(Lp,dLp_dMp, &
  integer, intent(in) :: &
    ph, &
    en
-  real(pReal), intent(in) :: &
-    Temperature                                                              !< temperature
  real(pReal), dimension(3,3), intent(in) :: &
    Mp                                                                        !< derivative of Lp with respect to Mp
@@ -771,67 +768,72 @@ module subroutine nonlocal_LpAndItsTangent(Lp,dLp_dMp, &
  real(pReal), dimension(param(ph)%sum_N_sl) :: &
    tau, &                                                                    !< resolved shear stress including backstress terms
    dot_gamma                                                                 !< shear rate
+  real(pReal) :: &
+    Temperature                                                              !< temperature


+  Temperature = thermal_T(ph,en)
+  Lp = 0.0_pReal
+  dLp_dMp = 0.0_pReal
+
  associate(prm => param(ph),dst=>dependentState(ph),stt=>state(ph))

  !*** shortcut to state variables
  rho = getRho(ph,en)
  rhoSgl = rho(:,sgl)

  do s = 1,prm%sum_N_sl
    tau(s) = math_tensordot(Mp, prm%P_sl(1:3,1:3,s))
    tauNS(s,1) = tau(s)
    tauNS(s,2) = tau(s)
    if (tau(s) > 0.0_pReal) then
      tauNS(s,3) = math_tensordot(Mp, +prm%P_nS_pos(1:3,1:3,s))
      tauNS(s,4) = math_tensordot(Mp, -prm%P_nS_neg(1:3,1:3,s))
    else
      tauNS(s,3) = math_tensordot(Mp, +prm%P_nS_neg(1:3,1:3,s))
      tauNS(s,4) = math_tensordot(Mp, -prm%P_nS_pos(1:3,1:3,s))
    end if
  end do
  tauNS = tauNS + spread(dst%tau_back(:,en),2,4)
  tau = tau + dst%tau_back(:,en)

  ! edges
  call kinetics(v(:,1), dv_dtau(:,1), dv_dtauNS(:,1), &
                tau, tauNS(:,1), dst%tau_pass(:,en),1,Temperature, ph)
  v(:,2)         = v(:,1)
  dv_dtau(:,2)   = dv_dtau(:,1)
  dv_dtauNS(:,2) = dv_dtauNS(:,1)

  !screws
  if (prm%nonSchmidActive) then
    do t = 3,4
      call kinetics(v(:,t), dv_dtau(:,t), dv_dtauNS(:,t), &
                    tau, tauNS(:,t), dst%tau_pass(:,en),2,Temperature, ph)
    end do
  else
    v(:,3:4)         = spread(v(:,1),2,2)
    dv_dtau(:,3:4)   = spread(dv_dtau(:,1),2,2)
    dv_dtauNS(:,3:4) = spread(dv_dtauNS(:,1),2,2)
  end if

  stt%v(:,en) = pack(v,.true.)

  !*** Bauschinger effect
  forall (s = 1:prm%sum_N_sl, t = 5:8, rhoSgl(s,t) * v(s,t-4) < 0.0_pReal) &
    rhoSgl(s,t-4) = rhoSgl(s,t-4) + abs(rhoSgl(s,t))

  dot_gamma = sum(rhoSgl(:,1:4) * v, 2) * prm%b_sl

-  Lp = 0.0_pReal
-  dLp_dMp = 0.0_pReal
  do s = 1,prm%sum_N_sl
    Lp = Lp + dot_gamma(s) * prm%P_sl(1:3,1:3,s)
    forall (i=1:3,j=1:3,k=1:3,l=1:3) &
      dLp_dMp(i,j,k,l) = dLp_dMp(i,j,k,l) &
                       + prm%P_sl(i,j,s) * prm%P_sl(k,l,s) &
                       * sum(rhoSgl(s,1:4) * dv_dtau(s,1:4)) * prm%b_sl(s) &
                       + prm%P_sl(i,j,s) &
                       * (+ prm%P_nS_pos(k,l,s) * rhoSgl(s,3) * dv_dtauNS(s,3) &
                          - prm%P_nS_neg(k,l,s) * rhoSgl(s,4) * dv_dtauNS(s,4)) * prm%b_sl(s)
  end do

  end associate
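With this change the subroutine obtains the temperature itself via thermal_T(ph,en) instead of taking it as a dummy argument, and zeroes Lp and dLp_dMp at the top. The final loop assembles Lp from the Orowan equation, dot_gamma = rho * b * v, summed over the four single-dislocation populations. A condensed NumPy sketch of the Schmid part of that assembly (non-Schmid terms omitted; names and shapes are assumptions for illustration):

    import numpy as np

    def nonlocal_Lp(P_sl, rho_sgl, v, dv_dtau, b_sl):
        """Plastic velocity gradient and its tangent from mobile densities and velocities.

        P_sl    : (n_sl, 3, 3) Schmid matrices
        rho_sgl : (n_sl, 4)    single dislocation densities (edge+/-, screw+/-)
        v       : (n_sl, 4)    dislocation velocities
        dv_dtau : (n_sl, 4)    d v / d tau
        b_sl    : (n_sl,)      Burgers vector lengths
        """
        dot_gamma = np.sum(rho_sgl * v, axis=1) * b_sl          # Orowan equation per slip system
        Lp = np.einsum('s,sij->ij', dot_gamma, P_sl)
        dgamma_dtau = np.sum(rho_sgl * dv_dtau, axis=1) * b_sl  # d dot_gamma / d tau
        dLp_dMp = np.einsum('s,sij,skl->ijkl', dgamma_dtau, P_sl, P_sl)
        return Lp, dLp_dMp

    rng = np.random.default_rng(0)
    n = 12
    Lp, dLp_dMp = nonlocal_Lp(P_sl=rng.normal(size=(n, 3, 3)),
                              rho_sgl=abs(rng.normal(size=(n, 4))) * 1e12,
                              v=rng.normal(size=(n, 4)) * 1e-6,
                              dv_dtau=abs(rng.normal(size=(n, 4))) * 1e-12,
                              b_sl=np.full(n, 2.5e-10))
    print(Lp.shape, dLp_dMp.shape)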
@@ -870,7 +872,8 @@ module subroutine plastic_nonlocal_deltaState(Mp,ph,en)
    dUpper, &                                                                 ! current maximum stable dipole distance for edges and screws
    dUpperOld, &                                                              ! old maximum stable dipole distance for edges and screws
    deltaDUpper                                                               ! change in maximum stable dipole distance for edges and screws

  associate(prm => param(ph),dst => dependentState(ph),del => deltaState(ph))

  mu = elastic_mu(ph,en)
@@ -1394,78 +1397,79 @@ module subroutine plastic_nonlocal_updateCompatibility(orientation,ph,i,e)
    belowThreshold
  type(rotation) :: mis

  associate(prm => param(ph))
  ns = prm%sum_N_sl

  en = material_phaseMemberAt(1,i,e)
  !*** start out fully compatible
  my_compatibility = 0.0_pReal
  forall(s1 = 1:ns) my_compatibility(:,s1,s1,:) = 1.0_pReal

  neighbors: do n = 1,nIPneighbors
    neighbor_e = IPneighborhood(1,n,i,e)
    neighbor_i = IPneighborhood(2,n,i,e)
    neighbor_me = material_phaseMemberAt(1,neighbor_i,neighbor_e)
    neighbor_phase = material_phaseAt(1,neighbor_e)

    if (neighbor_e <= 0 .or. neighbor_i <= 0) then
      !* FREE SURFACE
      forall(s1 = 1:ns) my_compatibility(:,s1,s1,n) = sqrt(prm%chi_surface)
    elseif (neighbor_phase /= ph) then
      !* PHASE BOUNDARY
      if (plasticState(neighbor_phase)%nonlocal .and. plasticState(ph)%nonlocal) &
        forall(s1 = 1:ns) my_compatibility(:,s1,s1,n) = 0.0_pReal
    elseif (prm%chi_GB >= 0.0_pReal) then
      !* GRAIN BOUNDARY
      if (any(dNeq(phase_O_0(ph)%data(en)%asQuaternion(), &
                   phase_O_0(neighbor_phase)%data(neighbor_me)%asQuaternion())) .and. &
          plasticState(neighbor_phase)%nonlocal) &
        forall(s1 = 1:ns) my_compatibility(:,s1,s1,n) = sqrt(prm%chi_GB)
    else
      !* GRAIN BOUNDARY ?
      !* Compatibility defined by relative orientation of slip systems:
      !* The my_compatibility value is defined as the product of the slip normal projection and the slip direction projection.
      !* Its sign is always positive for screws, for edges it has the same sign as the slip normal projection.
      !* Since the sum for each slip system can easily exceed one (which would result in a transmissivity larger than one),
      !* only values above or equal to a certain threshold value are considered. This threshold value is chosen, such that
      !* the number of compatible slip systems is minimized with the sum of the original compatibility values exceeding one.
      !* Finally the smallest compatibility value is decreased until the sum is exactly equal to one.
      !* All values below the threshold are set to zero.
      mis = orientation(ph)%data(en)%misorientation(orientation(neighbor_phase)%data(neighbor_me))
      mySlipSystems: do s1 = 1,ns
        neighborSlipSystems: do s2 = 1,ns
          my_compatibility(1,s2,s1,n) = math_inner(prm%slip_normal(1:3,s1), &
                                                   mis%rotate(prm%slip_normal(1:3,s2))) &
                                      * abs(math_inner(prm%slip_direction(1:3,s1), &
                                                       mis%rotate(prm%slip_direction(1:3,s2))))
          my_compatibility(2,s2,s1,n) = abs(math_inner(prm%slip_normal(1:3,s1), &
                                                       mis%rotate(prm%slip_normal(1:3,s2)))) &
                                      * abs(math_inner(prm%slip_direction(1:3,s1), &
                                                       mis%rotate(prm%slip_direction(1:3,s2))))
        end do neighborSlipSystems

        my_compatibilitySum = 0.0_pReal
        belowThreshold = .true.
        do while (my_compatibilitySum < 1.0_pReal .and. any(belowThreshold))
          thresholdValue = maxval(my_compatibility(2,:,s1,n), belowThreshold)              ! screws always positive
          nThresholdValues = real(count(my_compatibility(2,:,s1,n) >= thresholdValue),pReal)
          where (my_compatibility(2,:,s1,n) >= thresholdValue) belowThreshold = .false.
          if (my_compatibilitySum + thresholdValue * nThresholdValues > 1.0_pReal) &
            where (abs(my_compatibility(:,:,s1,n)) >= thresholdValue) &
              my_compatibility(:,:,s1,n) = sign((1.0_pReal - my_compatibilitySum)/nThresholdValues,&
                                                my_compatibility(:,:,s1,n))
          my_compatibilitySum = my_compatibilitySum + nThresholdValues * thresholdValue
        end do

        where(belowThreshold) my_compatibility(1,:,s1,n) = 0.0_pReal
        where(belowThreshold) my_compatibility(2,:,s1,n) = 0.0_pReal

      end do mySlipSystems
    end if

  end do neighbors

  compatibility(:,:,:,:,i,e) = my_compatibility

  end associate
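The comment block above describes the transmissivity normalisation: slip-system compatibilities are accepted from largest to smallest, the marginal value is reduced so that the accepted values sum to exactly one, and everything below the final threshold is zeroed. A simplified NumPy sketch of that idea (it follows the description rather than the exact group-wise Fortran loop, and handles ties per element):

    import numpy as np

    def normalize_compatibility(chi):
        """Keep the largest compatibility values until their sum reaches one.

        chi : 1-D array of non-negative compatibility values for one slip system.
        Returns an array whose kept entries sum to at most exactly one; the rest are zero.
        """
        chi = np.asarray(chi, dtype=float)
        out = np.zeros_like(chi)
        total = 0.0
        for idx in np.argsort(chi)[::-1]:              # largest first
            if total >= 1.0:
                break                                  # remaining systems stay incompatible
            out[idx] = min(chi[idx], 1.0 - total)      # clip the marginal value so the sum is exactly one
            total += out[idx]
        return out

    print(normalize_compatibility([0.9, 0.6, 0.3, 0.1]))   # -> [0.9, 0.1, 0., 0.]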
@@ -1592,21 +1596,15 @@ subroutine stateInit(ini,phase,Nentries)
        stt%rhoSglMobile(s,e) = densityBinning
      end do
    else                                                                      ! homogeneous distribution with noise
-      do e = 1, Nentries
-        do f = 1,size(ini%N_sl,1)
-          from = 1 + sum(ini%N_sl(1:f-1))
-          upto = sum(ini%N_sl(1:f))
-          do s = from,upto
-            noise = [math_sampleGaussVar(0.0_pReal, ini%sigma_rho_u), &
-                     math_sampleGaussVar(0.0_pReal, ini%sigma_rho_u)]
-            stt%rho_sgl_mob_edg_pos(s,e) = ini%rho_u_ed_pos_0(f) + noise(1)
-            stt%rho_sgl_mob_edg_neg(s,e) = ini%rho_u_ed_neg_0(f) + noise(1)
-            stt%rho_sgl_mob_scr_pos(s,e) = ini%rho_u_sc_pos_0(f) + noise(2)
-            stt%rho_sgl_mob_scr_neg(s,e) = ini%rho_u_sc_neg_0(f) + noise(2)
-          end do
-          stt%rho_dip_edg(from:upto,e) = ini%rho_d_ed_0(f)
-          stt%rho_dip_scr(from:upto,e) = ini%rho_d_sc_0(f)
-        end do
+      do f = 1,size(ini%N_sl,1)
+        from = 1 + sum(ini%N_sl(1:f-1))
+        upto = sum(ini%N_sl(1:f))
+        call math_normal(stt%rho_sgl_mob_edg_pos(from:upto,:),ini%rho_u_ed_pos_0(f),ini%sigma_rho_u)
+        call math_normal(stt%rho_sgl_mob_edg_neg(from:upto,:),ini%rho_u_ed_neg_0(f),ini%sigma_rho_u)
+        call math_normal(stt%rho_sgl_mob_scr_pos(from:upto,:),ini%rho_u_sc_pos_0(f),ini%sigma_rho_u)
+        call math_normal(stt%rho_sgl_mob_scr_neg(from:upto,:),ini%rho_u_sc_neg_0(f),ini%sigma_rho_u)
+        stt%rho_dip_edg(from:upto,:) = ini%rho_d_ed_0(f)
+        stt%rho_dip_scr(from:upto,:) = ini%rho_d_sc_0(f)
      end do
    end if
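The rewritten initialisation replaces the per-entry calls to math_sampleGaussVar inside a triple loop by one array-valued Gaussian draw per slip family (math_normal filling a whole section). In NumPy terms the change amounts to something like the following (shapes, names, and numbers are illustrative):

    import numpy as np

    rng = np.random.default_rng()

    def init_mobile_densities(N_sl, rho_0, sigma, n_entries):
        """One Gaussian draw per slip family, filling all constituents of that family at once.

        N_sl  : slip-system counts per family, e.g. [12, 12]
        rho_0 : mean initial density per family
        sigma : standard deviation of the density noise
        """
        rho = np.empty((sum(N_sl), n_entries))
        start = 0
        for f, n in enumerate(N_sl):
            rho[start:start+n, :] = rng.normal(rho_0[f], sigma, size=(n, n_entries))
            start += n
        return rho

    rho = init_mobile_densities(N_sl=[12, 12], rho_0=[1.0e12, 5.0e11], sigma=1.0e10, n_entries=4)
    print(rho.shape, rho.mean())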
@@ -1652,53 +1650,55 @@ pure subroutine kinetics(v, dv_dtau, dv_dtauNS, tau, tauNS, tauThreshold, c, T,
    criticalStress_P, &                                                       !< maximum obstacle strength
    criticalStress_S                                                          !< maximum obstacle strength

-  associate(prm => param(ph))
-
  v = 0.0_pReal
  dv_dtau = 0.0_pReal
  dv_dtauNS = 0.0_pReal

+  associate(prm => param(ph))
+
  do s = 1,prm%sum_N_sl
    if (abs(tau(s)) > tauThreshold(s)) then

      !* Peierls contribution
      tauEff = max(0.0_pReal, abs(tauNS(s)) - tauThreshold(s))
      lambda_P = prm%b_sl(s)
      activationVolume_P = prm%w *prm%b_sl(s)**3
      criticalStress_P = prm%peierlsStress(s,c)
      activationEnergy_P = criticalStress_P * activationVolume_P
      tauRel_P = min(1.0_pReal, tauEff / criticalStress_P)
      tPeierls = 1.0_pReal / prm%nu_a &
               * exp(activationEnergy_P / (K_B * T) &
                     * (1.0_pReal - tauRel_P**prm%p)**prm%q)
      dtPeierls_dtau = merge(tPeierls * prm%p * prm%q * activationVolume_P / (K_B * T) &
                           * (1.0_pReal - tauRel_P**prm%p)**(prm%q-1.0_pReal) * tauRel_P**(prm%p-1.0_pReal), &
                             0.0_pReal, &
                             tauEff < criticalStress_P)

      ! Contribution from solid solution strengthening
      tauEff = abs(tau(s)) - tauThreshold(s)
      lambda_S = prm%b_sl(s) / sqrt(prm%c_sol)
      activationVolume_S = prm%f_sol * prm%b_sl(s)**3 / sqrt(prm%c_sol)
      criticalStress_S = prm%Q_sol / activationVolume_S
      tauRel_S = min(1.0_pReal, tauEff / criticalStress_S)
      tSolidSolution = 1.0_pReal / prm%nu_a &
                     * exp(prm%Q_sol / (K_B * T)* (1.0_pReal - tauRel_S**prm%p)**prm%q)
      dtSolidSolution_dtau = merge(tSolidSolution * prm%p * prm%q * activationVolume_S / (K_B * T) &
                                 * (1.0_pReal - tauRel_S**prm%p)**(prm%q-1.0_pReal)* tauRel_S**(prm%p-1.0_pReal), &
                                   0.0_pReal, &
                                   tauEff < criticalStress_S)

      !* viscous glide velocity
      tauEff = abs(tau(s)) - tauThreshold(s)

      v(s) = sign(1.0_pReal,tau(s)) &
           / (tPeierls / lambda_P + tSolidSolution / lambda_S + prm%B /(prm%b_sl(s) * tauEff))
      dv_dtau(s) = v(s)**2 * (dtSolidSolution_dtau / lambda_S + prm%B / (prm%b_sl(s) * tauEff**2))
      dv_dtauNS(s) = v(s)**2 * dtPeierls_dtau / lambda_P

    end if
  end do

  end associate
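kinetics combines two thermally activated waiting times (Peierls barrier and solid-solution obstacles) with viscous drag; the glide velocity is the distance per obstacle divided by the total time spent there. A scalar NumPy sketch of that law as coded above (names and sample numbers are illustrative; the non-Schmid stress split and the derivative bookkeeping are omitted):

    import numpy as np

    K_B = 1.380649e-23  # Boltzmann constant [J/K]

    def glide_velocity(tau, tau_threshold, T, b, nu_a, w, tau_peierls,
                       c_sol, f_sol, Q_sol, p, q, B):
        """v = sign(tau) / (t_Peierls/lambda_P + t_solidSolution/lambda_S + B/(b*tau_eff))"""
        tau_eff = abs(tau) - tau_threshold
        if tau_eff <= 0.0:
            return 0.0
        # Peierls (kink-pair) contribution
        lambda_P = b
        V_P = w * b**3
        tau_rel_P = min(1.0, tau_eff / tau_peierls)
        t_P = 1.0 / nu_a * np.exp(tau_peierls * V_P / (K_B * T) * (1.0 - tau_rel_P**p)**q)
        # solid-solution contribution
        lambda_S = b / np.sqrt(c_sol)
        V_S = f_sol * b**3 / np.sqrt(c_sol)
        tau_rel_S = min(1.0, tau_eff / (Q_sol / V_S))
        t_S = 1.0 / nu_a * np.exp(Q_sol / (K_B * T) * (1.0 - tau_rel_S**p)**q)
        # combine the waiting times with viscous drag
        return np.sign(tau) / (t_P / lambda_P + t_S / lambda_S + B / (b * tau_eff))

    print(glide_velocity(tau=60e6, tau_threshold=20e6, T=300.0, b=2.56e-10, nu_a=5e13,
                         w=10.0, tau_peierls=1e8, c_sol=1e-4, f_sol=0.1,
                         Q_sol=1.6e-19, p=1.0, q=1.0, B=1e-2))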
View File
@@ -271,7 +271,7 @@ end subroutine thermal_forward
!----------------------------------------------------------------------------------------------
!< @brief Get temperature (for use by non-thermal physics)
!----------------------------------------------------------------------------------------------
-module function thermal_T(ph,en) result(T)
+pure module function thermal_T(ph,en) result(T)

  integer, intent(in) :: ph, en
  real(pReal) :: T
View File
@@ -372,7 +372,7 @@ end function rotTensor4
!---------------------------------------------------------------------------------------------------
-!> @brief Rotate a rank-4 tensor in Voigt 6x6 notation passively (default) or actively.
+!> @brief Rotate a rank-4 stiffness tensor in Voigt 6x6 notation passively (default) or actively.
!> @details: https://scicomp.stackexchange.com/questions/35600
!! ToDo: Need to check active/passive !!!
!---------------------------------------------------------------------------------------------------
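rotStiffness applies the rotation directly to the 6x6 Voigt representation via the matrix M assembled below, which is equivalent to the full rank-4 rotation C'_ijkl = R_ia R_jb R_kc R_ld C_abcd. A NumPy sketch of that equivalence using standard Voigt bookkeeping (illustrative, not the DAMASK implementation):

    import numpy as np

    VOIGT = [(0, 0), (1, 1), (2, 2), (1, 2), (0, 2), (0, 1)]

    def voigt_to_full(C66):
        """Expand a 6x6 Voigt stiffness matrix to the full 3x3x3x3 tensor."""
        C = np.empty((3, 3, 3, 3))
        for a, (i, j) in enumerate(VOIGT):
            for b, (k, l) in enumerate(VOIGT):
                C[i, j, k, l] = C[j, i, k, l] = C[i, j, l, k] = C[j, i, l, k] = C66[a, b]
        return C

    def full_to_voigt(C):
        return np.array([[C[i, j, k, l] for (k, l) in VOIGT] for (i, j) in VOIGT])

    def rotate_stiffness(C66, R):
        """Rotate via the full rank-4 tensor: C'_ijkl = R_ia R_jb R_kc R_ld C_abcd."""
        C = voigt_to_full(C66)
        return full_to_voigt(np.einsum('ia,jb,kc,ld,abcd->ijkl', R, R, R, R, C))

    # cubic elasticity (values only illustrative), rotated by 30 degrees about z
    C11, C12, C44 = 106.75e9, 60.41e9, 28.34e9
    C66 = np.diag([C11, C11, C11, C44, C44, C44])
    C66[0, 1] = C66[1, 0] = C66[0, 2] = C66[2, 0] = C66[1, 2] = C66[2, 1] = C12
    a = np.radians(30.0)
    R = np.array([[np.cos(a), -np.sin(a), 0.0], [np.sin(a), np.cos(a), 0.0], [0.0, 0.0, 1.0]])
    print(np.round(rotate_stiffness(C66, R) / 1e9, 2))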
@@ -393,11 +393,11 @@ pure function rotStiffness(self,C,active) result(cRot)
    R = self%asMatrix()
  endif

-  M = reshape([R(1,1)**2.0_pReal, R(2,1)**2.0_pReal, R(3,1)**2.0_pReal, &
+  M = reshape([R(1,1)**2, R(2,1)**2, R(3,1)**2, &
               R(2,1)*R(3,1),               R(1,1)*R(3,1),               R(1,1)*R(2,1), &
-              R(1,2)**2.0_pReal, R(2,2)**2.0_pReal, R(3,2)**2.0_pReal, &
+              R(1,2)**2, R(2,2)**2, R(3,2)**2, &
               R(2,2)*R(3,2),               R(1,2)*R(3,2),               R(1,2)*R(2,2), &
-              R(1,3)**2.0_pReal, R(2,3)**2.0_pReal, R(3,3)**2.0_pReal, &
+              R(1,3)**2, R(2,3)**2, R(3,3)**2, &
               R(2,3)*R(3,3),               R(1,3)*R(3,3),               R(1,3)*R(2,3), &
               2.0_pReal*R(1,2)*R(1,3),     2.0_pReal*R(2,2)*R(2,3),     2.0_pReal*R(3,2)*R(3,3), &
               R(2,2)*R(3,3)+R(2,3)*R(3,2), R(1,2)*R(3,3)+R(1,3)*R(3,2), R(1,2)*R(2,3)+R(1,3)*R(2,2), &