Merge branch 'development' into python-module

Philip Eisenlohr 2020-01-13 15:24:08 -05:00
commit edbee3a419
126 changed files with 2465 additions and 6866 deletions

4
.gitattributes vendored
@@ -3,8 +3,8 @@
# always use LF; even if the files are edited on Windows, they need to be compiled/used on Unix
* text eol=lf
# Denote all files that are truly binary and should not be modified.
# Denote all files that are binary and should not be modified.
*.png binary
*.jpg binary
*.cae binary
*.hdf5 binary
*.pdf binary

@@ -115,13 +115,6 @@ Pytest:
- release
###################################################################################################
OrientationRelationship:
stage: preprocessing
script: OrientationRelationship/test.py
except:
- master
- release
Pre_SeedGeneration:
stage: preprocessing
script: PreProcessing_SeedGeneration/test.py
@@ -398,7 +391,6 @@ Marc_compileIfort:
stage: compileMarc
script:
- module load $IntelMarc $HDF5Marc $MSC
- export DAMASK_HDF5=ON
- Marc_compileIfort/test.py
except:
- master
@@ -409,7 +401,6 @@ Hex_elastic:
stage: marc
script:
- module load $IntelMarc $HDF5Marc $MSC
- export DAMASK_HDF5=ON
- Hex_elastic/test.py
except:
- master
@@ -419,7 +410,6 @@ CubicFCC_elastic:
stage: marc
script:
- module load $IntelMarc $HDF5Marc $MSC
- export DAMASK_HDF5=ON
- CubicFCC_elastic/test.py
except:
- master
@@ -429,7 +419,6 @@ CubicBCC_elastic:
stage: marc
script:
- module load $IntelMarc $HDF5Marc $MSC
- export DAMASK_HDF5=ON
- CubicBCC_elastic/test.py
except:
- master
@@ -439,7 +428,6 @@ J2_plasticBehavior:
stage: marc
script:
- module load $IntelMarc $HDF5Marc $MSC
- export DAMASK_HDF5=ON
- J2_plasticBehavior/test.py
except:
- master
@@ -506,18 +494,6 @@ GridSolver:
- master
- release
Processing:
stage: createDocumentation
script:
- cd $DAMASKROOT/processing/pre
- $DAMASKROOT/PRIVATE/documenting/scriptHelpToWiki.py --debug *.py
- cd $DAMASKROOT/processing/post
- rm vtk2ang.py DAD*.py
- $DAMASKROOT/PRIVATE/documenting/scriptHelpToWiki.py --debug *.py
except:
- master
- release
##################################################################################################
backupData:
stage: saveDocumentation
@@ -528,7 +504,6 @@ backupData:
- mv $TESTROOT/performance/time.png $BACKUP/${CI_PIPELINE_ID}_${CI_COMMIT_SHA}/
- mv $TESTROOT/performance/memory.png $BACKUP/${CI_PIPELINE_ID}_${CI_COMMIT_SHA}/
- mv $DAMASKROOT/PRIVATE/documenting/DAMASK_* $BACKUP/${CI_PIPELINE_ID}_${CI_COMMIT_SHA}/
- mv $DAMASKROOT/processing $BACKUP/${CI_PIPELINE_ID}_${CI_COMMIT_SHA}/
only:
- development

4
CONFIG
@@ -1,11 +1,7 @@
# "set"-syntax needed only for tcsh (but works with bash and zsh)
# DAMASK_ROOT will be expanded
set DAMASK_NUM_THREADS = 4
set MSC_ROOT = /opt/msc
set MARC_VERSION = 2019
set ABAQUS_VERSION = 2019
set DAMASK_HDF5 = ON

@@ -1 +1 @@
Subproject commit 524e86c117d816e3bd873eed7663e258a6f2e139
Subproject commit 036faecca39b46fd2328597ca858cbb04e37f79a

@@ -1 +1 @@
v2.0.3-1237-g5a2053cd
v2.0.3-1406-g5fc1abae

@@ -2,129 +2,129 @@
# GNU Compiler
###################################################################################################
if (OPENMP)
set (OPENMP_FLAGS "-fopenmp")
endif ()
if (OPTIMIZATION STREQUAL "OFF")
set (OPTIMIZATION_FLAGS "-O0" )
elseif (OPTIMIZATION STREQUAL "DEFENSIVE")
set (OPTIMIZATION_FLAGS "-O2")
elseif (OPTIMIZATION STREQUAL "AGGRESSIVE")
set (OPTIMIZATION_FLAGS "-O3 -ffast-math -funroll-loops -ftree-vectorize")
endif ()
if (OPTIMIZATION STREQUAL "OFF")
set (OPTIMIZATION_FLAGS "-O0" )
elseif (OPTIMIZATION STREQUAL "DEFENSIVE")
set (OPTIMIZATION_FLAGS "-O2")
elseif (OPTIMIZATION STREQUAL "AGGRESSIVE")
set (OPTIMIZATION_FLAGS "-O3 -ffast-math -funroll-loops -ftree-vectorize")
endif ()
set (STANDARD_CHECK "-std=f2008ts -pedantic-errors" )
set (LINKER_FLAGS "${LINKER_FLAGS} -Wl")
# options passed directly to the linker
set (LINKER_FLAGS "${LINKER_FLAGS},-undefined,dynamic_lookup" )
# ensure to link against dynamic libraries
#------------------------------------------------------------------------------------------------
# Fine tuning compilation options
set (COMPILE_FLAGS "${COMPILE_FLAGS} -xf95-cpp-input")
# preprocessor
set (COMPILE_FLAGS "${COMPILE_FLAGS} -xf95-cpp-input")
# preprocessor
set (COMPILE_FLAGS "${COMPILE_FLAGS} -ffree-line-length-132")
# restrict line length to the standard 132 characters (lattice.f90 require more characters)
set (COMPILE_FLAGS "${COMPILE_FLAGS} -ffree-line-length-132")
# restrict line length to the standard 132 characters (lattice.f90 require more characters)
set (COMPILE_FLAGS "${COMPILE_FLAGS} -fimplicit-none")
# assume "implicit none" even if not present in source
set (COMPILE_FLAGS "${COMPILE_FLAGS} -fimplicit-none")
# assume "implicit none" even if not present in source
set (COMPILE_FLAGS "${COMPILE_FLAGS} -Wall")
# sets the following Fortran options:
# -Waliasing: warn about possible aliasing of dummy arguments. Specifically, it warns if the same actual argument is associated with a dummy argument with "INTENT(IN)" and a dummy argument with "INTENT(OUT)" in a call with an explicit interface.
# -Wampersand: checks if a character expression is continued proberly by an ampersand at the end of the line and at the beginning of the new line
# -Warray-bounds: checks if array reference is out of bounds at compile time. use -fcheck-bounds to also check during runtime
# -Wconversion: warn about implicit conversions between different type
# -Wsurprising: warn when "suspicious" code constructs are encountered. While technically legal these usually indicate that an error has been made.
# -Wc-binding-type:
# -Wintrinsics-std: only standard intrisics are available, e.g. "call flush(6)" will cause an error
# -Wno-tabs: do not allow tabs in source
# -Wintrinsic-shadow: warn if a user-defined procedure or module procedure has the same name as an intrinsic
# -Wline-truncation:
# -Wtarget-lifetime:
# -Wreal-q-constant: warn about real-literal-constants with 'q' exponent-letter
# -Wunused: a number of unused-xxx warnings
# and sets the general (non-Fortran options) options:
# -Waddress
# -Warray-bounds (only with -O2)
# -Wc++11-compat
# -Wchar-subscripts
# -Wcomment
# -Wformat
# -Wmaybe-uninitialized
# -Wnonnull
# -Wparentheses
# -Wpointer-sign
# -Wreorder
# -Wreturn-type
# -Wsequence-point
# -Wstrict-aliasing
# -Wstrict-overflow=1
# -Wswitch
# -Wtrigraphs
# -Wuninitialized
# -Wunknown-pragmas
# -Wunused-function
# -Wunused-label
# -Wunused-value
# -Wunused-variable
# -Wvolatile-register-var
set (COMPILE_FLAGS "${COMPILE_FLAGS} -Wall")
# sets the following Fortran options:
# -Waliasing: warn about possible aliasing of dummy arguments. Specifically, it warns if the same actual argument is associated with a dummy argument with "INTENT(IN)" and a dummy argument with "INTENT(OUT)" in a call with an explicit interface.
# -Wampersand: checks if a character expression is continued proberly by an ampersand at the end of the line and at the beginning of the new line
# -Warray-bounds: checks if array reference is out of bounds at compile time. use -fcheck-bounds to also check during runtime
# -Wconversion: warn about implicit conversions between different type
# -Wsurprising: warn when "suspicious" code constructs are encountered. While technically legal these usually indicate that an error has been made.
# -Wc-binding-type:
# -Wintrinsics-std: only standard intrisics are available, e.g. "call flush(6)" will cause an error
# -Wno-tabs: do not allow tabs in source
# -Wintrinsic-shadow: warn if a user-defined procedure or module procedure has the same name as an intrinsic
# -Wline-truncation:
# -Wtarget-lifetime:
# -Wreal-q-constant: warn about real-literal-constants with 'q' exponent-letter
# -Wunused: a number of unused-xxx warnings
# and sets the general (non-Fortran options) options:
# -Waddress
# -Warray-bounds (only with -O2)
# -Wc++11-compat
# -Wchar-subscripts
# -Wcomment
# -Wformat
# -Wmaybe-uninitialized
# -Wnonnull
# -Wparentheses
# -Wpointer-sign
# -Wreorder
# -Wreturn-type
# -Wsequence-point
# -Wstrict-aliasing
# -Wstrict-overflow=1
# -Wswitch
# -Wtrigraphs
# -Wuninitialized
# -Wunknown-pragmas
# -Wunused-function
# -Wunused-label
# -Wunused-value
# -Wunused-variable
# -Wvolatile-register-var
set (COMPILE_FLAGS "${COMPILE_FLAGS} -Wextra")
# sets the following Fortran options:
# -Wunuses-parameter:
# -Wcompare-reals:
# and sets the general (non-Fortran options) options:
# -Wclobbered
# -Wempty-body
# -Wignored-qualifiers
# -Wmissing-field-initializers
# -Woverride-init
# -Wsign-compare
# -Wtype-limits
# -Wuninitialized
# -Wunused-but-set-parameter (only with -Wunused or -Wall)
# -Wno-globals
set (COMPILE_FLAGS "${COMPILE_FLAGS} -Wextra")
# sets the following Fortran options:
# -Wunuses-parameter:
# -Wcompare-reals:
# and sets the general (non-Fortran options) options:
# -Wclobbered
# -Wempty-body
# -Wignored-qualifiers
# -Wmissing-field-initializers
# -Woverride-init
# -Wsign-compare
# -Wtype-limits
# -Wuninitialized
# -Wunused-but-set-parameter (only with -Wunused or -Wall)
# -Wno-globals
set (COMPILE_FLAGS "${COMPILE_FLAGS} -Wcharacter-truncation")
# warn if character expressions (strings) are truncated
set (COMPILE_FLAGS "${COMPILE_FLAGS} -Wcharacter-truncation")
# warn if character expressions (strings) are truncated
set (COMPILE_FLAGS "${COMPILE_FLAGS} -Wunderflow")
# produce a warning when numerical constant expressions are encountered, which yield an UNDERFLOW during compilation
set (COMPILE_FLAGS "${COMPILE_FLAGS} -Wunderflow")
# produce a warning when numerical constant expressions are encountered, which yield an UNDERFLOW during compilation
set (COMPILE_FLAGS "${COMPILE_FLAGS} -Wsuggest-attribute=pure")
set (COMPILE_FLAGS "${COMPILE_FLAGS} -Wsuggest-attribute=noreturn")
set (COMPILE_FLAGS "${COMPILE_FLAGS} -Wconversion-extra")
set (COMPILE_FLAGS "${COMPILE_FLAGS} -Wimplicit-procedure")
set (COMPILE_FLAGS "${COMPILE_FLAGS} -Wno-unused-parameter")
set (COMPILE_FLAGS "${COMPILE_FLAGS} -ffpe-summary=all")
# print summary of floating point exeptions (invalid,zero,overflow,underflow,inexact,denormal)
set (COMPILE_FLAGS "${COMPILE_FLAGS} -Wsuggest-attribute=pure")
set (COMPILE_FLAGS "${COMPILE_FLAGS} -Wsuggest-attribute=noreturn")
set (COMPILE_FLAGS "${COMPILE_FLAGS} -Wconversion-extra")
set (COMPILE_FLAGS "${COMPILE_FLAGS} -Wimplicit-procedure")
set (COMPILE_FLAGS "${COMPILE_FLAGS} -Wno-unused-parameter")
set (COMPILE_FLAGS "${COMPILE_FLAGS} -ffpe-summary=all")
# print summary of floating point exeptions (invalid,zero,overflow,underflow,inexact,denormal)
# Additional options
# -Warray-temporaries: warnings because we have many temporary arrays (performance issue?)
# -Wimplicit-interface: no interfaces for lapack/MPI routines
# -Wunsafe-loop-optimizations: warn if the loop cannot be optimized due to nontrivial assumptions.
#------------------------------------------------------------------------------------------------
# Runtime debugging
set (DEBUG_FLAGS "${DEBUG_FLAGS} -ffpe-trap=invalid,zero,overflow")
# stop execution if floating point exception is detected (NaN is silent)
set (DEBUG_FLAGS "${DEBUG_FLAGS} -ffpe-trap=invalid,zero,overflow")
# stop execution if floating point exception is detected (NaN is silent)
set (DEBUG_FLAGS "${DEBUG_FLAGS} -g")
# Generate symbolic debugging information in the object file
set (DEBUG_FLAGS "${DEBUG_FLAGS} -g")
# Generate symbolic debugging information in the object file
set (DEBUG_FLAGS "${DEBUG_FLAGS} -fbacktrace")
set (DEBUG_FLAGS "${DEBUG_FLAGS} -fdump-core")
set (DEBUG_FLAGS "${DEBUG_FLAGS} -fcheck=all")
# checks for (array-temps,bounds,do,mem,pointer,recursion)
set (DEBUG_FLAGS "${DEBUG_FLAGS} -fbacktrace")
set (DEBUG_FLAGS "${DEBUG_FLAGS} -fdump-core")
set (DEBUG_FLAGS "${DEBUG_FLAGS} -fcheck=all")
# checks for (array-temps,bounds,do,mem,pointer,recursion)
# Additional options
# -ffpe-trap=precision,denormal,underflow
#------------------------------------------------------------------------------------------------
# precision settings
set (PRECISION_FLAGS "${PRECISION_FLAGS} -fdefault-real-8")
# set precision to 8 bytes for standard real (=8 for pReal). Will set size of double to 16 bytes as long as -fdefault-double-8 is not set
set (PRECISION_FLAGS "${PRECISION_FLAGS} -fdefault-double-8")
# set precision to 8 bytes for double real, would be 16 bytes if -fdefault-real-8 is used
set (PRECISION_FLAGS "${PRECISION_FLAGS} -fdefault-real-8")
# set precision to 8 bytes for standard real (=8 for pReal). Will set size of double to 16 bytes as long as -fdefault-double-8 is not set
set (PRECISION_FLAGS "${PRECISION_FLAGS} -fdefault-double-8")
# set precision to 8 bytes for double real, would be 16 bytes if -fdefault-real-8 is used

@@ -1,116 +1,116 @@
###################################################################################################
# Intel Compiler
###################################################################################################
if (OPENMP)
set (OPENMP_FLAGS "-qopenmp -parallel")
endif ()
if (OPTIMIZATION STREQUAL "OFF")
set (OPTIMIZATION_FLAGS "-O0 -no-ip")
elseif (OPTIMIZATION STREQUAL "DEFENSIVE")
set (OPTIMIZATION_FLAGS "-O2")
elseif (OPTIMIZATION STREQUAL "AGGRESSIVE")
set (OPTIMIZATION_FLAGS "-ipo -O3 -no-prec-div -fp-model fast=2 -xHost")
# -fast = -ipo, -O3, -no-prec-div, -static, -fp-model fast=2, and -xHost"
endif ()
if (OPTIMIZATION STREQUAL "OFF")
set (OPTIMIZATION_FLAGS "-O0 -no-ip")
elseif (OPTIMIZATION STREQUAL "DEFENSIVE")
set (OPTIMIZATION_FLAGS "-O2")
elseif (OPTIMIZATION STREQUAL "AGGRESSIVE")
set (OPTIMIZATION_FLAGS "-ipo -O3 -no-prec-div -fp-model fast=2 -xHost")
# -fast = -ipo, -O3, -no-prec-div, -static, -fp-model fast=2, and -xHost"
endif ()
# -assume std_mod_proc_name (included in -standard-semantics) causes problems if other modules
# (PETSc, HDF5) are not compiled with this option (https://software.intel.com/en-us/forums/intel-fortran-compiler-for-linux-and-mac-os-x/topic/62172)
set (STANDARD_CHECK "-stand f15 -standard-semantics -assume nostd_mod_proc_name")
set (LINKER_FLAGS "${LINKER_FLAGS} -shared-intel")
# Link against shared Intel libraries instead of static ones
#------------------------------------------------------------------------------------------------
# Fine tuning compilation options
set (COMPILE_FLAGS "${COMPILE_FLAGS} -fpp")
# preprocessor
set (COMPILE_FLAGS "${COMPILE_FLAGS} -fpp")
# preprocessor
set (COMPILE_FLAGS "${COMPILE_FLAGS} -ftz")
# flush underflow to zero, automatically set if -O[1,2,3]
set (COMPILE_FLAGS "${COMPILE_FLAGS} -ftz")
# flush underflow to zero, automatically set if -O[1,2,3]
set (COMPILE_FLAGS "${COMPILE_FLAGS} -diag-disable")
# disables warnings ...
set (COMPILE_FLAGS "${COMPILE_FLAGS} 5268")
# ... the text exceeds right hand column allowed on the line (we have only comments there)
set (COMPILE_FLAGS "${COMPILE_FLAGS},7624")
# ... about deprecated forall (has nice syntax and most likely a performance advantage)
set (COMPILE_FLAGS "${COMPILE_FLAGS} -diag-disable")
# disables warnings ...
set (COMPILE_FLAGS "${COMPILE_FLAGS} 5268")
# ... the text exceeds right hand column allowed on the line (we have only comments there)
set (COMPILE_FLAGS "${COMPILE_FLAGS},7624")
# ... about deprecated forall (has nice syntax and most likely a performance advantage)
set (COMPILE_FLAGS "${COMPILE_FLAGS} -warn")
# enables warnings ...
set (COMPILE_FLAGS "${COMPILE_FLAGS} declarations")
# ... any undeclared names (alternative name: -implicitnone)
set (COMPILE_FLAGS "${COMPILE_FLAGS},general")
# ... warning messages and informational messages are issued by the compiler
set (COMPILE_FLAGS "${COMPILE_FLAGS},usage")
# ... questionable programming practices
set (COMPILE_FLAGS "${COMPILE_FLAGS},interfaces")
# ... checks the interfaces of all SUBROUTINEs called and FUNCTIONs invoked in your compilation against an external set of interface blocks
set (COMPILE_FLAGS "${COMPILE_FLAGS},ignore_loc")
# ... %LOC is stripped from an actual argument
set (COMPILE_FLAGS "${COMPILE_FLAGS},alignments")
# ... data that is not naturally aligned
set (COMPILE_FLAGS "${COMPILE_FLAGS},unused")
# ... declared variables that are never used
set (COMPILE_FLAGS "${COMPILE_FLAGS} -warn")
# enables warnings ...
set (COMPILE_FLAGS "${COMPILE_FLAGS} declarations")
# ... any undeclared names (alternative name: -implicitnone)
set (COMPILE_FLAGS "${COMPILE_FLAGS},general")
# ... warning messages and informational messages are issued by the compiler
set (COMPILE_FLAGS "${COMPILE_FLAGS},usage")
# ... questionable programming practices
set (COMPILE_FLAGS "${COMPILE_FLAGS},interfaces")
# ... checks the interfaces of all SUBROUTINEs called and FUNCTIONs invoked in your compilation against an external set of interface blocks
set (COMPILE_FLAGS "${COMPILE_FLAGS},ignore_loc")
# ... %LOC is stripped from an actual argument
set (COMPILE_FLAGS "${COMPILE_FLAGS},alignments")
# ... data that is not naturally aligned
set (COMPILE_FLAGS "${COMPILE_FLAGS},unused")
# ... declared variables that are never used
# Additional options
# -warn: enables warnings, where
# truncated_source: Determines whether warnings occur when source exceeds the maximum column width in fixed-format files.
# (too many warnings because we have comments beyond character 132)
# uncalled: Determines whether warnings occur when a statement function is never called
# all:
# -name as_is: case sensitive Fortran!
#------------------------------------------------------------------------------------------------
# Runtime debugging
set (DEBUG_FLAGS "${DEBUG_FLAGS} -g")
# Generate symbolic debugging information in the object file
set (DEBUG_FLAGS "${DEBUG_FLAGS} -g")
# Generate symbolic debugging information in the object file
set (DEBUG_FLAGS "${DEBUG_FLAGS} -traceback")
# Generate extra information in the object file to provide source file traceback information when a severe error occurs at run time
set (DEBUG_FLAGS "${DEBUG_FLAGS} -traceback")
# Generate extra information in the object file to provide source file traceback information when a severe error occurs at run time
set (DEBUG_FLAGS "${DEBUG_FLAGS} -gen-interfaces")
# Generate an interface block for each routine. http://software.intel.com/en-us/blogs/2012/01/05/doctor-fortran-gets-explicit-again/
set (DEBUG_FLAGS "${DEBUG_FLAGS} -gen-interfaces")
# Generate an interface block for each routine. http://software.intel.com/en-us/blogs/2012/01/05/doctor-fortran-gets-explicit-again/
set (DEBUG_FLAGS "${DEBUG_FLAGS} -fp-stack-check")
# Generate extra code after every function call to ensure that the floating-point (FP) stack is in the expected state
set (DEBUG_FLAGS "${DEBUG_FLAGS} -fp-stack-check")
# Generate extra code after every function call to ensure that the floating-point (FP) stack is in the expected state
set (DEBUG_FLAGS "${DEBUG_FLAGS} -fp-model strict")
# Trap uninitalized variables
set (DEBUG_FLAGS "${DEBUG_FLAGS} -fp-model strict")
# Trap uninitalized variables
set (DEBUG_FLAGS "${DEBUG_FLAGS} -check" )
# Checks at runtime ...
set (DEBUG_FLAGS "${DEBUG_FLAGS} bounds")
# ... if an array index is too small (<1) or too large!
set (DEBUG_FLAGS "${DEBUG_FLAGS},format")
# ... for the data type of an item being formatted for output.
set (DEBUG_FLAGS "${DEBUG_FLAGS},output_conversion")
# ... for the fit of data items within a designated format descriptor field.
set (DEBUG_FLAGS "${DEBUG_FLAGS},pointers")
# ... for certain disassociated or uninitialized pointers or unallocated allocatable objects.
set (DEBUG_FLAGS "${DEBUG_FLAGS},uninit")
# ... for uninitialized variables.
set (DEBUG_FLAGS "${DEBUG_FLAGS} -ftrapuv")
# ... initializes stack local variables to an unusual value to aid error detection
set (DEBUG_FLAGS "${DEBUG_FLAGS} -fpe-all=0")
# ... capture all floating-point exceptions, sets -ftz automatically
set (DEBUG_FLAGS "${DEBUG_FLAGS} -check" )
# Checks at runtime ...
set (DEBUG_FLAGS "${DEBUG_FLAGS} bounds")
# ... if an array index is too small (<1) or too large!
set (DEBUG_FLAGS "${DEBUG_FLAGS},format")
# ... for the data type of an item being formatted for output.
set (DEBUG_FLAGS "${DEBUG_FLAGS},output_conversion")
# ... for the fit of data items within a designated format descriptor field.
set (DEBUG_FLAGS "${DEBUG_FLAGS},pointers")
# ... for certain disassociated or uninitialized pointers or unallocated allocatable objects.
set (DEBUG_FLAGS "${DEBUG_FLAGS},uninit")
# ... for uninitialized variables.
set (DEBUG_FLAGS "${DEBUG_FLAGS} -ftrapuv")
# ... initializes stack local variables to an unusual value to aid error detection
set (DEBUG_FLAGS "${DEBUG_FLAGS} -fpe-all=0")
# ... capture all floating-point exceptions, sets -ftz automatically
set (DEBUG_FLAGS "${DEBUG_FLAGS} -warn")
# enables warnings ...
set (DEBUG_FLAGS "${DEBUG_FLAGS} errors")
# ... warnings are changed to errors
set (DEBUG_FLAGS "${DEBUG_FLAGS},stderrors")
# ... warnings about Fortran standard violations are changed to errors
set (DEBUG_FLAGS "${DEBUG_FLAGS} -warn")
# enables warnings ...
set (DEBUG_FLAGS "${DEBUG_FLAGS} errors")
# ... warnings are changed to errors
set (DEBUG_FLAGS "${DEBUG_FLAGS},stderrors")
# ... warnings about Fortran standard violations are changed to errors
set (DEBUG_FLAGS "${DEBUG_FLAGS} -debug-parameters all")
# generate debug information for parameters
set (DEBUG_FLAGS "${DEBUG_FLAGS} -debug-parameters all")
# generate debug information for parameters
# Additional options
# -heap-arrays: Should not be done for OpenMP, but set "ulimit -s unlimited" on shell. Probably it helps also to unlimit other limits
# -check: Checks at runtime, where
# arg_temp_created: will cause a lot of warnings because we create a bunch of temporary arrays (performance?)
# stack:
#------------------------------------------------------------------------------------------------
# precision settings
set (PRECISION_FLAGS "${PRECISION_FLAGS} -real-size 64")
# set precision for standard real to 32 | 64 | 128 (= 4 | 8 | 16 bytes, type pReal is always 8 bytes)
set (PRECISION_FLAGS "${PRECISION_FLAGS} -real-size 64")
# set precision for standard real to 32 | 64 | 128 (= 4 | 8 | 16 bytes, type pReal is always 8 bytes)

@@ -1,25 +1,24 @@
###################################################################################################
# PGI Compiler
###################################################################################################
elseif(CMAKE_Fortran_COMPILER_ID STREQUAL "PGI")
if (OPTIMIZATION STREQUAL "OFF")
set (OPTIMIZATION_FLAGS "-O0" )
elseif (OPTIMIZATION STREQUAL "DEFENSIVE")
set (OPTIMIZATION_FLAGS "-O2")
elseif (OPTIMIZATION STREQUAL "AGGRESSIVE")
set (OPTIMIZATION_FLAGS "-O3")
endif ()
if (OPTIMIZATION STREQUAL "OFF")
set (OPTIMIZATION_FLAGS "-O0" )
elseif (OPTIMIZATION STREQUAL "DEFENSIVE")
set (OPTIMIZATION_FLAGS "-O2")
elseif (OPTIMIZATION STREQUAL "AGGRESSIVE")
set (OPTIMIZATION_FLAGS "-O3")
endif ()
#------------------------------------------------------------------------------------------------
# Fine tuning compilation options
set (COMPILE_FLAGS "${COMPILE_FLAGS} -Mpreprocess")
# preprocessor
set (COMPILE_FLAGS "${COMPILE_FLAGS} -Mpreprocess")
# preprocessor
set (STANDARD_CHECK "-Mallocatable=03")
#------------------------------------------------------------------------------------------------
# Runtime debugging
set (DEBUG_FLAGS "${DEBUG_FLAGS} -g")
# Includes debugging information in the object module; sets the optimization level to zero unless a -O option is present on the command line
set (DEBUG_FLAGS "${DEBUG_FLAGS} -g")
# Includes debugging information in the object module; sets the optimization level to zero unless a -O option is present on the command line

22
env/DAMASK.csh vendored
@@ -7,12 +7,6 @@ set DAMASK_ROOT=`python -c "import os,sys; print(os.path.realpath(os.path.expand
source $DAMASK_ROOT/CONFIG
# add BRANCH if DAMASK_ROOT is a git repository
cd $DAMASK_ROOT >/dev/null
set BRANCH = `git branch 2>/dev/null| grep -E '^\* '`
cd - >/dev/null
# if DAMASK_BIN is present
set path = ($DAMASK_ROOT/bin $path)
set SOLVER=`which DAMASK_spectral`
@@ -21,20 +15,12 @@ if ( "x$DAMASK_NUM_THREADS" == "x" ) then
set DAMASK_NUM_THREADS=1
endif
# currently, there is no information that unlimited causes problems
# currently, there is no information that unlimited stack size causes problems
# still, http://software.intel.com/en-us/forums/topic/501500 suggests fixing it
# more info https://jblevins.org/log/segfault
# https://stackoverflow.com/questions/79923/what-and-where-are-the-stack-and-heap
# http://superuser.com/questions/220059/what-parameters-has-ulimit
limit datasize unlimited # maximum heap size (kB)
limit stacksize unlimited # maximum stack size (kB)
endif
if ( `limit | grep memoryuse` != "" ) then
limit memoryuse unlimited # maximum physical memory size
endif
if ( `limit | grep vmemoryuse` != "" ) then
limit vmemoryuse unlimited # maximum virtual memory size
endif
# disable output in case of scp
if ( $?prompt ) then
@@ -44,8 +30,8 @@ if ( $?prompt ) then
echo https://damask.mpie.de
echo
echo Using environment with ...
echo "DAMASK $DAMASK_ROOT $BRANCH"
echo "Spectral Solver $SOLVER"
echo "DAMASK $DAMASK_ROOT"
echo "Grid Solver $SOLVER"
echo "Post Processing $PROCESSING"
if ( $?PETSC_DIR) then
echo "PETSc location $PETSC_DIR"

11
env/DAMASK.sh vendored
@@ -43,15 +43,12 @@ PROCESSING=$(type -p postResults || true 2>/dev/null)
[ "x$DAMASK_NUM_THREADS" == "x" ] && DAMASK_NUM_THREADS=1
# currently, there is no information that unlimited causes problems
# currently, there is no information that unlimited stack size causes problems
# still, http://software.intel.com/en-us/forums/topic/501500 suggests fixing it
# more info https://jblevins.org/log/segfault
# https://stackoverflow.com/questions/79923/what-and-where-are-the-stack-and-heap
# http://superuser.com/questions/220059/what-parameters-has-ulimit
ulimit -d unlimited 2>/dev/null # maximum heap size (kB)
ulimit -s unlimited 2>/dev/null # maximum stack size (kB)
ulimit -v unlimited 2>/dev/null # maximum virtual memory size
ulimit -m unlimited 2>/dev/null # maximum physical memory size
# disable output in case of scp
if [ ! -z "$PS1" ]; then
@@ -62,7 +59,7 @@ if [ ! -z "$PS1" ]; then
echo
echo Using environment with ...
echo "DAMASK $DAMASK_ROOT $BRANCH"
echo "Spectral Solver $SOLVER"
echo "Grid Solver $SOLVER"
echo "Post Processing $PROCESSING"
if [ "x$PETSC_DIR" != "x" ]; then
echo -n "PETSc location "
@@ -96,7 +93,7 @@ fi
export DAMASK_NUM_THREADS
export PYTHONPATH=$DAMASK_ROOT/python:$PYTHONPATH
for var in BASE STAT SOLVER PROCESSING FREE DAMASK_BIN BRANCH; do
for var in BASE STAT SOLVER PROCESSING BRANCH; do
unset "${var}"
done
for var in DAMASK MSC; do

12
env/DAMASK.zsh vendored
@@ -24,7 +24,6 @@ unset -f set
# add BRANCH if DAMASK_ROOT is a git repository
cd $DAMASK_ROOT >/dev/null; BRANCH=$(git branch 2>/dev/null| grep -E '^\* '); cd - >/dev/null
# add DAMASK_BIN if present
PATH=${DAMASK_ROOT}/bin:$PATH
SOLVER=$(which DAMASK_spectral || true 2>/dev/null)
@@ -35,15 +34,12 @@ PROCESSING=$(which postResults || true 2>/dev/null)
[[ "x$DAMASK_NUM_THREADS" == "x" ]] && DAMASK_NUM_THREADS=1
# currently, there is no information that unlimited causes problems
# currently, there is no information that unlimited stack size causes problems
# still, http://software.intel.com/en-us/forums/topic/501500 suggests fixing it
# more info https://jblevins.org/log/segfault
# https://stackoverflow.com/questions/79923/what-and-where-are-the-stack-and-heap
# http://superuser.com/questions/220059/what-parameters-has-ulimit
ulimit -d unlimited 2>/dev/null # maximum heap size (kB)
ulimit -s unlimited 2>/dev/null # maximum stack size (kB)
ulimit -v unlimited 2>/dev/null # maximum virtual memory size
ulimit -m unlimited 2>/dev/null # maximum physical memory size
# disable output in case of scp
if [ ! -z "$PS1" ]; then
@@ -54,7 +50,7 @@ if [ ! -z "$PS1" ]; then
echo
echo "Using environment with ..."
echo "DAMASK $DAMASK_ROOT $BRANCH"
echo "Spectral Solver $SOLVER"
echo "Grid Solver $SOLVER"
echo "Post Processing $PROCESSING"
if [ "x$PETSC_DIR" != "x" ]; then
echo -n "PETSc location "
@@ -90,7 +86,7 @@ fi
export DAMASK_NUM_THREADS
export PYTHONPATH=$DAMASK_ROOT/python:$PYTHONPATH
for var in BASE STAT SOLVER PROCESSING FREE DAMASK_BIN BRANCH; do
for var in SOLVER PROCESSING BRANCH; do
unset "${var}"
done
for var in DAMASK MSC; do

@@ -1,3 +1,3 @@
thermal conduction
initialT 300.0
t0 270.0
(output) temperature

@@ -6,7 +6,7 @@
mech none # isostrain 1 grain
thermal adiabatic # thermal strain (stress) induced mass transport
initialT 300.0
t0 330.0
(output) temperature
#-------------------#

@@ -99,14 +99,9 @@ else
fi
# DAMASK uses the HDF5 compiler wrapper around the Intel compiler
if test "$DAMASK_HDF5" = "ON";then
H5FC="$(h5fc -shlib -show)"
HDF5_LIB=${H5FC//ifort/}
FCOMP="$H5FC -DDAMASK_HDF5"
echo $FCOMP
else
FCOMP=ifort
fi
H5FC="$(h5fc -shlib -show)"
HDF5_LIB=${H5FC//ifort/}
FCOMP="$H5FC -DDAMASK_HDF5"
# AEM
if test "$MARCDLLOUTDIR" = ""; then

@@ -99,14 +99,9 @@ else
fi
# DAMASK uses the HDF5 compiler wrapper around the Intel compiler
if test "$DAMASK_HDF5" = "ON";then
H5FC="$(h5fc -shlib -show)"
HDF5_LIB=${H5FC//ifort/}
FCOMP="$H5FC -DDAMASK_HDF5"
echo $FCOMP
else
FCOMP=ifort
fi
H5FC="$(h5fc -shlib -show)"
HDF5_LIB=${H5FC//ifort/}
FCOMP="$H5FC -DDAMASK_HDF5"
# AEM
if test "$MARCDLLOUTDIR" = ""; then

@@ -100,11 +100,9 @@ else
fi
# DAMASK uses the HDF5 compiler wrapper around the Intel compiler
if test "$DAMASK_HDF5" = "ON";then
H5FC="$(h5fc -shlib -show)"
HDF5_LIB=${H5FC//ifort/}
FCOMP="$H5FC -DDAMASK_HDF5"
fi
H5FC="$(h5fc -shlib -show)"
HDF5_LIB=${H5FC//ifort/}
FCOMP="$H5FC -DDAMASK_HDF5"
# AEM
if test "$MARCDLLOUTDIR" = ""; then

@@ -1,8 +1,10 @@
#!/usr/bin/env python2.7
# -*- coding: UTF-8 no BOM -*-
#!/usr/bin/env python3
import os
import sys
from io import StringIO
from optparse import OptionParser
import damask
scriptName = os.path.splitext(os.path.basename(__file__))[0]
@@ -19,47 +21,10 @@ Convert TSL/EDAX *.ang file to ASCIItable
""", version = scriptID)
(options, filenames) = parser.parse_args()
# --- loop over input files -------------------------------------------------------------------------
if filenames == []: filenames = [None]
for name in filenames:
try:
table = damask.ASCIItable(name = name,
outname = os.path.splitext(name)[0]+'.txt' if name else name,
buffered = False, labeled = False)
except: continue
damask.util.report(scriptName,name)
# --- interpret header -----------------------------------------------------------------------------
table.head_read()
# --- read comments --------------------------------------------------------------------------------
table.info_clear()
while table.data_read(advance = False) and table.line.startswith('#'): # cautiously (non-progressing) read header
table.info_append(table.line) # add comment to info part
table.data_read() # wind forward
table.labels_clear()
table.labels_append(['1_Euler','2_Euler','3_Euler',
'1_pos','2_pos',
'IQ','CI','PhaseID','Intensity','Fit',
], # OIM Analysis 7.2 Manual, p 403 (of 517)
reset = True)
# ------------------------------------------ assemble header ---------------------------------------
table.head_write()
#--- write remainder of data file ------------------------------------------------------------------
outputAlive = True
while outputAlive and table.data_read():
outputAlive = table.data_write()
# ------------------------------------------ finalize output ---------------------------------------
table.close()
table = damask.Table.from_ang(StringIO(''.join(sys.stdin.read())) if name is None else name)
table.to_ASCII(sys.stdout if name is None else os.path.splitext(name)[0]+'.txt')
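
The rewrite above replaces the line-by-line ASCIItable handling with the new damask.Table class. A minimal usage sketch (assuming the DAMASK python module is importable; 'scan.ang' is a hypothetical TSL/EDAX file name):

import damask

# parse the .ang file into a Table with the standard labels (Euler angles, pos, IQ, CI, ...)
table = damask.Table.from_ang('scan.ang')
table.to_ASCII('scan.txt')  # write the ASCIItable representation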

File diff suppressed because it is too large

File diff suppressed because it is too large

@@ -39,61 +39,36 @@ for filename in options.filenames:
results = damask.DADF5(filename)
if not results.structured: continue
delta = results.size/results.grid*0.5
x, y, z = np.meshgrid(np.linspace(delta[2],results.size[2]-delta[2],results.grid[2]),
np.linspace(delta[1],results.size[1]-delta[1],results.grid[1]),
np.linspace(delta[0],results.size[0]-delta[0],results.grid[0]),
indexing = 'ij')
coords = np.concatenate((z[:,:,:,None],y[:,:,:,None],x[:,:,:,None]),axis = 3)
if results.version_major == 0 and results.version_minor >= 5:
coords = damask.grid_filters.cell_coord0(results.grid,results.size,results.origin)
else:
coords = damask.grid_filters.cell_coord0(results.grid,results.size)
N_digits = int(np.floor(np.log10(int(results.increments[-1][3:]))))+1
N_digits = 5 # hack to keep test intact
for i,inc in enumerate(results.iter_visible('increments')):
print('Output step {}/{}'.format(i+1,len(results.increments)))
header = '1 header\n'
data = np.array([int(inc[3:]) for j in range(np.product(results.grid))]).reshape([np.product(results.grid),1])
header+= 'inc'
coords = coords.reshape([np.product(results.grid),3])
data = np.concatenate((data,coords),1)
header+=' 1_pos 2_pos 3_pos'
table = damask.Table(np.ones(np.product(results.grid),dtype=int)*int(inc[3:]),{'inc':(1,)})
table.add('pos',coords.reshape((-1,3)))
results.set_visible('materialpoints',False)
results.set_visible('constituents', True)
for label in options.con:
x = results.get_dataset_location(label)
if len(x) == 0:
continue
array = results.read_dataset(x,0,plain=True)
d = np.product(np.shape(array)[1:])
data = np.concatenate((data,np.reshape(array,[np.product(results.grid),d])),1)
if d>1:
header+= ''.join([' {}_{}'.format(j+1,label) for j in range(d)])
else:
header+=' '+label
if len(x) != 0:
table.add(label,results.read_dataset(x,0,plain=True).reshape((results.grid.prod(),-1)))
results.set_visible('constituents', False)
results.set_visible('materialpoints',True)
for label in options.mat:
x = results.get_dataset_location(label)
if len(x) == 0:
continue
array = results.read_dataset(x,0,plain=True)
d = np.product(np.shape(array)[1:])
data = np.concatenate((data,np.reshape(array,[np.product(results.grid),d])),1)
if d>1:
header+= ''.join([' {}_{}'.format(j+1,label) for j in range(d)])
else:
header+=' '+label
if len(x) != 0:
table.add(label,results.read_dataset(x,0,plain=True).reshape((results.grid.prod(),-1)))
dirname = os.path.abspath(os.path.join(os.path.dirname(filename),options.dir))
if not os.path.isdir(dirname):
os.mkdir(dirname,0o755)
file_out = '{}_inc{}.txt'.format(os.path.splitext(os.path.split(filename)[-1])[0],
inc[3:].zfill(N_digits))
np.savetxt(os.path.join(dirname,file_out),data,header=header,comments='')
table.to_ASCII(os.path.join(dirname,file_out))
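
The loop above now assembles each increment through damask.Table instead of hand-maintained numpy arrays and header strings. A condensed sketch of the pattern, with 'my_results.hdf5' as a hypothetical DADF5 file (API names as used in the diff):

import numpy as np
import damask

results = damask.DADF5('my_results.hdf5')                      # hypothetical result file
coords = damask.grid_filters.cell_coord0(results.grid, results.size)

for inc in results.iter_visible('increments'):
    # one row per cell: increment number, then cell-center position
    table = damask.Table(np.ones(results.grid.prod(), dtype=int)*int(inc[3:]), {'inc': (1,)})
    table.add('pos', coords.reshape((-1, 3)))
    table.to_ASCII('{}.txt'.format(inc))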

@@ -1,144 +0,0 @@
#!/usr/bin/env python3
import os
import argparse
import h5py
import numpy as np
import vtk
from vtk.util import numpy_support
import damask
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version])
# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------
parser = argparse.ArgumentParser()
#ToDo: We need to decide on a way of handling arguments of variable length
#https://stackoverflow.com/questions/15459997/passing-integer-lists-to-python
#parser.add_argument('--version', action='version', version='%(prog)s {}'.format(scriptID))
parser.add_argument('filenames', nargs='+',
help='DADF5 files')
parser.add_argument('-d','--dir', dest='dir',default='postProc',metavar='string',
help='name of subdirectory relative to the location of the DADF5 file to hold output')
parser.add_argument('--mat', nargs='+',
help='labels for materialpoint',dest='mat')
parser.add_argument('--con', nargs='+',
help='labels for constituent',dest='con')
options = parser.parse_args()
if options.mat is None: options.mat=[]
if options.con is None: options.con=[]
# --- loop over input files ------------------------------------------------------------------------
for filename in options.filenames:
results = damask.DADF5(filename)
if results.structured: # for grid solvers use rectilinear grid
grid = vtk.vtkRectilinearGrid()
coordArray = [vtk.vtkDoubleArray(),
vtk.vtkDoubleArray(),
vtk.vtkDoubleArray(),
]
grid.SetDimensions(*(results.grid+1))
for dim in [0,1,2]:
for c in np.linspace(0,results.size[dim],1+results.grid[dim]):
coordArray[dim].InsertNextValue(c)
grid.SetXCoordinates(coordArray[0])
grid.SetYCoordinates(coordArray[1])
grid.SetZCoordinates(coordArray[2])
else:
nodes = vtk.vtkPoints()
with h5py.File(filename) as f:
nodes.SetData(numpy_support.numpy_to_vtk(f['/geometry/x_n'][()],deep=True))
grid = vtk.vtkUnstructuredGrid()
grid.SetPoints(nodes)
grid.Allocate(f['/geometry/T_c'].shape[0])
for i in f['/geometry/T_c']:
grid.InsertNextCell(vtk.VTK_HEXAHEDRON,8,i-1)
N_digits = int(np.floor(np.log10(int(results.increments[-1][3:]))))+1
for i,inc in enumerate(results.iter_visible('increments')):
print('Output step {}/{}'.format(i+1,len(results.increments)))
vtk_data = []
results.set_visible('materialpoints',False)
results.set_visible('constituents', True)
for label in options.con:
for p in results.iter_visible('con_physics'):
if p != 'generic':
for c in results.iter_visible('constituents'):
x = results.get_dataset_location(label)
if len(x) == 0:
continue
array = results.read_dataset(x,0)
shape = [array.shape[0],np.product(array.shape[1:])]
vtk_data.append(numpy_support.numpy_to_vtk(num_array=array.reshape(shape),deep=True,array_type= vtk.VTK_DOUBLE))
vtk_data[-1].SetName('1_'+x[0].split('/',1)[1])
grid.GetCellData().AddArray(vtk_data[-1])
else:
x = results.get_dataset_location(label)
if len(x) == 0:
continue
array = results.read_dataset(x,0)
shape = [array.shape[0],np.product(array.shape[1:])]
vtk_data.append(numpy_support.numpy_to_vtk(num_array=array.reshape(shape),deep=True,array_type= vtk.VTK_DOUBLE))
vtk_data[-1].SetName('1_'+x[0].split('/',1)[1])
grid.GetCellData().AddArray(vtk_data[-1])
results.set_visible('constituents', False)
results.set_visible('materialpoints',True)
for label in options.mat:
for p in results.iter_visible('mat_physics'):
if p != 'generic':
for m in results.iter_visible('materialpoints'):
x = results.get_dataset_location(label)
if len(x) == 0:
continue
array = results.read_dataset(x,0)
shape = [array.shape[0],np.product(array.shape[1:])]
vtk_data.append(numpy_support.numpy_to_vtk(num_array=array.reshape(shape),deep=True,array_type= vtk.VTK_DOUBLE))
vtk_data[-1].SetName('1_'+x[0].split('/',1)[1])
grid.GetCellData().AddArray(vtk_data[-1])
else:
x = results.get_dataset_location(label)
if len(x) == 0:
continue
array = results.read_dataset(x,0)
shape = [array.shape[0],np.product(array.shape[1:])]
vtk_data.append(numpy_support.numpy_to_vtk(num_array=array.reshape(shape),deep=True,array_type= vtk.VTK_DOUBLE))
vtk_data[-1].SetName('1_'+x[0].split('/',1)[1])
grid.GetCellData().AddArray(vtk_data[-1])
writer = vtk.vtkXMLRectilinearGridWriter() if results.structured else \
vtk.vtkXMLUnstructuredGridWriter()
results.set_visible('constituents', False)
results.set_visible('materialpoints',False)
x = results.get_dataset_location('u_n')
vtk_data.append(numpy_support.numpy_to_vtk(num_array=results.read_dataset(x,0),deep=True,array_type=vtk.VTK_DOUBLE))
vtk_data[-1].SetName('u')
grid.GetPointData().AddArray(vtk_data[-1])
dirname = os.path.abspath(os.path.join(os.path.dirname(filename),options.dir))
if not os.path.isdir(dirname):
os.mkdir(dirname,0o755)
file_out = '{}_inc{}.{}'.format(os.path.splitext(os.path.split(filename)[-1])[0],
inc[3:].zfill(N_digits),
writer.GetDefaultFileExtension())
writer.SetCompressorTypeToZLib()
writer.SetDataModeToBinary()
writer.SetFileName(os.path.join(dirname,file_out))
writer.SetInputData(grid)
writer.Write()

@@ -1,124 +0,0 @@
#!/usr/bin/env python3
import os
import argparse
import numpy as np
import vtk
from vtk.util import numpy_support
import damask
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version])
# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------
parser = argparse.ArgumentParser()
#ToDo: We need to decide on a way of handling arguments of variable length
#https://stackoverflow.com/questions/15459997/passing-integer-lists-to-python
#parser.add_argument('--version', action='version', version='%(prog)s {}'.format(scriptID))
parser.add_argument('filenames', nargs='+',
help='DADF5 files')
parser.add_argument('-d','--dir', dest='dir',default='postProc',metavar='string',
help='name of subdirectory relative to the location of the DADF5 file to hold output')
parser.add_argument('--mat', nargs='+',
help='labels for materialpoint',dest='mat')
parser.add_argument('--con', nargs='+',
help='labels for constituent',dest='con')
options = parser.parse_args()
if options.mat is None: options.mat=[]
if options.con is None: options.con=[]
# --- loop over input files ------------------------------------------------------------------------
for filename in options.filenames:
results = damask.DADF5(filename)
Points = vtk.vtkPoints()
Vertices = vtk.vtkCellArray()
for c in results.cell_coordinates():
pointID = Points.InsertNextPoint(c)
Vertices.InsertNextCell(1)
Vertices.InsertCellPoint(pointID)
Polydata = vtk.vtkPolyData()
Polydata.SetPoints(Points)
Polydata.SetVerts(Vertices)
Polydata.Modified()
N_digits = int(np.floor(np.log10(int(results.increments[-1][3:]))))+1
for i,inc in enumerate(results.iter_visible('increments')):
print('Output step {}/{}'.format(i+1,len(results.increments)))
vtk_data = []
results.set_visible('materialpoints',False)
results.set_visible('constituents', True)
for label in options.con:
for p in results.iter_visible('con_physics'):
if p != 'generic':
for c in results.iter_visible('constituents'):
x = results.get_dataset_location(label)
if len(x) == 0:
continue
array = results.read_dataset(x,0)
shape = [array.shape[0],np.product(array.shape[1:])]
vtk_data.append(numpy_support.numpy_to_vtk(num_array=array.reshape(shape),deep=True,array_type= vtk.VTK_DOUBLE))
vtk_data[-1].SetName('1_'+x[0].split('/',1)[1])
Polydata.GetCellData().AddArray(vtk_data[-1])
else:
x = results.get_dataset_location(label)
if len(x) == 0:
continue
array = results.read_dataset(x,0)
shape = [array.shape[0],np.product(array.shape[1:])]
vtk_data.append(numpy_support.numpy_to_vtk(num_array=array.reshape(shape),deep=True,array_type= vtk.VTK_DOUBLE))
vtk_data[-1].SetName('1_'+x[0].split('/',1)[1])
Polydata.GetCellData().AddArray(vtk_data[-1])
results.set_visible('constituents', False)
results.set_visible('materialpoints',True)
for label in options.mat:
for p in results.iter_visible('mat_physics'):
if p != 'generic':
for m in results.iter_visible('materialpoints'):
x = results.get_dataset_location(label)
if len(x) == 0:
continue
array = results.read_dataset(x,0)
shape = [array.shape[0],np.product(array.shape[1:])]
vtk_data.append(numpy_support.numpy_to_vtk(num_array=array.reshape(shape),deep=True,array_type= vtk.VTK_DOUBLE))
vtk_data[-1].SetName('1_'+x[0].split('/',1)[1])
Polydata.GetCellData().AddArray(vtk_data[-1])
else:
x = results.get_dataset_location(label)
if len(x) == 0:
continue
array = results.read_dataset(x,0)
shape = [array.shape[0],np.product(array.shape[1:])]
vtk_data.append(numpy_support.numpy_to_vtk(num_array=array.reshape(shape),deep=True,array_type= vtk.VTK_DOUBLE))
vtk_data[-1].SetName('1_'+x[0].split('/',1)[1])
Polydata.GetCellData().AddArray(vtk_data[-1])
writer = vtk.vtkXMLPolyDataWriter()
dirname = os.path.abspath(os.path.join(os.path.dirname(filename),options.dir))
if not os.path.isdir(dirname):
os.mkdir(dirname,0o755)
file_out = '{}_inc{}.{}'.format(os.path.splitext(os.path.split(filename)[-1])[0],
inc[3:].zfill(N_digits),
writer.GetDefaultFileExtension())
writer.SetCompressorTypeToZLib()
writer.SetDataModeToBinary()
writer.SetFileName(os.path.join(dirname,file_out))
writer.SetInputData(Polydata)
writer.Write()

@@ -4,7 +4,7 @@ import os
import sys
from optparse import OptionParser
import re
import collections
from collections.abc import Iterable
import math # noqa
import scipy # noqa
@@ -18,7 +18,7 @@ scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version])
def listify(x):
return x if isinstance(x, collections.Iterable) else [x]
return x if isinstance(x, Iterable) else [x]
# --------------------------------------------------------------------
@@ -65,9 +65,10 @@ for i in range(len(options.formulas)):
if filenames == []: filenames = [None]
for name in filenames:
try: table = damask.ASCIItable(name = name,
buffered = False)
except: continue
try:
table = damask.ASCIItable(name = name, buffered = False)
except IOError:
continue
damask.util.report(scriptName,name)
# ------------------------------------------ read header -------------------------------------------
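
The import change above matters beyond style: the abstract base classes live in collections.abc since Python 3.3, and the old aliases in collections were removed in Python 3.10, so isinstance(x, collections.Iterable) eventually fails outright. The fixed helper in isolation:

from collections.abc import Iterable

def listify(x):
    return x if isinstance(x, Iterable) else [x]

print(listify(3))       # [3]
print(listify([1, 2]))  # [1, 2]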

@@ -1,11 +1,11 @@
#!/usr/bin/env python3
import os
import math
import sys
from io import StringIO
from optparse import OptionParser
import numpy as np
import scipy.ndimage
import damask
@@ -13,78 +13,6 @@ import damask
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version])
#--------------------------------------------------------------------------------------------------
def cell2node(cellData,grid):
nodeData = 0.0
datalen = np.array(cellData.shape[3:]).prod()
for i in range(datalen):
node = scipy.ndimage.convolve(cellData.reshape(tuple(grid[::-1])+(datalen,))[...,i],
np.ones((2,2,2))/8., # 2x2x2 neighborhood of cells
mode = 'wrap',
origin = -1, # offset to have cell origin as center
) # now averaged at cell origins
node = np.append(node,node[np.newaxis,0,:,:,...],axis=0) # wrap along z
node = np.append(node,node[:,0,np.newaxis,:,...],axis=1) # wrap along y
node = np.append(node,node[:,:,0,np.newaxis,...],axis=2) # wrap along x
nodeData = node[...,np.newaxis] if i==0 else np.concatenate((nodeData,node[...,np.newaxis]),axis=-1)
return nodeData
#--------------------------------------------------------------------------------------------------
def deformationAvgFFT(F,grid,size,nodal=False,transformed=False):
"""Calculate average cell center (or nodal) deformation for deformation gradient field specified in each grid cell"""
if nodal:
x, y, z = np.meshgrid(np.linspace(0,size[2],1+grid[2]),
np.linspace(0,size[1],1+grid[1]),
np.linspace(0,size[0],1+grid[0]),
indexing = 'ij')
else:
x, y, z = np.meshgrid(np.linspace(size[2]/grid[2]/2.,size[2]-size[2]/grid[2]/2.,grid[2]),
np.linspace(size[1]/grid[1]/2.,size[1]-size[1]/grid[1]/2.,grid[1]),
np.linspace(size[0]/grid[0]/2.,size[0]-size[0]/grid[0]/2.,grid[0]),
indexing = 'ij')
origCoords = np.concatenate((z[:,:,:,None],y[:,:,:,None],x[:,:,:,None]),axis = 3)
F_fourier = F if transformed else np.fft.rfftn(F,axes=(0,1,2)) # transform or use provided data
Favg = np.real(F_fourier[0,0,0,:,:])/grid.prod() # take zero freq for average
avgDeformation = np.einsum('ml,ijkl->ijkm',Favg,origCoords) # dX = Favg.X
return avgDeformation
#--------------------------------------------------------------------------------------------------
def displacementFluctFFT(F,grid,size,nodal=False,transformed=False):
"""Calculate cell center (or nodal) displacement for deformation gradient field specified in each grid cell"""
integrator = 0.5j * size / math.pi
kk, kj, ki = np.meshgrid(np.where(np.arange(grid[2])>grid[2]//2,np.arange(grid[2])-grid[2],np.arange(grid[2])),
np.where(np.arange(grid[1])>grid[1]//2,np.arange(grid[1])-grid[1],np.arange(grid[1])),
np.arange(grid[0]//2+1),
indexing = 'ij')
k_s = np.concatenate((ki[:,:,:,None],kj[:,:,:,None],kk[:,:,:,None]),axis = 3)
k_sSquared = np.einsum('...l,...l',k_s,k_s)
k_sSquared[0,0,0] = 1.0 # ignore global average frequency
#--------------------------------------------------------------------------------------------------
# integration in Fourier space
displacement_fourier = -np.einsum('ijkml,ijkl,l->ijkm',
F if transformed else np.fft.rfftn(F,axes=(0,1,2)),
k_s,
integrator,
) / k_sSquared[...,np.newaxis]
#--------------------------------------------------------------------------------------------------
# backtransformation to real space
displacement = np.fft.irfftn(displacement_fourier,grid[::-1],axes=(0,1,2))
return cell2node(displacement,grid) if nodal else displacement
def volTetrahedron(coords):
"""
Return the volume of the tetrahedron with given vertices or sides.
@@ -133,10 +61,10 @@ def volTetrahedron(coords):
def volumeMismatch(size,F,nodes):
"""
Calculates the volume mismatch
Calculates the volume mismatch.
volume mismatch is defined as the difference between volume of reconstructed
(compatible) cube and determinant of defgrad at the FP
(compatible) cube and determinant of deformation gradient at Fourier point.
"""
coords = np.empty([8,3])
vMismatch = np.empty(grid[::-1])
@ -169,11 +97,11 @@ def volumeMismatch(size,F,nodes):
def shapeMismatch(size,F,nodes,centres):
"""
Routine to calculate the shape mismatch
Routine to calculate the shape mismatch.
shape mismatch is defined as difference between the vectors from the central point to
the corners of reconstructed (combatible) volume element and the vectors calculated by deforming
the initial volume element with the current deformation gradient
the initial volume element with the current deformation gradient.
"""
coordsInitial = np.empty([8,3])
sMismatch = np.empty(grid[::-1])
@@ -241,92 +169,29 @@ parser.set_defaults(pos = 'pos',
)
(options,filenames) = parser.parse_args()
# --- loop over input files -------------------------------------------------------------------------
if filenames == []: filenames = [None]
for name in filenames:
try:
table = damask.ASCIItable(name = name,
buffered = False)
except: continue
damask.util.report(scriptName,name)
# ------------------------------------------ read header ------------------------------------------
table.head_read()
# ------------------------------------------ sanity checks ----------------------------------------
errors = []
remarks = []
if table.label_dimension(options.defgrad) != 9:
errors.append('deformation gradient "{}" is not a 3x3 tensor.'.format(options.defgrad))
table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
grid,size,origin = damask.grid_filters.cell_coord0_2_DNA(table.get(options.pos))
coordDim = table.label_dimension(options.pos)
if not 3 >= coordDim >= 1:
errors.append('coordinates "{}" need to have one, two, or three dimensions.'.format(options.pos))
elif coordDim < 3:
remarks.append('appending {} dimension{} to coordinates "{}"...'.format(3-coordDim,
's' if coordDim < 2 else '',
options.pos))
if remarks != []: damask.util.croak(remarks)
if errors != []:
damask.util.croak(errors)
table.close(dismiss=True)
continue
# --------------- figure out size and grid ---------------------------------------------------------
table.data_readArray([options.defgrad,options.pos])
table.data_rewind()
if table.data[:,9:].shape[1] < 3:
table.data = np.hstack((table.data,
np.zeros((table.data.shape[0],
3-table.data[:,9:].shape[1]),dtype='f'))) # fill coords up to 3D with zeros
grid,size = damask.util.coordGridAndSize(table.data[:,9:12])
N = grid.prod()
if N != len(table.data): errors.append('data count {} does not match grid {}x{}x{}.'.format(N,*grid))
if errors != []:
damask.util.croak(errors)
table.close(dismiss = True)
continue
F = table.get(options.defgrad).reshape(grid[2],grid[1],grid[0],3,3)
nodes = damask.grid_filters.node_coord(size,F)
# -----------------------------process data and assemble header -------------------------------------
F_fourier = np.fft.rfftn(table.data[:,:9].reshape(grid[2],grid[1],grid[0],3,3),axes=(0,1,2)) # perform transform only once...
nodes = displacementFluctFFT(F_fourier,grid,size,True,transformed=True)\
+ deformationAvgFFT (F_fourier,grid,size,True,transformed=True)
if options.shape:
table.labels_append(['shapeMismatch({})'.format(options.defgrad)])
centres = displacementFluctFFT(F_fourier,grid,size,False,transformed=True)\
+ deformationAvgFFT (F_fourier,grid,size,False,transformed=True)
centers = damask.grid_filters.cell_coord(size,F)
shapeMismatch = shapeMismatch( size,table.get(options.defgrad).reshape(grid[2],grid[1],grid[0],3,3),nodes,centers)
table.add('shapeMismatch(({}))'.format(options.defgrad),
shapeMismatch.reshape((-1,1)),
scriptID+' '+' '.join(sys.argv[1:]))
if options.volume:
table.labels_append(['volMismatch({})'.format(options.defgrad)])
volumeMismatch = volumeMismatch(size,table.get(options.defgrad).reshape(grid[2],grid[1],grid[0],3,3),nodes)
table.add('volMismatch(({}))'.format(options.defgrad),
volumeMismatch.reshape((-1,1)),
scriptID+' '+' '.join(sys.argv[1:]))
table.head_write()
if options.shape:
shapeMismatch = shapeMismatch( size,table.data[:,:9].reshape(grid[2],grid[1],grid[0],3,3),nodes,centres)
if options.volume:
volumeMismatch = volumeMismatch(size,table.data[:,:9].reshape(grid[2],grid[1],grid[0],3,3),nodes)
# ------------------------------------------ output data -------------------------------------------
for i in range(grid[2]):
for j in range(grid[1]):
for k in range(grid[0]):
table.data_read()
if options.shape: table.data_append(shapeMismatch[i,j,k])
if options.volume: table.data_append(volumeMismatch[i,j,k])
table.data_write()
# ------------------------------------------ output finalization -----------------------------------
table.close() # close ASCII tables
table.to_ASCII(sys.stdout if name is None else name)
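
The deleted FFT helpers above are superseded by damask.grid_filters, which reconstructs grid, size, and origin from the position column and computes deformed nodal and cell-center coordinates directly from the deformation gradient field. A small sketch under the assumption of an identity deformation on a 4x4x4 unit cube (signatures as used in the diff):

import numpy as np
import damask

grid, size = np.array([4, 4, 4]), np.ones(3)
F = np.broadcast_to(np.eye(3), (grid[2], grid[1], grid[0], 3, 3)).copy()  # F = I everywhere
nodes   = damask.grid_filters.node_coord(size, F)  # deformed nodal positions
centers = damask.grid_filters.cell_coord(size, F)  # deformed cell centers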

@@ -2,6 +2,7 @@
import os
import sys
from io import StringIO
from optparse import OptionParser
import numpy as np
@@ -22,79 +23,26 @@ Add cumulative (sum of first to current row) values for given label(s).
""", version = scriptID)
parser.add_option('-l','--label',
dest='label',
dest='labels',
action = 'extend', metavar = '<string LIST>',
help = 'columns to cumulate')
parser.add_option('-p','--product',
dest='product', action = 'store_true',
help = 'product of values instead of sum')
(options,filenames) = parser.parse_args()
if options.label is None:
parser.error('no data column(s) specified.')
# --- loop over input files -------------------------------------------------------------------------
if filenames == []: filenames = [None]
if options.labels is None:
parser.error('no data column(s) specified.')
for name in filenames:
try:
table = damask.ASCIItable(name = name,
buffered = False)
except IOError: continue
damask.util.report(scriptName,name)
# ------------------------------------------ read header ------------------------------------------
table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
for label in options.labels:
table.add('cum_{}({})'.format('prod' if options.product else 'sum',label),
np.cumprod(table.get(label),0) if options.product else np.cumsum(table.get(label),0),
scriptID+' '+' '.join(sys.argv[1:]))
table.head_read()
# ------------------------------------------ sanity checks ----------------------------------------
errors = []
remarks = []
columns = []
dims = []
how = 'prod' if options.product else 'sum'
for what in options.label:
dim = table.label_dimension(what)
if dim < 0: remarks.append('column {} not found...'.format(what))
else:
dims.append(dim)
columns.append(table.label_index(what))
table.labels_append('cum_{}({})'.format(how,what) if dim == 1 else
['{}_cum_{}({})'.format(i+1,how,what) for i in range(dim)] ) # extend ASCII header with new labels
if remarks != []: damask.util.croak(remarks)
if errors != []:
damask.util.croak(errors)
table.close(dismiss = True)
continue
# ------------------------------------------ assemble header ---------------------------------------
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
table.head_write()
# ------------------------------------------ process data ------------------------------------------
mask = []
for col,dim in zip(columns,dims): mask += range(col,col+dim) # isolate data columns to cumulate
cumulated = np.ones(len(mask)) if options.product else np.zeros(len(mask)) # prepare output field
outputAlive = True
while outputAlive and table.data_read(): # read next data line of ASCII table
if options.product:
for i,col in enumerate(mask):
cumulated[i] *= float(table.data[col]) # cumulate values (multiplication)
else:
for i,col in enumerate(mask):
cumulated[i] += float(table.data[col]) # cumulate values (addition)
table.data_append(cumulated)
outputAlive = table.data_write() # output processed line
# ------------------------------------------ output finalization -----------------------------------
table.close() # close ASCII tables
table.to_ASCII(sys.stdout if name is None else name)
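
np.cumsum/np.cumprod along axis 0 reproduce the removed row-by-row accumulation loop in a single call; a quick check:

import numpy as np

x = np.array([[1.,2.],
              [3.,4.],
              [5.,6.]])
np.cumsum (x,0)   # running sum down the rows:     [[1,2],[4,6],[9,12]]
np.cumprod(x,0)   # running product down the rows: [[1,2],[3,8],[15,48]]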

View File

@ -2,6 +2,7 @@
import os
import sys
from io import StringIO
from optparse import OptionParser
import numpy as np
@ -12,48 +13,6 @@ import damask
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version])
def merge_dicts(*dict_args):
"""Given any number of dicts, shallow copy and merge into a new dict, with precedence going to key value pairs in latter dicts."""
result = {}
for dictionary in dict_args:
result.update(dictionary)
return result
def curlFFT(geomdim,field):
"""Calculate curl of a vector or tensor field by transforming into Fourier space."""
shapeFFT = np.array(np.shape(field))[0:3]
grid = np.array(np.shape(field)[2::-1])
N = grid.prod() # field size
n = np.array(np.shape(field)[3:]).prod() # data size
field_fourier = np.fft.rfftn(field,axes=(0,1,2),s=shapeFFT)
curl_fourier = np.empty(field_fourier.shape,'c16')
# differentiation in Fourier space
TWOPIIMG = 2.0j*np.pi
einsums = {
3:'slm,ijkl,ijkm->ijks', # vector, 3 -> 3
9:'slm,ijkl,ijknm->ijksn', # tensor, 3x3 -> 3x3
}
k_sk = np.where(np.arange(grid[2])>grid[2]//2,np.arange(grid[2])-grid[2],np.arange(grid[2]))/geomdim[0]
if grid[2]%2 == 0: k_sk[grid[2]//2] = 0 # Nyquist freq=0 for even grid (Johnson, MIT, 2011)
k_sj = np.where(np.arange(grid[1])>grid[1]//2,np.arange(grid[1])-grid[1],np.arange(grid[1]))/geomdim[1]
if grid[1]%2 == 0: k_sj[grid[1]//2] = 0 # Nyquist freq=0 for even grid (Johnson, MIT, 2011)
k_si = np.arange(grid[0]//2+1)/geomdim[2]
kk, kj, ki = np.meshgrid(k_sk,k_sj,k_si,indexing = 'ij')
k_s = np.concatenate((ki[:,:,:,None],kj[:,:,:,None],kk[:,:,:,None]),axis = 3).astype('c16')
e = np.zeros((3, 3, 3))
e[0, 1, 2] = e[1, 2, 0] = e[2, 0, 1] = 1.0 # Levi-Civita symbols
e[0, 2, 1] = e[2, 1, 0] = e[1, 0, 2] = -1.0
curl_fourier = np.einsum(einsums[n],e,k_s,field_fourier)*TWOPIIMG
return np.fft.irfftn(curl_fourier,axes=(0,1,2),s=shapeFFT).reshape([N,n])
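
The einsum strings above contract the Levi-Civita tensor e with the frequency vector k_s and the transformed field, i.e. they evaluate the cross product i2πk × F̂ at every grid point. A one-point sanity check of that contraction:

import numpy as np

e = np.zeros((3,3,3))
e[0,1,2] = e[1,2,0] = e[2,0,1] =  1.0
e[0,2,1] = e[2,1,0] = e[1,0,2] = -1.0
a = np.array([1.,0.,0.])
b = np.array([0.,1.,0.])
np.einsum('slm,l,m->s',e,a,b)   # -> [0. 0. 1.], identical to np.cross(a,b)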
# --------------------------------------------------------------------
# MAIN
@ -61,8 +20,7 @@ def curlFFT(geomdim,field):
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
Add column(s) containing curl of requested column(s).
Operates on periodic ordered three-dimensional data sets
of vector and tensor fields.
Operates on periodic ordered three-dimensional data sets of vector and tensor fields.
""", version = scriptID)
parser.add_option('-p','--pos','--periodiccellcenter',
@ -70,93 +28,30 @@ parser.add_option('-p','--pos','--periodiccellcenter',
type = 'string', metavar = 'string',
help = 'label of coordinates [%default]')
parser.add_option('-l','--label',
dest = 'data',
dest = 'labels',
action = 'extend', metavar = '<string LIST>',
help = 'label(s) of field values')
parser.set_defaults(pos = 'pos',
)
(options,filenames) = parser.parse_args()
if options.data is None: parser.error('no data column specified.')
# --- define possible data types -------------------------------------------------------------------
datatypes = {
3: {'name': 'vector',
'shape': [3],
},
9: {'name': 'tensor',
'shape': [3,3],
},
}
# --- loop over input files ------------------------------------------------------------------------
if filenames == []: filenames = [None]
if options.labels is None: parser.error('no data column specified.')
for name in filenames:
try: table = damask.ASCIItable(name = name,buffered = False)
except: continue
damask.util.report(scriptName,name)
damask.util.report(scriptName,name)
# --- interpret header ----------------------------------------------------------------------------
table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
grid,size,origin = damask.grid_filters.cell_coord0_2_DNA(table.get(options.pos))
table.head_read()
remarks = []
errors = []
active = []
coordDim = table.label_dimension(options.pos)
if coordDim != 3:
errors.append('coordinates "{}" must be three-dimensional.'.format(options.pos))
else: coordCol = table.label_index(options.pos)
for me in options.data:
dim = table.label_dimension(me)
if dim in datatypes:
active.append(merge_dicts({'label':me},datatypes[dim]))
remarks.append('differentiating {} "{}"...'.format(datatypes[dim]['name'],me))
else:
remarks.append('skipping "{}" of dimension {}...'.format(me,dim) if dim != -1 else \
'"{}" not found...'.format(me) )
if remarks != []: damask.util.croak(remarks)
if errors != []:
damask.util.croak(errors)
table.close(dismiss = True)
continue
# ------------------------------------------ assemble header --------------------------------------
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
for data in active:
table.labels_append(['{}_curlFFT({})'.format(i+1,data['label'])
for i in range(np.prod(np.array(data['shape'])))]) # extend ASCII header with new labels
table.head_write()
# --------------- figure out size and grid ---------------------------------------------------------
table.data_readArray()
grid,size = damask.util.coordGridAndSize(table.data[:,table.label_indexrange(options.pos)])
# ------------------------------------------ process value field -----------------------------------
stack = [table.data]
for data in active:
# we need to reverse order here, because x is fastest, i.e. rightmost, but leftmost in our x,y,z notation
stack.append(curlFFT(size[::-1],
table.data[:,table.label_indexrange(data['label'])].
reshape(grid[::-1].tolist()+data['shape'])))
# ------------------------------------------ output result -----------------------------------------
if len(stack) > 1: table.data = np.hstack(tuple(stack))
table.data_writeArray('%.12g')
# ------------------------------------------ output finalization -----------------------------------
table.close() # close input ASCII table (works for stdin)
for label in options.labels:
field = table.get(label)
shape = (3,) if np.prod(field.shape)//np.prod(grid) == 3 else (3,3) # vector or tensor
field = field.reshape(np.append(grid[::-1],shape))
table.add('curlFFT({})'.format(label),
damask.grid_filters.curl(size[::-1],field).reshape((-1,np.prod(shape))),
scriptID+' '+' '.join(sys.argv[1:]))
table.to_ASCII(sys.stdout if name is None else name)

View File

@ -2,6 +2,7 @@
import os
import sys
from io import StringIO
from optparse import OptionParser
import numpy as np
@ -30,7 +31,7 @@ def derivative(coordinates,what):
(coordinates[0] - coordinates[1])
result[-1,:] = (what[-1,:] - what[-2,:]) / \
(coordinates[-1] - coordinates[-2])
return result
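
Only the one-sided boundary rows of derivative() survive in this hunk; the interior presumably uses central differences. A self-contained reconstruction under that assumption (coordinates arrives as an (N,1) column, as in the script below):

import numpy as np

def derivative(coordinates,what):
    """Numerical derivative: central differences inside, one-sided at the ends (sketch)."""
    result = np.empty_like(what)
    result[1:-1,:] = (what[2:,:] - what[:-2,:]) / \
                     (coordinates[2:] - coordinates[:-2])                                 # central difference
    result[ 0,:]   = (what[ 0,:] - what[ 1,:]) / (coordinates[ 0] - coordinates[ 1])     # forward difference
    result[-1,:]   = (what[-1,:] - what[-2,:]) / (coordinates[-1] - coordinates[-2])     # backward difference
    return result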
@ -48,78 +49,26 @@ parser.add_option('-c','--coordinates',
type = 'string', metavar='string',
help = 'heading of coordinate column')
parser.add_option('-l','--label',
dest = 'label',
dest = 'labels',
action = 'extend', metavar = '<string LIST>',
help = 'heading of column(s) to differentiate')
(options,filenames) = parser.parse_args()
if options.coordinates is None:
parser.error('no coordinate column specified.')
if options.label is None:
parser.error('no data column specified.')
# --- loop over input files -------------------------------------------------------------------------
if filenames == []: filenames = [None]
if options.coordinates is None:
parser.error('no coordinate column specified.')
if options.labels is None:
parser.error('no data column specified.')
for name in filenames:
try: table = damask.ASCIItable(name = name,
buffered = False)
except: continue
damask.util.report(scriptName,name)
damask.util.report(scriptName,name)
# ------------------------------------------ read header ------------------------------------------
table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
for label in options.labels:
table.add('d({})/d({})'.format(label,options.coordinates),
derivative(table.get(options.coordinates),table.get(label)),
scriptID+' '+' '.join(sys.argv[1:]))
table.head_read()
# ------------------------------------------ sanity checks ----------------------------------------
errors = []
remarks = []
columns = []
dims = []
if table.label_dimension(options.coordinates) != 1:
errors.append('coordinate column {} is not scalar.'.format(options.coordinates))
for what in options.label:
dim = table.label_dimension(what)
if dim < 0: remarks.append('column {} not found...'.format(what))
else:
dims.append(dim)
columns.append(table.label_index(what))
table.labels_append('d({})/d({})'.format(what,options.coordinates) if dim == 1 else
['{}_d({})/d({})'.format(i+1,what,options.coordinates) for i in range(dim)] ) # extend ASCII header with new labels
if remarks != []: damask.util.croak(remarks)
if errors != []:
damask.util.croak(errors)
table.close(dismiss = True)
continue
# ------------------------------------------ assemble header --------------------------------------
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
table.head_write()
# ------------------------------------------ process data ------------------------------------------
table.data_readArray()
mask = []
for col,dim in zip(columns,dims): mask += range(col,col+dim) # isolate data columns to differentiate
differentiated = derivative(table.data[:,table.label_index(options.coordinates)].reshape((len(table.data),1)),
table.data[:,mask]) # calculate numerical derivative
table.data = np.hstack((table.data,differentiated))
# ------------------------------------------ output result -----------------------------------------
table.data_writeArray()
# ------------------------------------------ output finalization -----------------------------------
table.close() # close ASCII tables
table.to_ASCII(sys.stdout if name is None else name)

View File

@ -2,10 +2,10 @@
import os
import sys
from io import StringIO
from optparse import OptionParser
import numpy as np
import scipy.ndimage
import damask
@ -14,79 +14,6 @@ scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version])
#--------------------------------------------------------------------------------------------------
def cell2node(cellData,grid):
nodeData = 0.0
datalen = np.array(cellData.shape[3:]).prod()
for i in range(datalen):
node = scipy.ndimage.convolve(cellData.reshape(tuple(grid[::-1])+(datalen,))[...,i],
np.ones((2,2,2))/8., # 2x2x2 neighborhood of cells
mode = 'wrap',
origin = -1, # offset to have cell origin as center
) # now averaged at cell origins
node = np.append(node,node[np.newaxis,0,:,:,...],axis=0) # wrap along z
node = np.append(node,node[:,0,np.newaxis,:,...],axis=1) # wrap along y
node = np.append(node,node[:,:,0,np.newaxis,...],axis=2) # wrap along x
nodeData = node[...,np.newaxis] if i==0 else np.concatenate((nodeData,node[...,np.newaxis]),axis=-1)
return nodeData
#--------------------------------------------------------------------------------------------------
def displacementAvgFFT(F,grid,size,nodal=False,transformed=False):
"""Calculate average cell center (or nodal) displacement for deformation gradient field specified in each grid cell"""
if nodal:
x, y, z = np.meshgrid(np.linspace(0,size[2],1+grid[2]),
np.linspace(0,size[1],1+grid[1]),
np.linspace(0,size[0],1+grid[0]),
indexing = 'ij')
else:
delta = size/grid*0.5
x, y, z = np.meshgrid(np.linspace(delta[2],size[2]-delta[2],grid[2]),
np.linspace(delta[1],size[1]-delta[1],grid[1]),
np.linspace(delta[0],size[0]-delta[0],grid[0]),
indexing = 'ij')
origCoords = np.concatenate((z[:,:,:,None],y[:,:,:,None],x[:,:,:,None]),axis = 3)
F_fourier = F if transformed else np.fft.rfftn(F,axes=(0,1,2)) # transform or use provided data
Favg = np.real(F_fourier[0,0,0,:,:])/grid.prod() # take zero freq for average
avgDisplacement = np.einsum('ml,ijkl->ijkm',Favg-np.eye(3),origCoords) # dX = Favg.X
return avgDisplacement
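
F_fourier[0,0,0]/grid.prod() is the zero-frequency coefficient, i.e. the volume average Favg; the average displacement then follows from dX = (Favg - 1).X, as in the einsum above. Numerically:

import numpy as np

Favg = np.array([[1.1, 0., 0.],
                 [0. , 1., 0.],
                 [0. , 0., 1.]])      # 10 % stretch along x
X    = np.array([2., 3., 4.])         # undeformed position
(Favg - np.eye(3)) @ X                # -> [0.2 0.  0. ], the average displacement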
#--------------------------------------------------------------------------------------------------
def displacementFluctFFT(F,grid,size,nodal=False,transformed=False):
"""Calculate cell center (or nodal) displacement for deformation gradient field specified in each grid cell"""
integrator = 0.5j * size / np.pi
kk, kj, ki = np.meshgrid(np.where(np.arange(grid[2])>grid[2]//2,np.arange(grid[2])-grid[2],np.arange(grid[2])),
np.where(np.arange(grid[1])>grid[1]//2,np.arange(grid[1])-grid[1],np.arange(grid[1])),
np.arange(grid[0]//2+1),
indexing = 'ij')
k_s = np.concatenate((ki[:,:,:,None],kj[:,:,:,None],kk[:,:,:,None]),axis = 3)
k_sSquared = np.einsum('...l,...l',k_s,k_s)
k_sSquared[0,0,0] = 1.0 # ignore global average frequency
#--------------------------------------------------------------------------------------------------
# integration in Fourier space
displacement_fourier = -np.einsum('ijkml,ijkl,l->ijkm',
F if transformed else np.fft.rfftn(F,axes=(0,1,2)),
k_s,
integrator,
) / k_sSquared[...,np.newaxis]
#--------------------------------------------------------------------------------------------------
# backtransformation to real space
displacement = np.fft.irfftn(displacement_fourier,grid[::-1],axes=(0,1,2))
return cell2node(displacement,grid) if nodal else displacement
# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------
@ -100,7 +27,7 @@ Outputs at cell centers or cell nodes (into separate file).
parser.add_option('-f',
'--defgrad',
dest = 'defgrad',
dest = 'f',
metavar = 'string',
help = 'label of deformation gradient [%default]')
parser.add_option('-p',
@ -113,108 +40,34 @@ parser.add_option('--nodal',
action = 'store_true',
help = 'output nodal (instead of cell-centered) displacements')
parser.set_defaults(defgrad = 'f',
pos = 'pos',
parser.set_defaults(f = 'f',
pos = 'pos',
)
(options,filenames) = parser.parse_args()
# --- loop over input files -------------------------------------------------------------------------
if filenames == []: filenames = [None]
for name in filenames:
outname = (os.path.splitext(name)[0] +
'_nodal' +
os.path.splitext(name)[1]) if (options.nodal and name) else None
try: table = damask.ASCIItable(name = name,
outname = outname,
buffered = False)
except: continue
damask.util.report(scriptName,'{}{}'.format(name if name else '',
' --> {}'.format(outname) if outname else ''))
damask.util.report(scriptName,name)
# ------------------------------------------ read header ------------------------------------------
table.head_read()
# ------------------------------------------ sanity checks ----------------------------------------
errors = []
remarks = []
if table.label_dimension(options.defgrad) != 9:
errors.append('deformation gradient "{}" is not a 3x3 tensor.'.format(options.defgrad))
coordDim = table.label_dimension(options.pos)
if not 3 >= coordDim >= 1:
errors.append('coordinates "{}" need to have one, two, or three dimensions.'.format(options.pos))
elif coordDim < 3:
remarks.append('appending {} dimension{} to coordinates "{}"...'.format(3-coordDim,
's' if coordDim < 2 else '',
options.pos))
if remarks != []: damask.util.croak(remarks)
if errors != []:
damask.util.croak(errors)
table.close(dismiss=True)
continue
# --------------- figure out size and grid ---------------------------------------------------------
table.data_readArray([options.defgrad,options.pos])
table.data_rewind()
if len(table.data.shape) < 2: table.data.shape += (1,) # expand to 2D shape
if table.data[:,9:].shape[1] < 3:
table.data = np.hstack((table.data,
np.zeros((table.data.shape[0],
3-table.data[:,9:].shape[1]),dtype='f'))) # fill coords up to 3D with zeros
grid,size = damask.util.coordGridAndSize(table.data[:,9:12])
N = grid.prod()
if N != len(table.data): errors.append('data count {} does not match grid {}x{}x{}.'.format(N,*grid))
if errors != []:
damask.util.croak(errors)
table.close(dismiss = True)
continue
# ------------------------------------------ process data ------------------------------------------
F_fourier = np.fft.rfftn(table.data[:,:9].reshape(grid[2],grid[1],grid[0],3,3),axes=(0,1,2)) # perform transform only once...
fluctDisplacement = displacementFluctFFT(F_fourier,grid,size,options.nodal,transformed=True)
avgDisplacement = displacementAvgFFT (F_fourier,grid,size,options.nodal,transformed=True)
# ------------------------------------------ assemble header ---------------------------------------
if options.nodal:
table.info_clear()
table.labels_clear()
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
table.labels_append((['{}_pos' .format(i+1) for i in range(3)] if options.nodal else []) +
['{}_avg({}).{}' .format(i+1,options.defgrad,options.pos) for i in range(3)] +
['{}_fluct({}).{}'.format(i+1,options.defgrad,options.pos) for i in range(3)] )
table.head_write()
# ------------------------------------------ output data -------------------------------------------
Zrange = np.linspace(0,size[2],1+grid[2]) if options.nodal else range(grid[2])
Yrange = np.linspace(0,size[1],1+grid[1]) if options.nodal else range(grid[1])
Xrange = np.linspace(0,size[0],1+grid[0]) if options.nodal else range(grid[0])
for i,z in enumerate(Zrange):
for j,y in enumerate(Yrange):
for k,x in enumerate(Xrange):
if options.nodal: table.data_clear()
else: table.data_read()
table.data_append([x,y,z] if options.nodal else [])
table.data_append(list( avgDisplacement[i,j,k,:]))
table.data_append(list(fluctDisplacement[i,j,k,:]))
table.data_write()
# ------------------------------------------ output finalization -----------------------------------
table.close() # close ASCII tables
table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
grid,size,origin = damask.grid_filters.cell_coord0_2_DNA(table.get(options.pos))
F = table.get(options.f).reshape(np.append(grid[::-1],(3,3)))
if options.nodal:
table = damask.Table(damask.grid_filters.node_coord0(grid[::-1],size[::-1]).reshape((-1,3)),
{'pos':(3,)})
table.add('avg({}).{}'.format(options.f,options.pos),
damask.grid_filters.node_displacement_avg(size[::-1],F).reshape((-1,3)),
scriptID+' '+' '.join(sys.argv[1:]))
table.add('fluct({}).{}'.format(options.f,options.pos),
damask.grid_filters.node_displacement_fluct(size[::-1],F).reshape((-1,3)),
scriptID+' '+' '.join(sys.argv[1:]))
table.to_ASCII(sys.stdout if name is None else os.path.splitext(name)[0]+'_nodal.txt')
else:
table.add('avg({}).{}'.format(options.f,options.pos),
damask.grid_filters.cell_displacement_avg(size[::-1],F).reshape((-1,3)),
scriptID+' '+' '.join(sys.argv[1:]))
table.add('fluct({}).{}'.format(options.f,options.pos),
damask.grid_filters.cell_displacement_fluct(size[::-1],F).reshape((-1,3)),
scriptID+' '+' '.join(sys.argv[1:]))
table.to_ASCII(sys.stdout if name is None else name)
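
A smoke test of the two grid_filters calls introduced above (names taken verbatim from the hunk; an assumption-laden sketch, not shipped code): a spatially constant F = 1 must produce zero total displacement.

import numpy as np
import damask

grid = np.array([4,4,4])
size = np.array([1.,1.,1.])
F = np.broadcast_to(np.eye(3),tuple(grid)+(3,3)).copy()        # identity everywhere
u = damask.grid_filters.node_displacement_avg  (size,F) \
  + damask.grid_filters.node_displacement_fluct(size,F)        # avg + fluct = total displacement
assert np.allclose(u,0.0)                                      # nothing deforms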

View File

@ -2,6 +2,7 @@
import os
import sys
from io import StringIO
from optparse import OptionParser
import numpy as np
@ -12,53 +13,14 @@ import damask
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version])
def merge_dicts(*dict_args):
"""Given any number of dicts, shallow copy and merge into a new dict, with precedence going to key value pairs in latter dicts."""
result = {}
for dictionary in dict_args:
result.update(dictionary)
return result
def divFFT(geomdim,field):
"""Calculate divergence of a vector or tensor field by transforming into Fourier space."""
shapeFFT = np.array(np.shape(field))[0:3]
grid = np.array(np.shape(field)[2::-1])
N = grid.prod() # field size
n = np.array(np.shape(field)[3:]).prod() # data size
field_fourier = np.fft.rfftn(field,axes=(0,1,2),s=shapeFFT)
div_fourier = np.empty(field_fourier.shape[0:len(np.shape(field))-1],'c16')
# differentiation in Fourier space
TWOPIIMG = 2.0j*np.pi
einsums = {
3:'ijkl,ijkl->ijk', # vector, 3 -> 1
9:'ijkm,ijklm->ijkl', # tensor, 3x3 -> 3
}
k_sk = np.where(np.arange(grid[2])>grid[2]//2,np.arange(grid[2])-grid[2],np.arange(grid[2]))/geomdim[0]
if grid[2]%2 == 0: k_sk[grid[2]//2] = 0 # Nyquist freq=0 for even grid (Johnson, MIT, 2011)
k_sj = np.where(np.arange(grid[1])>grid[1]//2,np.arange(grid[1])-grid[1],np.arange(grid[1]))/geomdim[1]
if grid[1]%2 == 0: k_sj[grid[1]//2] = 0 # Nyquist freq=0 for even grid (Johnson, MIT, 2011)
k_si = np.arange(grid[0]//2+1)/geomdim[2]
kk, kj, ki = np.meshgrid(k_sk,k_sj,k_si,indexing = 'ij')
k_s = np.concatenate((ki[:,:,:,None],kj[:,:,:,None],kk[:,:,:,None]),axis = 3).astype('c16')
div_fourier = np.einsum(einsums[n],k_s,field_fourier)*TWOPIIMG
return np.fft.irfftn(div_fourier,axes=(0,1,2),s=shapeFFT).reshape([N,n//3])
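
divFFT, curlFFT and gradFFT all differentiate by multiplying with i2πk in Fourier space. The scheme in one dimension, where it is exact for band-limited periodic data:

import numpy as np

N  = 16
x  = np.arange(N)/N
v  = np.sin(2*np.pi*x)
k  = np.fft.rfftfreq(N,d=1./N)                     # integer frequencies 0..N/2
dv = np.fft.irfft(2j*np.pi*k*np.fft.rfft(v),N)
assert np.allclose(dv, 2*np.pi*np.cos(2*np.pi*x))  # d/dx sin(2*pi*x)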
# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------
parser = OptionParser(option_class=damask.extendableOption, usage='%prog option(s) [ASCIItable(s)]', description = """
Add column(s) containing curl of requested column(s).
Operates on periodic ordered three-dimensional data sets
of vector and tensor fields.
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
Add column(s) containing divergence of requested column(s).
Operates on periodic ordered three-dimensional data sets of vector and tensor fields.
""", version = scriptID)
parser.add_option('-p','--pos','--periodiccellcenter',
@ -66,95 +28,30 @@ parser.add_option('-p','--pos','--periodiccellcenter',
type = 'string', metavar = 'string',
help = 'label of coordinates [%default]')
parser.add_option('-l','--label',
dest = 'data',
dest = 'labels',
action = 'extend', metavar = '<string LIST>',
help = 'label(s) of field values')
parser.set_defaults(pos = 'pos',
)
(options,filenames) = parser.parse_args()
if options.data is None: parser.error('no data column specified.')
# --- define possible data types -------------------------------------------------------------------
datatypes = {
3: {'name': 'vector',
'shape': [3],
},
9: {'name': 'tensor',
'shape': [3,3],
},
}
# --- loop over input files ------------------------------------------------------------------------
if filenames == []: filenames = [None]
if options.labels is None: parser.error('no data column specified.')
for name in filenames:
try: table = damask.ASCIItable(name = name,buffered = False)
except: continue
damask.util.report(scriptName,name)
damask.util.report(scriptName,name)
# --- interpret header ----------------------------------------------------------------------------
table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
grid,size,origin = damask.grid_filters.cell_coord0_2_DNA(table.get(options.pos))
table.head_read()
remarks = []
errors = []
active = []
coordDim = table.label_dimension(options.pos)
if coordDim != 3:
errors.append('coordinates "{}" must be three-dimensional.'.format(options.pos))
else: coordCol = table.label_index(options.pos)
for me in options.data:
dim = table.label_dimension(me)
if dim in datatypes:
active.append(merge_dicts({'label':me},datatypes[dim]))
remarks.append('differentiating {} "{}"...'.format(datatypes[dim]['name'],me))
else:
remarks.append('skipping "{}" of dimension {}...'.format(me,dim) if dim != -1 else \
'"{}" not found...'.format(me) )
if remarks != []: damask.util.croak(remarks)
if errors != []:
damask.util.croak(errors)
table.close(dismiss = True)
continue
# ------------------------------------------ assemble header --------------------------------------
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
for data in active:
table.labels_append(['divFFT({})'.format(data['label']) if data['shape'] == [3] \
else '{}_divFFT({})'.format(i+1,data['label'])
for i in range(np.prod(np.array(data['shape']))//3)]) # extend ASCII header with new labels
table.head_write()
# --------------- figure out size and grid ---------------------------------------------------------
table.data_readArray()
grid,size = damask.util.coordGridAndSize(table.data[:,table.label_indexrange(options.pos)])
# ------------------------------------------ process value field -----------------------------------
stack = [table.data]
for data in active:
# we need to reverse order here, because x is fastest, i.e. rightmost, but leftmost in our x,y,z notation
stack.append(divFFT(size[::-1],
table.data[:,table.label_indexrange(data['label'])].
reshape(grid[::-1].tolist()+data['shape'])))
# ------------------------------------------ output result -----------------------------------------
if len(stack) > 1: table.data = np.hstack(tuple(stack))
table.data_writeArray('%.12g')
# ------------------------------------------ output finalization -----------------------------------
table.close() # close input ASCII table (works for stdin)
for label in options.labels:
field = table.get(label)
shape = (3,) if np.prod(field.shape)//np.prod(grid) == 3 else (3,3) # vector or tensor
field = field.reshape(np.append(grid[::-1],shape))
table.add('divFFT({})'.format(label),
damask.grid_filters.divergence(size[::-1],field).reshape((-1,np.prod(shape)//3)),
scriptID+' '+' '.join(sys.argv[1:]))
table.to_ASCII(sys.stdout if name is None else name)

View File

@ -2,6 +2,7 @@
import os
import sys
from io import StringIO
from optparse import OptionParser
import itertools
@ -121,13 +122,14 @@ parser.set_defaults(pos = 'pos',
)
(options,filenames) = parser.parse_args()
if filenames == []: filenames = [None]
if options.type is None:
parser.error('no feature type selected.')
parser.error('no feature type selected.')
if not set(options.type).issubset(set(list(itertools.chain(*map(lambda x: x['names'],features))))):
parser.error('type must be chosen from (%s).'%(', '.join(map(lambda x:'|'.join(x['names']),features))) )
parser.error('type must be chosen from (%s).'%(', '.join(map(lambda x:'|'.join(x['names']),features))) )
if 'biplane' in options.type and 'boundary' in options.type:
parser.error('only one from aliases "biplane" and "boundary" possible.')
parser.error('only one from aliases "biplane" and "boundary" possible.')
feature_list = []
for i,feature in enumerate(features):
@ -137,104 +139,49 @@ for i,feature in enumerate(features):
feature_list.append(i) # remember valid features
break
# --- loop over input files -------------------------------------------------------------------------
if filenames == []: filenames = [None]
for name in filenames:
try: table = damask.ASCIItable(name = name, buffered = False)
except: continue
damask.util.report(scriptName,name)
damask.util.report(scriptName,name)
# ------------------------------------------ read header ------------------------------------------
table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
grid,size,origin = damask.grid_filters.cell_coord0_2_DNA(table.get(options.pos))
table.head_read()
neighborhood = neighborhoods[options.neighborhood]
diffToNeighbor = np.empty(list(grid+2)+[len(neighborhood)],'i')
microstructure = periodic_3Dpad(table.get(options.id).astype('i').reshape(grid,order='F'))
# ------------------------------------------ sanity checks ----------------------------------------
errors = []
remarks = []
if not 3 >= table.label_dimension(options.pos) >= 1:
errors.append('coordinates "{}" need to have one, two, or three dimensions.'.format(options.pos))
if table.label_dimension(options.id) != 1: errors.append('grain identifier {} not found.'.format(options.id))
else: idCol = table.label_index(options.id)
if remarks != []:
damask.util.croak(remarks)
remarks = []
if errors != []:
damask.util.croak(errors)
table.close(dismiss = True)
continue
# ------------------------------------------ assemble header ---------------------------------------
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
for feature in feature_list:
table.labels_append('ED_{}({})'.format(features[feature]['names'][0],options.id)) # extend ASCII header with new labels
table.head_write()
# --------------- figure out size and grid ---------------------------------------------------------
table.data_readArray()
grid,size = damask.util.coordGridAndSize(table.data[:,table.label_indexrange(options.pos)])
N = grid.prod()
if N != len(table.data): errors.append('data count {} does not match grid {}.'.format(N,'x'.join(map(str,grid))))
else: remarks.append('grid: {}x{}x{}'.format(*grid))
if remarks != []: damask.util.croak(remarks)
if errors != []:
damask.util.croak(errors)
table.close(dismiss = True)
continue
# ------------------------------------------ process value field -----------------------------------
stack = [table.data]
neighborhood = neighborhoods[options.neighborhood]
diffToNeighbor = np.empty(list(grid+2)+[len(neighborhood)],'i')
microstructure = periodic_3Dpad(table.data[:,idCol].astype('i').reshape(grid,order='F'))
for i,p in enumerate(neighborhood):
stencil = np.zeros((3,3,3),'i')
stencil[1,1,1] = -1
stencil[p[0]+1,
p[1]+1,
p[2]+1] = 1
diffToNeighbor[:,:,:,i] = ndimage.convolve(microstructure,stencil) # compare ID at each point...
for i,p in enumerate(neighborhood):
stencil = np.zeros((3,3,3),'i')
stencil[1,1,1] = -1
stencil[p[0]+1,
p[1]+1,
p[2]+1] = 1
diffToNeighbor[:,:,:,i] = ndimage.convolve(microstructure,stencil) # compare ID at each point...
# ...to every one in the specified neighborhood
# for same IDs at both locations ==> 0
diffToNeighbor = np.sort(diffToNeighbor) # sort diff such that number of changes in diff (steps)...
diffToNeighbor = np.sort(diffToNeighbor) # sort diff such that number of changes in diff (steps)...
# ...reflects number of unique neighbors
uniques = np.where(diffToNeighbor[1:-1,1:-1,1:-1,0] != 0, 1,0) # initialize unique value counter (exclude myself [= 0])
uniques = np.where(diffToNeighbor[1:-1,1:-1,1:-1,0] != 0, 1,0) # initialize unique value counter (exclude myself [= 0])
for i in range(1,len(neighborhood)): # check remaining points in neighborhood
uniques += np.where(np.logical_and(
diffToNeighbor[1:-1,1:-1,1:-1,i] != 0, # not myself?
diffToNeighbor[1:-1,1:-1,1:-1,i] != diffToNeighbor[1:-1,1:-1,1:-1,i-1],
), # flip of ID difference detected?
1,0) # count that flip
for i in range(1,len(neighborhood)): # check remaining points in neighborhood
uniques += np.where(np.logical_and(
diffToNeighbor[1:-1,1:-1,1:-1,i] != 0, # not myself?
diffToNeighbor[1:-1,1:-1,1:-1,i] != diffToNeighbor[1:-1,1:-1,1:-1,i-1],
), # flip of ID difference detected?
1,0) # count that flip
distance = np.ones((len(feature_list),grid[0],grid[1],grid[2]),'d')
distance = np.ones((len(feature_list),grid[0],grid[1],grid[2]),'d')
for i,feature_id in enumerate(feature_list):
distance[i,:,:,:] = np.where(uniques >= features[feature_id]['aliens'],0.0,1.0) # seed with 0.0 when enough unique neighbor IDs are present
distance[i,:,:,:] = ndimage.morphology.distance_transform_edt(distance[i,:,:,:])*[options.scale]*3
for i,feature_id in enumerate(feature_list):
distance[i,:,:,:] = np.where(uniques >= features[feature_id]['aliens'],0.0,1.0) # seed with 0.0 when enough unique neighbor IDs are present
distance[i,:,:,:] = ndimage.morphology.distance_transform_edt(distance[i,:,:,:])*[options.scale]*3
distance = distance.reshape([len(feature_list),grid.prod(),1],order='F')
for i in range(len(feature_list)):
stack.append(distance[i,:])
distance = distance.reshape([len(feature_list),grid.prod(),1],order='F')
# ------------------------------------------ output result -----------------------------------------
if len(stack) > 1: table.data = np.hstack(tuple(stack))
table.data_writeArray('%.12g')
for i,feature in enumerate(feature_list):
table.add('ED_{}({})'.format(features[feature]['names'][0],options.id),
distance[i,:],
scriptID+' '+' '.join(sys.argv[1:]))
# ------------------------------------------ output finalization -----------------------------------
table.close() # close input ASCII table (works for stdin)
table.to_ASCII(sys.stdout if name is None else name)
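
distance_transform_edt measures, for every nonzero cell, the Euclidean distance to the nearest cell seeded with 0.0; scaling by [options.scale]*3 then converts voxels to physical units. In miniature:

import numpy as np
from scipy import ndimage

mask = np.ones((5,5))
mask[2,2] = 0.0                                        # one "feature" voxel seeded with 0.0
ndimage.morphology.distance_transform_edt(mask)[2,:]   # -> [2. 1. 0. 1. 2.]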

View File

@ -2,9 +2,9 @@
import os
import sys
from io import StringIO
from optparse import OptionParser
import numpy as np
from scipy import ndimage
import damask
@ -30,7 +30,7 @@ parser.add_option('-p','--pos','--periodiccellcenter',
type = 'string', metavar = 'string',
help = 'label of coordinates [%default]')
parser.add_option('-s','--scalar',
dest = 'scalar',
dest = 'labels',
action = 'extend', metavar = '<string LIST>',
help = 'label(s) of scalar field values')
parser.add_option('-o','--order',
@ -56,78 +56,21 @@ parser.set_defaults(pos = 'pos',
)
(options,filenames) = parser.parse_args()
if options.scalar is None:
parser.error('no data column specified.')
# --- loop over input files ------------------------------------------------------------------------
if filenames == []: filenames = [None]
if options.labels is None: parser.error('no data column specified.')
for name in filenames:
try: table = damask.ASCIItable(name = name,buffered = False)
except: continue
damask.util.report(scriptName,name)
damask.util.report(scriptName,name)
# ------------------------------------------ read header ------------------------------------------
table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
damask.grid_filters.coord0_check(table.get(options.pos))
table.head_read()
for label in options.labels:
table.add('Gauss{}({})'.format(options.sigma,label),
ndimage.filters.gaussian_filter(table.get(label).reshape((-1)),
options.sigma,options.order,
mode = 'wrap' if options.periodic else 'nearest'),
scriptID+' '+' '.join(sys.argv[1:]))
# ------------------------------------------ sanity checks ----------------------------------------
items = {
'scalar': {'dim': 1, 'shape': [1], 'labels':options.scalar, 'active':[], 'column': []},
}
errors = []
remarks = []
column = {}
if table.label_dimension(options.pos) != 3: errors.append('coordinates {} are not a vector.'.format(options.pos))
else: colCoord = table.label_index(options.pos)
for type, data in items.items():
for what in (data['labels'] if data['labels'] is not None else []):
dim = table.label_dimension(what)
if dim != data['dim']: remarks.append('column {} is not a {}.'.format(what,type))
else:
items[type]['active'].append(what)
items[type]['column'].append(table.label_index(what))
if remarks != []: damask.util.croak(remarks)
if errors != []:
damask.util.croak(errors)
table.close(dismiss = True)
continue
# ------------------------------------------ assemble header --------------------------------------
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
for type, data in items.items():
for label in data['active']:
table.labels_append(['Gauss{}({})'.format(options.sigma,label)]) # extend ASCII header with new labels
table.head_write()
# --------------- figure out size and grid ---------------------------------------------------------
table.data_readArray()
grid,size = damask.util.coordGridAndSize(table.data[:,table.label_indexrange(options.pos)])
# ------------------------------------------ process value field -----------------------------------
stack = [table.data]
for type, data in items.items():
for i,label in enumerate(data['active']):
stack.append(ndimage.filters.gaussian_filter(table.data[:,data['column'][i]],
options.sigma,options.order,
mode = 'wrap' if options.periodic else 'nearest'
).reshape([table.data.shape[0],1])
)
# ------------------------------------------ output result -----------------------------------------
if len(stack) > 1: table.data = np.hstack(tuple(stack))
table.data_writeArray('%.12g')
# ------------------------------------------ output finalization -----------------------------------
table.close() # close input ASCII table (works for stdin)
table.to_ASCII(sys.stdout if name is None else name)
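
mode='wrap' makes the Gaussian kernel treat the data as periodic (matching the periodic grids these scripts operate on), while 'nearest' clamps at the edges. The difference on a spike at the boundary:

import numpy as np
from scipy import ndimage

data = np.zeros(8)
data[0] = 1.0
ndimage.filters.gaussian_filter(data,1.0,0,mode = 'wrap')      # periodic: the spike smears onto the far end too
ndimage.filters.gaussian_filter(data,1.0,0,mode = 'nearest')   # clamped: edge value replicated, nothing wraps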

View File

@ -2,6 +2,7 @@
import os
import sys
from io import StringIO
from optparse import OptionParser
import numpy as np
@ -12,44 +13,6 @@ import damask
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version])
def merge_dicts(*dict_args):
"""Given any number of dicts, shallow copy and merge into a new dict, with precedence going to key value pairs in latter dicts."""
result = {}
for dictionary in dict_args:
result.update(dictionary)
return result
def gradFFT(geomdim,field):
"""Calculate gradient of a vector or scalar field by transforming into Fourier space."""
shapeFFT = np.array(np.shape(field))[0:3]
grid = np.array(np.shape(field)[2::-1])
N = grid.prod() # field size
n = np.array(np.shape(field)[3:]).prod() # data size
field_fourier = np.fft.rfftn(field,axes=(0,1,2),s=shapeFFT)
grad_fourier = np.empty(field_fourier.shape+(3,),'c16')
# differentiation in Fourier space
TWOPIIMG = 2.0j*np.pi
einsums = {
1:'ijkl,ijkm->ijkm', # scalar, 1 -> 3
3:'ijkl,ijkm->ijklm', # vector, 3 -> 3x3
}
k_sk = np.where(np.arange(grid[2])>grid[2]//2,np.arange(grid[2])-grid[2],np.arange(grid[2]))/geomdim[0]
if grid[2]%2 == 0: k_sk[grid[2]//2] = 0 # Nyquist freq=0 for even grid (Johnson, MIT, 2011)
k_sj = np.where(np.arange(grid[1])>grid[1]//2,np.arange(grid[1])-grid[1],np.arange(grid[1]))/geomdim[1]
if grid[1]%2 == 0: k_sj[grid[1]//2] = 0 # Nyquist freq=0 for even grid (Johnson, MIT, 2011)
k_si = np.arange(grid[0]//2+1)/geomdim[2]
kk, kj, ki = np.meshgrid(k_sk,k_sj,k_si,indexing = 'ij')
k_s = np.concatenate((ki[:,:,:,None],kj[:,:,:,None],kk[:,:,:,None]),axis = 3).astype('c16')
grad_fourier = np.einsum(einsums[n],field_fourier,k_s)*TWOPIIMG
return np.fft.irfftn(grad_fourier,axes=(0,1,2),s=shapeFFT).reshape([N,3*n])
# --------------------------------------------------------------------
# MAIN
@ -57,9 +20,7 @@ def gradFFT(geomdim,field):
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
Add column(s) containing gradient of requested column(s).
Operates on periodic ordered three-dimensional data sets
of vector and scalar fields.
Operates on periodic ordered three-dimensional data sets of scalar and vector fields.
""", version = scriptID)
parser.add_option('-p','--pos','--periodiccellcenter',
@ -67,7 +28,7 @@ parser.add_option('-p','--pos','--periodiccellcenter',
type = 'string', metavar = 'string',
help = 'label of coordinates [%default]')
parser.add_option('-l','--label',
dest = 'data',
dest = 'labels',
action = 'extend', metavar = '<string LIST>',
help = 'label(s) of field values')
@ -75,85 +36,22 @@ parser.set_defaults(pos = 'pos',
)
(options,filenames) = parser.parse_args()
if options.data is None: parser.error('no data column specified.')
# --- define possible data types -------------------------------------------------------------------
datatypes = {
1: {'name': 'scalar',
'shape': [1],
},
3: {'name': 'vector',
'shape': [3],
},
}
# --- loop over input files ------------------------------------------------------------------------
if filenames == []: filenames = [None]
if options.labels is None: parser.error('no data column specified.')
for name in filenames:
try: table = damask.ASCIItable(name = name,buffered = False)
except: continue
damask.util.report(scriptName,name)
damask.util.report(scriptName,name)
# --- interpret header ----------------------------------------------------------------------------
table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
grid,size,origin = damask.grid_filters.cell_coord0_2_DNA(table.get(options.pos))
table.head_read()
remarks = []
errors = []
active = []
coordDim = table.label_dimension(options.pos)
if coordDim != 3:
errors.append('coordinates "{}" must be three-dimensional.'.format(options.pos))
else: coordCol = table.label_index(options.pos)
for me in options.data:
dim = table.label_dimension(me)
if dim in datatypes:
active.append(merge_dicts({'label':me},datatypes[dim]))
remarks.append('differentiating {} "{}"...'.format(datatypes[dim]['name'],me))
else:
remarks.append('skipping "{}" of dimension {}...'.format(me,dim) if dim != -1 else \
'"{}" not found...'.format(me) )
if remarks != []: damask.util.croak(remarks)
if errors != []:
damask.util.croak(errors)
table.close(dismiss = True)
continue
# ------------------------------------------ assemble header --------------------------------------
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
for data in active:
table.labels_append(['{}_gradFFT({})'.format(i+1,data['label'])
for i in range(coordDim*np.prod(np.array(data['shape'])))]) # extend ASCII header with new labels
table.head_write()
# --------------- figure out size and grid ---------------------------------------------------------
table.data_readArray()
grid,size = damask.util.coordGridAndSize(table.data[:,table.label_indexrange(options.pos)])
# ------------------------------------------ process value field -----------------------------------
stack = [table.data]
for data in active:
# we need to reverse order here, because x is fastest, i.e. rightmost, but leftmost in our x,y,z notation
stack.append(gradFFT(size[::-1],
table.data[:,table.label_indexrange(data['label'])].
reshape(grid[::-1].tolist()+data['shape'])))
# ------------------------------------------ output result -----------------------------------------
if len(stack) > 1: table.data = np.hstack(tuple(stack))
table.data_writeArray('%.12g')
# ------------------------------------------ output finalization -----------------------------------
table.close() # close input ASCII table (works for stdin)
for label in options.labels:
field = table.get(label)
shape = (1,) if np.prod(field.shape)//np.prod(grid) == 1 else (3,) # scalar or vector
field = field.reshape(np.append(grid[::-1],shape))
table.add('gradFFT({})'.format(label),
damask.grid_filters.gradient(size[::-1],field).reshape((-1,np.prod(shape)*3)),
scriptID+' '+' '.join(sys.argv[1:]))
table.to_ASCII(sys.stdout if name is None else name)

View File

@ -2,6 +2,7 @@
import os
import sys
from io import StringIO
from optparse import OptionParser
import numpy as np
@ -43,54 +44,25 @@ parser.set_defaults(pole = (0.0,0.0,1.0),
)
(options, filenames) = parser.parse_args()
if filenames == []: filenames = [None]
# damask.Orientation requires Bravais lattice, but we are only interested in symmetry
symmetry2lattice={'cubic':'bcc','hexagonal':'hex','tetragonal':'bct'}
symmetry2lattice={'cubic':'fcc','hexagonal':'hex','tetragonal':'bct'}
lattice = symmetry2lattice[options.symmetry]
pole = np.array(options.pole)
pole /= np.linalg.norm(pole)
# --- loop over input files ------------------------------------------------------------------------
if filenames == []: filenames = [None]
for name in filenames:
try:
table = damask.ASCIItable(name = name,
buffered = False)
except: continue
damask.util.report(scriptName,name)
damask.util.report(scriptName,name)
# ------------------------------------------ read header ------------------------------------------
table.head_read()
# ------------------------------------------ sanity checks ----------------------------------------
if not table.label_dimension(options.quaternion) == 4:
damask.util.croak('input {} does not have dimension 4.'.format(options.quaternion))
table.close(dismiss = True) # close ASCIItable and remove empty file
continue
column = table.label_index(options.quaternion)
# ------------------------------------------ assemble header ---------------------------------------
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
table.labels_append(['{}_IPF_{:g}{:g}{:g}_{sym}'.format(i+1,*options.pole,sym = options.symmetry.lower()) for i in range(3)])
table.head_write()
# ------------------------------------------ process data ------------------------------------------
outputAlive = True
while outputAlive and table.data_read(): # read next data line of ASCII table
o = damask.Orientation(np.array(list(map(float,table.data[column:column+4]))),
lattice = lattice).reduced()
table.data_append(o.IPFcolor(pole))
outputAlive = table.data_write() # output processed line
# ------------------------------------------ output finalization -----------------------------------
table.close() # close ASCII tables
table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
orientation = table.get(options.quaternion)
color = np.empty((orientation.shape[0],3))
for i,o in enumerate(orientation):
color[i] = damask.Orientation(o,lattice = lattice).IPFcolor(pole)
table.add('IPF_{:g}{:g}{:g}_{sym}'.format(*options.pole,sym = options.symmetry.lower()),
color,
scriptID+' '+' '.join(sys.argv[1:]))
table.to_ASCII(sys.stdout if name is None else name)
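
Usage of the new per-row conversion (the Orientation call is copied from the hunk above; the w-first quaternion convention is an assumption):

import numpy as np
import damask

q = np.array([1.,0.,0.,0.])                              # identity quaternion, (w,x,y,z) assumed
rgb = damask.Orientation(q,lattice = 'fcc').IPFcolor(np.array([0.,0.,1.]))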

View File

@ -2,6 +2,7 @@
import os
import sys
from io import StringIO
from optparse import OptionParser
import numpy as np
@ -42,7 +43,7 @@ parser.add_option('-n','--norm',
type = 'choice', choices = normChoices, metavar='string',
help = 'type of element-wise p-norm [frobenius] {%s}'%(','.join(map(str,normChoices))))
parser.add_option('-l','--label',
dest = 'label',
dest = 'labels',
action = 'extend', metavar = '<string LIST>',
help = 'heading of column(s) to calculate norm of')
@ -50,62 +51,25 @@ parser.set_defaults(norm = 'frobenius',
)
(options,filenames) = parser.parse_args()
if options.norm.lower() not in normChoices:
parser.error('invalid norm ({}) specified.'.format(options.norm))
if options.label is None:
parser.error('no data column specified.')
# --- loop over input files -------------------------------------------------------------------------
if filenames == []: filenames = [None]
if options.norm.lower() not in normChoices:
parser.error('invalid norm ({}) specified.'.format(options.norm))
if options.labels is None:
parser.error('no data column specified.')
for name in filenames:
try:
table = damask.ASCIItable(name = name,
buffered = False)
except: continue
damask.util.report(scriptName,name)
damask.util.report(scriptName,name)
# ------------------------------------------ read header ------------------------------------------
table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
for label in options.labels:
data = table.get(label)
data_norm = np.empty((data.shape[0],1))
for i,d in enumerate(data):
data_norm[i] = norm(options.norm.capitalize(),d)
table.head_read()
table.add('norm{}({})'.format(options.norm.capitalize(),label),
data_norm,
scriptID+' '+' '.join(sys.argv[1:]))
# ------------------------------------------ sanity checks ----------------------------------------
errors = []
remarks = []
columns = []
dims = []
for what in options.label:
dim = table.label_dimension(what)
if dim < 0: remarks.append('column {} not found...'.format(what))
else:
dims.append(dim)
columns.append(table.label_index(what))
table.labels_append('norm{}({})'.format(options.norm.capitalize(),what)) # extend ASCII header with new labels
if remarks != []: damask.util.croak(remarks)
if errors != []:
damask.util.croak(errors)
table.close(dismiss = True)
continue
# ------------------------------------------ assemble header --------------------------------------
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
table.head_write()
# ------------------------------------------ process data ------------------------------------------
outputAlive = True
while outputAlive and table.data_read(): # read next data line of ASCII table
for column,dim in zip(columns,dims):
table.data_append(norm(options.norm.capitalize(),
map(float,table.data[column:column+dim])))
outputAlive = table.data_write() # output processed line
# ------------------------------------------ output finalization -----------------------------------
table.close() # close input ASCII table (works for stdin)
table.to_ASCII(sys.stdout if name is None else name)
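
The norm() helper itself lies outside this hunk; judging from normChoices it presumably dispatches on the capitalized norm name. A hypothetical stand-in, for orientation only:

import numpy as np

def norm(which,data):                                    # hypothetical stand-in, not the shipped helper
    d = np.asarray(data,dtype=float)
    if which == 'Abs':       return np.sum(np.abs(d))    # L1 norm
    if which == 'Frobenius': return np.sqrt(np.sum(d*d)) # L2 norm
    if which == 'Max':       return np.max(np.abs(d))    # L-infinity norm
    raise ValueError('invalid norm {}'.format(which))

norm('Frobenius',[1.,2.,2.])                             # -> 3.0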

View File

@ -125,9 +125,10 @@ R = damask.Rotation.fromAxisAngle(np.array(options.labrotation),options.degrees,
if filenames == []: filenames = [None]
for name in filenames:
try: table = damask.ASCIItable(name = name,
buffered = False)
except Exception: continue
try:
table = damask.ASCIItable(name = name, buffered = False)
except IOError:
continue
damask.util.report(scriptName,name)
# ------------------------------------------ read header ------------------------------------------

View File

@ -2,6 +2,7 @@
import os
import sys
from io import StringIO
from optparse import OptionParser
import numpy as np
@ -42,52 +43,23 @@ parser.set_defaults(pole = (1.0,0.0,0.0),
)
(options, filenames) = parser.parse_args()
if filenames == []: filenames = [None]
pole = np.array(options.pole)
pole /= np.linalg.norm(pole)
# --- loop over input files -------------------------------------------------------------------------
if filenames == []: filenames = [None]
for name in filenames:
try:
table = damask.ASCIItable(name = name,
buffered = False)
except: continue
damask.util.report(scriptName,name)
damask.util.report(scriptName,name)
# ------------------------------------------ read header ------------------------------------------
table.head_read()
# ------------------------------------------ sanity checks ----------------------------------------
if not table.label_dimension(options.quaternion) == 4:
damask.util.croak('input {} does not have dimension 4.'.format(options.quaternion))
table.close(dismiss = True) # close ASCIItable and remove empty file
continue
column = table.label_index(options.quaternion)
# ------------------------------------------ assemble header ---------------------------------------
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
table.labels_append(['{}_pole_{}{}{}'.format(i+1,*options.pole) for i in range(2)])
table.head_write()
# ------------------------------------------ process data ------------------------------------------
outputAlive = True
while outputAlive and table.data_read(): # read next data line of ASCII table
o = damask.Rotation(np.array(list(map(float,table.data[column:column+4]))))
rotatedPole = o*pole # rotate pole according to crystal orientation
(x,y) = rotatedPole[0:2]/(1.+abs(pole[2])) # stereographic projection
table.data_append([np.sqrt(x*x+y*y),np.arctan2(y,x)] if options.polar else [x,y]) # polar or Cartesian coordinates
outputAlive = table.data_write() # output processed line
# ------------------------------------------ output finalization -----------------------------------
table.close() # close ASCII tables
table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
orientation = table.get(options.quaternion)
poles = np.empty((orientation.shape[0],2))
for i,o in enumerate(orientation):
rotatedPole = damask.Rotation(o)*pole # rotate pole according to crystal orientation
(x,y) = rotatedPole[0:2]/(1.+abs(pole[2])) # stereographic projection
poles[i] = [np.sqrt(x*x+y*y),np.arctan2(y,x)] if options.polar else [x,y]       # polar or Cartesian coordinates
table.add('pole_{:g}{:g}{:g}'.format(*options.pole),
poles,
scriptID+' '+' '.join(sys.argv[1:]))
table.to_ASCII(sys.stdout if name is None else name)
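
The projection above maps a unit pole p to (x,y) = (p_x,p_y)/(1+|p_z|), i.e. a stereographic projection that sends the equator to the unit circle:

import numpy as np

p = np.array([0.,1.,0.])                    # pole lying in the equatorial plane
x,y = p[0:2]/(1.+abs(p[2]))                 # -> (0.0, 1.0): lands on the unit circle
r,phi = np.sqrt(x*x+y*y),np.arctan2(y,x)    # polar form, as selected by --polar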

View File

@ -65,7 +65,8 @@ for name in filenames:
outname = os.path.join(os.path.dirname(name),
prefix+os.path.basename(name)) if name else name,
buffered = False)
except: continue
except IOError:
continue
damask.util.report(scriptName,name)
# ------------------------------------------ read header ------------------------------------------
@ -95,7 +96,7 @@ for name in filenames:
table.data_readArray()
if (options.grid is None or options.size is None):
grid,size = damask.util.coordGridAndSize(table.data[:,table.label_indexrange(options.pos)])
grid,size,origin = damask.grid_filters.cell_coord0_2_DNA(table.data[:,table.label_indexrange(options.pos)])
else:
grid = np.array(options.grid,'i')
size = np.array(options.size,'d')

View File

@ -55,7 +55,8 @@ for name in filenames:
outname = os.path.join(os.path.dirname(name),
prefix+os.path.basename(name)) if name else name,
buffered = False)
except: continue
except IOError:
continue
damask.util.report(scriptName,name)
# ------------------------------------------ read header ------------------------------------------
@ -82,7 +83,7 @@ for name in filenames:
table.data_readArray(options.pos)
table.data_rewind()
grid,size = damask.util.coordGridAndSize(table.data)
grid,size,origin = damask.grid_filters.cell_coord0_2_DNA(table.data)
packing = np.array(options.packing,'i')
outSize = grid*packing

View File

@ -2,10 +2,10 @@
import os
import sys
from io import StringIO
from optparse import OptionParser
import vtk
import numpy as np
import damask
@ -33,49 +33,20 @@ parser.set_defaults(pos = 'pos',
)
(options, filenames) = parser.parse_args()
# --- loop over input files -------------------------------------------------------------------------
if filenames == []: filenames = [None]
for name in filenames:
try: table = damask.ASCIItable(name = name,
buffered = False,
readonly = True)
except: continue
damask.util.report(scriptName,name)
# --- interpret header ----------------------------------------------------------------------------
table.head_read()
errors = []
remarks = []
coordDim = table.label_dimension(options.pos)
if not 3 >= coordDim >= 1: errors.append('coordinates "{}" need to have one, two, or three dimensions.'.format(options.pos))
elif coordDim < 3: remarks.append('appending {} dimension{} to coordinates "{}"...'.format(3-coordDim,
's' if coordDim < 2 else '',
options.pos))
if remarks != []: damask.util.croak(remarks)
if errors != []:
damask.util.croak(errors)
table.close(dismiss=True)
continue
table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
# ------------------------------------------ process data ---------------------------------------
table.data_readArray(options.pos)
if table.data.shape[1] < 3:
table.data = np.hstack((table.data,
np.zeros((table.data.shape[0],
3-table.data.shape[1]),dtype='f'))) # fill coords up to 3D with zeros
Polydata = vtk.vtkPolyData()
Points = vtk.vtkPoints()
Vertices = vtk.vtkCellArray()
for p in table.data:
for p in table.get(options.pos):
pointID = Points.InsertNextPoint(p)
Vertices.InsertNextCell(1)
Vertices.InsertCellPoint(pointID)
@ -104,5 +75,3 @@ for name in filenames:
writer.Write()
if name is None: sys.stdout.write(writer.GetOutputString())
table.close()
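
The vtkPolyData assembly above, reduced to a self-contained minimum (random points stand in for table.get(options.pos)):

import numpy as np
import vtk

Polydata = vtk.vtkPolyData()
Points   = vtk.vtkPoints()
Vertices = vtk.vtkCellArray()
for p in np.random.rand(10,3):               # stand-in for table.get(options.pos)
    pointID = Points.InsertNextPoint(p)
    Vertices.InsertNextCell(1)               # every point becomes its own vertex cell
    Vertices.InsertCellPoint(pointID)
Polydata.SetPoints(Points)
Polydata.SetVerts(Vertices)
Polydata.Modified()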

View File

@ -2,6 +2,7 @@
import os
import sys
from io import StringIO
from optparse import OptionParser
import vtk
@ -40,48 +41,14 @@ parser.set_defaults(mode = 'cell',
)
(options, filenames) = parser.parse_args()
# --- loop over input files -------------------------------------------------------------------------
if filenames == []: filenames = [None]
for name in filenames:
try: table = damask.ASCIItable(name = name,
buffered = False,
labeled = True,
readonly = True,
)
except: continue
damask.util.report(scriptName,name)
# --- interpret header ----------------------------------------------------------------------------
table.head_read()
remarks = []
errors = []
coordDim = table.label_dimension(options.pos)
if not 3 >= coordDim >= 1: errors.append('coordinates "{}" need to have one, two, or three dimensions.'.format(options.pos))
elif coordDim < 3: remarks.append('appending {} dimension{} to coordinates "{}"...'.format(3-coordDim,
's' if coordDim < 2 else '',
options.pos))
if remarks != []: damask.util.croak(remarks)
if errors != []:
damask.util.croak(errors)
table.close(dismiss=True)
continue
# --------------- figure out size and grid ---------------------------------------------------------
table.data_readArray(options.pos)
if table.data.shape[1] < 3:
table.data = np.hstack((table.data,
np.zeros((table.data.shape[0],
3-table.data.shape[1]),dtype='f'))) # fill coords up to 3D with zeros
coords = [np.unique(table.data[:,i]) for i in range(3)]
table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
coords = [np.unique(table.get(options.pos)[:,i]) for i in range(3)]
if options.mode == 'cell':
coords = [0.5 * np.array([3.0 * coords[i][0] - coords[i][0 + int(len(coords[i]) > 1)]] + \
[coords[i][j-1] + coords[i][j] for j in range(1,len(coords[i]))] + \
@ -90,13 +57,6 @@ for name in filenames:
grid = np.array(list(map(len,coords)),'i')
N = grid.prod() if options.mode == 'point' else (grid-1).prod()
if N != len(table.data):
errors.append('data count {} does not match grid {}x{}x{}.'.format(N,*(grid - (options.mode == 'cell')) ))
if errors != []:
damask.util.croak(errors)
table.close(dismiss = True)
continue
# ------------------------------------------ process data ---------------------------------------
rGrid = vtk.vtkRectilinearGrid()
@ -135,5 +95,3 @@ for name in filenames:
writer.Write()
if name is None: sys.stdout.write(writer.GetOutputString())
table.close()

View File

@ -145,7 +145,6 @@ for name in filenames:
config_header += ['<microstructure>']
for i in range(np.nanmax(microstructure)):
config_header += ['[{}{}]'.format(label,i+1),
'crystallite 1',
'(constituent)\tphase {}\ttexture {}\tfraction 1.0'.format(phase[i],i+1),
]

View File

@ -126,15 +126,12 @@ for i in range(3,np.max(microstructure)):
config_header = ['<microstructure>',
'[canal]',
'crystallite 1',
'(constituent)\tphase 1\ttexture 1\tfraction 1.0',
'[interstitial]',
'crystallite 1',
'(constituent)\tphase 2\ttexture 2\tfraction 1.0'
]
for i in range(3,np.max(microstructure)):
config_header += ['[Point{}]'.format(i-2),
'crystallite 1',
'(constituent)\tphase 3\ttexture {}\tfraction 1.0'.format(i)
]

View File

@ -78,36 +78,15 @@ for name in filenames:
table = damask.ASCIItable(name = name,readonly=True)
table.head_read() # read ASCII header info
# ------------------------------------------ sanity checks ---------------------------------------
coordDim = table.label_dimension(options.pos)
errors = []
if not 3 >= coordDim >= 2:
errors.append('coordinates "{}" need to have two or three dimensions.'.format(options.pos))
if not np.all(table.label_dimension(label) == dim):
errors.append('input "{}" needs to have dimension {}.'.format(label,dim))
if options.phase and table.label_dimension(options.phase) != 1:
errors.append('phase column "{}" is not scalar.'.format(options.phase))
if errors != []:
damask.util.croak(errors)
continue
table.data_readArray([options.pos] \
+ (label if isinstance(label, list) else [label]) \
+ ([options.phase] if options.phase else []))
if coordDim == 2:
table.data = np.insert(table.data,2,np.zeros(len(table.data)),axis=1) # add zero z coordinate for two-dimensional input
if options.phase is None:
table.data = np.column_stack((table.data,np.ones(len(table.data)))) # add single phase if no phase column given
grid,size = damask.util.coordGridAndSize(table.data[:,0:3])
coords = [np.unique(table.data[:,i]) for i in range(3)]
mincorner = np.array(list(map(min,coords)))
origin = mincorner - 0.5*size/grid # shift from cell center to corner
grid,size,origin = damask.grid_filters.cell_coord0_2_DNA(table.data[:,0:3])
indices = np.lexsort((table.data[:,0],table.data[:,1],table.data[:,2])) # indices of position when sorting x fast, z slow
microstructure = np.empty(grid,dtype = int) # initialize empty microstructure
@ -142,7 +121,6 @@ for name in filenames:
config_header += ['<microstructure>']
for i,data in enumerate(unique):
config_header += ['[Grain{}]'.format(i+1),
'crystallite 1',
'(constituent)\tphase {}\ttexture {}\tfraction 1.0'.format(int(data[4]),i+1),
]

View File

@ -290,7 +290,6 @@ for name in filenames:
config_header += ['<microstructure>']
for ID in grainIDs:
config_header += ['[Grain{}]'.format(ID),
'crystallite 1',
'(constituent)\tphase {}\ttexture {}\tfraction 1.0'.format(options.phase,ID)
]

View File

@ -2,10 +2,8 @@
import os
import sys
from optparse import OptionParser
from io import StringIO
import numpy as np
from optparse import OptionParser
import damask
@ -24,38 +22,25 @@ Translate geom description into ASCIItable containing position and microstructur
""", version = scriptID)
(options, filenames) = parser.parse_args()
if filenames == []: filenames = [None]
for name in filenames:
damask.util.report(scriptName,name)
geom = damask.Geom.from_file(StringIO(''.join(sys.stdin.read())) if name is None else name)
damask.util.croak(geom)
coord0 = damask.grid_filters.cell_coord0(geom.grid,geom.size,geom.origin).reshape((-1,3),order='F')
# --- generate grid --------------------------------------------------------------------------------
comments = geom.comments \
+ [scriptID + ' ' + ' '.join(sys.argv[1:]),
"grid\ta {}\tb {}\tc {}".format(*geom.grid),
"size\tx {}\ty {}\tz {}".format(*geom.size),
"origin\tx {}\ty {}\tz {}".format(*geom.origin),
"homogenization\t{}".format(geom.homogenization)]
grid = geom.get_grid()
size = geom.get_size()
origin = geom.get_origin()
table = damask.Table(coord0,{'pos':(3,)},comments)
table.add('microstructure',geom.microstructure.reshape((-1,1)))
x = (0.5 + np.arange(grid[0],dtype=float))/grid[0]*size[0]+origin[0]
y = (0.5 + np.arange(grid[1],dtype=float))/grid[1]*size[1]+origin[1]
z = (0.5 + np.arange(grid[2],dtype=float))/grid[2]*size[2]+origin[2]
xx = np.tile( x, grid[1]* grid[2])
yy = np.tile(np.repeat(y,grid[0] ),grid[2])
zz = np.repeat(z,grid[0]*grid[1])
# --- create ASCII table --------------------------------------------------------------------------
table = damask.ASCIItable(outname = os.path.splitext(name)[0]+'.txt' if name else name)
table.info_append(geom.get_comments() + [scriptID + '\t' + ' '.join(sys.argv[1:])])
table.labels_append(['{}_{}'.format(1+i,'pos') for i in range(3)]+['microstructure'])
table.head_write()
table.output_flush()
table.data = np.squeeze(np.dstack((xx,yy,zz,geom.microstructure.flatten('F'))),axis=0)
table.data_writeArray()
table.close()
table.to_ASCII(sys.stdout if name is None else \
os.path.splitext(name)[0]+'.txt')

View File

@ -19,7 +19,7 @@ def integerFactorization(i):
return j
def binAsBins(bin,intervals):
"""Explode compound bin into 3D bins list"""
"""Explode compound bin into 3D bins list."""
bins = [0]*3
bins[0] = (bin//(intervals[1] * intervals[2])) % intervals[0]
bins[1] = (bin//intervals[2]) % intervals[1]
@ -27,17 +27,17 @@ def binAsBins(bin,intervals):
return bins
def binsAsBin(bins,intervals):
"""Implode 3D bins into compound bin"""
"""Implode 3D bins into compound bin."""
return (bins[0]*intervals[1] + bins[1])*intervals[2] + bins[2]
def EulersAsBins(Eulers,intervals,deltas,center):
"""Return list of Eulers translated into 3D bins list"""
"""Return list of Eulers translated into 3D bins list."""
return [int((euler+(0.5-center)*delta)//delta)%interval \
for euler,delta,interval in zip(Eulers,deltas,intervals) \
]
def binAsEulers(bin,intervals,deltas,center):
"""Compound bin number translated into list of Eulers"""
"""Compound bin number translated into list of Eulers."""
Eulers = [0.0]*3
Eulers[2] = (bin%intervals[2] + center)*deltas[2]
Eulers[1] = (bin//intervals[2]%intervals[1] + center)*deltas[1]
@ -45,7 +45,7 @@ def binAsEulers(bin,intervals,deltas,center):
return Eulers
def directInvRepetitions(probability,scale):
"""Calculate number of samples drawn by direct inversion"""
"""Calculate number of samples drawn by direct inversion."""
nDirectInv = 0
for bin in range(len(probability)): # loop over bins
nDirectInv += int(round(probability[bin]*scale)) # calc repetition
@ -56,7 +56,7 @@ def directInvRepetitions(probability,scale):
# ----- efficient algorithm ---------
def directInversion (ODF,nSamples):
"""ODF contains 'dV_V' (normalized to 1), 'center', 'intervals', 'limits' (in radians)"""
"""ODF contains 'dV_V' (normalized to 1), 'center', 'intervals', 'limits' (in radians)."""
nOptSamples = max(ODF['nNonZero'],nSamples) # random subsampling if too little samples requested
nInvSamples = 0
@ -118,7 +118,7 @@ def directInversion (ODF,nSamples):
# ----- trial and error algorithms ---------
def MonteCarloEulers (ODF,nSamples):
"""ODF contains 'dV_V' (normalized to 1), 'center', 'intervals', 'limits' (in radians)"""
"""ODF contains 'dV_V' (normalized to 1), 'center', 'intervals', 'limits' (in radians)."""
countMC = 0
maxdV_V = max(ODF['dV_V'])
orientations = np.zeros((nSamples,3),'f')
@ -141,7 +141,7 @@ def MonteCarloEulers (ODF,nSamples):
def MonteCarloBins (ODF,nSamples):
"""ODF contains 'dV_V' (normalized to 1), 'center', 'intervals', 'limits' (in radians)"""
"""ODF contains 'dV_V' (normalized to 1), 'center', 'intervals', 'limits' (in radians)."""
countMC = 0
maxdV_V = max(ODF['dV_V'])
orientations = np.zeros((nSamples,3),'f')
@ -163,7 +163,7 @@ def MonteCarloBins (ODF,nSamples):
def TothVanHoutteSTAT (ODF,nSamples):
"""ODF contains 'dV_V' (normalized to 1), 'center', 'intervals', 'limits' (in radians)"""
"""ODF contains 'dV_V' (normalized to 1), 'center', 'intervals', 'limits' (in radians)."""
orientations = np.zeros((nSamples,3),'f')
reconstructedODF = np.zeros(ODF['nBins'],'f')
unitInc = 1.0/nSamples
@ -211,10 +211,6 @@ parser.add_option('-p','--phase',
dest = 'phase',
type = 'int', metavar = 'int',
help = 'phase index to be used [%default]')
parser.add_option('--crystallite',
dest = 'crystallite',
type = 'int', metavar = 'int',
help = 'crystallite index to be used [%default]')
parser.add_option('-r', '--rnd',
dest = 'randomSeed',
type = 'int', metavar = 'int', \
@ -223,7 +219,6 @@ parser.set_defaults(randomSeed = None,
number = 500,
algorithm = 'IA',
phase = 1,
crystallite = 1,
ang = True,
)
@ -240,7 +235,7 @@ if filenames == []: filenames = [None]
for name in filenames:
try:
table = damask.ASCIItable(name = name, buffered = False, readonly=True)
except:
except IOError:
continue
damask.util.report(scriptName,name)
@ -351,7 +346,6 @@ for name in filenames:
for i,ID in enumerate(range(nSamples)):
materialConfig += ['[Grain%s]'%(str(ID+1).zfill(formatwidth)),
'crystallite %i'%options.crystallite,
'(constituent) phase %i texture %s fraction 1.0'%(options.phase,str(ID+1).rjust(formatwidth)),
]

View File

@ -1,9 +1,10 @@
#!/usr/bin/env python3
# -*- coding: UTF-8 no BOM -*-
import os,sys
import numpy as np
import os
import sys
from io import StringIO
from optparse import OptionParser
import damask
scriptName = os.path.splitext(os.path.basename(__file__))[0]
@ -191,78 +192,45 @@ parser.add_option('-p', '--port',
dest = 'port',
type = 'int', metavar = 'int',
help = 'Mentat connection port [%default]')
parser.add_option('--homogenization',
dest = 'homogenization',
type = 'int', metavar = 'int',
help = 'homogenization index to be used [auto]')
parser.set_defaults(port = None,
homogenization = None,
)
parser.set_defaults(port = None,
)
(options, filenames) = parser.parse_args()
if options.port:
try:
import py_mentat
except:
parser.error('no valid Mentat release found.')
if options.port is not None:
try:
import py_mentat
except ImportError:
parser.error('no valid Mentat release found.')
# --- loop over input files ------------------------------------------------------------------------
if filenames == []: filenames = [None]
for name in filenames:
try:
table = damask.ASCIItable(name = name,
outname = os.path.splitext(name)[0]+'.proc' if name else name,
buffered = False, labeled = False)
except: continue
damask.util.report(scriptName,name)
# --- interpret header ----------------------------------------------------------------------------
table.head_read()
info,extra_header = table.head_getGeom()
if options.homogenization: info['homogenization'] = options.homogenization
damask.util.report(scriptName,name)
damask.util.croak(['grid a b c: %s'%(' x '.join(map(str,info['grid']))),
'size x y z: %s'%(' x '.join(map(str,info['size']))),
'origin x y z: %s'%(' : '.join(map(str,info['origin']))),
'homogenization: %i'%info['homogenization'],
'microstructures: %i'%info['microstructures'],
])
geom = damask.Geom.from_file(StringIO(''.join(sys.stdin.read())) if name is None else name)
microstructure = geom.get_microstructure().flatten(order='F')
errors = []
if np.any(info['grid'] < 1): errors.append('invalid grid a b c.')
if np.any(info['size'] <= 0.0): errors.append('invalid size x y z.')
if errors != []:
damask.util.croak(errors)
table.close(dismiss = True)
continue
# --- read data ------------------------------------------------------------------------------------
microstructure = table.microstructure_read(info['grid']).reshape(info['grid'].prod(),order='F') # read microstructure
cmds = [\
init(),
mesh(info['grid'],info['size']),
material(),
geometry(),
initial_conditions(info['homogenization'],microstructure),
'*identify_sets',
'*show_model',
'*redraw',
'*draw_automatic',
]
outputLocals = {}
if options.port:
py_mentat.py_connect('',options.port)
output(cmds,outputLocals,'Mentat')
py_mentat.py_disconnect()
else:
output(cmds,outputLocals,table.__IO__['out']) # bad hack into internals of table class...
table.close()
cmds = [\
init(),
mesh(geom.grid,geom.size),
material(),
geometry(),
initial_conditions(geom.homogenization,microstructure),
'*identify_sets',
'*show_model',
'*redraw',
'*draw_automatic',
]
outputLocals = {}
if options.port:
py_mentat.py_connect('',options.port)
output(cmds,outputLocals,'Mentat')
py_mentat.py_disconnect()
else:
with sys.stdout if name is None else open(os.path.splitext(name)[0]+'.proc','w') as f:
output(cmds,outputLocals,f)

View File

@ -78,13 +78,11 @@ def rcbOrientationParser(content,idcolumn):
damask.util.croak('You might not have chosen the correct column for the grain IDs! '+
'Please check the "--id" option.')
raise
except:
raise
return grains
def rcbParser(content,M,size,tolerance,idcolumn,segmentcolumn):
"""Parser for TSL-OIM reconstructed boundary files"""
"""Parser for TSL-OIM reconstructed boundary files."""
# find bounding box
boxX = [1.*sys.maxsize,-1.*sys.maxsize] # sys.maxint was removed in Python 3
boxY = [1.*sys.maxsize,-1.*sys.maxsize]
@ -99,8 +97,6 @@ def rcbParser(content,M,size,tolerance,idcolumn,segmentcolumn):
damask.util.croak('You might not have chosen the correct column for the segment end points! '+
'Please check the "--segment" option.')
raise
except:
raise
(x[0],y[0]) = (M[0]*x[0]+M[1]*y[0],M[2]*x[0]+M[3]*y[0]) # apply transformation to coordinates
(x[1],y[1]) = (M[0]*x[1]+M[1]*y[1],M[2]*x[1]+M[3]*y[1]) # to get rcb --> Euler system
boxX[0] = min(boxX[0],x[0],x[1])
@ -728,7 +724,7 @@ def image(name,imgsize,marginX,marginY,rcData):
# -------------------------
def inside(x,y,points):
"""Tests whether point(x,y) is within polygon described by points"""
"""Tests whether point(x,y) is within polygon described by points."""
inside = False
npoints=len(points)
(x1,y1) = points[npoints-1] # start with last point of points
@ -750,7 +746,7 @@ def inside(x,y,points):
# -------------------------
def fftbuild(rcData,height,xframe,yframe,grid,extrusion):
"""Build array of grain numbers"""
"""Build array of grain numbers."""
maxX = -1.*sys.maxsize # sys.maxint was removed in Python 3
maxY = -1.*sys.maxsize
for line in rcData['point']: # find data range
@ -883,7 +879,7 @@ try:
boundaryFile = open(args[0])
boundarySegments = boundaryFile.readlines()
boundaryFile.close()
except:
except IOError:
damask.util.croak('unable to read boundary file "{}".'.format(args[0]))
raise
@ -941,19 +937,15 @@ if any(output in options.output for output in ['spectral','mentat']):
for i,grain in enumerate(rcData['grainMapping']):
config+=['[grain{}]'.format(grain),
'crystallite\t1',
'(constituent)\tphase 1\ttexture {}\tfraction 1.0'.format(i+1)]
if (options.xmargin > 0.0):
config+=['[x-margin]',
'crystallite\t1',
'(constituent)\tphase 2\ttexture {}\tfraction 1.0\n'.format(len(rcData['grainMapping'])+1)]
if (options.ymargin > 0.0):
config+=['[y-margin]',
'crystallite\t1',
'(constituent)\tphase 2\ttexture {}\tfraction 1.0\n'.format(len(rcData['grainMapping'])+1)]
if (options.xmargin > 0.0 and options.ymargin > 0.0):
config+=['[xy-margin]',
'crystallite\t1',
'(constituent)\tphase 2\ttexture {}\tfraction 1.0\n'.format(len(rcData['grainMapping'])+1)]
if (options.xmargin > 0.0 or options.ymargin > 0.0):

View File

@ -6,7 +6,6 @@ do
vtk_addPointCloudData $seeds \
--data microstructure,weight \
--inplace \
--vtk ${seeds%.*}.vtp \
done

View File

@ -1,9 +1,12 @@
#!/usr/bin/env python3
# -*- coding: UTF-8 no BOM -*-
import os,sys
import numpy as np
import os
import sys
from io import StringIO
from optparse import OptionParser
import numpy as np
import damask
scriptName = os.path.splitext(os.path.basename(__file__))[0]
@ -29,88 +32,39 @@ parser.add_option('-b',
action = 'extend', metavar = '<int LIST>',
dest = 'blacklist',
help = 'blacklist of grain IDs')
parser.add_option('-p',
'--pos', '--seedposition',
dest = 'pos',
type = 'string', metavar = 'string',
help = 'label of coordinates [%default]')
parser.set_defaults(whitelist = [],
blacklist = [],
pos = 'pos',
)
(options,filenames) = parser.parse_args()
options.whitelist = list(map(int,options.whitelist))
options.blacklist = list(map(int,options.blacklist))
# --- loop over output files -------------------------------------------------------------------------
if filenames == []: filenames = [None]
options.whitelist = [int(i) for i in options.whitelist]
options.blacklist = [int(i) for i in options.blacklist]
for name in filenames:
try: table = damask.ASCIItable(name = name,
outname = os.path.splitext(name)[0]+'.seeds' if name else name,
buffered = False,
labeled = False)
except: continue
damask.util.report(scriptName,name)
geom = damask.Geom.from_file(StringIO(''.join(sys.stdin.read())) if name is None else name)
microstructure = geom.get_microstructure().reshape((-1,1),order='F')
# --- interpret header ----------------------------------------------------------------------------
mask = np.logical_and(np.in1d(microstructure,options.whitelist,invert=False) if options.whitelist else \
np.full(geom.grid.prod(),True,dtype=bool),
np.in1d(microstructure,options.blacklist,invert=True) if options.blacklist else \
np.full(geom.grid.prod(),True,dtype=bool))
seeds = np.concatenate((damask.grid_filters.cell_coord0(geom.grid,geom.size).reshape((-1,3)),
microstructure),
axis=1)[mask]
comments = geom.comments \
+ [scriptID + ' ' + ' '.join(sys.argv[1:]),
"grid\ta {}\tb {}\tc {}".format(*geom.grid),
"size\tx {}\ty {}\tz {}".format(*geom.size),
"origin\tx {}\ty {}\tz {}".format(*geom.origin),
"homogenization\t{}".format(geom.homogenization)]
table.head_read()
info,extra_header = table.head_getGeom()
damask.util.report_geom(info)
errors = []
if np.any(info['grid'] < 1): errors.append('invalid grid a b c.')
if np.any(info['size'] <= 0.0): errors.append('invalid size x y z.')
if errors != []:
damask.util.croak(errors)
table.close(dismiss = True)
continue
# --- read data ------------------------------------------------------------------------------------
microstructure = table.microstructure_read(info['grid']) # read (linear) microstructure
# --- generate grid --------------------------------------------------------------------------------
x = (0.5 + np.arange(info['grid'][0],dtype=float))/info['grid'][0]*info['size'][0]+info['origin'][0]
y = (0.5 + np.arange(info['grid'][1],dtype=float))/info['grid'][1]*info['size'][1]+info['origin'][1]
z = (0.5 + np.arange(info['grid'][2],dtype=float))/info['grid'][2]*info['size'][2]+info['origin'][2]
xx = np.tile( x, info['grid'][1]* info['grid'][2])
yy = np.tile(np.repeat(y,info['grid'][0] ),info['grid'][2])
zz = np.repeat(z,info['grid'][0]*info['grid'][1])
mask = np.logical_and(np.in1d(microstructure,options.whitelist,invert=False) if options.whitelist != []
else np.full_like(microstructure,True,dtype=bool),
np.in1d(microstructure,options.blacklist,invert=True ) if options.blacklist != []
else np.full_like(microstructure,True,dtype=bool))
# ------------------------------------------ assemble header ---------------------------------------
table.info_clear()
table.info_append(extra_header+[
scriptID + ' ' + ' '.join(sys.argv[1:]),
"grid\ta {}\tb {}\tc {}".format(*info['grid']),
"size\tx {}\ty {}\tz {}".format(*info['size']),
"origin\tx {}\ty {}\tz {}".format(*info['origin']),
"homogenization\t{}".format(info['homogenization']),
"microstructures\t{}".format(info['microstructures']),
])
table.labels_clear()
table.labels_append(['{dim}_{label}'.format(dim = 1+i,label = options.pos) for i in range(3)]+['microstructure'])
table.head_write()
table.output_flush()
# --- write seeds information ------------------------------------------------------------
table.data = np.squeeze(np.dstack((xx,yy,zz,microstructure)))[mask]
table.data_writeArray()
# ------------------------------------------ finalize output ---------------------------------------
table.close()
table = damask.Table(seeds,{'pos':(3,),'microstructure':(1,)},comments)
table.to_ASCII(sys.stdout if name is None else \
os.path.splitext(name)[0]+'.seeds')

View File

@ -1,11 +1,14 @@
#!/usr/bin/env python3
# -*- coding: UTF-8 no BOM -*-
import os,math,sys
import numpy as np
import damask
import os
import sys
from io import StringIO
from optparse import OptionParser
import numpy as np
import damask
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version])
@ -35,117 +38,58 @@ parser.add_option('-y',
action = 'store_true',
dest = 'y',
help = 'poke 45 deg along y')
parser.add_option('-p','--position',
dest = 'position',
type = 'string', metavar = 'string',
help = 'column label for coordinates [%default]')
parser.set_defaults(x = False,
y = False,
box = [0.0,1.0,0.0,1.0,0.0,1.0],
N = 16,
position = 'pos',
)
(options,filenames) = parser.parse_args()
if filenames == []: filenames = [None]
options.box = np.array(options.box).reshape(3,2)
# --- loop over output files -------------------------------------------------------------------------
if filenames == []: filenames = [None]
for name in filenames:
try:
table = damask.ASCIItable(name = name,
outname = os.path.splitext(name)[-2]+'_poked_{}.seeds'.format(options.N) if name else name,
buffered = False, labeled = False)
except: continue
damask.util.report(scriptName,name)
# --- interpret header ----------------------------------------------------------------------------
table.head_read()
info,extra_header = table.head_getGeom()
damask.util.croak(['grid a b c: %s'%(' x '.join(map(str,info['grid']))),
'size x y z: %s'%(' x '.join(map(str,info['size']))),
'origin x y z: %s'%(' : '.join(map(str,info['origin']))),
'homogenization: %i'%info['homogenization'],
'microstructures: %i'%info['microstructures'],
])
errors = []
if np.any(info['grid'] < 1): errors.append('invalid grid a b c.')
if np.any(info['size'] <= 0.0): errors.append('invalid size x y z.')
if errors != []:
damask.util.croak(errors)
table.close(dismiss = True)
continue
# --- read data ------------------------------------------------------------------------------------
microstructure = table.microstructure_read(info['grid']).reshape(info['grid'],order='F') # read microstructure
# --- do work ------------------------------------------------------------------------------------
newInfo = {
'microstructures': 0,
}
offset = (np.amin(options.box, axis=1)*info['grid']/info['size']).astype(int)
box = np.amax(options.box, axis=1) - np.amin(options.box, axis=1)
Nx = int(options.N/math.sqrt(options.N*info['size'][1]*box[1]/info['size'][0]/box[0]))
Ny = int(options.N/math.sqrt(options.N*info['size'][0]*box[0]/info['size'][1]/box[1]))
Nz = int(box[2]*info['grid'][2])
damask.util.croak('poking {} x {} x {} in box {} {} {}...'.format(Nx,Ny,Nz,*box))
seeds = np.zeros((Nx*Ny*Nz,4),'d')
grid = np.zeros(3,'i')
n = 0
for i in range(Nx):
for j in range(Ny):
grid[0] = round((i+0.5)*box[0]*info['grid'][0]/Nx-0.5)+offset[0]
grid[1] = round((j+0.5)*box[1]*info['grid'][1]/Ny-0.5)+offset[1]
for k in range(Nz):
grid[2] = k + offset[2]
grid %= info['grid']
seeds[n,0:3] = (0.5+grid)/info['grid'] # normalize coordinates to box
seeds[n, 3] = microstructure[grid[0],grid[1],grid[2]]
if options.x: grid[0] += 1
if options.y: grid[1] += 1
n += 1
newInfo['microstructures'] = len(np.unique(seeds[:,3]))
# --- report ---------------------------------------------------------------------------------------
if (newInfo['microstructures'] != info['microstructures']):
damask.util.croak('--> microstructures: %i'%newInfo['microstructures'])
# ------------------------------------------ assemble header ---------------------------------------
table.info_clear()
table.info_append(extra_header+[
scriptID + ' ' + ' '.join(sys.argv[1:]),
"poking\ta {}\tb {}\tc {}".format(Nx,Ny,Nz),
"grid\ta {}\tb {}\tc {}".format(*info['grid']),
"size\tx {}\ty {}\tz {}".format(*info['size']),
"origin\tx {}\ty {}\tz {}".format(*info['origin']),
"homogenization\t{}".format(info['homogenization']),
"microstructures\t{}".format(newInfo['microstructures']),
])
table.labels_clear()
table.labels_append(['{dim}_{label}'.format(dim = 1+i,label = options.position) for i in range(3)]+['microstructure'])
table.head_write()
table.output_flush()
# --- write seeds information ------------------------------------------------------------
table.data = seeds
table.data_writeArray()
damask.util.report(scriptName,name)
geom = damask.Geom.from_file(StringIO(''.join(sys.stdin.read())) if name is None else name)
# --- output finalization --------------------------------------------------------------------------
table.close() # close ASCII table
offset =(np.amin(options.box, axis=1)*geom.grid/geom.size).astype(int)
box = np.amax(options.box, axis=1) \
- np.amin(options.box, axis=1)
Nx = int(options.N/np.sqrt(options.N*geom.size[1]*box[1]/geom.size[0]/box[0]))
Ny = int(options.N/np.sqrt(options.N*geom.size[0]*box[0]/geom.size[1]/box[1]))
Nz = int(box[2]*geom.grid[2])
damask.util.croak('poking {} x {} x {} in box {} {} {}...'.format(Nx,Ny,Nz,*box))
seeds = np.zeros((Nx*Ny*Nz,4),'d')
g = np.zeros(3,'i')
n = 0
for i in range(Nx):
for j in range(Ny):
g[0] = round((i+0.5)*box[0]*geom.grid[0]/Nx-0.5)+offset[0]
g[1] = round((j+0.5)*box[1]*geom.grid[1]/Ny-0.5)+offset[1]
for k in range(Nz):
g[2] = k + offset[2]
g %= geom.grid
seeds[n,0:3] = (g+0.5)/geom.grid # normalize coordinates to box
seeds[n, 3] = geom.microstructure[g[0],g[1],g[2]]
if options.x: g[0] += 1
if options.y: g[1] += 1
n += 1
comments = geom.comments \
+ [scriptID + ' ' + ' '.join(sys.argv[1:]),
"poking\ta {}\tb {}\tc {}".format(Nx,Ny,Nz),
"grid\ta {}\tb {}\tc {}".format(*geom.grid),
"size\tx {}\ty {}\tz {}".format(*geom.size),
"origin\tx {}\ty {}\tz {}".format(*geom.origin),
"homogenization\t{}".format(geom.homogenization)]
table = damask.Table(seeds,{'pos':(3,),'microstructure':(1,)},comments)
table.to_ASCII(sys.stdout if name is None else \
os.path.splitext(name)[0]+'_poked_{}.seeds'.format(options.N))

View File

@ -1,9 +1,10 @@
"""Main aggregator."""
import os
import re
name = 'damask'
with open(os.path.join(os.path.dirname(__file__),'VERSION')) as f:
version = f.readline().strip()
version = re.sub(r'^v','',f.readline().strip())
# classes
from .environment import Environment # noqa
@ -13,7 +14,7 @@ from .asciitable import ASCIItable # noqa
from .config import Material # noqa
from .colormaps import Colormap, Color # noqa
from .orientation import Symmetry, Lattice, Rotation, Orientation # noqa
from .dadf5 import DADF5 # noqa
from .geom import Geom # noqa
from .solver import Solver # noqa
@ -22,7 +23,9 @@ from .util import extendableOption # noqa
# functions in modules
from . import mechanics # noqa
from . import grid_filters # noqa
# clean temporary variables
del os
del re
del f

View File

@ -1,7 +1,10 @@
from queue import Queue
import re
import glob
import os
import vtk
from vtk.util import numpy_support
import h5py
import numpy as np
@ -37,7 +40,7 @@ class DADF5():
self.version_major = f.attrs['DADF5-major']
self.version_minor = f.attrs['DADF5-minor']
if self.version_major != 0 or not 2 <= self.version_minor <= 4:
if self.version_major != 0 or not 2 <= self.version_minor <= 5:
raise TypeError('Unsupported DADF5 version {} '.format(f.attrs['DADF5-version']))
self.structured = 'grid' in f['geometry'].attrs.keys()
@ -45,6 +48,9 @@ class DADF5():
if self.structured:
self.grid = f['geometry'].attrs['grid']
self.size = f['geometry'].attrs['size']
if self.version_major == 0 and self.version_minor >= 5:
self.origin = f['geometry'].attrs['origin']
r=re.compile('inc[0-9]+')
increments_unsorted = {int(i[3:]):i for i in f.keys() if r.match(i)}
@ -360,7 +366,7 @@ class DADF5():
f[k]
path.append(k)
except KeyError as e:
print('unable to locate geometry dataset: {}'.format(str(e)))
pass
for o,p in zip(['constituents','materialpoints'],['con_physics','mat_physics']):
for oo in self.iter_visible(o):
for pp in self.iter_visible(p):
@ -369,7 +375,7 @@ class DADF5():
f[k]
path.append(k)
except KeyError as e:
print('unable to locate {} dataset: {}'.format(o,str(e)))
pass
return path
@ -433,7 +439,7 @@ class DADF5():
np.linspace(delta[1],self.size[1]-delta[1],self.grid[1]),
np.linspace(delta[0],self.size[0]-delta[0],self.grid[0]),
)
return np.concatenate((x[:,:,:,None],y[:,:,:,None],y[:,:,:,None]),axis = 3).reshape([np.product(self.grid),3])
return np.concatenate((x[:,:,:,None],y[:,:,:,None],z[:,:,:,None]),axis = 3).reshape([np.product(self.grid),3])
else:
with h5py.File(self.fname,'r') as f:
return f['geometry/x_c'][()]
@ -830,7 +836,7 @@ class DADF5():
N_not_calculated = len(todo)
while N_not_calculated > 0:
result = results.get()
with h5py.File(self.fname,'a') as f: # write to file
dataset_out = f[result['group']].create_dataset(result['label'],data=result['data'])
for k in result['meta'].keys():
dataset_out.attrs[k] = result['meta'][k].encode()
@ -841,3 +847,142 @@ class DADF5():
N_added +=1
pool.wait_completion()
def to_vtk(self,labels,mode='Cell'):
"""
Export to vtk cell/point data.
Parameters
----------
labels : list of str
Labels of the datasets to be exported.
mode : str, either 'Cell' or 'Point'
Export in cell format or point format.
Default value is 'Cell'.
"""
if mode=='Cell':
if self.structured:
coordArray = [vtk.vtkDoubleArray(),vtk.vtkDoubleArray(),vtk.vtkDoubleArray()]
for dim in [0,1,2]:
for c in np.linspace(0,self.size[dim],1+self.grid[dim]):
coordArray[dim].InsertNextValue(c)
vtk_geom = vtk.vtkRectilinearGrid()
vtk_geom.SetDimensions(*(self.grid+1))
vtk_geom.SetXCoordinates(coordArray[0])
vtk_geom.SetYCoordinates(coordArray[1])
vtk_geom.SetZCoordinates(coordArray[2])
else:
nodes = vtk.vtkPoints()
with h5py.File(self.fname) as f:
nodes.SetData(numpy_support.numpy_to_vtk(f['/geometry/x_n'][()],deep=True))
vtk_geom = vtk.vtkUnstructuredGrid()
vtk_geom.SetPoints(nodes)
vtk_geom.Allocate(f['/geometry/T_c'].shape[0])
for i in f['/geometry/T_c']:
vtk_geom.InsertNextCell(vtk.VTK_HEXAHEDRON,8,i-1) # not for all elements!
elif mode == 'Point':
Points = vtk.vtkPoints()
Vertices = vtk.vtkCellArray()
for c in self.cell_coordinates():
pointID = Points.InsertNextPoint(c)
Vertices.InsertNextCell(1)
Vertices.InsertCellPoint(pointID)
vtk_geom = vtk.vtkPolyData()
vtk_geom.SetPoints(Points)
vtk_geom.SetVerts(Vertices)
vtk_geom.Modified()
N_digits = int(np.floor(np.log10(int(self.increments[-1][3:]))))+1
for i,inc in enumerate(self.iter_visible('increments')):
vtk_data = []
materialpoints_backup = self.visible['materialpoints'].copy()
self.set_visible('materialpoints',False)
for label in labels:
for p in self.iter_visible('con_physics'):
if p != 'generic':
for c in self.iter_visible('constituents'):
x = self.get_dataset_location(label)
if len(x) == 0:
continue
array = self.read_dataset(x,0)
shape = [array.shape[0],np.product(array.shape[1:])]
vtk_data.append(numpy_support.numpy_to_vtk(num_array=array.reshape(shape),
deep=True,array_type= vtk.VTK_DOUBLE))
vtk_data[-1].SetName('1_'+x[0].split('/',1)[1]) #ToDo: hard coded 1!
vtk_geom.GetCellData().AddArray(vtk_data[-1])
else:
x = self.get_dataset_location(label)
if len(x) == 0:
continue
array = self.read_dataset(x,0)
shape = [array.shape[0],np.product(array.shape[1:])]
vtk_data.append(numpy_support.numpy_to_vtk(num_array=array.reshape(shape),
deep=True,array_type= vtk.VTK_DOUBLE))
ph_name = re.compile(r'(?<=(constituent\/))(.*?)(?=(generic))') # identify phase name
dset_name = '1_' + re.sub(ph_name,r'',x[0].split('/',1)[1]) # removing phase name
vtk_data[-1].SetName(dset_name)
vtk_geom.GetCellData().AddArray(vtk_data[-1])
self.set_visible('materialpoints',materialpoints_backup)
constituents_backup = self.visible['constituents'].copy()
self.set_visible('constituents',False)
for label in labels:
for p in self.iter_visible('mat_physics'):
if p != 'generic':
for m in self.iter_visible('materialpoints'):
x = self.get_dataset_location(label)
if len(x) == 0:
continue
array = self.read_dataset(x,0)
shape = [array.shape[0],np.product(array.shape[1:])]
vtk_data.append(numpy_support.numpy_to_vtk(num_array=array.reshape(shape),
deep=True,array_type= vtk.VTK_DOUBLE))
vtk_data[-1].SetName('1_'+x[0].split('/',1)[1]) #ToDo: why 1_?
vtk_geom.GetCellData().AddArray(vtk_data[-1])
else:
x = self.get_dataset_location(label)
if len(x) == 0:
continue
array = self.read_dataset(x,0)
shape = [array.shape[0],np.product(array.shape[1:])]
vtk_data.append(numpy_support.numpy_to_vtk(num_array=array.reshape(shape),
deep=True,array_type= vtk.VTK_DOUBLE))
vtk_data[-1].SetName('1_'+x[0].split('/',1)[1])
vtk_geom.GetCellData().AddArray(vtk_data[-1])
self.set_visible('constituents',constituents_backup)
if mode=='Cell':
writer = vtk.vtkXMLRectilinearGridWriter() if self.structured else \
vtk.vtkXMLUnstructuredGridWriter()
x = self.get_dataset_location('u_n')
vtk_data.append(numpy_support.numpy_to_vtk(num_array=self.read_dataset(x,0),
deep=True,array_type=vtk.VTK_DOUBLE))
vtk_data[-1].SetName('u')
vtk_geom.GetPointData().AddArray(vtk_data[-1])
elif mode == 'Point':
writer = vtk.vtkXMLPolyDataWriter()
file_out = '{}_inc{}.{}'.format(os.path.splitext(os.path.basename(self.fname))[0],
inc[3:].zfill(N_digits),
writer.GetDefaultFileExtension())
writer.SetCompressorTypeToZLib()
writer.SetDataModeToBinary()
writer.SetFileName(file_out)
writer.SetInputData(vtk_geom)
writer.Write()
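# A minimal usage sketch, assuming DADF5(fname) opens an existing results file;
# the file name 'geom_load.hdf5' and the dataset labels 'F' and 'P' are
# hypothetical examples.
#   d = DADF5('geom_load.hdf5')
#   d.to_vtk(['F','P'],mode='Cell')    # writes one VTK file per visible increment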

View File

@ -205,6 +205,9 @@ class Geom():
else:
self.homogenization = homogenization
@property
def grid(self):
return self.get_grid()
def get_microstructure(self):
"""Return the microstructure representation."""

View File

@ -0,0 +1,375 @@
from scipy import spatial
import numpy as np
def __ks(size,grid,first_order=False):
"""
Get wave numbers operator.
Parameters
----------
size : numpy.ndarray
physical size of the periodic field.
"""
k_sk = np.where(np.arange(grid[0])>grid[0]//2,np.arange(grid[0])-grid[0],np.arange(grid[0]))/size[0]
if grid[0]%2 == 0 and first_order: k_sk[grid[0]//2] = 0 # Nyquist freq=0 for even grid (Johnson, MIT, 2011)
k_sj = np.where(np.arange(grid[1])>grid[1]//2,np.arange(grid[1])-grid[1],np.arange(grid[1]))/size[1]
if grid[1]%2 == 0 and first_order: k_sj[grid[1]//2] = 0 # Nyquist freq=0 for even grid (Johnson, MIT, 2011)
k_si = np.arange(grid[2]//2+1)/size[2]
kk, kj, ki = np.meshgrid(k_sk,k_sj,k_si,indexing = 'ij')
return np.concatenate((ki[:,:,:,None],kj[:,:,:,None],kk[:,:,:,None]),axis = 3)
def curl(size,field):
"""
Calculate curl of a vector or tensor field in Fourier space.
Parameters
----------
size : numpy.ndarray
physical size of the periodic field.
"""
n = np.prod(field.shape[3:])
k_s = __ks(size,field.shape[:3],True)
e = np.zeros((3, 3, 3))
e[0, 1, 2] = e[1, 2, 0] = e[2, 0, 1] = +1.0 # Levi-Civita symbol
e[0, 2, 1] = e[2, 1, 0] = e[1, 0, 2] = -1.0
field_fourier = np.fft.rfftn(field,axes=(0,1,2))
curl = (np.einsum('slm,ijkl,ijkm ->ijks', e,k_s,field_fourier)*2.0j*np.pi if n == 3 else # vector, 3 -> 3
np.einsum('slm,ijkl,ijknm->ijksn',e,k_s,field_fourier)*2.0j*np.pi) # tensor, 3x3 -> 3x3
return np.fft.irfftn(curl,axes=(0,1,2),s=field.shape[:3])
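# A minimal sanity check: the curl of a spatially constant vector field vanishes.
#   f = np.ones((4,4,4,3))
#   np.allclose(curl(np.ones(3),f),0.0)    # True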
def divergence(size,field):
"""
Calculate divergence of a vector or tensor field in Fourier space.
Parameters
----------
size : numpy.ndarray
physical size of the periodic field.
"""
n = np.prod(field.shape[3:])
k_s = __ks(size,field.shape[:3],True)
field_fourier = np.fft.rfftn(field,axes=(0,1,2))
divergence = (np.einsum('ijkl,ijkl ->ijk', k_s,field_fourier)*2.0j*np.pi if n == 3 else # vector, 3 -> 1
np.einsum('ijkm,ijklm->ijkl',k_s,field_fourier)*2.0j*np.pi) # tensor, 3x3 -> 3
return np.fft.irfftn(divergence,axes=(0,1,2),s=field.shape[:3])
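# Same call pattern as curl; the divergence of a constant tensor field is zero.
#   F = np.ones((4,4,4,3,3))
#   np.allclose(divergence(np.ones(3),F),0.0)    # True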
def gradient(size,field):
"""
Calculate gradient of a vector or scalar field in Fourier space.
Parameters
----------
size : numpy.ndarray
physical size of the periodic field.
"""
n = np.prod(field.shape[3:])
k_s = __ks(size,field.shape[:3],True)
field_fourier = np.fft.rfftn(field,axes=(0,1,2))
gradient = (np.einsum('ijkl,ijkm->ijkm', field_fourier,k_s)*2.0j*np.pi if n == 1 else # scalar, 1 -> 3
np.einsum('ijkl,ijkm->ijklm',field_fourier,k_s)*2.0j*np.pi) # vector, 3 -> 3x3
return np.fft.irfftn(gradient,axes=(0,1,2),s=field.shape[:3])
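# Note that a scalar field needs a trailing singleton axis, i.e. shape (x,y,z,1).
#   s = np.ones((4,4,4,1))
#   np.allclose(gradient(np.ones(3),s),0.0)    # True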
def cell_coord0(grid,size,origin=np.zeros(3)):
"""
Cell center positions (undeformed).
Parameters
----------
grid : numpy.ndarray
number of grid points.
size : numpy.ndarray
physical size of the periodic field.
origin : numpy.ndarray, optional
physical origin of the periodic field. Default is [0.0,0.0,0.0].
"""
start = origin + size/grid*.5
end = origin - size/grid*.5 + size
x, y, z = np.meshgrid(np.linspace(start[2],end[2],grid[2]),
np.linspace(start[1],end[1],grid[1]),
np.linspace(start[0],end[0],grid[0]),
indexing = 'ij')
return np.concatenate((z[:,:,:,None],y[:,:,:,None],x[:,:,:,None]),axis = 3)
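# Example: a 2 x 2 x 2 grid of unit physical size has cell centers at 0.25 and
# 0.75 along each axis; the returned array has shape (grid[2],grid[1],grid[0],3).
#   cell_coord0(np.array([2,2,2]),np.ones(3))[0,0,0]    # [0.25, 0.25, 0.25]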
def cell_displacement_fluct(size,F):
"""
Cell center displacement field from fluctuation part of the deformation gradient field.
Parameters
----------
size : numpy.ndarray
physical size of the periodic field.
F : numpy.ndarray
deformation gradient field.
"""
integrator = 0.5j*size/np.pi
k_s = __ks(size,F.shape[:3],False)
k_s_squared = np.einsum('...l,...l',k_s,k_s)
k_s_squared[0,0,0] = 1.0
displacement = -np.einsum('ijkml,ijkl,l->ijkm',
np.fft.rfftn(F,axes=(0,1,2)),
k_s,
integrator,
) / k_s_squared[...,np.newaxis]
return np.fft.irfftn(displacement,axes=(0,1,2),s=F.shape[:3])
def cell_displacement_avg(size,F):
"""
Cell center displacement field from average part of the deformation gradient field.
Parameters
----------
size : numpy.ndarray
physical size of the periodic field.
F : numpy.ndarray
deformation gradient field.
"""
F_avg = np.average(F,axis=(0,1,2))
return np.einsum('ml,ijkl->ijkm',F_avg-np.eye(3),cell_coord0(F.shape[:3][::-1],size))
def cell_displacement(size,F):
"""
Cell center displacement field from deformation gradient field.
Parameters
----------
size : numpy.ndarray
physical size of the periodic field.
F : numpy.ndarray
deformation gradient field.
"""
return cell_displacement_avg(size,F) + cell_displacement_fluct(size,F)
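# Sanity check: a deformation gradient field that equals the identity everywhere
# produces no displacement (cell_coord, defined below, then reduces to cell_coord0).
#   F = np.ones((4,4,4,3,3))*np.eye(3)
#   np.allclose(cell_displacement(np.ones(3),F),0.0)    # True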
def cell_coord(size,F,origin=np.zeros(3)):
"""
Cell center positions.
Parameters
----------
size : numpy.ndarray
physical size of the periodic field.
F : numpy.ndarray
deformation gradient field.
origin : numpy.ndarray, optional
physical origin of the periodic field. Default is [0.0,0.0,0.0].
"""
return cell_coord0(F.shape[:3][::-1],size,origin) + cell_displacement(size,F)
def cell_coord0_2_DNA(coord0,ordered=True):
"""
Return grid 'DNA', i.e. grid, size, and origin from array of cell positions.
Parameters
----------
coord0 : numpy.ndarray
array of undeformed cell coordinates.
ordered : bool, optional
expect coord0 data to be ordered (x fast, z slow).
"""
coords = [np.unique(coord0[:,i]) for i in range(3)]
mincorner = np.array(list(map(min,coords)))
maxcorner = np.array(list(map(max,coords)))
grid = np.array(list(map(len,coords)),'i')
size = grid/np.maximum(grid-1,1) * (maxcorner-mincorner)
delta = size/grid
origin = mincorner - delta*.5
if grid.prod() != len(coord0):
raise ValueError('Data count {} does not match grid {}.'.format(len(coord0),grid))
start = origin + delta*.5
end = origin - delta*.5 + size
if not (np.allclose(coords[0],np.linspace(start[0],end[0],grid[0])) and \
        np.allclose(coords[1],np.linspace(start[1],end[1],grid[1])) and \
        np.allclose(coords[2],np.linspace(start[2],end[2],grid[2]))):
raise ValueError('Regular grid spacing violated.')
if ordered and not np.allclose(coord0.reshape(tuple(grid[::-1])+(3,)),cell_coord0(grid,size,origin)):
raise ValueError('Input data is not a regular grid.')
return (grid,size,origin)
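# Round trip: positions generated by cell_coord0 (x fast, z slow when flattened
# in C order) reproduce their grid 'DNA'.
#   coord0 = cell_coord0(np.array([4,3,2]),np.array([8.,6.,4.])).reshape(-1,3)
#   grid,size,origin = cell_coord0_2_DNA(coord0)    # [4 3 2], [8. 6. 4.], [0. 0. 0.]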
def coord0_check(coord0):
"""
Check whether coordinates lie on a regular grid.
Parameters
----------
coord0 : numpy.ndarray
array of undeformed cell coordinates.
"""
cell_coord0_2_DNA(coord0,ordered=True)
def node_coord0(grid,size,origin=np.zeros(3)):
"""
Nodal positions (undeformed).
Parameters
----------
grid : numpy.ndarray
number of grid points.
size : numpy.ndarray
physical size of the periodic field.
origin : numpy.ndarray, optional
physical origin of the periodic field. Default is [0.0,0.0,0.0].
"""
x, y, z = np.meshgrid(np.linspace(origin[2],size[2]+origin[2],1+grid[2]),
np.linspace(origin[1],size[1]+origin[1],1+grid[1]),
np.linspace(origin[0],size[0]+origin[0],1+grid[0]),
indexing = 'ij')
return np.concatenate((z[:,:,:,None],y[:,:,:,None],x[:,:,:,None]),axis = 3)
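# Example: nodes of a 2 x 2 x 2 grid of unit size lie at 0.0, 0.5, and 1.0 along
# each axis, giving an array of shape (grid[2]+1,grid[1]+1,grid[0]+1,3).
#   node_coord0(np.array([2,2,2]),np.ones(3))[-1,-1,-1]    # [1. 1. 1.]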
def node_displacement_fluct(size,F):
"""
Nodal displacement field from fluctuation part of the deformation gradient field.
Parameters
----------
size : numpy.ndarray
physical size of the periodic field.
F : numpy.ndarray
deformation gradient field.
"""
return cell_2_node(cell_displacement_fluct(size,F))
def node_displacement_avg(size,F):
"""
Nodal displacement field from average part of the deformation gradient field.
Parameters
----------
size : numpy.ndarray
physical size of the periodic field.
F : numpy.ndarray
deformation gradient field.
"""
F_avg = np.average(F,axis=(0,1,2))
return np.einsum('ml,ijkl->ijkm',F_avg-np.eye(3),node_coord0(F.shape[:3][::-1],size))
def node_displacement(size,F):
"""
Nodal displacement field from deformation gradient field.
Parameters
----------
size : numpy.ndarray
physical size of the periodic field.
F : numpy.ndarray
deformation gradient field.
"""
return node_displacement_avg(size,F) + node_displacement_fluct(size,F)
def node_coord(size,F,origin=np.zeros(3)):
"""
Nodal positions.
Parameters
----------
size : numpy.ndarray
physical size of the periodic field.
F : numpy.ndarray
deformation gradient field.
origin : numpy.ndarray, optional
physical origin of the periodic field. Default is [0.0,0.0,0.0].
"""
return node_coord0(F.shape[:3][::-1],size,origin) + node_displacement(size,F)
def cell_2_node(cell_data):
"""Interpolate periodic cell data to nodal data."""
n = ( cell_data + np.roll(cell_data,1,(0,1,2))
+ np.roll(cell_data,1,(0,)) + np.roll(cell_data,1,(1,)) + np.roll(cell_data,1,(2,))
+ np.roll(cell_data,1,(0,1)) + np.roll(cell_data,1,(1,2)) + np.roll(cell_data,1,(2,0)))*0.125
return np.pad(n,((0,1),(0,1),(0,1))+((0,0),)*len(cell_data.shape[3:]),mode='wrap')
def node_2_cell(node_data):
"""Interpolate periodic nodal data to cell data."""
c = ( node_data + np.roll(node_data,1,(0,1,2))
+ np.roll(node_data,1,(0,)) + np.roll(node_data,1,(1,)) + np.roll(node_data,1,(2,))
+ np.roll(node_data,1,(0,1)) + np.roll(node_data,1,(1,2)) + np.roll(node_data,1,(2,0)))*0.125
return c[:-1,:-1,:-1]
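# Shape round trip: cell data of shape (2,3,4) maps to nodal data of shape (3,4,5)
# and back; for a spatially constant field the values are preserved exactly.
#   n = cell_2_node(np.ones((2,3,4)))    # shape (3,4,5), all ones
#   node_2_cell(n).shape                 # (2, 3, 4)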
def node_coord0_2_DNA(coord0,ordered=False):
"""
Return grid 'DNA', i.e. grid, size, and origin from array of nodal positions.
Parameters
----------
coord0 : numpy.ndarray
array of undeformed nodal coordinates
ordered : bool, optional
expect coord0 data to be ordered (x fast, z slow).
"""
coords = [np.unique(coord0[:,i]) for i in range(3)]
mincorner = np.array(list(map(min,coords)))
maxcorner = np.array(list(map(max,coords)))
grid = np.array(list(map(len,coords)),'i') - 1
size = maxcorner-mincorner
origin = mincorner
if (grid+1).prod() != len(coord0):
raise ValueError('Data count {} does not match grid {}.'.format(len(coord0),grid))
if not (np.allclose(coords[0],np.linspace(mincorner[0],maxcorner[0],grid[0]+1)) and \
        np.allclose(coords[1],np.linspace(mincorner[1],maxcorner[1],grid[1]+1)) and \
        np.allclose(coords[2],np.linspace(mincorner[2],maxcorner[2],grid[2]+1))):
raise ValueError('Regular grid spacing violated.')
if ordered and not np.allclose(coord0.reshape(tuple((grid+1)[::-1])+(3,)),node_coord0(grid,size,origin)):
raise ValueError('Input data is not a regular grid.')
return (grid,size,origin)
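# Round trip analogous to cell_coord0_2_DNA, but based on nodal positions.
#   coord0 = node_coord0(np.array([4,3,2]),np.array([8.,6.,4.])).reshape(-1,3)
#   grid,size,origin = node_coord0_2_DNA(coord0)    # [4 3 2], [8. 6. 4.], [0. 0. 0.]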
def regrid(size,F,new_grid):
"""tbd."""
c = cell_coord0(F.shape[:3][::-1],size) \
+ cell_displacement_avg(size,F) \
+ cell_displacement_fluct(size,F)
outer = np.dot(np.average(F,axis=(0,1,2)),size)
for d in range(3):
c[np.where(c[:,:,:,d]<0)] += outer[d]
c[np.where(c[:,:,:,d]>outer[d])] -= outer[d]
tree = spatial.cKDTree(c.reshape((-1,3)),boxsize=outer)
return tree.query(cell_coord0(new_grid,outer))[1]
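# Sanity check: for an undeformed state (identity F) and an unchanged grid, the
# mapping is the identity permutation.
#   F = np.ones((4,4,4,3,3))*np.eye(3)
#   idx = regrid(np.ones(3),F,np.array([4,4,4]))
#   np.array_equal(idx.flatten(),np.arange(4**3))    # True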

View File

@ -170,9 +170,18 @@ class Rotation:
################################################################################################
# convert to different orientation representations (numpy arrays)
def asQuaternion(self):
"""Unit quaternion: (q, p_1, p_2, p_3)."""
return self.quaternion.asArray()
def asQuaternion(self,
quaternion = False):
"""
Unit quaternion [q, p_1, p_2, p_3] unless quaternion == True: damask.quaternion object.
Parameters
----------
quaternion : bool, optional
return quaternion as DAMASK object.
"""
return self.quaternion if quaternion else self.quaternion.asArray()
def asEulers(self,
degrees = False):
@ -190,33 +199,36 @@ class Rotation:
return eu
def asAxisAngle(self,
degrees = False):
degrees = False,
pair = False):
"""
Axis angle pair: ([n_1, n_2, n_3], ω).
Axis angle representation [n_1, n_2, n_3, ω] unless pair == True: ([n_1, n_2, n_3], ω).
Parameters
----------
degrees : bool, optional
return rotation angle in degrees.
pair : bool, optional
return tuple of axis and angle.
"""
ax = qu2ax(self.quaternion.asArray())
if degrees: ax[3] = np.degrees(ax[3])
return ax
return (ax[:3],np.degrees(ax[3])) if pair else ax
def asMatrix(self):
"""Rotation matrix."""
return qu2om(self.quaternion.asArray())
def asRodrigues(self,
vector=False):
vector = False):
"""
Rodrigues-Frank vector: ([n_1, n_2, n_3], tan(ω/2)).
Rodrigues-Frank vector representation [n_1, n_2, n_3, tan(ω/2)] unless vector == True: [n_1, n_2, n_3] * tan(ω/2).
Parameters
----------
vector : bool, optional
return as array of length 3, i.e. scale the unit vector giving the rotation axis.
return as actual Rodrigues--Frank vector, i.e. rotation axis scaled by tan(ω/2).
"""
ro = qu2ro(self.quaternion.asArray())
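# A minimal sketch, assuming Rotation() constructs the identity rotation.
#   r = Rotation()
#   r.asAxisAngle(degrees=True,pair=True)    # (axis, 0.0)
#   r.asRodrigues(vector=True)               # zero vector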
@ -252,8 +264,8 @@ class Rotation:
acceptHomomorph = False,
P = -1):
qu = quaternion if isinstance(quaternion, np.ndarray) and quaternion.dtype == np.dtype(float) \
else np.array(quaternion,dtype=float)
if P > 0: qu[1:4] *= -1 # convert from P=1 to P=-1
if qu[0] < 0.0:
if acceptHomomorph:
@ -701,14 +713,14 @@ class Symmetry:
v = np.array(vector,dtype=float)
if proper: # check both improper ...
theComponents = np.dot(basis['improper'],v)
theComponents = np.around(np.dot(basis['improper'],v),12)
inSST = np.all(theComponents >= 0.0)
if not inSST: # ... and proper SST
theComponents = np.dot(basis['proper'],v)
theComponents = np.around(np.dot(basis['proper'],v),12)
inSST = np.all(theComponents >= 0.0)
else:
v[2] = abs(v[2]) # z component projects identical
theComponents = np.dot(basis['improper'],v) # for positive and negative values
theComponents = np.around(np.dot(basis['improper'],v),12) # for positive and negative values
inSST = np.all(theComponents >= 0.0)
if color: # have to return color array
@ -875,7 +887,7 @@ class Lattice:
[[ 17, 12, 5],[ 17, 7, 17]],
[[ 5, 17, 12],[ 17, 17, 7]],
[[ 12, -5,-17],[ 7,-17,-17]],
[[-17,-12, 5],[-17, 7, 17]]],dtype='float')}
[[-17,-12, 5],[-17,-7, 17]]],dtype='float')}
# Greninger--Troiano' orientation relationship for fcc <-> bcc transformation
# from Y. He et al., Journal of Applied Crystallography 39:72-81, 2006
@ -901,7 +913,7 @@ class Lattice:
[[-17,-17, 7],[-17, -5, 12]],
[[ 7,-17,-17],[ 12,-17, -5]],
[[ 17, -7,-17],[ 5, -12,-17]],
[[ 17,-17, 7],[ 17, -5,-12]],
[[ 17,-17, -7],[ 17, -5,-12]],
[[ -7, 17,-17],[-12, 17, -5]],
[[-17, 7,-17],[ -5, 12,-17]],
[[-17, 17, -7],[-17, 5,-12]]],dtype='float'),
@ -957,7 +969,7 @@ class Lattice:
[[ 2, 1, -1],[ 0, -1, 1]],
[[ -1, -2, -1],[ 0, -1, 1]],
[[ -1, 1, 2],[ 0, -1, 1]],
[[ -1, 2, 1],[ 0, -1, 1]],
[[ 2, -1, 1],[ 0, -1, 1]], # stated incorrectly in the paper, but the matrix is correct
[[ -1, 2, 1],[ 0, -1, 1]],
[[ -1, -1, -2],[ 0, -1, 1]]],dtype='float')}
@ -1025,7 +1037,7 @@ class Lattice:
https://doi.org/10.1016/j.actamat.2004.11.021
"""
models={'KS':self.KS, 'GT':self.GT, "GT'":self.GTprime,
models={'KS':self.KS, 'GT':self.GT, 'GT_prime':self.GTprime,
'NW':self.NW, 'Pitsch': self.Pitsch, 'Bain':self.Bain}
try:
relationship = models[model]
@ -1046,13 +1058,13 @@ class Lattice:
for miller in np.hstack((relationship['planes'],relationship['directions'])):
myPlane = miller[myPlane_id]/ np.linalg.norm(miller[myPlane_id])
myDir = miller[myDir_id]/ np.linalg.norm(miller[myDir_id])
myMatrix = np.array([myDir,np.cross(myPlane,myDir),myPlane]).T
myMatrix = np.array([myDir,np.cross(myPlane,myDir),myPlane])
otherPlane = miller[otherPlane_id]/ np.linalg.norm(miller[otherPlane_id])
otherDir = miller[otherDir_id]/ np.linalg.norm(miller[otherDir_id])
otherMatrix = np.array([otherDir,np.cross(otherPlane,otherDir),otherPlane]).T
otherMatrix = np.array([otherDir,np.cross(otherPlane,otherDir),otherPlane])
r['rotations'].append(Rotation.fromMatrix(np.dot(otherMatrix,myMatrix.T)))
r['rotations'].append(Rotation.fromMatrix(np.dot(otherMatrix.T,myMatrix)))
return r
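# A minimal sketch, assuming Lattice('fcc') is a valid constructor call: the
# Kurdjumov--Sachs (KS) fcc <-> bcc relationship provides 24 variants.
#   r = Lattice('fcc').relationOperations('KS')
#   len(r['rotations'])    # 24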
@ -1126,10 +1138,9 @@ class Orientation:
return (Orientation(r,self.lattice), i,j, k == 1) if symmetries else r # disorientation ...
# ... own sym, other sym,
# self-->other: True, self<--other: False
def inFZ(self):
return self.lattice.symmetry.inFZ(self.rotation.asRodrigues(vector=True))
def equivalentOrientations(self,members=[]):
"""List of orientations which are symmetrically equivalent."""
@ -1144,7 +1155,8 @@ class Orientation:
def relatedOrientations(self,model):
"""List of orientations related by the given orientation relationship."""
r = self.lattice.relationOperations(model)
return [self.__class__(self.rotation*o,r['lattice']) for o in r['rotations']]
return [self.__class__(o*self.rotation,r['lattice']) for o in r['rotations']]
def reduced(self):
"""Transform orientation to fall into fundamental zone according to symmetry."""
@ -1152,7 +1164,8 @@ class Orientation:
if self.lattice.symmetry.inFZ(me.rotation.asRodrigues(vector=True)): break
return self.__class__(me.rotation,self.lattice)
def inversePole(self,
axis,
proper = False,
@ -1192,9 +1205,9 @@ class Orientation:
ref = orientations[0]
for o in orientations:
closest.append(o.equivalentOrientations(
ref.disorientation(o,
SST = False, # select (o[ther]'s) sym orientation
symmetries = True)[2]).rotation) # with lowest misorientation
return Orientation(Rotation.fromAverage(closest,weights),ref.lattice)

View File

@ -3,6 +3,8 @@ import re
import pandas as pd
import numpy as np
from . import version
class Table():
"""Store spreadsheet-like data."""
@ -20,7 +22,7 @@ class Table():
Additional, human-readable information.
"""
self.comments = [] if comments is None else [c for c in comments]
self.comments = ['table.py v {}'.format(version)] if not comments else [c for c in comments]
self.data = pd.DataFrame(data=data)
self.shapes = shapes
self.__label_condensed()
@ -69,13 +71,16 @@ class Table():
f = open(fname)
except TypeError:
f = fname
f.seek(0)
header,keyword = f.readline().split()
if keyword == 'header':
header = int(header)
else:
raise Exception
comments = [f.readline()[:-1] for i in range(1,header)]
comments = ['table.py:from_ASCII v {}'.format(version)]
comments+= [f.readline()[:-1] for i in range(1,header)]
labels = f.readline().split()
shapes = {}
@ -95,9 +100,49 @@ class Table():
return Table(data,shapes,comments)
@staticmethod
def from_ang(fname):
"""
Create table from TSL ang file.
A valid TSL ang file needs to contain the following columns:
* Euler angles (Bunge notation) in radians, 3 floats, label 'eu'.
* Spatial position in meters, 2 floats, label 'pos'.
* Image quality, 1 float, label 'IQ'.
* Confidence index, 1 float, label 'CI'.
* Phase ID, 1 int, label 'ID'.
* SEM signal, 1 float, label 'intensity'.
* Fit, 1 float, label 'fit'.
Parameters
----------
fname : file, str, or pathlib.Path
Filename or file for reading.
"""
shapes = {'eu':(3,), 'pos':(2,),
'IQ':(1,), 'CI':(1,), 'ID':(1,), 'intensity':(1,), 'fit':(1,)}
try:
f = open(fname)
except TypeError:
f = fname
f.seek(0)
content = f.readlines()
comments = ['table.py:from_ang v {}'.format(version)]
for line in content:
if line.startswith('#'):
comments.append(line.strip())
else:
break
data = np.loadtxt(content)
return Table(data,shapes,comments)
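# A minimal usage sketch; 'scan.ang' is a hypothetical file name.
#   t = Table.from_ang('scan.ang')
#   t.get('eu')    # (N,3) array of Bunge-Euler angles in radians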
@property
def labels(self):
"""Return the labels of all columns."""
return list(self.shapes.keys())
@ -203,7 +248,7 @@ class Table():
'' if info is None else ': {}'.format(info),
))
self.shapes[label_new] = self.shapes.pop(label_old)
self.shapes = {(label if label is not label_old else label_new):self.shapes[label] for label in self.shapes}
def sort_by(self,labels,ascending=True):
@ -234,8 +279,9 @@ class Table():
Filename or file for reading.
"""
seen = set()
labels = []
for l in self.shapes:
for l in [x for x in self.data.columns if not (x in seen or seen.add(x))]:
if(self.shapes[l] == (1,)):
labels.append('{}'.format(l))
elif(len(self.shapes[l]) == 1):

View File

@ -7,9 +7,6 @@ from optparse import Option
from queue import Queue
from threading import Thread
import numpy as np
class bcolors:
"""
ASCII Colors (Blender code).
@ -64,19 +61,6 @@ def report(who = None,
croak( (emph(who)+': ' if who is not None else '') + (what if what is not None else '') + '\n' )
# -----------------------------
def report_geom(info,
what = ['grid','size','origin','homogenization','microstructures']):
"""Reports (selected) geometry information."""
output = {
'grid' : 'grid a b c: {}'.format(' x '.join(list(map(str,info['grid' ])))),
'size' : 'size x y z: {}'.format(' x '.join(list(map(str,info['size' ])))),
'origin' : 'origin x y z: {}'.format(' : '.join(list(map(str,info['origin'])))),
'homogenization' : 'homogenization: {}'.format(info['homogenization']),
'microstructures' : 'microstructures: {}'.format(info['microstructures']),
}
for item in what: croak(output[item.lower()])
# -----------------------------
def emph(what):
"""Formats string with emphasis."""
@ -119,30 +103,6 @@ def execute(cmd,
if process.returncode != 0: raise RuntimeError('{} failed with returncode {}'.format(cmd,process.returncode))
return out,error
def coordGridAndSize(coordinates):
"""Determines grid count and overall physical size along each dimension of an ordered array of coordinates."""
dim = coordinates.shape[1]
coords = [np.unique(coordinates[:,i]) for i in range(dim)]
mincorner = np.array(list(map(min,coords)))
maxcorner = np.array(list(map(max,coords)))
grid = np.array(list(map(len,coords)),'i')
size = grid/np.maximum(np.ones(dim,'d'), grid-1.0) * (maxcorner-mincorner) # size from edge to edge = dim * n/(n-1)
size = np.where(grid > 1, size, min(size[grid > 1]/grid[grid > 1])) # spacing for grid==1 equal to smallest among other ones
delta = size/grid
N = grid.prod()
if N != len(coordinates):
raise ValueError('Data count {} does not match grid {}.'.format(len(coordinates),' x '.join(map(repr,grid))))
if np.any(np.abs(np.log10((coords[0][1:]-coords[0][:-1])/delta[0])) > 0.01) \
or np.any(np.abs(np.log10((coords[1][1:]-coords[1][:-1])/delta[1])) > 0.01):
raise ValueError('regular grid spacing {} violated.'.format(' x '.join(map(repr,delta))))
if dim==3 and np.any(np.abs(np.log10((coords[2][1:]-coords[2][:-1])/delta[2])) > 0.01):
raise ValueError('regular grid spacing {} violated.'.format(' x '.join(map(repr,delta))))
return grid,size
# -----------------------------
class extendableOption(Option):
"""
@ -221,263 +181,6 @@ class return_message():
return srepr(self.message)
def leastsqBound(func, x0, args=(), bounds=None, Dfun=None, full_output=0,
col_deriv=0, ftol=1.49012e-8, xtol=1.49012e-8,
gtol=0.0, maxfev=0, epsfcn=None, factor=100, diag=None):
from scipy.optimize import _minpack
"""
Non-linear least squares fitting (Levenberg-Marquardt method) with
bounded parameters.
The int <-> ext parameter transformations follow the work of
Jonathan J. Helmus: https://github.com/jjhelmus/leastsqbound-scipy;
the remaining code follows the source of minpack.py.
An internal parameter list is used to enforce constraints on the fitting
parameters. The transformation is based on that of the MINUIT package;
see F. James and M. Winkler, MINUIT User's Guide, 2004.
bounds : list
(min, max) pairs for each parameter; use None for 'min' or 'max'
when there is no bound in that direction.
For example, with two parameters to be fitted,
bounds is [(min1,max1), (min2,max2)].
This function is based on 'leastsq' of minpack.py; the documentation of
the other parameters can be found in 'least_squares.py'.
"""
def _check_func(checker, argname, thefunc, x0, args, numinputs,
output_shape=None):
"""The same as that of minpack.py."""
from numpy import shape
res = np.atleast_1d(thefunc(*((x0[:numinputs],) + args)))
if (output_shape is not None) and (shape(res) != output_shape):
if (output_shape[0] != 1):
if len(output_shape) > 1:
if output_shape[1] == 1:
return shape(res)
msg = "%s: there is a mismatch between the input and output " \
"shape of the '%s' argument" % (checker, argname)
func_name = getattr(thefunc, '__name__', None)
if func_name:
msg += " '%s'." % func_name
else:
msg += "."
raise TypeError(msg)
if np.issubdtype(res.dtype, np.inexact):
dt = res.dtype
else:
dt = np.dtype(float) # 'dtype' alone is not imported here; use the numpy namespace
return shape(res), dt
def _int2extGrad(p_int, bounds):
"""Calculate the gradients of transforming the internal (unconstrained) to external (constrained) parameter."""
grad = np.empty_like(p_int)
for i, (x, bound) in enumerate(zip(p_int, bounds)):
lower, upper = bound
if lower is None and upper is None: # No constraints
grad[i] = 1.0
elif upper is None: # only lower bound
grad[i] = x/np.sqrt(x*x + 1.0)
elif lower is None: # only upper bound
grad[i] = -x/np.sqrt(x*x + 1.0)
else: # lower and upper bounds
grad[i] = (upper - lower)*np.cos(x)/2.0
return grad
def _int2extFunc(bounds):
"""Transform internal parameters into external parameters."""
local = [_int2extLocal(b) for b in bounds]
def _transform_i2e(p_int):
p_ext = np.empty_like(p_int)
p_ext[:] = [i(j) for i, j in zip(local, p_int)]
return p_ext
return _transform_i2e
def _ext2intFunc(bounds):
"""Transform external parameters into internal parameters."""
local = [_ext2intLocal(b) for b in bounds]
def _transform_e2i(p_ext):
p_int = np.empty_like(p_ext)
p_int[:] = [i(j) for i, j in zip(local, p_ext)]
return p_int
return _transform_e2i
def _int2extLocal(bound):
"""Transform a single internal parameter to an external parameter."""
lower, upper = bound
if lower is None and upper is None: # no constraints
return lambda x: x
elif upper is None: # only lower bound
return lambda x: lower - 1.0 + np.sqrt(x*x + 1.0)
elif lower is None: # only upper bound
return lambda x: upper + 1.0 - np.sqrt(x*x + 1.0)
else:
return lambda x: lower + ((upper - lower)/2.0)*(np.sin(x) + 1.0)
def _ext2intLocal(bound):
"""Transform a single external parameter to an internal parameter."""
lower, upper = bound
if lower is None and upper is None: # no constraints
return lambda x: x
elif upper is None: # only lower bound
return lambda x: np.sqrt((x - lower + 1.0)**2 - 1.0)
elif lower is None: # only upper bound
return lambda x: np.sqrt((x - upper - 1.0)**2 - 1.0)
else:
return lambda x: np.arcsin((2.0*(x - lower)/(upper - lower)) - 1.0)
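# Summary of the MINUIT-style transforms implemented above: for a doubly
# bounded parameter, external x in (lower,upper) maps to an unconstrained
# internal value via
#   x_int = arcsin(2*(x - lower)/(upper - lower) - 1)
# and back via
#   x_ext = lower + (upper - lower)/2 * (sin(x_int) + 1);
# one-sided bounds use the sqrt-based maps in _int2extLocal/_ext2intLocal.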
i2e = _int2extFunc(bounds)
e2i = _ext2intFunc(bounds)
x0 = np.asarray(x0).flatten()
n = len(x0)
if len(bounds) != n:
raise ValueError('the length of bounds is inconsistent with the number of parameters ')
if not isinstance(args, tuple):
args = (args,)
shape, dtype = _check_func('leastsq', 'func', func, x0, args, n)
m = shape[0]
if n > m:
raise TypeError('Improper input: N=%s must not exceed M=%s' % (n, m))
if epsfcn is None:
epsfcn = np.finfo(dtype).eps
def funcWarp(x, *args):
return func(i2e(x), *args)
xi0 = e2i(x0)
if Dfun is None:
if maxfev == 0:
maxfev = 200*(n + 1)
retval = _minpack._lmdif(funcWarp, xi0, args, full_output, ftol, xtol,
gtol, maxfev, epsfcn, factor, diag)
else:
if col_deriv:
_check_func('leastsq', 'Dfun', Dfun, x0, args, n, (n, m))
else:
_check_func('leastsq', 'Dfun', Dfun, x0, args, n, (m, n))
if maxfev == 0:
maxfev = 100*(n + 1)
def DfunWarp(x, *args):
return Dfun(i2e(x), *args)
retval = _minpack._lmder(funcWarp, DfunWarp, xi0, args, full_output, col_deriv,
ftol, xtol, gtol, maxfev, factor, diag)
errors = {0: ["Improper input parameters.", TypeError],
1: ["Both actual and predicted relative reductions "
"in the sum of squares\n are at most %f" % ftol, None],
2: ["The relative error between two consecutive "
"iterates is at most %f" % xtol, None],
3: ["Both actual and predicted relative reductions in "
"the sum of squares\n are at most %f and the "
"relative error between two consecutive "
"iterates is at \n most %f" % (ftol, xtol), None],
4: ["The cosine of the angle between func(x) and any "
"column of the\n Jacobian is at most %f in "
"absolute value" % gtol, None],
5: ["Number of calls to function has reached "
"maxfev = %d." % maxfev, ValueError],
6: ["ftol=%f is too small, no further reduction "
"in the sum of squares\n is possible.""" % ftol,
ValueError],
7: ["xtol=%f is too small, no further improvement in "
"the approximate\n solution is possible." % xtol,
ValueError],
8: ["gtol=%f is too small, func(x) is orthogonal to the "
"columns of\n the Jacobian to machine "
"precision." % gtol, ValueError],
'unknown': ["Unknown error.", TypeError]}
info = retval[-1] # The FORTRAN return value
if info not in [1, 2, 3, 4] and not full_output:
if info in [5, 6, 7, 8]:
np.warnings.warn(errors[info][0], RuntimeWarning)
else:
try:
raise errors[info][1](errors[info][0])
except KeyError:
raise errors['unknown'][1](errors['unknown'][0])
mesg = errors[info][0]
x = i2e(retval[0])
if full_output:
grad = _int2extGrad(retval[0], bounds)
retval[1]['fjac'] = (retval[1]['fjac'].T / np.take(grad,
retval[1]['ipvt'] - 1)).T
cov_x = None
if info in [1, 2, 3, 4]:
from numpy.dual import inv
from numpy.linalg import LinAlgError
perm = np.take(np.eye(n), retval[1]['ipvt'] - 1, 0)
r = np.triu(np.transpose(retval[1]['fjac'])[:n, :])
R = np.dot(r, perm)
try:
cov_x = inv(np.dot(np.transpose(R), R))
except LinAlgError as inverror:
print(inverror)
pass
return (x, cov_x) + retval[1:-1] + (mesg, info)
else:
return (x, info)
def _general_function(params, ydata, xdata, function):
return function(xdata, *params) - ydata
def _weighted_general_function(params, ydata, xdata, function, weights):
return (function(xdata, *params) - ydata)*weights
def curve_fit_bound(f, xdata, ydata, p0=None, sigma=None, bounds=None, **kw):
"""Similar as 'curve_fit' in minpack.py."""
if p0 is None:
# determine number of parameters by inspecting the function
import inspect
args, varargs, varkw, defaults = inspect.getargspec(f)
if len(args) < 2:
msg = "Unable to determine number of fit parameters."
raise ValueError(msg)
if 'self' in args:
p0 = [1.0] * (len(args)-2)
else:
p0 = [1.0] * (len(args)-1)
if np.isscalar(p0):
p0 = np.array([p0])
args = (ydata, xdata, f)
if sigma is None:
func = _general_function
else:
func = _weighted_general_function
args += (1.0/np.asarray(sigma),)
return_full = kw.pop('full_output', False)
res = leastsqBound(func, p0, args=args, bounds = bounds, full_output=True, **kw)
(popt, pcov, infodict, errmsg, ier) = res
if ier not in [1, 2, 3, 4]:
msg = "Optimal parameters not found: " + errmsg
raise RuntimeError(msg)
if (len(ydata) > len(p0)) and pcov is not None:
s_sq = (func(popt, *args)**2).sum()/(len(ydata)-len(p0))
pcov = pcov * s_sq
else:
pcov = np.inf
return (popt, pcov, infodict, errmsg, ier) if return_full else (popt, pcov)
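Although curve_fit_bound is removed in this commit, a minimal usage sketch may help when migrating code that still calls it (model and data are illustrative, not taken from the test suite):
import numpy as np
f = lambda x, a, b: a*np.exp(-b*x)                      # illustrative model
xdata = np.linspace(0.0, 4.0, 50)
ydata = f(xdata, 2.5, 1.3) + 0.05*np.random.normal(size=50)
popt, pcov = curve_fit_bound(f, xdata, ydata, p0=[1.0, 1.0],
                             bounds=[(0.0, 10.0), (0.0, None)])  # a in [0,10], b >= 0
scipy.optimize.curve_fit has offered native bounds support since SciPy 0.17, which presumably makes this in-house variant redundant.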
class ThreadPool:
"""Pool of threads consuming tasks from a queue."""

View File

@ -1,8 +1,9 @@
import setuptools
import os
import re
with open(os.path.join(os.path.dirname(__file__),'damask/VERSION')) as f:
version = f.readline().strip()
version = re.sub(r'^v','',f.readline().strip())
setuptools.setup(
name="damask",
@ -15,9 +16,8 @@ setuptools.setup(
packages=setuptools.find_packages(),
include_package_data=True,
install_requires = [
"numpy",
"scipy",
"pandas",
"scipy",
"h5py",
"vtk",
],
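The version normalization above strips a leading 'v' so that a tag-style string becomes a plain version; a minimal sketch with an illustrative value:
import re
assert re.sub(r'^v', '', 'v2.0.3') == '2.0.3'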

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -0,0 +1,38 @@
% Start MTEX first in Matlab
tmp = matlab.desktop.editor.getActive;
cd(fileparts(tmp.Filename));
symmetry = {crystalSymmetry('m-3m', [1 1 1], 'mineral', 'Iron', 'color', 'light blue')}
% plotting convention
setMTEXpref('xAxisDirection','north');
setMTEXpref('zAxisDirection','outOfPlane');
lattice_types = {'BCC','FCC'};
models = {'Bain','GT','GT_prime','KS','NW','Pitsch'};
rotation = containers.Map;
rotation('BCC') = 'Passive Rotation';
rotation('FCC') = 'Active Rotation';
for lattice = lattice_types
for p = 0:length(models)/3-1
EBSD_data = {loadEBSD(strcat(lattice,'_',models{p*3+1},'.txt'),symmetry,'interface','generic',...
'ColumnNames', { 'phi1' 'Phi' 'phi2' 'x' 'y'}, 'Bunge', rotation(char(lattice))),
loadEBSD(strcat(lattice,'_',models{p*3+2},'.txt'),symmetry,'interface','generic',...
'ColumnNames', { 'phi1' 'Phi' 'phi2' 'x' 'y'}, 'Bunge', rotation(char(lattice))),
loadEBSD(strcat(lattice,'_',models{p*3+3},'.txt'),symmetry,'interface','generic',...
'ColumnNames', { 'phi1' 'Phi' 'phi2' 'x' 'y'}, 'Bunge', rotation(char(lattice)))}
h = [Miller(1,0,0,symmetry{1}),Miller(1,1,0,symmetry{1}),Miller(1,1,1,symmetry{1})]; % 3 pole figures
plotPDF(EBSD_data{1}.orientations,h,'MarkerSize',5,'MarkerColor','r','DisplayName',models{p*3+1})
hold on
plotPDF(EBSD_data{2}.orientations,h,'MarkerSize',4,'MarkerColor','b','DisplayName',models{p*3+2})
plotPDF(EBSD_data{3}.orientations,h,'MarkerSize',3,'MarkerColor','g','DisplayName',models{p*3+3})
legend('show','location','southoutside','Interpreter', 'none')
orient('landscape')
print('-bestfit',strcat(int2str(p+1),'_',char(lattice),'.pdf'),'-dpdf')
close
end
end
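The Euler-angle tables that this script plots (listed below) can be regenerated on the Python side; a sketch modeled on the new test_relationship_reference test further down in this commit (lattice and model are illustrative choices):
import numpy as np
from damask import Rotation, Orientation
ori = Orientation(Rotation(), 'fcc')                      # identity orientation, fcc parent lattice
eu = np.array([o.rotation.asEulers(degrees=True)
               for o in ori.relatedOrientations('KS')])   # Kurdjumov-Sachs variants
print(eu.shape)                                           # (24, 3) for KS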

View File

@ -0,0 +1,5 @@
1 header
1_Eulers 2_Eulers 3_Eulers 1_pos 2_pos
0.0 45.00000000000001 0.0 1 1
90.0 45.00000000000001 270.0 1 2
45.00000000000001 0.0 0.0 1 3

View File

@ -0,0 +1,26 @@
1 header
1_Eulers 2_Eulers 3_Eulers 1_pos 2_pos
283.60440567265294 9.976439066337804 33.24637065555936 1 1
167.8261034151001 43.397849654402556 183.40022280897963 1 2
262.1156357053931 43.82007387041961 104.07478363123654 1 3
103.604405672653 9.976439066337804 213.24637065555936 1 4
347.8261034151001 43.39784965440255 3.400222808979685 1 5
82.11563570539313 43.82007387041961 284.0747836312365 1 6
76.39559432734703 9.976439066337806 326.75362934444064 1 7
192.17389658489986 43.397849654402556 176.59977719102034 1 8
97.88436429460687 43.82007387041961 255.92521636876344 1 9
256.395594327347 9.976439066337804 146.75362934444064 1 10
12.173896584899929 43.39784965440254 356.59977719102034 1 11
277.8843642946069 43.82007387041961 75.92521636876346 1 12
102.17389658489992 43.39784965440254 266.59977719102034 1 13
346.395594327347 9.976439066337804 56.75362934444064 1 14
7.884364294606862 43.82007387041961 345.9252163687635 1 15
282.17389658489986 43.39784965440254 86.59977719102032 1 16
166.39559432734703 9.976439066337804 236.75362934444058 1 17
187.88436429460683 43.82007387041961 165.92521636876344 1 18
257.8261034151001 43.39784965440255 93.40022280897969 1 19
13.604405672652977 9.976439066337804 303.24637065555936 1 20
352.1156357053931 43.82007387041961 14.074783631236542 1 21
77.82610341510008 43.397849654402556 273.4002228089796 1 22
193.60440567265297 9.976439066337806 123.24637065555939 1 23
172.11563570539317 43.82007387041961 194.07478363123653 1 24

View File

@ -0,0 +1,26 @@
1 header
1_Eulers 2_Eulers 3_Eulers 1_pos 2_pos
303.24637065555936 9.976439066337804 13.604405672652977 1 1
165.92521636876344 43.82007387041961 187.88436429460683 1 2
266.59977719102034 43.39784965440254 102.17389658489992 1 3
123.24637065555939 9.976439066337804 193.604405672653 1 4
345.9252163687635 43.82007387041961 7.884364294606862 1 5
86.59977719102032 43.39784965440254 282.17389658489986 1 6
56.75362934444064 9.976439066337804 346.395594327347 1 7
194.07478363123653 43.82007387041961 172.11563570539317 1 8
93.40022280897969 43.39784965440255 257.8261034151001 1 9
236.75362934444058 9.976439066337804 166.39559432734697 1 10
14.074783631236542 43.82007387041961 352.1156357053931 1 11
273.4002228089796 43.397849654402556 77.82610341510008 1 12
104.07478363123654 43.82007387041961 262.1156357053931 1 13
326.75362934444064 9.976439066337806 76.39559432734703 1 14
3.400222808979685 43.39784965440255 347.8261034151001 1 15
284.0747836312365 43.82007387041961 82.11563570539313 1 16
146.75362934444064 9.976439066337804 256.395594327347 1 17
183.40022280897963 43.397849654402556 167.8261034151001 1 18
255.92521636876344 43.82007387041961 97.88436429460687 1 19
33.24637065555936 9.976439066337804 283.60440567265294 1 20
356.59977719102034 43.39784965440254 12.173896584899929 1 21
75.92521636876346 43.82007387041961 277.8843642946069 1 22
213.24637065555936 9.976439066337804 103.604405672653 1 23
176.59977719102034 43.397849654402556 192.17389658489986 1 24

View File

@ -0,0 +1,26 @@
1 header
1_Eulers 2_Eulers 3_Eulers 1_pos 2_pos
335.7965716606702 10.528779365509317 65.79657166067024 1 1
228.77270547567446 80.40593177313953 85.64260312151849 1 2
131.22729452432552 80.40593177313954 4.357396878481506 1 3
24.20342833932977 10.52877936550932 24.20342833932976 1 4
221.95489158457983 85.70366403943002 80.37863910890589 1 5
138.04510841542015 85.70366403943004 9.621360891094124 1 6
131.22729452432552 80.40593177313953 94.35739687848151 1 7
24.203428339329765 10.52877936550932 114.20342833932976 1 8
221.95489158457983 85.70366403943004 170.37863910890587 1 9
138.04510841542015 85.70366403943004 99.62136089109411 1 10
335.7965716606702 10.52877936550932 155.79657166067025 1 11
228.77270547567448 80.40593177313954 175.6426031215185 1 12
335.7965716606702 10.52877936550932 335.7965716606702 1 13
228.77270547567448 80.40593177313954 355.6426031215185 1 14
131.2272945243255 80.40593177313954 274.35739687848144 1 15
24.203428339329747 10.52877936550932 294.2034283393298 1 16
221.95489158457985 85.70366403943004 350.3786391089059 1 17
138.04510841542015 85.70366403943004 279.6213608910941 1 18
41.95489158457986 94.29633596056998 9.621360891094133 1 19
318.04510841542015 94.29633596056996 80.37863910890589 1 20
155.79657166067025 169.4712206344907 24.203428339329754 1 21
48.77270547567448 99.59406822686046 4.357396878481504 1 22
311.2272945243255 99.59406822686046 85.64260312151852 1 23
204.20342833932975 169.4712206344907 65.79657166067024 1 24

View File

@ -0,0 +1,14 @@
1 header
1_Eulers 2_Eulers 3_Eulers 1_pos 2_pos
225.41555594321144 83.13253115922213 83.08266205989301 1 1
134.58444405678856 83.13253115922211 6.917337940107012 1 2
4.702125169424418e-15 9.735610317245317 45.0 1 3
134.58444405678856 83.13253115922213 276.91733794010696 1 4
225.4155559432114 83.13253115922213 353.082662059893 1 5
0.0 9.735610317245317 315.0 1 6
134.58444405678858 83.13253115922213 96.91733794010702 1 7
225.41555594321142 83.13253115922213 173.082662059893 1 8
0.0 9.735610317245317 135.0 1 9
99.59803029876785 45.81931182053557 166.36129272052355 1 10
260.40196970123213 45.81931182053556 283.6387072794765 1 11
180.0 99.73561031724535 225.0 1 12

View File

@ -0,0 +1,14 @@
1 header
1_Eulers 2_Eulers 3_Eulers 1_pos 2_pos
6.9173379401070045 83.13253115922213 44.58444405678856 1 1
45.0 89.99999999999999 279.7356103172453 1 2
166.36129272052352 45.819311820535574 279.59803029876787 1 3
83.08266205989301 83.13253115922213 225.41555594321144 1 4
256.3612927205235 45.819311820535574 189.59803029876787 1 5
315.0 90.0 9.735610317245369 1 6
186.917337940107 83.13253115922213 224.58444405678856 1 7
315.0 90.0 80.26438968275463 1 8
13.638707279476478 45.81931182053557 260.40196970123213 1 9
263.082662059893 83.13253115922213 45.415555943211444 1 10
103.63870727947646 45.819311820535574 170.40196970123213 1 11
224.99999999999997 90.0 170.26438968275465 1 12

View File

@ -0,0 +1,5 @@
1 header
1_Eulers 2_Eulers 3_Eulers 1_pos 2_pos
180.0 45.00000000000001 180.0 1 1
270.0 45.00000000000001 90.0 1 2
315.0 0.0 0.0 1 3

View File

@ -0,0 +1,26 @@
1 header
1_Eulers 2_Eulers 3_Eulers 1_pos 2_pos
146.75362934444064 9.976439066337804 256.395594327347 1 1
356.59977719102034 43.39784965440254 12.173896584899929 1 2
75.92521636876346 43.82007387041961 277.8843642946069 1 3
326.75362934444064 9.976439066337806 76.39559432734703 1 4
176.59977719102034 43.397849654402556 192.17389658489986 1 5
255.92521636876344 43.82007387041961 97.88436429460687 1 6
213.24637065555936 9.976439066337804 103.604405672653 1 7
3.400222808979685 43.39784965440255 347.8261034151001 1 8
284.0747836312365 43.82007387041961 82.11563570539313 1 9
33.24637065555936 9.976439066337804 283.60440567265294 1 10
183.40022280897963 43.397849654402556 167.8261034151001 1 11
104.07478363123654 43.82007387041961 262.1156357053931 1 12
273.4002228089796 43.397849654402556 77.82610341510008 1 13
123.24637065555939 9.976439066337806 193.60440567265297 1 14
194.07478363123653 43.82007387041961 172.11563570539317 1 15
93.40022280897969 43.39784965440255 257.8261034151001 1 16
303.24637065555936 9.976439066337804 13.604405672652977 1 17
14.074783631236542 43.82007387041961 352.1156357053931 1 18
86.59977719102032 43.39784965440254 282.17389658489986 1 19
236.75362934444058 9.976439066337804 166.39559432734703 1 20
165.92521636876344 43.82007387041961 187.88436429460683 1 21
266.59977719102034 43.39784965440254 102.17389658489992 1 22
56.75362934444064 9.976439066337804 346.395594327347 1 23
345.9252163687635 43.82007387041961 7.884364294606862 1 24

View File

@ -0,0 +1,26 @@
1 header
1_Eulers 2_Eulers 3_Eulers 1_pos 2_pos
166.39559432734697 9.976439066337804 236.75362934444058 1 1
352.1156357053931 43.82007387041961 14.074783631236542 1 2
77.82610341510008 43.397849654402556 273.4002228089796 1 3
346.395594327347 9.976439066337804 56.75362934444064 1 4
172.11563570539317 43.82007387041961 194.07478363123653 1 5
257.8261034151001 43.39784965440255 93.40022280897969 1 6
193.604405672653 9.976439066337804 123.24637065555939 1 7
7.884364294606862 43.82007387041961 345.9252163687635 1 8
282.17389658489986 43.39784965440254 86.59977719102032 1 9
13.604405672652977 9.976439066337804 303.24637065555936 1 10
187.88436429460683 43.82007387041961 165.92521636876344 1 11
102.17389658489992 43.39784965440254 266.59977719102034 1 12
277.8843642946069 43.82007387041961 75.92521636876346 1 13
103.604405672653 9.976439066337804 213.24637065555936 1 14
192.17389658489986 43.397849654402556 176.59977719102034 1 15
97.88436429460687 43.82007387041961 255.92521636876344 1 16
283.60440567265294 9.976439066337804 33.24637065555936 1 17
12.173896584899929 43.39784965440254 356.59977719102034 1 18
82.11563570539313 43.82007387041961 284.0747836312365 1 19
256.395594327347 9.976439066337804 146.75362934444064 1 20
167.8261034151001 43.397849654402556 183.40022280897963 1 21
262.1156357053931 43.82007387041961 104.07478363123654 1 22
76.39559432734703 9.976439066337806 326.75362934444064 1 23
347.8261034151001 43.39784965440255 3.400222808979685 1 24

View File

@ -0,0 +1,26 @@
1 header
1_Eulers 2_Eulers 3_Eulers 1_pos 2_pos
114.20342833932975 10.52877936550932 204.20342833932972 1 1
94.3573968784815 80.40593177313954 311.22729452432543 1 2
175.6426031215185 80.40593177313954 48.77270547567447 1 3
155.79657166067025 10.52877936550932 155.79657166067025 1 4
99.62136089109411 85.70366403943004 318.04510841542015 1 5
170.37863910890587 85.70366403943002 41.954891584579855 1 6
85.64260312151852 80.40593177313954 48.77270547567448 1 7
65.79657166067024 10.52877936550932 155.79657166067025 1 8
9.621360891094124 85.70366403943004 318.04510841542015 1 9
80.37863910890587 85.70366403943004 41.95489158457987 1 10
24.203428339329758 10.52877936550932 204.20342833932975 1 11
4.357396878481486 80.40593177313954 311.2272945243255 1 12
204.20342833932972 10.52877936550932 204.20342833932972 1 13
184.35739687848147 80.40593177313954 311.2272945243255 1 14
265.64260312151845 80.40593177313953 48.77270547567449 1 15
245.79657166067025 10.528779365509317 155.79657166067025 1 16
189.62136089109413 85.70366403943004 318.04510841542015 1 17
260.3786391089059 85.70366403943002 41.954891584579855 1 18
170.37863910890587 94.29633596056996 138.04510841542015 1 19
99.62136089109411 94.29633596056998 221.95489158457983 1 20
155.79657166067025 169.4712206344907 24.203428339329754 1 21
175.64260312151848 99.59406822686046 131.22729452432552 1 22
94.35739687848151 99.59406822686046 228.77270547567446 1 23
114.20342833932975 169.4712206344907 335.7965716606702 1 24

View File

@ -0,0 +1,14 @@
1 header
1_Eulers 2_Eulers 3_Eulers 1_pos 2_pos
96.91733794010702 83.13253115922213 314.5844440567886 1 1
173.082662059893 83.13253115922211 45.41555594321143 1 2
135.0 9.735610317245317 180.0 1 3
263.082662059893 83.13253115922213 45.415555943211444 1 4
186.91733794010702 83.13253115922211 314.5844440567886 1 5
224.99999999999997 9.735610317245317 180.0 1 6
83.082662059893 83.13253115922213 45.415555943211444 1 7
6.917337940106983 83.13253115922211 314.5844440567886 1 8
45.0 9.73561031724532 180.0 1 9
13.638707279476469 45.81931182053557 80.40196970123216 1 10
256.36129272052347 45.81931182053556 279.59803029876775 1 11
315.0 99.73561031724536 0.0 1 12

View File

@ -0,0 +1,14 @@
1 header
1_Eulers 2_Eulers 3_Eulers 1_pos 2_pos
135.41555594321144 83.13253115922213 173.082662059893 1 1
260.26438968275465 90.0 135.0 1 2
260.40196970123213 45.81931182053557 13.638707279476478 1 3
314.5844440567886 83.13253115922213 96.91733794010702 1 4
350.40196970123213 45.81931182053557 283.6387072794765 1 5
170.26438968275465 90.0 224.99999999999997 1 6
315.4155559432114 83.13253115922213 353.08266205989304 1 7
99.73561031724536 90.0 225.0 1 8
279.59803029876787 45.819311820535574 166.36129272052352 1 9
134.58444405678856 83.13253115922213 276.91733794010696 1 10
9.598030298767851 45.819311820535574 76.36129272052355 1 11
9.735610317245369 90.0 315.0 1 12

View File

@ -0,0 +1,138 @@
# TEM_PIXperUM 1.000000
# x-star 240.000000
# y-star 240.000000
# z-star 240.000000
# WorkingDistance 20.000000
#
# Phase 1
# MaterialName Iron(Alpha)
# Formula
# Info
# Symmetry 43
# LatticeConstants 2.870 2.870 2.870 90.000 90.000 90.000
# NumberFamilies 100
# hklFamilies 9223440 0 2 32763 0.000000 32763
# hklFamilies 0 0 0 9218712 0.000000 9218712
# hklFamilies 0 0 3801155 0 0.000000 0
# hklFamilies 5570652 6619251 7536754 -1203738484 0.000000 -1203738484
# hklFamilies 7143516 5111900 7864421 32763 0.000000 32763
# hklFamilies 6488180 7274604 6553717 9220480 0.000000 9220480
# hklFamilies 3145820 2949169 3145777 0 0.000000 0
# hklFamilies 3014704 7209057 103 9220488 0.000000 9220488
# hklFamilies 0 0 0 0 0.000000 0
# hklFamilies 0 0 0 9220032 0.000000 9220032
# hklFamilies 0 0 0 0 0.000000 0
# hklFamilies 0 0 0 -1203728363 0.000000 -1203728363
# hklFamilies 0 0 0 32763 0.000000 32763
# hklFamilies 0 0 0 9218628 0.000000 9218628
# hklFamilies 0 0 0 0 0.000000 0
# hklFamilies 0 0 0 9218504 0.000000 9218504
# hklFamilies 0 0 0 0 0.000000 0
# hklFamilies 0 0 0 9219904 0.000000 9219904
# hklFamilies 0 0 0 0 0.000000 0
# hklFamilies 0 0 0 0 -0.000046 0
# hklFamilies 0 0 0 0 0.000000 0
# hklFamilies 0 0 0 256 0.000000 256
# hklFamilies 0 0 0 0 0.000000 0
# hklFamilies 0 0 0 -1203753636 0.000000 -1203753636
# hklFamilies 0 0 0 32763 0.000000 32763
# hklFamilies 0 0 0 9220576 0.000000 9220576
# hklFamilies 0 0 0 0 0.000000 0
# hklFamilies 0 0 0 9218736 0.000000 9218736
# hklFamilies 0 0 0 0 0.000000 0
# hklFamilies 0 0 0 103219574 0.000000 103219574
# hklFamilies 0 0 0 0 0.000000 0
# hklFamilies 0 0 0 9220576 0.000000 9220576
# hklFamilies 0 0 0 0 0.000000 0
# hklFamilies 0 0 0 9220692 0.000000 9220692
# hklFamilies 1434293657 0 0 0 0.000000 0
# hklFamilies 0 0 0 9218584 0.000000 9218584
# hklFamilies 0 0 0 0 0.000000 0
# hklFamilies 0 0 0 9219976 0.000000 9219976
# hklFamilies 0 0 0 0 0.000000 0
# hklFamilies 0 0 0 0 0.000000 0
# hklFamilies 0 0 0 0 0.000000 0
# hklFamilies 0 0 0 256 0.000000 256
# hklFamilies 0 0 69473872 0 0.000000 0
# hklFamilies 0 1889785611 -1546188227 -1203753636 -0.000046 -1203753636
# hklFamilies 9224144 0 1434294456 32763 0.000000 32763
# hklFamilies 0 9224160 0 9220672 0.000000 9220672
# hklFamilies -1168390977 32763 851982 0 0.000000 0
# hklFamilies 0 304 0 9218816 0.000000 9218816
# hklFamilies 27030208 0 1434297593 0 0.000000 0
# hklFamilies 0 9224160 0 101654020 0.000000 101654020
# hklFamilies 9224064 0 0 0 0.000000 0
# hklFamilies 0 25563456 0 9220672 0.000000 9220672
# hklFamilies 9224544 0 25559040 0 0.000000 0
# hklFamilies 0 25559788 0 9220788 0.000000 9220788
# hklFamilies 176 0 304 24 0.000000 24
# hklFamilies 0 25562304 0 4 0.000000 4
# hklFamilies 9224208 0 0 0 0.000000 0
# hklFamilies 0 281 0 9220032 0.000000 9220032
# hklFamilies 0 0 0 0 0.000000 0
# hklFamilies 0 -1168390977 32763 9220660 0.000000 9220660
# hklFamilies 21 0 -1168390977 8 0.000000 8
# hklFamilies 32763 2490388 0 24 0.000000 24
# hklFamilies 48 0 69650048 25 0.000000 25
# hklFamilies 0 -1216995621 32763 65535 -0.000046 65535
# hklFamilies 0 0 25562688 1 0.000000 1
# hklFamilies 0 0 21776 0 -0.000058 0
# hklFamilies 25562688 0 25559724 0 0.000000 0
# hklFamilies 0 25559040 0 1179652 0.000000 1179652
# hklFamilies 25559724 0 25562304 32763 0.000000 32763
# hklFamilies 0 48 0 9219904 0.000000 9219904
# hklFamilies 25562304 0 28 0 0.000000 0
# hklFamilies 0 0 0 8781958 0.000000 8781958
# hklFamilies 31 0 0 0 0.000000 0
# hklFamilies 0 0 0 103304392 0.000000 103304392
# hklFamilies 3 0 48 0 0.000000 0
# hklFamilies 0 9224505 0 103219694 -0.000046 103219694
# hklFamilies 27000832 0 -1168393705 0 0.000000 0
# hklFamilies 32763 25559040 0 9220192 0.000000 9220192
# hklFamilies 0 32763 31 0 0.000000 0
# hklFamilies 0 0 0 9219872 0.000000 9219872
# hklFamilies 69729712 0 9224640 0 0.000000 0
# hklFamilies 0 69729904 0 1397706823 0.000000 1397706823
# hklFamilies 69911504 0 0 59 0.000000 59
# hklFamilies 0 27007968 0 103219200 0.000000 103219200
# hklFamilies 0 0 -1216843775 0 0.000000 0
# hklFamilies 32763 69911504 0 0 0.000000 0
# hklFamilies -1168296496 32763 9225328 0 0.000000 0
# hklFamilies 0 1434343267 0 9632160 0.000000 9632160
# hklFamilies 69908840 0 -1216995621 0 0.000000 0
# hklFamilies 32763 256 0 9632112 0.000000 9632112
# hklFamilies 0 0 399376220 0 0.000000 0
# hklFamilies 21776 1966087 4456474 262148 0.000000 262148
# hklFamilies 9224704 0 1434198234 0 0.000000 0
# hklFamilies 0 0 0 9704044 0.000000 9704044
# hklFamilies -1168373699 32763 1 0 0.000000 0
# hklFamilies 0 69911504 0 94961568 -0.000046 94961568
# hklFamilies 1 0 69911504 0 0.000000 0
# hklFamilies 0 10 0 9220016 0.000000 9220016
# hklFamilies -1 0 27030208 0 0.000000 0
# hklFamilies 0 1434488087 18 9219992 -0.000046 9219992
# ElasticConstants 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000
# ElasticConstants 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000
# ElasticConstants 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000
# ElasticConstants 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000
# ElasticConstants 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000
# ElasticConstants 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000
# Categories1 1 1 1 1
#
# GRID: SqrGrid
# XSTEP: 0.050000
# YSTEP: 0.050000
# NCOLS_ODD: 2
# NCOLS_EVEN: 2
# NROWS: 2
#
# OPERATOR:
#
# SAMPLEID:
#
# SCANID:
#
0.0 0.0 0.0 0.00 0.00 60.0 20.0 1 2.0 1.5
0.0 2.0 0.0 0.05 0.00 60.0 20.0 1 2.0 1.5
0.0 2.0 0.0 0.00 0.05 60.0 20.0 1 2.0 1.5
0.0 0.0 1.0 0.05 0.05 60.0 20.0 1 2.0 1.5

View File

@ -1,7 +1,11 @@
import os
import pytest
import numpy as np
import damask
from damask import Rotation
from damask import Orientation
n = 1000
@ -10,6 +14,11 @@ def default():
"""A set of n random rotations."""
return [Rotation.fromRandom() for r in range(n)]
@pytest.fixture
def reference_dir(reference_dir_base):
"""Directory containing reference results."""
return os.path.join(reference_dir_base,'Rotation')
class TestRotation:
@ -18,38 +27,55 @@ class TestRotation:
assert np.allclose(rot.asQuaternion(),
Rotation.fromEulers(rot.asEulers()).asQuaternion())
def test_AxisAngle(self,default):
for rot in default:
assert np.allclose(rot.asEulers(),
Rotation.fromAxisAngle(rot.asAxisAngle()).asEulers())
def test_Matrix(self,default):
for rot in default:
assert np.allclose(rot.asAxisAngle(),
Rotation.fromMatrix(rot.asMatrix()).asAxisAngle())
def test_Rodrigues(self,default):
for rot in default:
assert np.allclose(rot.asMatrix(),
Rotation.fromRodrigues(rot.asRodrigues()).asMatrix())
def test_Homochoric(self,default):
for rot in default:
assert np.allclose(rot.asRodrigues(),
Rotation.fromHomochoric(rot.asHomochoric()).asRodrigues())
def test_Cubochoric(self,default):
for rot in default:
assert np.allclose(rot.asHomochoric(),
Rotation.fromCubochoric(rot.asCubochoric()).asHomochoric())
def test_Quaternion(self,default):
for rot in default:
assert np.allclose(rot.asCubochoric(),
Rotation.fromQuaternion(rot.asQuaternion()).asCubochoric())
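These tests chain the representations pairwise so that each conversion is exercised once per cycle; for illustration, a single standalone round-trip through axis-angle, using only calls that appear above:
import numpy as np
from damask import Rotation
rot = Rotation.fromRandom()
back = Rotation.fromAxisAngle(rot.asAxisAngle())   # convert out and back in
assert np.allclose(rot.asQuaternion(), back.asQuaternion())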
@pytest.mark.parametrize('model',['Bain','KS','GT','GT_prime','NW','Pitsch'])
@pytest.mark.parametrize('lattice',['fcc','bcc'])
def test_relationship_forward_backward(self,model,lattice):
ori = Orientation(Rotation.fromRandom(),lattice)
for i,r in enumerate(ori.relatedOrientations(model)):
ori2 = r.relatedOrientations(model)[i]
misorientation = ori.rotation.misorientation(ori2.rotation)
assert misorientation.asAxisAngle(degrees=True)[3]<1.0e-5
@pytest.mark.parametrize('model',['Bain','KS','GT','GT_prime','NW','Pitsch'])
@pytest.mark.parametrize('lattice',['fcc','bcc'])
def test_relationship_reference(self,update,reference_dir,model,lattice):
reference = os.path.join(reference_dir,'{}_{}.txt'.format(lattice,model))
ori = Orientation(Rotation(),lattice)
eu = np.array([o.rotation.asEulers(degrees=True) for o in ori.relatedOrientations(model)])
if update:
coords = np.array([(1,i+1) for i,x in enumerate(eu)])
table = damask.Table(eu,{'Eulers':(3,)})
table.add('pos',coords)
table.to_ASCII(reference)
assert np.allclose(eu,damask.Table.from_ASCII(reference).get('Eulers'))

View File

@ -47,6 +47,17 @@ class TestTable:
new = Table.from_ASCII(f)
assert all(default.data==new.data)
def test_read_ang_str(self,reference_dir):
new = Table.from_ang(os.path.join(reference_dir,'simple.ang'))
assert new.data.shape == (4,10) and \
new.labels == ['eu', 'pos', 'IQ', 'CI', 'ID', 'intensity', 'fit']
def test_read_ang_file(self,reference_dir):
f = open(os.path.join(reference_dir,'simple.ang'))
new = Table.from_ang(f)
assert new.data.shape == (4,10) and \
new.labels == ['eu', 'pos', 'IQ', 'CI', 'ID', 'intensity', 'fit']
@pytest.mark.parametrize('fname',['datatype-mix.txt','whitespace-mix.txt'])
def test_read_strange(self,reference_dir,fname):
with open(os.path.join(reference_dir,fname)) as f:
@ -65,11 +76,13 @@ class TestTable:
default.add('nine',d,'random data')
assert np.allclose(d,default.get('nine'))
def test_rename_equivalent(self,default):
v = default.get('v')
default.rename('v','u')
u = default.get('u')
assert np.all(v == u)
def test_rename_equivalent(self):
x = np.random.random((5,13))
t = Table(x,{'F':(3,3),'v':(3,),'s':(1,)},['random test data'])
s = t.get('s')
t.rename('s','u')
u = t.get('u')
assert np.all(s == u)
def test_rename_gone(self,default):
default.rename('v','V')

View File

@ -0,0 +1,79 @@
import pytest
import numpy as np
from damask import grid_filters
class TestGridFilters:
def test_cell_coord0(self):
size = np.random.random(3)
grid = np.random.randint(8,32,(3))
coord = grid_filters.cell_coord0(grid,size)
assert np.allclose(coord[0,0,0],size/grid*.5) and coord.shape == tuple(grid[::-1]) + (3,)
def test_node_coord0(self):
size = np.random.random(3)
grid = np.random.randint(8,32,(3))
coord = grid_filters.node_coord0(grid,size)
assert np.allclose(coord[-1,-1,-1],size) and coord.shape == tuple(grid[::-1]+1) + (3,)
def test_coord0(self):
size = np.random.random(3)
grid = np.random.randint(8,32,(3))
c = grid_filters.cell_coord0(grid+1,size+size/grid)
n = grid_filters.node_coord0(grid,size) + size/grid*.5
assert np.allclose(c,n)
@pytest.mark.parametrize('mode',[('cell'),('node')])
def test_grid_DNA(self,mode):
"""Ensure that xx_coord0_2_DNA is the inverse of xx_coord0."""
grid = np.random.randint(8,32,(3))
size = np.random.random(3)
origin = np.random.random(3)
coord0 = eval('grid_filters.{}_coord0(grid,size,origin)'.format(mode)) # noqa
_grid,_size,_origin = eval('grid_filters.{}_coord0_2_DNA(coord0.reshape((-1,3)))'.format(mode))
assert np.allclose(grid,_grid) and np.allclose(size,_size) and np.allclose(origin,_origin)
def test_displacement_fluct_equivalence(self):
"""Ensure that fluctuations are periodic."""
size = np.random.random(3)
grid = np.random.randint(8,32,(3))
F = np.random.random(tuple(grid)+(3,3))
assert np.allclose(grid_filters.node_displacement_fluct(size,F),
grid_filters.cell_2_node(grid_filters.cell_displacement_fluct(size,F)))
def test_interpolation_nonperiodic(self):
size = np.random.random(3)
grid = np.random.randint(8,32,(3))
F = np.random.random(tuple(grid)+(3,3))
assert np.allclose(grid_filters.node_coord(size,F) [1:-1,1:-1,1:-1],grid_filters.cell_2_node(
grid_filters.cell_coord(size,F))[1:-1,1:-1,1:-1])
@pytest.mark.parametrize('mode',[('cell'),('node')])
def test_coord0_origin(self,mode):
origin= np.random.random(3)
size = np.random.random(3) # noqa
grid = np.random.randint(8,32,(3))
shifted = eval('grid_filters.{}_coord0(grid,size,origin)'.format(mode))
unshifted = eval('grid_filters.{}_coord0(grid,size)'.format(mode))
if mode == 'cell':
assert np.allclose(shifted,unshifted+np.broadcast_to(origin,tuple(grid[::-1]) +(3,)))
elif mode == 'node':
assert np.allclose(shifted,unshifted+np.broadcast_to(origin,tuple(grid[::-1]+1)+(3,)))
@pytest.mark.parametrize('mode',[('cell'),('node')])
def test_displacement_avg_vanishes(self,mode):
"""Ensure that random fluctuations in F do not result in average displacement."""
size = np.random.random(3) # noqa
grid = np.random.randint(8,32,(3))
F = np.random.random(tuple(grid)+(3,3))
F += np.eye(3) - np.average(F,axis=(0,1,2))
assert np.allclose(eval('grid_filters.{}_displacement_avg(size,F)'.format(mode)),0.0)
@pytest.mark.parametrize('mode',[('cell'),('node')])
def test_displacement_fluct_vanishes(self,mode):
"""Ensure that constant F does not result in fluctuating displacement."""
size = np.random.random(3) # noqa
grid = np.random.randint(8,32,(3))
F = np.broadcast_to(np.random.random((3,3)), tuple(grid)+(3,3)) # noqa
assert np.allclose(eval('grid_filters.{}_displacement_fluct(size,F)'.format(mode)),0.0)
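As a quick orientation to these filters, a sketch of the cell/node coordinate relationship that the first three tests encode (grid and size values are illustrative):
import numpy as np
from damask import grid_filters
grid = np.array([4, 4, 4])
size = np.ones(3)
cells = grid_filters.cell_coord0(grid, size)       # cell centers, shape (4,4,4,3)
nodes = grid_filters.node_coord0(grid, size)       # nodal positions, shape (5,5,5,3)
assert np.allclose(cells[0, 0, 0], size/grid*0.5)  # first center sits half a spacing from the origin
assert np.allclose(nodes[-1, -1, -1], size)        # last node sits at the box corner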

View File

@ -87,10 +87,8 @@ subroutine CPFEM_initAll(el,ip)
call math_init
call rotations_init
call FE_init
#ifdef DAMASK_HDF5
call HDF5_utilities_init
call results_init
#endif
call mesh_init(ip, el)
call lattice_init
call material_init
@ -374,7 +372,6 @@ subroutine CPFEM_results(inc,time)
integer(pInt), intent(in) :: inc
real(pReal), intent(in) :: time
#ifdef DAMASK_HDF5
call results_openJobFile
call results_addIncrement(inc,time)
call constitutive_results
@ -382,7 +379,6 @@ subroutine CPFEM_results(inc,time)
call homogenization_results
call results_removeLink('current') ! ToDo: put this into closeJobFile
call results_closeJobFile
#endif
end subroutine CPFEM_results

View File

@ -14,10 +14,10 @@ module CPFEM2
use material
use lattice
use IO
use HDF5
use DAMASK_interface
use results
use discretization
use HDF5
use HDF5_utilities
use homogenization
use constitutive
@ -65,7 +65,6 @@ subroutine CPFEM_initAll
call constitutive_init
call crystallite_init
call homogenization_init
call materialpoint_postResults
call CPFEM_init
end subroutine CPFEM_initAll

View File

@ -143,9 +143,6 @@ subroutine UMAT(STRESS,STATEV,DDSDDE,SSE,SPD,SCD,&
outdatedByNewInc, &
outdatedFFN1, &
lastStep
use homogenization, only: &
materialpoint_sizeResults, &
materialpoint_results
implicit none
integer(pInt), intent(in) :: &
@ -332,7 +329,7 @@ subroutine UMAT(STRESS,STATEV,DDSDDE,SSE,SPD,SCD,&
ddsdde(6,:) = ddsdde_h(5,:)
end if
statev = materialpoint_results(1:min(nstatv,materialpoint_sizeResults),npt,mesh_FEasCP('elem', noel))
statev = 0
if (terminallyIll) pnewdt = 0.5_pReal ! force cutback directly ?
!$ call omp_set_num_threads(defaultNumThreadsInt) ! reset number of threads to stored default value

View File

@ -5,9 +5,7 @@
!> @author Martin Diehl, Max-Planck-Institut für Eisenforschung GmbH
!--------------------------------------------------------------------------------------------------
module HDF5_utilities
#if defined(PETSc) || defined(DAMASK_HDF5)
use HDF5
#endif
#ifdef PETSc
use PETSC
#endif
@ -20,7 +18,6 @@ module HDF5_utilities
implicit none
public
#if defined(PETSc) || defined(DAMASK_HDF5)
!--------------------------------------------------------------------------------------------------
!> @brief reads integer or float data of defined shape from file ! ToDo: order of arguments wrong
!> @details for parallel IO, all dimensions except the last need to match
@ -279,8 +276,8 @@ logical function HDF5_objectExists(loc_id,path)
integer(HID_T), intent(in) :: loc_id
character(len=*), intent(in), optional :: path
integer :: hdferr
character(len=256) :: p
integer :: hdferr
character(len=pStringLen) :: p
if (present(path)) then
p = trim(path)
@ -308,10 +305,10 @@ subroutine HDF5_addAttribute_str(loc_id,attrLabel,attrValue,path)
character(len=*), intent(in) :: attrLabel, attrValue
character(len=*), intent(in), optional :: path
integer :: hdferr
integer(HID_T) :: attr_id, space_id, type_id
logical :: attrExists
character(len=256) :: p
integer :: hdferr
integer(HID_T) :: attr_id, space_id, type_id
logical :: attrExists
character(len=pStringLen) :: p
if (present(path)) then
p = trim(path)
@ -355,10 +352,10 @@ subroutine HDF5_addAttribute_int(loc_id,attrLabel,attrValue,path)
integer, intent(in) :: attrValue
character(len=*), intent(in), optional :: path
integer :: hdferr
integer(HID_T) :: attr_id, space_id
logical :: attrExists
character(len=256) :: p
integer :: hdferr
integer(HID_T) :: attr_id, space_id
logical :: attrExists
character(len=pStringLen) :: p
if (present(path)) then
p = trim(path)
@ -396,10 +393,10 @@ subroutine HDF5_addAttribute_real(loc_id,attrLabel,attrValue,path)
real(pReal), intent(in) :: attrValue
character(len=*), intent(in), optional :: path
integer :: hdferr
integer(HID_T) :: attr_id, space_id
logical :: attrExists
character(len=256) :: p
integer :: hdferr
integer(HID_T) :: attr_id, space_id
logical :: attrExists
character(len=pStringLen) :: p
if (present(path)) then
p = trim(path)
@ -441,7 +438,7 @@ subroutine HDF5_addAttribute_int_array(loc_id,attrLabel,attrValue,path)
integer(HID_T) :: attr_id, space_id
integer(HSIZE_T),dimension(1) :: array_size
logical :: attrExists
character(len=256) :: p
character(len=pStringLen) :: p
if (present(path)) then
p = trim(path)
@ -485,7 +482,7 @@ subroutine HDF5_addAttribute_real_array(loc_id,attrLabel,attrValue,path)
integer(HID_T) :: attr_id, space_id
integer(HSIZE_T),dimension(1) :: array_size
logical :: attrExists
character(len=256) :: p
character(len=pStringLen) :: p
if (present(path)) then
p = trim(path)
@ -1928,6 +1925,5 @@ subroutine finalize_write(plist_id, dset_id, filespace_id, memspace_id)
if (hdferr < 0) call IO_error(1,ext_msg='finalize_write: h5sclose_f/memspace_id')
end subroutine finalize_write
#endif
end module HDF5_Utilities

View File

@ -11,9 +11,9 @@ module IO
implicit none
private
character(len=5), parameter, public :: &
character(len=*), parameter, public :: &
IO_EOF = '#EOF#' !< end of file string
character(len=207), parameter, private :: &
character(len=*), parameter, private :: &
IO_DIVIDER = '───────────────────'//&
'───────────────────'//&
'───────────────────'//&
@ -32,8 +32,7 @@ module IO
IO_intValue, &
IO_lc, &
IO_error, &
IO_warning, &
IO_intOut
IO_warning
#if defined(Marc4DAMASK) || defined(Abaqus)
public :: &
IO_open_inputFile, &
@ -244,12 +243,12 @@ subroutine IO_open_inputFile(fileUnit)
integer, allocatable, dimension(:) :: chunkPos
character(len=65536) :: line,fname
character(len=pStringLen) :: line,fname
logical :: createSuccess,fexist
do
read(unit2,'(A65536)',END=220) line
read(unit2,'(A256)',END=220) line
chunkPos = IO_stringPos(line)
if (IO_lc(IO_StringValue(line,chunkPos,1))=='*include') then
@ -402,7 +401,7 @@ function IO_stringValue(string,chunkPos,myChunk,silent)
character(len=:), allocatable :: IO_stringValue
logical, optional,intent(in) :: silent !< switch to trigger verbosity
character(len=16), parameter :: MYNAME = 'IO_stringValue: '
character(len=*), parameter :: MYNAME = 'IO_stringValue: '
logical :: warn
@ -430,8 +429,8 @@ real(pReal) function IO_floatValue (string,chunkPos,myChunk)
integer, dimension(:), intent(in) :: chunkPos !< positions of start and end of each tag/chunk in given string
integer, intent(in) :: myChunk !< position number of desired chunk
character(len=*), intent(in) :: string !< raw input with known start and end of each chunk
character(len=15), parameter :: MYNAME = 'IO_floatValue: '
character(len=17), parameter :: VALIDCHARACTERS = '0123456789eEdD.+-'
character(len=*), parameter :: MYNAME = 'IO_floatValue: '
character(len=*), parameter :: VALIDCHARACTERS = '0123456789eEdD.+-'
IO_floatValue = 0.0_pReal
@ -454,8 +453,8 @@ integer function IO_intValue(string,chunkPos,myChunk)
character(len=*), intent(in) :: string !< raw input with known start and end of each chunk
integer, intent(in) :: myChunk !< position number of desired chunk
integer, dimension(:), intent(in) :: chunkPos !< positions of start and end of each tag/chunk in given string
character(len=13), parameter :: MYNAME = 'IO_intValue: '
character(len=12), parameter :: VALIDCHARACTERS = '0123456789+-'
character(len=*), parameter :: MYNAME = 'IO_intValue: '
character(len=*), parameter :: VALIDCHARACTERS = '0123456789+-'
IO_intValue = 0
@ -478,9 +477,9 @@ real(pReal) function IO_fixedNoEFloatValue (string,ends,myChunk)
character(len=*), intent(in) :: string !< raw input with known ends of each chunk
integer, intent(in) :: myChunk !< position number of desired chunk
integer, dimension(:), intent(in) :: ends !< positions of end of each tag/chunk in given string
character(len=22), parameter :: MYNAME = 'IO_fixedNoEFloatValue '
character(len=13), parameter :: VALIDBASE = '0123456789.+-'
character(len=12), parameter :: VALIDEXP = '0123456789+-'
character(len=*), parameter :: MYNAME = 'IO_fixedNoEFloatValue '
character(len=*), parameter :: VALIDBASE = '0123456789.+-'
character(len=*), parameter :: VALIDEXP = '0123456789+-'
real(pReal) :: base
integer :: expon
@ -510,8 +509,8 @@ integer function IO_fixedIntValue(string,ends,myChunk)
character(len=*), intent(in) :: string !< raw input with known ends of each chunk
integer, intent(in) :: myChunk !< position number of desired chunk
integer, dimension(:), intent(in) :: ends !< positions of end of each tag/chunk in given string
character(len=20), parameter :: MYNAME = 'IO_fixedIntValue: '
character(len=12), parameter :: VALIDCHARACTERS = '0123456789+-'
character(len=*), parameter :: MYNAME = 'IO_fixedIntValue: '
character(len=*), parameter :: VALIDCHARACTERS = '0123456789+-'
IO_fixedIntValue = IO_verifyIntValue(trim(adjustl(string(ends(myChunk)+1:ends(myChunk+1)))),&
VALIDCHARACTERS,MYNAME)
@ -542,26 +541,6 @@ pure function IO_lc(string)
end function IO_lc
!--------------------------------------------------------------------------------------------------
!> @brief returns format string for integer values without leading zeros
!> @details deprecated, use '(i0)' format specifier
!--------------------------------------------------------------------------------------------------
pure function IO_intOut(intToPrint)
integer, intent(in) :: intToPrint
character(len=41) :: IO_intOut
integer :: N_digits
character(len=19) :: width ! maximum digits for 64 bit integer
character(len=20) :: min_width ! longer for negative values
N_digits = 1 + int(log10(real(max(abs(intToPrint),1))))
write(width, '(I19.19)') N_digits
write(min_width, '(I20.20)') N_digits + merge(1,0,intToPrint < 0)
IO_intOut = 'I'//trim(min_width)//'.'//trim(width)
end function IO_intOut
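As a worked example of what this helper produced: intToPrint = -123 gives N_digits = 3, so the returned format is effectively 'I4.3' (one extra column for the minus sign); the modern '(i0)' edit descriptor achieves the same without this bookkeeping.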
!--------------------------------------------------------------------------------------------------
!> @brief write error statements to standard out and terminate the Marc/spectral run with exit #9xxx
!> in ABAQUS either time step is reduced or execution terminated
@ -905,7 +884,7 @@ end subroutine IO_warning
!--------------------------------------------------------------------------------------------------
function IO_read(fileUnit) result(line)
integer, intent(in) :: fileUnit !< file unit
integer, intent(in) :: fileUnit !< file unit
character(len=pStringLen) :: line
@ -945,7 +924,7 @@ integer function IO_countDataLines(fileUnit)
integer, allocatable, dimension(:) :: chunkPos
character(len=65536) :: line, &
character(len=pStringLen) :: line, &
tmp
IO_countDataLines = 0
@ -977,7 +956,7 @@ integer function IO_countNumericalDataLines(fileUnit)
integer, allocatable, dimension(:) :: chunkPos
character(len=65536) :: line, &
character(len=pStringLen) :: line, &
tmp
IO_countNumericalDataLines = 0
@ -1012,7 +991,7 @@ integer function IO_countContinuousIntValues(fileUnit)
integer :: l,c
#endif
integer, allocatable, dimension(:) :: chunkPos
character(len=65536) :: line
character(len=pStringLen) :: line
IO_countContinuousIntValues = 0
line = ''
@ -1069,21 +1048,21 @@ function IO_continuousIntValues(fileUnit,maxN,lookupName,lookupMap,lookupMaxN)
integer, intent(in) :: fileUnit, &
lookupMaxN
integer, dimension(:,:), intent(in) :: lookupMap
character(len=64), dimension(:), intent(in) :: lookupName
character(len=*), dimension(:), intent(in) :: lookupName
integer :: i,first,last
#ifdef Abaqus
integer :: j,l,c
#endif
integer, allocatable, dimension(:) :: chunkPos
character(len=65536) line
logical rangeGeneration
character(len=pStringLen) :: line
logical :: rangeGeneration
IO_continuousIntValues = 0
rangeGeneration = .false.
#if defined(Marc4DAMASK)
do
read(fileUnit,'(A65536)',end=100) line
read(fileUnit,'(A256)',end=100) line
chunkPos = IO_stringPos(line)
if (chunkPos(1) < 1) then ! empty line
exit
@ -1124,14 +1103,14 @@ function IO_continuousIntValues(fileUnit,maxN,lookupName,lookupMap,lookupMaxN)
!--------------------------------------------------------------------------------------------------
! check if the element values in the elset are auto generated
backspace(fileUnit)
read(fileUnit,'(A65536)',end=100) line
read(fileUnit,'(A256)',end=100) line
chunkPos = IO_stringPos(line)
do i = 1,chunkPos(1)
if (IO_lc(IO_stringValue(line,chunkPos,i)) == 'generate') rangeGeneration = .true.
enddo
do l = 1,c
read(fileUnit,'(A65536)',end=100) line
read(fileUnit,'(A256)',end=100) line
chunkPos = IO_stringPos(line)
if (verify(IO_stringValue(line,chunkPos,1),'0123456789') > 0) then ! a non-int, i.e. set names follow on this line
do i = 1,chunkPos(1) ! loop over set names in line

View File

@ -37,7 +37,6 @@ module constitutive
integer, public, protected :: &
constitutive_plasticity_maxSizeDotState, &
constitutive_source_maxSizePostResults, &
constitutive_source_maxSizeDotState
public :: &
@ -50,7 +49,6 @@ module constitutive
constitutive_SandItsTangents, &
constitutive_collectDotState, &
constitutive_collectDeltaState, &
constitutive_postResults, &
constitutive_results
contains
@ -61,17 +59,9 @@ contains
!--------------------------------------------------------------------------------------------------
subroutine constitutive_init
integer, parameter :: FILEUNIT = 204
integer :: &
o, & !< counter in output loop
ph, & !< counter in phase loop
s, & !< counter in source loop
ins !< instance of plasticity/source
integer, dimension(:,:), pointer :: thisSize
character(len=64), dimension(:,:), pointer :: thisOutput
character(len=32) :: outputName !< name of output, intermediate fix until HDF5 output is ready
logical :: knownSource
s !< counter in source loop
!--------------------------------------------------------------------------------------------------
! initialize plasticity
@ -101,58 +91,10 @@ subroutine constitutive_init
if (any(phase_kinematics == KINEMATICS_slipplane_opening_ID)) call kinematics_slipplane_opening_init
if (any(phase_kinematics == KINEMATICS_thermal_expansion_ID)) call kinematics_thermal_expansion_init
write(6,'(/,a)') ' <<<+- constitutive init -+>>>'
mainProcess: if (worldrank == 0) then
!--------------------------------------------------------------------------------------------------
! write description file for constitutive output
call IO_write_jobFile(FILEUNIT,'outputConstitutive')
PhaseLoop: do ph = 1,material_Nphase
activePhase: if (any(material_phaseAt == ph)) then
write(FILEUNIT,'(/,a,/)') '['//trim(config_name_phase(ph))//']'
SourceLoop: do s = 1, phase_Nsources(ph)
knownSource = .true. ! assume valid
sourceType: select case (phase_source(s,ph))
case (SOURCE_damage_isoBrittle_ID) sourceType
ins = source_damage_isoBrittle_instance(ph)
outputName = SOURCE_damage_isoBrittle_label
thisOutput => source_damage_isoBrittle_output
thisSize => source_damage_isoBrittle_sizePostResult
case (SOURCE_damage_isoDuctile_ID) sourceType
ins = source_damage_isoDuctile_instance(ph)
outputName = SOURCE_damage_isoDuctile_label
thisOutput => source_damage_isoDuctile_output
thisSize => source_damage_isoDuctile_sizePostResult
case (SOURCE_damage_anisoBrittle_ID) sourceType
ins = source_damage_anisoBrittle_instance(ph)
outputName = SOURCE_damage_anisoBrittle_label
thisOutput => source_damage_anisoBrittle_output
thisSize => source_damage_anisoBrittle_sizePostResult
case (SOURCE_damage_anisoDuctile_ID) sourceType
ins = source_damage_anisoDuctile_instance(ph)
outputName = SOURCE_damage_anisoDuctile_label
thisOutput => source_damage_anisoDuctile_output
thisSize => source_damage_anisoDuctile_sizePostResult
case default sourceType
knownSource = .false.
end select sourceType
if (knownSource) then
write(FILEUNIT,'(a)') '(source)'//char(9)//trim(outputName)
OutputSourceLoop: do o = 1,size(thisOutput(:,ins))
if(len_trim(thisOutput(o,ins)) > 0) &
write(FILEUNIT,'(a,i4)') trim(thisOutput(o,ins))//char(9),thisSize(o,ins)
enddo OutputSourceLoop
endif
enddo SourceLoop
endif activePhase
enddo PhaseLoop
close(FILEUNIT)
endif mainProcess
write(6,'(/,a)') ' <<<+- constitutive init -+>>>'; flush(6)
constitutive_plasticity_maxSizeDotState = 0
constitutive_source_maxSizeDotState = 0
constitutive_source_maxSizePostResults = 0
PhaseLoop2:do ph = 1,material_Nphase
!--------------------------------------------------------------------------------------------------
@ -169,11 +111,8 @@ subroutine constitutive_init
plasticState(ph)%sizeDotState)
constitutive_source_maxSizeDotState = max(constitutive_source_maxSizeDotState, &
maxval(sourceState(ph)%p(:)%sizeDotState))
constitutive_source_maxSizePostResults = max(constitutive_source_maxSizePostResults, &
maxval(sourceState(ph)%p(:)%sizePostResults))
enddo PhaseLoop2
end subroutine constitutive_init
@ -639,58 +578,13 @@ subroutine constitutive_collectDeltaState(S, Fe, Fi, ipc, ip, el)
end subroutine constitutive_collectDeltaState
!--------------------------------------------------------------------------------------------------
!> @brief returns array of constitutive results
!--------------------------------------------------------------------------------------------------
function constitutive_postResults(S, Fi, ipc, ip, el)
integer, intent(in) :: &
ipc, & !< component-ID of integration point
ip, & !< integration point
el !< element
real(pReal), dimension(sum(sourceState(material_phaseAt(ipc,el))%p(:)%sizePostResults)) :: &
constitutive_postResults
real(pReal), intent(in), dimension(3,3) :: &
Fi !< intermediate deformation gradient
real(pReal), intent(in), dimension(3,3) :: &
S !< 2nd Piola Kirchhoff stress
integer :: &
startPos, endPos
integer :: &
i, of, instance !< counter in source loop
constitutive_postResults = 0.0_pReal
endPos = 0
SourceLoop: do i = 1, phase_Nsources(material_phaseAt(ipc,el))
startPos = endPos + 1
endPos = endPos + sourceState(material_phaseAt(ipc,el))%p(i)%sizePostResults
of = material_phasememberAt(ipc,ip,el)
sourceType: select case (phase_source(i,material_phaseAt(ipc,el)))
case (SOURCE_damage_isoBrittle_ID) sourceType
constitutive_postResults(startPos:endPos) = source_damage_isoBrittle_postResults(material_phaseAt(ipc,el),of)
case (SOURCE_damage_isoDuctile_ID) sourceType
constitutive_postResults(startPos:endPos) = source_damage_isoDuctile_postResults(material_phaseAt(ipc,el),of)
case (SOURCE_damage_anisoBrittle_ID) sourceType
constitutive_postResults(startPos:endPos) = source_damage_anisoBrittle_postResults(material_phaseAt(ipc,el),of)
case (SOURCE_damage_anisoDuctile_ID) sourceType
constitutive_postResults(startPos:endPos) = source_damage_anisoDuctile_postResults(material_phaseAt(ipc,el),of)
end select sourceType
enddo SourceLoop
end function constitutive_postResults
!--------------------------------------------------------------------------------------------------
!> @brief writes constitutive results to HDF5 output file
!--------------------------------------------------------------------------------------------------
subroutine constitutive_results
#if defined(PETSc) || defined(DAMASK_HDF5)
integer :: p
character(len=256) :: group
character(len=pStringLen) :: group
do p=1,size(config_name_phase)
group = trim('current/constituent')//'/'//trim(config_name_phase(p))
call HDF5_closeGroup(results_addGroup(group))
@ -719,8 +613,8 @@ subroutine constitutive_results
call plastic_nonlocal_results(phase_plasticityInstance(p),group)
end select
enddo
#endif
enddo
end subroutine constitutive_results
end module constitutive

View File

@ -77,7 +77,7 @@ module crystallite
crystallite_localPlasticity !< indicates this grain to have purely local constitutive law
type :: tOutput !< new requested output (per phase)
character(len=65536), allocatable, dimension(:) :: &
character(len=pStringLen), allocatable, dimension(:) :: &
label
end type tOutput
type(tOutput), allocatable, dimension(:) :: output_constituent
@ -108,7 +108,6 @@ module crystallite
crystallite_stressTangent, &
crystallite_orientations, &
crystallite_push33ToRef, &
crystallite_postResults, &
crystallite_results
contains
@ -119,7 +118,6 @@ contains
!--------------------------------------------------------------------------------------------------
subroutine crystallite_init
integer, parameter :: FILEUNIT=434
logical, dimension(:,:), allocatable :: devNull
integer :: &
c, & !< counter in integration point component loop
@ -233,13 +231,6 @@ subroutine crystallite_init
#endif
enddo
!--------------------------------------------------------------------------------------------------
! write description file for crystallite output
if (worldrank == 0) then
call IO_write_jobFile(FILEUNIT,'outputCrystallite')
write(FILEUNIT,'(/,a,/)') '[not supported anymore]'
close(FILEUNIT)
endif
call config_deallocate('material.config/phase')
!--------------------------------------------------------------------------------------------------
@ -732,42 +723,11 @@ function crystallite_push33ToRef(ipc,ip,el, tensor33)
end function crystallite_push33ToRef
!--------------------------------------------------------------------------------------------------
!> @brief return results of particular grain
!--------------------------------------------------------------------------------------------------
function crystallite_postResults(ipc, ip, el)
integer, intent(in):: &
el, & !< element index
ip, & !< integration point index
ipc !< grain index
real(pReal), dimension(1+ &
1+sum(sourceState(material_phaseAt(ipc,el))%p(:)%sizePostResults)) :: &
crystallite_postResults
integer :: &
c
crystallite_postResults = 0.0_pReal
crystallite_postResults(1) = 0.0_pReal ! header-like information (length)
c = 1
crystallite_postResults(c+1) = real(sum(sourceState(material_phaseAt(ipc,el))%p(:)%sizePostResults),pReal) ! size of constitutive results
c = c + 1
if (size(crystallite_postResults)-c > 0) &
crystallite_postResults(c+1:size(crystallite_postResults)) = &
constitutive_postResults(crystallite_S(1:3,1:3,ipc,ip,el), crystallite_Fi(1:3,1:3,ipc,ip,el), &
ipc, ip, el)
end function crystallite_postResults
!--------------------------------------------------------------------------------------------------
!> @brief writes crystallite results to HDF5 output file
!--------------------------------------------------------------------------------------------------
subroutine crystallite_results
#if defined(PETSc) || defined(DAMASK_HDF5)
integer :: p,o
real(pReal), allocatable, dimension(:,:,:) :: selected_tensors
type(rotation), allocatable, dimension(:) :: selected_rotations
@ -888,7 +848,7 @@ subroutine crystallite_results
enddo
end function select_rotations
#endif
end subroutine crystallite_results

View File

@ -5,22 +5,16 @@
module damage_local
use prec
use material
use numerics
use config
use numerics
use source_damage_isoBrittle
use source_damage_isoDuctile
use source_damage_anisoBrittle
use source_damage_anisoDuctile
use results
implicit none
private
integer, dimension(:,:), allocatable, target, public :: &
damage_local_sizePostResult
character(len=64), dimension(:,:), allocatable, target, public :: &
damage_local_output
integer, dimension(:), allocatable, target, public :: &
damage_local_Noutput
enum, bind(c)
enumerator :: &
@ -28,9 +22,6 @@ module damage_local
damage_ID
end enum
integer(kind(undefined_ID)), dimension(:,:), allocatable :: &
damage_local_outputID !< ID of each post result output
type :: tParameters
integer(kind(undefined_ID)), dimension(:), allocatable :: &
outputID
@ -42,7 +33,7 @@ module damage_local
public :: &
damage_local_init, &
damage_local_updateState, &
damage_local_postResults
damage_local_Results
contains
@ -52,70 +43,41 @@ contains
!--------------------------------------------------------------------------------------------------
subroutine damage_local_init
integer :: maxNinstance,homog,instance,i
integer :: sizeState
integer :: NofMyHomog, h
integer(kind(undefined_ID)) :: &
outputID
character(len=65536), dimension(0), parameter :: emptyStringArray = [character(len=65536)::]
character(len=65536), dimension(:), allocatable :: &
outputs
integer :: maxNinstance,o,NofMyHomog,h
character(len=pStringLen), dimension(:), allocatable :: outputs
write(6,'(/,a)') ' <<<+- damage_'//DAMAGE_local_label//' init -+>>>'
write(6,'(/,a)') ' <<<+- damage_'//DAMAGE_local_label//' init -+>>>'; flush(6)
maxNinstance = count(damage_type == DAMAGE_local_ID)
if (maxNinstance == 0) return
allocate(damage_local_sizePostResult (maxval(homogenization_Noutput),maxNinstance),source=0)
allocate(damage_local_output (maxval(homogenization_Noutput),maxNinstance))
damage_local_output = ''
allocate(damage_local_outputID (maxval(homogenization_Noutput),maxNinstance),source=undefined_ID)
allocate(damage_local_Noutput (maxNinstance), source=0)
allocate(param(maxNinstance))
do h = 1, size(damage_type)
if (damage_type(h) /= DAMAGE_LOCAL_ID) cycle
associate(prm => param(damage_typeInstance(h)), &
config => config_homogenization(h))
associate(prm => param(damage_typeInstance(h)),config => config_homogenization(h))
outputs = config%getStrings('(output)',defaultVal=emptyStringArray)
allocate(prm%outputID(0))
do i=1, size(outputs)
outputID = undefined_ID
select case(outputs(i))
case ('damage')
damage_local_output(i,damage_typeInstance(h)) = outputs(i)
damage_local_Noutput(instance) = damage_local_Noutput(instance) + 1
damage_local_sizePostResult(i,damage_typeInstance(h)) = 1
prm%outputID = [prm%outputID , damage_ID]
end select
do o=1, size(outputs)
select case(outputs(o))
case ('damage')
prm%outputID = [prm%outputID , damage_ID]
end select
enddo
homog = h
NofMyHomog = count(material_homogenizationAt == homog)
instance = damage_typeInstance(homog)
! allocate state arrays
sizeState = 1
damageState(homog)%sizeState = sizeState
damageState(homog)%sizePostResults = sum(damage_local_sizePostResult(:,instance))
allocate(damageState(homog)%state0 (sizeState,NofMyHomog), source=damage_initialPhi(homog))
allocate(damageState(homog)%subState0(sizeState,NofMyHomog), source=damage_initialPhi(homog))
allocate(damageState(homog)%state (sizeState,NofMyHomog), source=damage_initialPhi(homog))
NofMyHomog = count(material_homogenizationAt == h)
damageState(h)%sizeState = 1
allocate(damageState(h)%state0 (1,NofMyHomog), source=damage_initialPhi(h))
allocate(damageState(h)%subState0(1,NofMyHomog), source=damage_initialPhi(h))
allocate(damageState(h)%state (1,NofMyHomog), source=damage_initialPhi(h))
nullify(damageMapping(homog)%p)
damageMapping(homog)%p => mappingHomogenization(1,:,:)
deallocate(damage(homog)%p)
damage(homog)%p => damageState(homog)%state(1,:)
nullify(damageMapping(h)%p)
damageMapping(h)%p => mappingHomogenization(1,:,:)
deallocate(damage(h)%p)
damage(h)%p => damageState(h)%state(1,:)
end associate
enddo
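The rewritten init collects requested outputs with prm%outputID = [prm%outputID, damage_ID], relying on Fortran 2003 automatic reallocation on assignment instead of the counter bookkeeping in damage_local_Noutput. A self-contained sketch of that append idiom:

program demo_append
  implicit none
  integer, parameter :: damage_ID = 1                     ! hypothetical enumerator value
  integer, allocatable :: ids(:)
  allocate(ids(0))                                        ! start empty, as in the init routines
  ids = [ids, damage_ID]                                  ! grows by one: reallocation on assignment
  ids = [ids, damage_ID]
  write(6,'(a,i0)') 'entries: ', size(ids)                ! prints: entries: 2
end program demo_append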
@ -211,35 +173,27 @@ end subroutine damage_local_getSourceAndItsTangent
!--------------------------------------------------------------------------------------------------
!> @brief return array of damage results
!> @brief writes results to HDF5 output file
!--------------------------------------------------------------------------------------------------
function damage_local_postResults(ip,el)
subroutine damage_local_results(homog,group)
integer, intent(in) :: &
ip, & !< integration point
el !< element
real(pReal), dimension(sum(damage_local_sizePostResult(:,damage_typeInstance(material_homogenizationAt(el))))) :: &
damage_local_postResults
integer :: instance, homog, offset, o, c
homog = material_homogenizationAt(el)
offset = damageMapping(homog)%p(ip,el)
instance = damage_typeInstance(homog)
associate(prm => param(instance))
c = 0
integer, intent(in) :: homog
character(len=*), intent(in) :: group
integer :: o
associate(prm => param(damage_typeInstance(homog)))
outputsLoop: do o = 1,size(prm%outputID)
select case(prm%outputID(o))
case (damage_ID)
damage_local_postResults(c+1) = damage(homog)%p(offset)
c = c + 1
end select
case (damage_ID)
call results_writeDataset(group,damage(homog)%p,'phi',&
'damage indicator','-')
end select
enddo outputsLoop
end associate
end function damage_local_postResults
end subroutine damage_local_results
end module damage_local
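The postResults/results pair above illustrates the commit's central refactoring: instead of packing values into a flat real array whose layout a post-processor must decode, each selected output is written as a named, annotated HDF5 dataset. A mock of the new dispatch shape, with results_writeDataset replaced by a hypothetical stub and all names/values illustrative only:

program demo_results_dispatch
  implicit none
  integer, parameter :: damage_ID = 1                     ! enumerator stand-in
  integer, parameter :: outputID(1) = [damage_ID]         ! as collected from '(output)' config keys
  real :: phi(4) = [1.0, 0.9, 0.8, 1.0]                   ! hypothetical damage field
  integer :: o
  do o = 1, size(outputID)
    select case(outputID(o))
      case(damage_ID)
        call writeDataset('current/materialpoint/SX/damage',phi,'phi','damage indicator','-')
    end select
  enddo
contains
  subroutine writeDataset(group,a,label,description,SIunit)  ! stub standing in for results_writeDataset
    character(len=*), intent(in) :: group, label, description, SIunit
    real,             intent(in) :: a(:)
    write(6,'(a,i0,a)') trim(group)//'/'//label//' ('//description//', '//SIunit//'): ', &
                        size(a), ' values'
  end subroutine writeDataset
end program demo_results_dispatch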

View File

@ -19,26 +19,23 @@ contains
!--------------------------------------------------------------------------------------------------
subroutine damage_none_init
integer :: &
homog, &
NofMyHomog
integer :: h,NofMyHomog
write(6,'(/,a)') ' <<<+- damage_'//DAMAGE_NONE_LABEL//' init -+>>>'
write(6,'(/,a)') ' <<<+- damage_'//DAMAGE_NONE_LABEL//' init -+>>>'; flush(6)
initializeInstances: do homog = 1, size(config_homogenization)
do h = 1, size(config_homogenization)
if (damage_type(h) /= DAMAGE_NONE_ID) cycle
NofMyHomog = count(material_homogenizationAt == h)
damageState(h)%sizeState = 0
allocate(damageState(h)%state0 (0,NofMyHomog))
allocate(damageState(h)%subState0(0,NofMyHomog))
allocate(damageState(h)%state (0,NofMyHomog))
myhomog: if (damage_type(homog) == DAMAGE_NONE_ID) then
NofMyHomog = count(material_homogenizationAt == homog)
damageState(homog)%sizeState = 0
allocate(damageState(homog)%state0 (0,NofMyHomog))
allocate(damageState(homog)%subState0(0,NofMyHomog))
allocate(damageState(homog)%state (0,NofMyHomog))
deallocate(damage(h)%p)
allocate (damage(h)%p(1), source=damage_initialPhi(h))
deallocate(damage(homog)%p)
allocate (damage(homog)%p(1), source=damage_initialPhi(homog))
endif myhomog
enddo initializeInstances
enddo
end subroutine damage_none_init
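The rewritten loop filters with an early cycle instead of wrapping the body in a named block-if, which keeps the nesting flat. A small sketch of the pattern, with hypothetical type codes:

program demo_cycle
  implicit none
  integer, parameter :: DAMAGE_NONE_ID = 0                ! hypothetical enumerator value
  integer, parameter :: damage_type(4) = [0,1,0,2]        ! hypothetical per-homogenization types
  integer :: h
  do h = 1, size(damage_type)
    if (damage_type(h) /= DAMAGE_NONE_ID) cycle           ! skip early, no nested if block
    write(6,'(a,i0)') 'initializing homogenization ', h
  enddo
end program demo_cycle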

View File

@ -1,29 +1,22 @@
!--------------------------------------------------------------------------------------------------
!> @author Pratheek Shanthraj, Max-Planck-Institut für Eisenforschung GmbH
!> @brief material subroutine for non-locally evolving damage field
!> @details to be done
!--------------------------------------------------------------------------------------------------
module damage_nonlocal
use prec
use material
use numerics
use config
use numerics
use crystallite
use lattice
use source_damage_isoBrittle
use source_damage_isoDuctile
use source_damage_anisoBrittle
use source_damage_anisoDuctile
use results
implicit none
private
integer, dimension(:,:), allocatable, target, public :: &
damage_nonlocal_sizePostResult
character(len=64), dimension(:,:), allocatable, target, public :: &
damage_nonlocal_output
integer, dimension(:), allocatable, target, public :: &
damage_nonlocal_Noutput
enum, bind(c)
enumerator :: &
@ -45,7 +38,7 @@ module damage_nonlocal
damage_nonlocal_getDiffusion33, &
damage_nonlocal_getMobility, &
damage_nonlocal_putNonLocalDamage, &
damage_nonlocal_postResults
damage_nonlocal_Results
contains
@ -55,70 +48,44 @@ contains
!--------------------------------------------------------------------------------------------------
subroutine damage_nonlocal_init
integer :: maxNinstance,homog,instance,o,i
integer :: sizeState
integer :: NofMyHomog, h
integer(kind(undefined_ID)) :: &
outputID
character(len=65536), dimension(0), parameter :: emptyStringArray = [character(len=65536)::]
character(len=65536), dimension(:), allocatable :: &
outputs
integer :: maxNinstance,o,NofMyHomog,h
character(len=pStringLen), dimension(:), allocatable :: outputs
write(6,'(/,a)') ' <<<+- damage_'//DAMAGE_nonlocal_label//' init -+>>>'
write(6,'(/,a)') ' <<<+- damage_'//DAMAGE_nonlocal_label//' init -+>>>'; flush(6)
maxNinstance = count(damage_type == DAMAGE_nonlocal_ID)
if (maxNinstance == 0) return
allocate(damage_nonlocal_sizePostResult (maxval(homogenization_Noutput),maxNinstance),source=0)
allocate(damage_nonlocal_output (maxval(homogenization_Noutput),maxNinstance))
damage_nonlocal_output = ''
allocate(damage_nonlocal_Noutput (maxNinstance), source=0)
allocate(param(maxNinstance))
do h = 1, size(damage_type)
if (damage_type(h) /= DAMAGE_NONLOCAL_ID) cycle
associate(prm => param(damage_typeInstance(h)), &
config => config_homogenization(h))
associate(prm => param(damage_typeInstance(h)),config => config_homogenization(h))
instance = damage_typeInstance(h)
outputs = config%getStrings('(output)',defaultVal=emptyStringArray)
allocate(prm%outputID(0))
do i=1, size(outputs)
outputID = undefined_ID
select case(outputs(i))
case ('damage')
damage_nonlocal_output(i,damage_typeInstance(h)) = outputs(i)
damage_nonlocal_Noutput(instance) = damage_nonlocal_Noutput(instance) + 1
damage_nonlocal_sizePostResult(i,damage_typeInstance(h)) = 1
prm%outputID = [prm%outputID , damage_ID]
end select
do o=1, size(outputs)
select case(outputs(o))
case ('damage')
prm%outputID = [prm%outputID, damage_ID]
end select
enddo
homog = h
NofMyHomog = count(material_homogenizationAt == h)
damageState(h)%sizeState = 1
allocate(damageState(h)%state0 (1,NofMyHomog), source=damage_initialPhi(h))
allocate(damageState(h)%subState0(1,NofMyHomog), source=damage_initialPhi(h))
allocate(damageState(h)%state (1,NofMyHomog), source=damage_initialPhi(h))
NofMyHomog = count(material_homogenizationAt == homog)
instance = damage_typeInstance(homog)
! allocate state arrays
sizeState = 1
damageState(homog)%sizeState = sizeState
damageState(homog)%sizePostResults = sum(damage_nonlocal_sizePostResult(:,instance))
allocate(damageState(homog)%state0 (sizeState,NofMyHomog), source=damage_initialPhi(homog))
allocate(damageState(homog)%subState0(sizeState,NofMyHomog), source=damage_initialPhi(homog))
allocate(damageState(homog)%state (sizeState,NofMyHomog), source=damage_initialPhi(homog))
nullify(damageMapping(homog)%p)
damageMapping(homog)%p => mappingHomogenization(1,:,:)
deallocate(damage(homog)%p)
damage(homog)%p => damageState(homog)%state(1,:)
nullify(damageMapping(h)%p)
damageMapping(h)%p => mappingHomogenization(1,:,:)
deallocate(damage(h)%p)
damage(h)%p => damageState(h)%state(1,:)
end associate
enddo
end subroutine damage_nonlocal_init
@ -247,35 +214,26 @@ end subroutine damage_nonlocal_putNonLocalDamage
!--------------------------------------------------------------------------------------------------
!> @brief return array of damage results
!> @brief writes results to HDF5 output file
!--------------------------------------------------------------------------------------------------
function damage_nonlocal_postResults(ip,el)
subroutine damage_nonlocal_results(homog,group)
integer, intent(in) :: &
ip, & !< integration point
el !< element
real(pReal), dimension(sum(damage_nonlocal_sizePostResult(:,damage_typeInstance(material_homogenizationAt(el))))) :: &
damage_nonlocal_postResults
integer :: &
instance, homog, offset, o, c
homog = material_homogenizationAt(el)
offset = damageMapping(homog)%p(ip,el)
instance = damage_typeInstance(homog)
associate(prm => param(instance))
c = 0
integer, intent(in) :: homog
character(len=*), intent(in) :: group
integer :: o
associate(prm => param(damage_typeInstance(homog)))
outputsLoop: do o = 1,size(prm%outputID)
select case(prm%outputID(o))
case (damage_ID)
damage_nonlocal_postResults(c+1) = damage(homog)%p(offset)
c = c + 1
end select
case (damage_ID)
call results_writeDataset(group,damage(homog)%p,'phi',&
'damage indicator','-')
end select
enddo outputsLoop
end associate
end function damage_nonlocal_postResults
end subroutine damage_nonlocal_results
end module damage_nonlocal

View File

@ -78,7 +78,7 @@ end subroutine discretization_init
!> @brief write the displacements
!--------------------------------------------------------------------------------------------------
subroutine discretization_results
#if defined(PETSc) || defined(DAMASK_HDF5)
real(pReal), dimension(:,:), allocatable :: u
call results_closeGroup(results_addGroup(trim('current/geometry')))
@ -90,7 +90,7 @@ subroutine discretization_results
u = discretization_IPcoords &
- discretization_IPcoords0
call results_writeDataset('current/geometry',u,'u_c','cell center displacements','m')
#endif
end subroutine discretization_results

View File

@ -10,7 +10,7 @@ module future
contains
#if defined(__GFORTRAN__) && __GNUC__<9 || __INTEL_COMPILER<1800
#if defined(__GFORTRAN__) && __GNUC__<9 || defined(__INTEL_COMPILER) && __INTEL_COMPILER<1800
!--------------------------------------------------------------------------------------------------
!> @brief substitute for the findloc intrinsic (only for integer, dimension(:) at the moment)
!--------------------------------------------------------------------------------------------------
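The guard above selects a hand-written substitute for the findloc intrinsic on compilers that predate it (gfortran before 9, Intel before 18.0). A minimal sketch of such a substitute for integer rank-1 arrays, assuming the semantics of findloc(a, v, dim=1), with 0 returned when no element matches:

program demo_findloc_substitute
  implicit none
  integer :: a(5) = [3,1,4,1,5]
  write(6,'(i0)') firstloc(a,4)                           ! prints: 3
contains
  pure function firstloc(a,v)                             ! hypothetical stand-in for findloc(a,v,dim=1)
    integer, intent(in) :: a(:), v
    integer :: firstloc
    do firstloc = 1, size(a)
      if (a(firstloc) == v) return
    enddo
    firstloc = 0                                          ! convention: 0 if no element matches
  end function firstloc
end program demo_findloc_substitute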

View File

@ -122,7 +122,6 @@ subroutine geometry_plastic_nonlocal_results
integer, dimension(:), allocatable :: shp
#if defined(PETSc) || defined(DAMASK_HDF5)
call results_openJobFile
writeVolume: block
@ -151,7 +150,6 @@ subroutine geometry_plastic_nonlocal_results
call results_closeJobFile
#endif
end subroutine geometry_plastic_nonlocal_results

View File

@ -15,11 +15,7 @@ program DAMASK_spectral
use config
use debug
use math
use mesh_grid
use CPFEM2
use FEsolving
use numerics
use homogenization
use material
use spectral_utilities
use grid_mech_spectral_basic
@ -40,7 +36,7 @@ program DAMASK_spectral
N_t = 0, & !< # of time indicators found in load case file
N_n = 0, & !< # of increment specifiers found in load case file
N_def = 0 !< # of rate of deformation specifiers found in load case file
character(len=65536) :: &
character(len=pStringLen) :: &
line
!--------------------------------------------------------------------------------------------------
@ -80,12 +76,6 @@ program DAMASK_spectral
type(tLoadCase), allocatable, dimension(:) :: loadCases !< array of all load cases
type(tLoadCase) :: newLoadCase
type(tSolutionState), allocatable, dimension(:) :: solres
integer(MPI_OFFSET_KIND) :: fileOffset
integer(MPI_OFFSET_KIND), dimension(:), allocatable :: outputSize
integer, parameter :: maxByteOut = 2147483647-4096 !< limit of one file output write https://trac.mpich.org/projects/mpich/ticket/1742
integer, parameter :: maxRealOut = maxByteOut/pReal
integer(pLongInt), dimension(2) :: outputIndex
PetscErrorCode :: ierr
procedure(grid_mech_spectral_basic_init), pointer :: &
mech_init
procedure(grid_mech_spectral_basic_forward), pointer :: &
@ -257,7 +247,7 @@ program DAMASK_spectral
reportAndCheck: if (worldrank == 0) then
write (loadcase_string, '(i6)' ) currentLoadCase
write(6,'(/,1x,a,i6)') 'load case: ', currentLoadCase
write(6,'(/,1x,a,i0)') 'load case: ', currentLoadCase
if (.not. newLoadCase%followFormerTrajectory) write(6,'(2x,a)') 'drop guessing along trajectory'
if (newLoadCase%deformation%myType == 'l') then
do j = 1, 3
@ -280,10 +270,8 @@ program DAMASK_spectral
enddo
if (any(newLoadCase%stress%maskLogical .eqv. &
newLoadCase%deformation%maskLogical)) errorID = 831 ! exclusive or masking only
if (any(newLoadCase%stress%maskLogical .and. &
transpose(newLoadCase%stress%maskLogical) .and. &
reshape([ .false.,.true.,.true.,.true.,.false.,.true.,.true.,.true.,.false.],[ 3,3]))) &
errorID = 838 ! no rotation is allowed by stress BC
if (any(newLoadCase%stress%maskLogical .and. transpose(newLoadCase%stress%maskLogical) &
.and. (math_I3<1))) errorID = 838 ! no rotation is allowed by stress BC
write(6,'(2x,a)') 'stress / GPa:'
do i = 1, 3; do j = 1, 3
if(newLoadCase%stress%maskLogical(i,j)) then
@ -300,14 +288,14 @@ program DAMASK_spectral
write(6,'(2x,a,/,3(3(3x,f12.7,1x)/))',advance='no') 'rotation of loadframe:',&
transpose(newLoadCase%rot%asMatrix())
if (newLoadCase%time < 0.0_pReal) errorID = 834 ! negative time increment
write(6,'(2x,a,f12.6)') 'time: ', newLoadCase%time
write(6,'(2x,a,f0.3)') 'time: ', newLoadCase%time
if (newLoadCase%incs < 1) errorID = 835 ! non-positive incs count
write(6,'(2x,a,i5)') 'increments: ', newLoadCase%incs
write(6,'(2x,a,i0)') 'increments: ', newLoadCase%incs
if (newLoadCase%outputfrequency < 1) errorID = 836 ! non-positive result frequency
write(6,'(2x,a,i5)') 'output frequency: ', newLoadCase%outputfrequency
write(6,'(2x,a,i0)') 'output frequency: ', newLoadCase%outputfrequency
if (newLoadCase%restartfrequency < 1) errorID = 839 ! non-positive restart frequency
if (newLoadCase%restartfrequency < huge(0)) &
write(6,'(2x,a,i5)') 'restart frequency: ', newLoadCase%restartfrequency
write(6,'(2x,a,i0)') 'restart frequency: ', newLoadCase%restartfrequency
if (errorID > 0) call IO_error(error_ID = errorID, ext_msg = loadcase_string) ! exit with error message
endif reportAndCheck
loadCases = [loadCases,newLoadCase] ! load case is ok, append it
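The rewritten stress-boundary-condition check uses math_I3 < 1 as a compact off-diagonal mask: the identity holds 1 on the diagonal (1 < 1 is false) and 0 elsewhere (0 < 1 is true), so the elementwise comparison reproduces the reshape([.false.,.true.,...]) literal it replaces. A sketch, with math_I3 spelled out locally:

program demo_offdiagonal_mask
  implicit none
  real, parameter :: math_I3(3,3) = reshape([1.,0.,0., 0.,1.,0., 0.,0.,1.],[3,3])  ! 3x3 identity
  logical :: offDiag(3,3)
  offDiag = math_I3 < 1.0                                 ! .true. exactly on the off-diagonal entries
  write(6,'(3l2)') offDiag                                ! three records: F T T / T F T / T T F
end program demo_offdiagonal_mask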
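The report above also moves from fixed-width edit descriptors (i5, i6, f12.6) to the dynamic-width forms i0 and f0.3, which print at minimal width and can never overflow into asterisks. A quick comparison:

program demo_dynamic_width
  implicit none
  write(6,'(2x,a,i5)')    'increments: ', 1234567         ! fixed width overflows: prints *****
  write(6,'(2x,a,i0)')    'increments: ', 1234567         ! minimal width: prints 1234567
  write(6,'(2x,a,f12.6)') 'time: ', 10.0                  ! prints    10.000000
  write(6,'(2x,a,f0.3)')  'time: ', 10.0                  ! prints 10.000
end program demo_dynamic_width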
@ -335,26 +323,10 @@ program DAMASK_spectral
! write header of output file
if (worldrank == 0) then
writeHeader: if (interface_restartInc < 1) then
open(newunit=fileUnit,file=trim(getSolverJobName())//&
'.spectralOut',form='UNFORMATTED',status='REPLACE')
write(fileUnit) 'load:', trim(loadCaseFile) ! ... and write header
write(fileUnit) 'workingdir:', 'n/a'
write(fileUnit) 'geometry:', trim(geometryFile)
write(fileUnit) 'grid:', grid
write(fileUnit) 'size:', geomSize
write(fileUnit) 'materialpoint_sizeResults:', materialpoint_sizeResults
write(fileUnit) 'loadcases:', size(loadCases)
write(fileUnit) 'frequencies:', loadCases%outputfrequency ! one entry per LoadCase
write(fileUnit) 'times:', loadCases%time ! one entry per LoadCase
write(fileUnit) 'logscales:', loadCases%logscale
write(fileUnit) 'increments:', loadCases%incs ! one entry per LoadCase
write(fileUnit) 'startingIncrement:', interface_restartInc ! start with writing out the previous inc
write(fileUnit) 'eoh'
close(fileUnit) ! end of header
open(newunit=statUnit,file=trim(getSolverJobName())//'.sta',form='FORMATTED',status='REPLACE')
write(statUnit,'(a)') 'Increment Time CutbackLevel Converged IterationsNeeded' ! statistics file
if (iand(debug_level(debug_spectral),debug_levelBasic) /= 0) &
write(6,'(/,a)') ' header of result and statistics file written out'
write(6,'(/,a)') ' header of statistics file written out'
flush(6)
else writeHeader
open(newunit=statUnit,file=trim(getSolverJobName())//&
@ -362,40 +334,11 @@ program DAMASK_spectral
endif writeHeader
endif
!--------------------------------------------------------------------------------------------------
! prepare MPI parallel out (including opening of file)
allocate(outputSize(worldsize), source = 0_MPI_OFFSET_KIND)
outputSize(worldrank+1) = size(materialpoint_results,kind=MPI_OFFSET_KIND)*int(pReal,MPI_OFFSET_KIND)
call MPI_allreduce(MPI_IN_PLACE,outputSize,worldsize,MPI_LONG,MPI_SUM,PETSC_COMM_WORLD,ierr) ! get total output size over each process
if (ierr /= 0) call IO_error(error_ID=894, ext_msg='MPI_allreduce')
call MPI_file_open(PETSC_COMM_WORLD, trim(getSolverJobName())//'.spectralOut', &
MPI_MODE_WRONLY + MPI_MODE_APPEND, &
MPI_INFO_NULL, &
fileUnit, &
ierr)
if (ierr /= 0) call IO_error(error_ID=894, ext_msg='MPI_file_open')
call MPI_file_get_position(fileUnit,fileOffset,ierr) ! get offset from header
if (ierr /= 0) call IO_error(error_ID=894, ext_msg='MPI_file_get_position')
fileOffset = fileOffset + sum(outputSize(1:worldrank)) ! offset of my process in file (header + processes before me)
call MPI_file_seek (fileUnit,fileOffset,MPI_SEEK_SET,ierr)
if (ierr /= 0) call IO_error(error_ID=894, ext_msg='MPI_file_seek')
writeUndeformed: if (interface_restartInc < 1) then
write(6,'(1/,a)') ' ... writing initial configuration to file ........................'
call CPFEM_results(0,0.0_pReal)
do i = 1, size(materialpoint_results,3)/(maxByteOut/(materialpoint_sizeResults*pReal))+1 ! slice the output of my process in chunks not exceeding the limit for one output
outputIndex = int([(i-1)*((maxRealOut)/materialpoint_sizeResults)+1, &
min(i*((maxRealOut)/materialpoint_sizeResults),size(materialpoint_results,3))],pLongInt)
call MPI_file_write(fileUnit,reshape(materialpoint_results(:,:,outputIndex(1):outputIndex(2)), &
[(outputIndex(2)-outputIndex(1)+1)*int(materialpoint_sizeResults,pLongInt)]), &
int((outputIndex(2)-outputIndex(1)+1)*int(materialpoint_sizeResults,pLongInt)), &
MPI_DOUBLE, MPI_STATUS_IGNORE, ierr)
if (ierr /= 0) call IO_error(error_ID=894, ext_msg='MPI_file_write')
enddo
fileOffset = fileOffset + sum(outputSize) ! forward to current file position
endif writeUndeformed
loadCaseLooping: do currentLoadCase = 1, size(loadCases)
time0 = time ! load case start time
guess = loadCases(currentLoadCase)%followFormerTrajectory ! change of load case? homogeneous guess for the first inc
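The MPI-IO writer removed above sliced each process's materialpoint_results into chunks because a single MPI write is limited to fewer than 2^31 bytes (the maxByteOut constant). A standalone sketch of that slicing arithmetic, with hypothetical sizes:

program demo_write_chunking
  implicit none
  integer, parameter :: pLongInt = selected_int_kind(18)          ! 64 bit, as pLongInt in prec
  integer(pLongInt), parameter :: maxByteOut  = 2147483647_pLongInt - 4096_pLongInt ! < 2 GiB per write
  integer(pLongInt), parameter :: maxRealOut  = maxByteOut/8      ! 8 bytes per double precision value
  integer(pLongInt), parameter :: sizeResults = 1000              ! hypothetical result length per point
  integer(pLongInt), parameter :: nPoints     = 5000000           ! hypothetical points on this process
  integer(pLongInt) :: i, lo, hi
  do i = 1, nPoints/(maxRealOut/sizeResults) + 1                  ! chunks that stay below the limit
    lo = (i-1)*(maxRealOut/sizeResults) + 1
    hi = min(i*(maxRealOut/sizeResults), nPoints)
    write(6,'(2(a,i0))') 'write chunk ', lo, ' .. ', hi
  enddo
end program demo_write_chunking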
@ -519,7 +462,6 @@ program DAMASK_spectral
write(6,'(/,a)') ' cutting back '
else ! no more options to continue
call IO_warning(850)
call MPI_File_close(fileUnit,ierr)
close(statUnit)
call quit(0) ! quit
endif
@ -537,19 +479,6 @@ program DAMASK_spectral
if (mod(inc,loadCases(currentLoadCase)%outputFrequency) == 0) then ! at output frequency
write(6,'(1/,a)') ' ... writing results to file ......................................'
flush(6)
call materialpoint_postResults()
call MPI_File_seek (fileUnit,fileOffset,MPI_SEEK_SET,ierr)
if (ierr /= 0) call IO_error(894, ext_msg='MPI_file_seek')
do i=1, size(materialpoint_results,3)/(maxByteOut/(materialpoint_sizeResults*pReal))+1 ! slice the output of my process in chunks not exceeding the limit for one output
outputIndex=int([(i-1)*((maxRealOut)/materialpoint_sizeResults)+1, &
min(i*((maxRealOut)/materialpoint_sizeResults),size(materialpoint_results,3))],pLongInt)
call MPI_file_write(fileUnit,reshape(materialpoint_results(:,:,outputIndex(1):outputIndex(2)),&
[(outputIndex(2)-outputIndex(1)+1)*int(materialpoint_sizeResults,pLongInt)]), &
int((outputIndex(2)-outputIndex(1)+1)*int(materialpoint_sizeResults,pLongInt)),&
MPI_DOUBLE, MPI_STATUS_IGNORE, ierr)
if(ierr /=0) call IO_error(894, ext_msg='MPI_file_write')
enddo
fileOffset = fileOffset + sum(outputSize) ! forward to current file position
call CPFEM_results(totalIncsCounter,time)
endif
if (mod(inc,loadCases(currentLoadCase)%restartFrequency) == 0) then
@ -566,7 +495,6 @@ program DAMASK_spectral
!--------------------------------------------------------------------------------------------------
! report summary of whole calculation
write(6,'(/,a)') ' ###########################################################################'
call MPI_file_close(fileUnit,ierr)
close(statUnit)
call quit(0) ! no complaints ;)

View File

@ -476,8 +476,7 @@ subroutine formResidual(da_local,x_local, &
! begin of new iteration
newIteration: if (totalIter <= PETScIter) then
totalIter = totalIter + 1
write(6,'(1x,a,3(a,'//IO_intOut(itmax)//'))') &
trim(incInfo), ' @ Iteration ', itmin, '≤',totalIter+1, '≤', itmax
write(6,'(1x,a,3(a,i0))') trim(incInfo), ' @ Iteration ', itmin, '≤',totalIter+1, '≤', itmax
if (iand(debug_level(debug_spectral),debug_spectralRotation) /= 0) &
write(6,'(/,a,/,3(3(f12.7,1x)/))',advance='no') &
' deformation gradient aim (lab) =', transpose(params%rotation_BC%rotTensor2(F_aim,active=.true.))

View File

@ -440,8 +440,7 @@ subroutine formResidual(in, F, &
! begin of new iteration
newIteration: if (totalIter <= PETScIter) then
totalIter = totalIter + 1
write(6,'(1x,a,3(a,'//IO_intOut(itmax)//'))') &
trim(incInfo), ' @ Iteration ', itmin, '≤',totalIter, '≤', itmax
write(6,'(1x,a,3(a,i0))') trim(incInfo), ' @ Iteration ', itmin, '≤',totalIter, '≤', itmax
if (iand(debug_level(debug_spectral),debug_spectralRotation) /= 0) &
write(6,'(/,a,/,3(3(f12.7,1x)/))',advance='no') &
' deformation gradient aim (lab) =', transpose(params%rotation_BC%rotTensor2(F_aim,active=.true.))

View File

@ -509,8 +509,7 @@ subroutine formResidual(in, FandF_tau, &
! begin of new iteration
newIteration: if (totalIter <= PETScIter) then
totalIter = totalIter + 1
write(6,'(1x,a,3(a,'//IO_intOut(itmax)//'))') &
trim(incInfo), ' @ Iteration ', itmin, '≤',totalIter, '≤', itmax
write(6,'(1x,a,3(a,i0))') trim(incInfo), ' @ Iteration ', itmin, '≤',totalIter, '≤', itmax
if (iand(debug_level(debug_spectral),debug_spectralRotation) /= 0) &
write(6,'(/,a,/,3(3(f12.7,1x)/))',advance='no') &
' deformation gradient aim (lab) =', transpose(params%rotation_BC%rotTensor2(F_aim,active=.true.))

View File

@ -23,7 +23,6 @@ module homogenization
use damage_local
use damage_nonlocal
use results
use HDF5_utilities
implicit none
private
@ -36,12 +35,6 @@ module homogenization
materialpoint_P !< first P--K stress of IP
real(pReal), dimension(:,:,:,:,:,:), allocatable, public :: &
materialpoint_dPdF !< tangent of first P--K stress at IP
real(pReal), dimension(:,:,:), allocatable, public :: &
materialpoint_results !< results array of material point
integer, public, protected :: &
materialpoint_sizeResults, &
thermal_maxSizePostResults, &
damage_maxSizePostResults
real(pReal), dimension(:,:,:,:), allocatable :: &
materialpoint_subF0, & !< def grad of IP at beginning of homogenization increment
@ -126,7 +119,6 @@ module homogenization
public :: &
homogenization_init, &
materialpoint_stressAndItsTangent, &
materialpoint_postResults, &
homogenization_results
contains
@ -137,14 +129,6 @@ contains
!--------------------------------------------------------------------------------------------------
subroutine homogenization_init
integer, parameter :: FILEUNIT = 200
integer :: e,i,p
integer, dimension(:,:), pointer :: thisSize
integer, dimension(:) , pointer :: thisNoutput
character(len=64), dimension(:,:), pointer :: thisOutput
character(len=32) :: outputName !< name of output, intermediate fix until HDF5 output is ready
logical :: valid
if (any(homogenization_type == HOMOGENIZATION_NONE_ID)) call mech_none_init
if (any(homogenization_type == HOMOGENIZATION_ISOSTRAIN_ID)) call mech_isostrain_init
if (any(homogenization_type == HOMOGENIZATION_RGC_ID)) call mech_RGC_init
@ -157,80 +141,6 @@ subroutine homogenization_init
if (any(damage_type == DAMAGE_local_ID)) call damage_local_init
if (any(damage_type == DAMAGE_nonlocal_ID)) call damage_nonlocal_init
!--------------------------------------------------------------------------------------------------
! write description file for homogenization output
mainProcess: if (worldrank == 0) then
call IO_write_jobFile(FILEUNIT,'outputHomogenization')
do p = 1,size(config_homogenization)
if (any(material_homogenizationAt == p)) then
write(FILEUNIT,'(/,a,/)') '['//trim(config_name_homogenization(p))//']'
write(FILEUNIT,'(a)') '(type) n/a'
write(FILEUNIT,'(a,i4)') '(ngrains)'//char(9),homogenization_Ngrains(p)
i = thermal_typeInstance(p) ! which instance of this thermal type
valid = .true. ! assume valid
select case(thermal_type(p)) ! split per thermal type
case (THERMAL_isothermal_ID)
outputName = THERMAL_isothermal_label
thisNoutput => null()
thisOutput => null()
thisSize => null()
case (THERMAL_adiabatic_ID)
outputName = THERMAL_adiabatic_label
thisNoutput => thermal_adiabatic_Noutput
thisOutput => thermal_adiabatic_output
thisSize => thermal_adiabatic_sizePostResult
case (THERMAL_conduction_ID)
outputName = THERMAL_conduction_label
thisNoutput => thermal_conduction_Noutput
thisOutput => thermal_conduction_output
thisSize => thermal_conduction_sizePostResult
case default
valid = .false.
end select
if (valid) then
write(FILEUNIT,'(a)') '(thermal)'//char(9)//trim(outputName)
if (thermal_type(p) /= THERMAL_isothermal_ID) then
do e = 1,thisNoutput(i)
write(FILEUNIT,'(a,i4)') trim(thisOutput(e,i))//char(9),thisSize(e,i)
enddo
endif
endif
i = damage_typeInstance(p) ! which instance of this damage type
valid = .true. ! assume valid
select case(damage_type(p)) ! split per damage type
case (DAMAGE_none_ID)
outputName = DAMAGE_none_label
thisNoutput => null()
thisOutput => null()
thisSize => null()
case (DAMAGE_local_ID)
outputName = DAMAGE_local_label
thisNoutput => damage_local_Noutput
thisOutput => damage_local_output
thisSize => damage_local_sizePostResult
case (DAMAGE_nonlocal_ID)
outputName = DAMAGE_nonlocal_label
thisNoutput => damage_nonlocal_Noutput
thisOutput => damage_nonlocal_output
thisSize => damage_nonlocal_sizePostResult
case default
valid = .false.
end select
if (valid) then
write(FILEUNIT,'(a)') '(damage)'//char(9)//trim(outputName)
if (damage_type(p) /= DAMAGE_none_ID) then
do e = 1,thisNoutput(i)
write(FILEUNIT,'(a,i4)') trim(thisOutput(e,i))//char(9),thisSize(e,i)
enddo
endif
endif
endif
enddo
close(FILEUNIT)
endif mainProcess
call config_deallocate('material.config/homogenization')
!--------------------------------------------------------------------------------------------------
@ -250,23 +160,7 @@ subroutine homogenization_init
allocate(materialpoint_converged(discretization_nIP,discretization_nElem), source=.true.)
allocate(materialpoint_doneAndHappy(2,discretization_nIP,discretization_nElem), source=.true.)
!--------------------------------------------------------------------------------------------------
! allocate and initialize global state and postresults variables
thermal_maxSizePostResults = 0
damage_maxSizePostResults = 0
do p = 1,size(config_homogenization)
thermal_maxSizePostResults = max(thermal_maxSizePostResults, thermalState(p)%sizePostResults)
damage_maxSizePostResults = max(damage_maxSizePostResults, damageState (p)%sizePostResults)
enddo
materialpoint_sizeResults = 1 & ! grain count
+ 1 + thermal_maxSizePostResults &
+ damage_maxSizePostResults &
+ homogenization_maxNgrains * ( 1 & ! crystallite size
+ 1 + constitutive_source_maxSizePostResults)
allocate(materialpoint_results(materialpoint_sizeResults,discretization_nIP,discretization_nElem))
write(6,'(/,a)') ' <<<+- homogenization init -+>>>'
write(6,'(/,a)') ' <<<+- homogenization init -+>>>'; flush(6)
if (iand(debug_level(debug_homogenization), debug_levelBasic) /= 0) then
write(6,'(a32,1x,7(i8,1x))') 'materialpoint_dPdF: ', shape(materialpoint_dPdF)
@ -582,52 +476,6 @@ subroutine materialpoint_stressAndItsTangent(updateJaco,dt)
end subroutine materialpoint_stressAndItsTangent
!--------------------------------------------------------------------------------------------------
!> @brief parallelized calculation of result array at material points
!--------------------------------------------------------------------------------------------------
subroutine materialpoint_postResults
integer :: &
thePos, &
theSize, &
myNgrains, &
g, & !< grain number
i, & !< integration point number
e !< element number
!$OMP PARALLEL DO PRIVATE(myNgrains,thePos,theSize)
elementLooping: do e = FEsolving_execElem(1),FEsolving_execElem(2)
myNgrains = homogenization_Ngrains(material_homogenizationAt(e))
IpLooping: do i = FEsolving_execIP(1,e),FEsolving_execIP(2,e)
thePos = 0
theSize = thermalState (material_homogenizationAt(e))%sizePostResults &
+ damageState (material_homogenizationAt(e))%sizePostResults
materialpoint_results(thePos+1,i,e) = real(theSize,pReal) ! tell size of homogenization results
thePos = thePos + 1
if (theSize > 0) then ! any homogenization results to mention?
materialpoint_results(thePos+1:thePos+theSize,i,e) = postResults(i,e)
thePos = thePos + theSize
endif
materialpoint_results(thePos+1,i,e) = real(myNgrains,pReal) ! tell number of grains at materialpoint
thePos = thePos + 1
grainLooping :do g = 1,myNgrains
theSize = 1 + &
1 + &
sum(sourceState(material_phaseAt(g,e))%p(:)%sizePostResults)
materialpoint_results(thePos+1:thePos+theSize,i,e) = crystallite_postResults(g,i,e) ! tell crystallite results
thePos = thePos + theSize
enddo grainLooping
enddo IpLooping
enddo elementLooping
!$OMP END PARALLEL DO
end subroutine materialpoint_postResults
!--------------------------------------------------------------------------------------------------
!> @brief partition material point def grad onto constituents
!--------------------------------------------------------------------------------------------------
@ -739,90 +587,58 @@ subroutine averageStressAndItsTangent(ip,el)
end subroutine averageStressAndItsTangent
!--------------------------------------------------------------------------------------------------
!> @brief return array of homogenization results for post file inclusion; call only
!> if homogenization_sizePostResults(i,e) > 0
!--------------------------------------------------------------------------------------------------
function postResults(ip,el)
integer, intent(in) :: &
ip, & !< integration point
el !< element number
real(pReal), dimension( thermalState (material_homogenizationAt(el))%sizePostResults &
+ damageState (material_homogenizationAt(el))%sizePostResults) :: &
postResults
integer :: &
startPos, endPos ,&
homog
postResults = 0.0_pReal
startPos = 1
endPos = thermalState(material_homogenizationAt(el))%sizePostResults
chosenThermal: select case (thermal_type(material_homogenizationAt(el)))
case (THERMAL_adiabatic_ID) chosenThermal
homog = material_homogenizationAt(el)
postResults(startPos:endPos) = &
thermal_adiabatic_postResults(homog,thermal_typeInstance(homog),thermalMapping(homog)%p(ip,el))
case (THERMAL_conduction_ID) chosenThermal
homog = material_homogenizationAt(el)
postResults(startPos:endPos) = &
thermal_conduction_postResults(homog,thermal_typeInstance(homog),thermalMapping(homog)%p(ip,el))
end select chosenThermal
startPos = endPos + 1
endPos = endPos + damageState(material_homogenizationAt(el))%sizePostResults
chosenDamage: select case (damage_type(material_homogenizationAt(el)))
case (DAMAGE_local_ID) chosenDamage
postResults(startPos:endPos) = damage_local_postResults(ip, el)
case (DAMAGE_nonlocal_ID) chosenDamage
postResults(startPos:endPos) = damage_nonlocal_postResults(ip, el)
end select chosenDamage
end function postResults
!--------------------------------------------------------------------------------------------------
!> @brief writes homogenization results to HDF5 output file
!--------------------------------------------------------------------------------------------------
subroutine homogenization_results
#if defined(PETSc) || defined(DAMASK_HDF5)
use material, only: &
material_homogenization_type => homogenization_type
integer :: p
character(len=256) :: group
character(len=pStringLen) :: group_base,group
!real(pReal), dimension(:,:,:), allocatable :: temp
do p=1,size(config_name_homogenization)
group = trim('current/materialpoint')//'/'//trim(config_name_homogenization(p))
call HDF5_closeGroup(results_addGroup(group))
group = trim(group)//'/mech'
call HDF5_closeGroup(results_addGroup(group))
select case(material_homogenization_type(p))
case(HOMOGENIZATION_rgc_ID)
call mech_RGC_results(homogenization_typeInstance(p),group)
end select
group = trim('current/materialpoint')//'/'//trim(config_name_homogenization(p))//'/generic'
call HDF5_closeGroup(results_addGroup(group))
group_base = 'current/materialpoint/'//trim(config_name_homogenization(p))
call results_closeGroup(results_addGroup(group_base))
group = trim(group_base)//'/generic'
call results_closeGroup(results_addGroup(group))
!temp = reshape(materialpoint_F,[3,3,discretization_nIP*discretization_nElem])
!call results_writeDataset(group,temp,'F',&
! 'deformation gradient','1')
!temp = reshape(materialpoint_P,[3,3,discretization_nIP*discretization_nElem])
!call results_writeDataset(group,temp,'P',&
! '1st Piola-Kirchhoff stress','Pa')
group = trim(group_base)//'/mech'
call results_closeGroup(results_addGroup(group))
select case(material_homogenization_type(p))
case(HOMOGENIZATION_rgc_ID)
call mech_RGC_results(homogenization_typeInstance(p),group)
end select
group = trim(group_base)//'/damage'
call results_closeGroup(results_addGroup(group))
select case(damage_type(p))
case(DAMAGE_LOCAL_ID)
call damage_local_results(p,group)
case(DAMAGE_NONLOCAL_ID)
call damage_nonlocal_results(p,group)
end select
group = trim(group_base)//'/thermal'
call results_closeGroup(results_addGroup(group))
select case(thermal_type(p))
case(THERMAL_ADIABATIC_ID)
call thermal_adiabatic_results(p,group)
case(THERMAL_CONDUCTION_ID)
call thermal_conduction_results(p,group)
end select
enddo
enddo
#endif
end subroutine homogenization_results
end module homogenization
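The rewritten homogenization_results builds a fixed group hierarchy per homogenization, current/materialpoint/<name>/{generic,mech,damage,thermal}, and dispatches each physics module into its own subgroup. A sketch of the path construction only (group names as in the code above; the homogenization name is hypothetical):

program demo_group_paths
  implicit none
  character(len=*), parameter :: name = 'SX'              ! hypothetical homogenization name
  character(len=256) :: group_base, group
  group_base = 'current/materialpoint/'//name
  group = trim(group_base)//'/damage'
  write(6,'(a)') trim(group)                              ! prints: current/materialpoint/SX/damage
end program demo_group_paths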

View File

@ -74,12 +74,10 @@ module subroutine mech_RGC_init
NofMyHomog, &
sizeState, nIntFaceTot
character(len=65536), dimension(0), parameter :: emptyStringArray = [character(len=65536)::]
integer(kind(undefined_ID)) :: &
outputID
character(len=65536), dimension(:), allocatable :: &
character(len=pStringLen), dimension(:), allocatable :: &
outputs
write(6,'(/,a)') ' <<<+- homogenization_'//HOMOGENIZATION_RGC_label//' init -+>>>'
@ -928,7 +926,6 @@ end subroutine mech_RGC_averageStressAndItsTangent
!> @brief writes results to HDF5 output file
!--------------------------------------------------------------------------------------------------
module subroutine mech_RGC_results(instance,group)
#if defined(PETSc) || defined(DAMASK_HDF5)
integer, intent(in) :: instance
character(len=*), intent(in) :: group
@ -962,11 +959,6 @@ module subroutine mech_RGC_results(instance,group)
enddo outputsLoop
end associate
#else
integer, intent(in) :: instance
character(len=*), intent(in) :: group
#endif
end subroutine mech_RGC_results

View File

@ -33,7 +33,7 @@ module subroutine mech_isostrain_init
Ninstance, &
h, &
NofMyHomog
character(len=65536) :: &
character(len=pStringLen) :: &
tag = ''
write(6,'(/,a)') ' <<<+- homogenization_'//HOMOGENIZATION_ISOSTRAIN_label//' init -+>>>'

Some files were not shown because too many files have changed in this diff.