diff --git a/.gitattributes b/.gitattributes index 2f356076d..7a5c5bde5 100644 --- a/.gitattributes +++ b/.gitattributes @@ -3,8 +3,8 @@ # always use LF, even if the files are edited on windows, they need to be compiled/used on unix * text eol=lf -installation/mods_Abaqus/abaqus_v6_windows.env eol=crlf -# Denote all files that are truly binary and should not be modified. +# Denote all files that are binary and should not be modified. *.png binary *.jpg binary -*.cae binary +*.hdf5 binary +*.pdf binary diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 47cb2810c..7958db9b8 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -1,6 +1,7 @@ --- stages: - prepareAll + - python - preprocessing - postprocessing - compilePETSc @@ -104,13 +105,16 @@ checkout: - release ################################################################################################### -OrientationRelationship: - stage: preprocessing - script: OrientationRelationship/test.py +Pytest: + stage: python + script: + - cd $DAMASKROOT/python + - pytest except: - master - release +################################################################################################### Pre_SeedGeneration: stage: preprocessing script: PreProcessing_SeedGeneration/test.py @@ -387,7 +391,6 @@ Marc_compileIfort: stage: compileMarc script: - module load $IntelMarc $HDF5Marc $MSC - - export DAMASK_HDF5=ON - Marc_compileIfort/test.py except: - master @@ -398,7 +401,6 @@ Hex_elastic: stage: marc script: - module load $IntelMarc $HDF5Marc $MSC - - export DAMASK_HDF5=ON - Hex_elastic/test.py except: - master @@ -408,7 +410,6 @@ CubicFCC_elastic: stage: marc script: - module load $IntelMarc $HDF5Marc $MSC - - export DAMASK_HDF5=ON - CubicFCC_elastic/test.py except: - master @@ -418,7 +419,6 @@ CubicBCC_elastic: stage: marc script: - module load $IntelMarc $HDF5Marc $MSC - - export DAMASK_HDF5=ON - CubicBCC_elastic/test.py except: - master @@ -428,7 +428,6 @@ J2_plasticBehavior: stage: marc script: - module load $IntelMarc $HDF5Marc $MSC - - export DAMASK_HDF5=ON - J2_plasticBehavior/test.py except: - master @@ -495,18 +494,6 @@ GridSolver: - master - release -Processing: - stage: createDocumentation - script: - - cd $DAMASKROOT/processing/pre - - $DAMASKROOT/PRIVATE/documenting/scriptHelpToWiki.py --debug *.py - - cd $DAMASKROOT/processing/post - - rm vtk2ang.py DAD*.py - - $DAMASKROOT/PRIVATE/documenting/scriptHelpToWiki.py --debug *.py - except: - - master - - release - ################################################################################################## backupData: stage: saveDocumentation @@ -517,7 +504,6 @@ backupData: - mv $TESTROOT/performance/time.png $BACKUP/${CI_PIPELINE_ID}_${CI_COMMIT_SHA}/ - mv $TESTROOT/performance/memory.png $BACKUP/${CI_PIPELINE_ID}_${CI_COMMIT_SHA}/ - mv $DAMASKROOT/PRIVATE/documenting/DAMASK_* $BACKUP/${CI_PIPELINE_ID}_${CI_COMMIT_SHA}/ - - mv $DAMASKROOT/processing $BACKUP/${CI_PIPELINE_ID}_${CI_COMMIT_SHA}/ only: - development diff --git a/CMakeLists.txt b/CMakeLists.txt index 3d53000d2..e44f5eab2 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -113,13 +113,11 @@ if (DAMASK_SOLVER STREQUAL "grid") elseif (DAMASK_SOLVER STREQUAL "fem" OR DAMASK_SOLVER STREQUAL "mesh") project (damask-mesh Fortran C) add_definitions (-DFEM) - message ("Building FEM Solver\n") + message ("Building Mesh Solver\n") else () message (FATAL_ERROR "Build target (DAMASK_SOLVER) is not defined") endif () - -# set linker commands (needs to be done after defining the project) -set (CMAKE_LINKER "${PETSC_LINKER}") 
+list(APPEND CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake) if (CMAKE_BUILD_TYPE STREQUAL "") set (CMAKE_BUILD_TYPE "RELEASE") @@ -168,9 +166,6 @@ add_definitions (-DDAMASKVERSION="${DAMASK_V}") # definition of other macros add_definitions (-DPETSc) -set (DAMASK_INCLUDE_FLAGS "${DAMASK_INCLUDE_FLAGS} ${PETSC_INCLUDES}") -list(APPEND CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake) - if (CMAKE_Fortran_COMPILER_ID STREQUAL "Intel") include(Compiler-Intel) elseif(CMAKE_Fortran_COMPILER_ID STREQUAL "GNU") @@ -183,14 +178,14 @@ endif () set (CMAKE_Fortran_FLAGS_${CMAKE_BUILD_TYPE} "${BUILDCMD_PRE} ${OPENMP_FLAGS} ${STANDARD_CHECK} ${OPTIMIZATION_FLAGS} ${COMPILE_FLAGS} ${PRECISION_FLAGS}") -set (CMAKE_Fortran_LINK_EXECUTABLE "${BUILDCMD_PRE} ${CMAKE_LINKER} ${OPENMP_FLAGS} ${OPTIMIZATION_FLAGS} ${LINKER_FLAGS}") +set (CMAKE_Fortran_LINK_EXECUTABLE "${BUILDCMD_PRE} ${PETSC_LINKER} ${OPENMP_FLAGS} ${OPTIMIZATION_FLAGS} ${LINKER_FLAGS}") if (CMAKE_BUILD_TYPE STREQUAL "DEBUG") set (CMAKE_Fortran_FLAGS_${CMAKE_BUILD_TYPE} "${CMAKE_Fortran_FLAGS_${CMAKE_BUILD_TYPE}} ${DEBUG_FLAGS}") set (CMAKE_Fortran_LINK_EXECUTABLE "${CMAKE_Fortran_LINK_EXECUTABLE} ${DEBUG_FLAGS}") endif () -set (CMAKE_Fortran_FLAGS_${CMAKE_BUILD_TYPE} "${CMAKE_Fortran_FLAGS_${CMAKE_BUILD_TYPE}} ${DAMASK_INCLUDE_FLAGS} ${BUILDCMD_POST}") +set (CMAKE_Fortran_FLAGS_${CMAKE_BUILD_TYPE} "${CMAKE_Fortran_FLAGS_${CMAKE_BUILD_TYPE}} ${PETSC_INCLUDES} ${BUILDCMD_POST}") set (CMAKE_Fortran_LINK_EXECUTABLE "${CMAKE_Fortran_LINK_EXECUTABLE} -o ${PETSC_EXTERNAL_LIB} ${BUILDCMD_POST}") message ("Fortran Compiler Flags:\n${CMAKE_Fortran_FLAGS_${CMAKE_BUILD_TYPE}}\n") diff --git a/CONFIG b/CONFIG index 53e87b647..8da4d5b96 100644 --- a/CONFIG +++ b/CONFIG @@ -1,11 +1,7 @@ # "set"-syntax needed only for tcsh (but works with bash and zsh) -# DAMASK_ROOT will be expanded - set DAMASK_NUM_THREADS = 4 set MSC_ROOT = /opt/msc set MARC_VERSION = 2019 set ABAQUS_VERSION = 2019 - -set DAMASK_HDF5 = ON diff --git a/LICENSE b/LICENSE index 1ab20178c..3ffc3b9e3 100644 --- a/LICENSE +++ b/LICENSE @@ -1,4 +1,4 @@ -Copyright 2011-19 Max-Planck-Institut für Eisenforschung GmbH +Copyright 2011-20 Max-Planck-Institut für Eisenforschung GmbH DAMASK is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by diff --git a/Makefile b/Makefile index b24e3d36b..0d9374fad 100644 --- a/Makefile +++ b/Makefile @@ -26,7 +26,7 @@ build/grid: .PHONY: build/mesh build/mesh: @mkdir -p build/mesh - @(cd build/mesh; cmake -Wno-dev -DDAMASK_SOLVER=FEM -DCMAKE_INSTALL_PREFIX=${DAMASK_ROOT} -DCMAKE_BUILD_TYPE=${BUILD_TYPE} -DBUILDCMD_POST=${BUILDCMD_POST} -DBUILDCMD_PRE=${BUILDCMD_PRE} -DOPTIMIZATION=${OPTIMIZATION} -DOPENMP=${OPENMP} ../../;) + @(cd build/mesh; cmake -Wno-dev -DDAMASK_SOLVER=MESH -DCMAKE_INSTALL_PREFIX=${DAMASK_ROOT} -DCMAKE_BUILD_TYPE=${BUILD_TYPE} -DBUILDCMD_POST=${BUILDCMD_POST} -DBUILDCMD_PRE=${BUILDCMD_PRE} -DOPTIMIZATION=${OPTIMIZATION} -DOPENMP=${OPENMP} ../../;) .PHONY: clean clean: diff --git a/PRIVATE b/PRIVATE index 524e86c11..66d562c75 160000 --- a/PRIVATE +++ b/PRIVATE @@ -1 +1 @@ -Subproject commit 524e86c117d816e3bd873eed7663e258a6f2e139 +Subproject commit 66d562c755cd9aa4bbb8280c509383014acd52db diff --git a/VERSION b/VERSION index 6464fe0a4..a7308c0ee 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -v2.0.3-1111-g374980da +v2.0.3-1406-g5fc1abae diff --git a/cmake/Compiler-GNU.cmake b/cmake/Compiler-GNU.cmake index 008c0c90e..6a9973bc6 100644 --- a/cmake/Compiler-GNU.cmake +++ 
b/cmake/Compiler-GNU.cmake @@ -2,129 +2,129 @@ # GNU Compiler ################################################################################################### - if (OPENMP) - set (OPENMP_FLAGS "-fopenmp") - endif () +if (OPENMP) + set (OPENMP_FLAGS "-fopenmp") +endif () - if (OPTIMIZATION STREQUAL "OFF") - set (OPTIMIZATION_FLAGS "-O0" ) - elseif (OPTIMIZATION STREQUAL "DEFENSIVE") - set (OPTIMIZATION_FLAGS "-O2") - elseif (OPTIMIZATION STREQUAL "AGGRESSIVE") - set (OPTIMIZATION_FLAGS "-O3 -ffast-math -funroll-loops -ftree-vectorize") - endif () +if (OPTIMIZATION STREQUAL "OFF") + set (OPTIMIZATION_FLAGS "-O0" ) +elseif (OPTIMIZATION STREQUAL "DEFENSIVE") + set (OPTIMIZATION_FLAGS "-O2") +elseif (OPTIMIZATION STREQUAL "AGGRESSIVE") + set (OPTIMIZATION_FLAGS "-O3 -ffast-math -funroll-loops -ftree-vectorize") +endif () - set (STANDARD_CHECK "-std=f2008ts -pedantic-errors" ) - set (LINKER_FLAGS "${LINKER_FLAGS} -Wl") - # options parsed directly to the linker - set (LINKER_FLAGS "${LINKER_FLAGS},-undefined,dynamic_lookup" ) - # ensure to link against dynamic libraries +set (STANDARD_CHECK "-std=f2008ts -pedantic-errors" ) +set (LINKER_FLAGS "${LINKER_FLAGS} -Wl") +# options parsed directly to the linker +set (LINKER_FLAGS "${LINKER_FLAGS},-undefined,dynamic_lookup" ) +# ensure to link against dynamic libraries #------------------------------------------------------------------------------------------------ # Fine tuning compilation options - set (COMPILE_FLAGS "${COMPILE_FLAGS} -xf95-cpp-input") - # preprocessor +set (COMPILE_FLAGS "${COMPILE_FLAGS} -xf95-cpp-input") +# preprocessor - set (COMPILE_FLAGS "${COMPILE_FLAGS} -ffree-line-length-132") - # restrict line length to the standard 132 characters (lattice.f90 require more characters) +set (COMPILE_FLAGS "${COMPILE_FLAGS} -ffree-line-length-132") +# restrict line length to the standard 132 characters (lattice.f90 require more characters) - set (COMPILE_FLAGS "${COMPILE_FLAGS} -fimplicit-none") - # assume "implicit none" even if not present in source +set (COMPILE_FLAGS "${COMPILE_FLAGS} -fimplicit-none") +# assume "implicit none" even if not present in source - set (COMPILE_FLAGS "${COMPILE_FLAGS} -Wall") - # sets the following Fortran options: - # -Waliasing: warn about possible aliasing of dummy arguments. Specifically, it warns if the same actual argument is associated with a dummy argument with "INTENT(IN)" and a dummy argument with "INTENT(OUT)" in a call with an explicit interface. - # -Wampersand: checks if a character expression is continued proberly by an ampersand at the end of the line and at the beginning of the new line - # -Warray-bounds: checks if array reference is out of bounds at compile time. use -fcheck-bounds to also check during runtime - # -Wconversion: warn about implicit conversions between different type - # -Wsurprising: warn when "suspicious" code constructs are encountered. While technically legal these usually indicate that an error has been made. - # -Wc-binding-type: - # -Wintrinsics-std: only standard intrisics are available, e.g. 
"call flush(6)" will cause an error - # -Wno-tabs: do not allow tabs in source - # -Wintrinsic-shadow: warn if a user-defined procedure or module procedure has the same name as an intrinsic - # -Wline-truncation: - # -Wtarget-lifetime: - # -Wreal-q-constant: warn about real-literal-constants with 'q' exponent-letter - # -Wunused: a number of unused-xxx warnings - # and sets the general (non-Fortran options) options: - # -Waddress - # -Warray-bounds (only with -O2) - # -Wc++11-compat - # -Wchar-subscripts - # -Wcomment - # -Wformat - # -Wmaybe-uninitialized - # -Wnonnull - # -Wparentheses - # -Wpointer-sign - # -Wreorder - # -Wreturn-type - # -Wsequence-point - # -Wstrict-aliasing - # -Wstrict-overflow=1 - # -Wswitch - # -Wtrigraphs - # -Wuninitialized - # -Wunknown-pragmas - # -Wunused-function - # -Wunused-label - # -Wunused-value - # -Wunused-variable - # -Wvolatile-register-var +set (COMPILE_FLAGS "${COMPILE_FLAGS} -Wall") +# sets the following Fortran options: +# -Waliasing: warn about possible aliasing of dummy arguments. Specifically, it warns if the same actual argument is associated with a dummy argument with "INTENT(IN)" and a dummy argument with "INTENT(OUT)" in a call with an explicit interface. +# -Wampersand: checks if a character expression is continued proberly by an ampersand at the end of the line and at the beginning of the new line +# -Warray-bounds: checks if array reference is out of bounds at compile time. use -fcheck-bounds to also check during runtime +# -Wconversion: warn about implicit conversions between different type +# -Wsurprising: warn when "suspicious" code constructs are encountered. While technically legal these usually indicate that an error has been made. +# -Wc-binding-type: +# -Wintrinsics-std: only standard intrisics are available, e.g. 
"call flush(6)" will cause an error +# -Wno-tabs: do not allow tabs in source +# -Wintrinsic-shadow: warn if a user-defined procedure or module procedure has the same name as an intrinsic +# -Wline-truncation: +# -Wtarget-lifetime: +# -Wreal-q-constant: warn about real-literal-constants with 'q' exponent-letter +# -Wunused: a number of unused-xxx warnings +# and sets the general (non-Fortran options) options: +# -Waddress +# -Warray-bounds (only with -O2) +# -Wc++11-compat +# -Wchar-subscripts +# -Wcomment +# -Wformat +# -Wmaybe-uninitialized +# -Wnonnull +# -Wparentheses +# -Wpointer-sign +# -Wreorder +# -Wreturn-type +# -Wsequence-point +# -Wstrict-aliasing +# -Wstrict-overflow=1 +# -Wswitch +# -Wtrigraphs +# -Wuninitialized +# -Wunknown-pragmas +# -Wunused-function +# -Wunused-label +# -Wunused-value +# -Wunused-variable +# -Wvolatile-register-var - set (COMPILE_FLAGS "${COMPILE_FLAGS} -Wextra") - # sets the following Fortran options: - # -Wunuses-parameter: - # -Wcompare-reals: - # and sets the general (non-Fortran options) options: - # -Wclobbered - # -Wempty-body - # -Wignored-qualifiers - # -Wmissing-field-initializers - # -Woverride-init - # -Wsign-compare - # -Wtype-limits - # -Wuninitialized - # -Wunused-but-set-parameter (only with -Wunused or -Wall) - # -Wno-globals +set (COMPILE_FLAGS "${COMPILE_FLAGS} -Wextra") +# sets the following Fortran options: +# -Wunuses-parameter: +# -Wcompare-reals: +# and sets the general (non-Fortran options) options: +# -Wclobbered +# -Wempty-body +# -Wignored-qualifiers +# -Wmissing-field-initializers +# -Woverride-init +# -Wsign-compare +# -Wtype-limits +# -Wuninitialized +# -Wunused-but-set-parameter (only with -Wunused or -Wall) +# -Wno-globals - set (COMPILE_FLAGS "${COMPILE_FLAGS} -Wcharacter-truncation") - # warn if character expressions (strings) are truncated +set (COMPILE_FLAGS "${COMPILE_FLAGS} -Wcharacter-truncation") +# warn if character expressions (strings) are truncated - set (COMPILE_FLAGS "${COMPILE_FLAGS} -Wunderflow") - # produce a warning when numerical constant expressions are encountered, which yield an UNDERFLOW during compilation +set (COMPILE_FLAGS "${COMPILE_FLAGS} -Wunderflow") +# produce a warning when numerical constant expressions are encountered, which yield an UNDERFLOW during compilation - set (COMPILE_FLAGS "${COMPILE_FLAGS} -Wsuggest-attribute=pure") - set (COMPILE_FLAGS "${COMPILE_FLAGS} -Wsuggest-attribute=noreturn") - set (COMPILE_FLAGS "${COMPILE_FLAGS} -Wconversion-extra") - set (COMPILE_FLAGS "${COMPILE_FLAGS} -Wimplicit-procedure") - set (COMPILE_FLAGS "${COMPILE_FLAGS} -Wno-unused-parameter") - set (COMPILE_FLAGS "${COMPILE_FLAGS} -ffpe-summary=all") - # print summary of floating point exeptions (invalid,zero,overflow,underflow,inexact,denormal) +set (COMPILE_FLAGS "${COMPILE_FLAGS} -Wsuggest-attribute=pure") +set (COMPILE_FLAGS "${COMPILE_FLAGS} -Wsuggest-attribute=noreturn") +set (COMPILE_FLAGS "${COMPILE_FLAGS} -Wconversion-extra") +set (COMPILE_FLAGS "${COMPILE_FLAGS} -Wimplicit-procedure") +set (COMPILE_FLAGS "${COMPILE_FLAGS} -Wno-unused-parameter") +set (COMPILE_FLAGS "${COMPILE_FLAGS} -ffpe-summary=all") +# print summary of floating point exeptions (invalid,zero,overflow,underflow,inexact,denormal) - # Additional options - # -Warray-temporarieswarnings: because we have many temporary arrays (performance issue?): - # -Wimplicit-interface: no interfaces for lapack/MPI routines - # -Wunsafe-loop-optimizations: warn if the loop cannot be optimized due to nontrivial assumptions. 
+# Additional options +# -Warray-temporarieswarnings: because we have many temporary arrays (performance issue?): +# -Wimplicit-interface: no interfaces for lapack/MPI routines +# -Wunsafe-loop-optimizations: warn if the loop cannot be optimized due to nontrivial assumptions. #------------------------------------------------------------------------------------------------ # Runtime debugging - set (DEBUG_FLAGS "${DEBUG_FLAGS} -ffpe-trap=invalid,zero,overflow") - # stop execution if floating point exception is detected (NaN is silent) +set (DEBUG_FLAGS "${DEBUG_FLAGS} -ffpe-trap=invalid,zero,overflow") +# stop execution if floating point exception is detected (NaN is silent) - set (DEBUG_FLAGS "${DEBUG_FLAGS} -g") - # Generate symbolic debugging information in the object file +set (DEBUG_FLAGS "${DEBUG_FLAGS} -g") +# Generate symbolic debugging information in the object file - set (DEBUG_FLAGS "${DEBUG_FLAGS} -fbacktrace") - set (DEBUG_FLAGS "${DEBUG_FLAGS} -fdump-core") - set (DEBUG_FLAGS "${DEBUG_FLAGS} -fcheck=all") - # checks for (array-temps,bounds,do,mem,pointer,recursion) +set (DEBUG_FLAGS "${DEBUG_FLAGS} -fbacktrace") +set (DEBUG_FLAGS "${DEBUG_FLAGS} -fdump-core") +set (DEBUG_FLAGS "${DEBUG_FLAGS} -fcheck=all") +# checks for (array-temps,bounds,do,mem,pointer,recursion) - # Additional options - # -ffpe-trap=precision,denormal,underflow +# Additional options +# -ffpe-trap=precision,denormal,underflow #------------------------------------------------------------------------------------------------ # precision settings - set (PRECISION_FLAGS "${PRECISION_FLAGS} -fdefault-real-8") - # set precision to 8 bytes for standard real (=8 for pReal). Will set size of double to 16 bytes as long as -fdefault-double-8 is not set - set (PRECISION_FLAGS "${PRECISION_FLAGS} -fdefault-double-8") - # set precision to 8 bytes for double real, would be 16 bytes if -fdefault-real-8 is used +set (PRECISION_FLAGS "${PRECISION_FLAGS} -fdefault-real-8") +# set precision to 8 bytes for standard real (=8 for pReal). 
Will set size of double to 16 bytes as long as -fdefault-double-8 is not set +set (PRECISION_FLAGS "${PRECISION_FLAGS} -fdefault-double-8") +# set precision to 8 bytes for double real, would be 16 bytes if -fdefault-real-8 is used diff --git a/cmake/Compiler-Intel.cmake b/cmake/Compiler-Intel.cmake index 60ed46cbc..1a2c2c455 100644 --- a/cmake/Compiler-Intel.cmake +++ b/cmake/Compiler-Intel.cmake @@ -1,116 +1,116 @@ ################################################################################################### # Intel Compiler ################################################################################################### - if (OPENMP) - set (OPENMP_FLAGS "-qopenmp -parallel") - endif () +if (OPENMP) + set (OPENMP_FLAGS "-qopenmp -parallel") +endif () - if (OPTIMIZATION STREQUAL "OFF") - set (OPTIMIZATION_FLAGS "-O0 -no-ip") - elseif (OPTIMIZATION STREQUAL "DEFENSIVE") - set (OPTIMIZATION_FLAGS "-O2") - elseif (OPTIMIZATION STREQUAL "AGGRESSIVE") - set (OPTIMIZATION_FLAGS "-ipo -O3 -no-prec-div -fp-model fast=2 -xHost") - # -fast = -ipo, -O3, -no-prec-div, -static, -fp-model fast=2, and -xHost" - endif () +if (OPTIMIZATION STREQUAL "OFF") + set (OPTIMIZATION_FLAGS "-O0 -no-ip") +elseif (OPTIMIZATION STREQUAL "DEFENSIVE") + set (OPTIMIZATION_FLAGS "-O2") +elseif (OPTIMIZATION STREQUAL "AGGRESSIVE") + set (OPTIMIZATION_FLAGS "-ipo -O3 -no-prec-div -fp-model fast=2 -xHost") + # -fast = -ipo, -O3, -no-prec-div, -static, -fp-model fast=2, and -xHost" +endif () - # -assume std_mod_proc_name (included in -standard-semantics) causes problems if other modules - # (PETSc, HDF5) are not compiled with this option (https://software.intel.com/en-us/forums/intel-fortran-compiler-for-linux-and-mac-os-x/topic/62172) - set (STANDARD_CHECK "-stand f15 -standard-semantics -assume nostd_mod_proc_name") - set (LINKER_FLAGS "${LINKER_FLAGS} -shared-intel") - # Link against shared Intel libraries instead of static ones +# -assume std_mod_proc_name (included in -standard-semantics) causes problems if other modules +# (PETSc, HDF5) are not compiled with this option (https://software.intel.com/en-us/forums/intel-fortran-compiler-for-linux-and-mac-os-x/topic/62172) +set (STANDARD_CHECK "-stand f15 -standard-semantics -assume nostd_mod_proc_name") +set (LINKER_FLAGS "${LINKER_FLAGS} -shared-intel") +# Link against shared Intel libraries instead of static ones #------------------------------------------------------------------------------------------------ # Fine tuning compilation options - set (COMPILE_FLAGS "${COMPILE_FLAGS} -fpp") - # preprocessor +set (COMPILE_FLAGS "${COMPILE_FLAGS} -fpp") +# preprocessor - set (COMPILE_FLAGS "${COMPILE_FLAGS} -ftz") - # flush underflow to zero, automatically set if -O[1,2,3] +set (COMPILE_FLAGS "${COMPILE_FLAGS} -ftz") +# flush underflow to zero, automatically set if -O[1,2,3] - set (COMPILE_FLAGS "${COMPILE_FLAGS} -diag-disable") - # disables warnings ... - set (COMPILE_FLAGS "${COMPILE_FLAGS} 5268") - # ... the text exceeds right hand column allowed on the line (we have only comments there) - set (COMPILE_FLAGS "${COMPILE_FLAGS},7624") - # ... about deprecated forall (has nice syntax and most likely a performance advantage) +set (COMPILE_FLAGS "${COMPILE_FLAGS} -diag-disable") +# disables warnings ... +set (COMPILE_FLAGS "${COMPILE_FLAGS} 5268") +# ... the text exceeds right hand column allowed on the line (we have only comments there) +set (COMPILE_FLAGS "${COMPILE_FLAGS},7624") +# ... 
about deprecated forall (has nice syntax and most likely a performance advantage) - set (COMPILE_FLAGS "${COMPILE_FLAGS} -warn") - # enables warnings ... - set (COMPILE_FLAGS "${COMPILE_FLAGS} declarations") - # ... any undeclared names (alternative name: -implicitnone) - set (COMPILE_FLAGS "${COMPILE_FLAGS},general") - # ... warning messages and informational messages are issued by the compiler - set (COMPILE_FLAGS "${COMPILE_FLAGS},usage") - # ... questionable programming practices - set (COMPILE_FLAGS "${COMPILE_FLAGS},interfaces") - # ... checks the interfaces of all SUBROUTINEs called and FUNCTIONs invoked in your compilation against an external set of interface blocks - set (COMPILE_FLAGS "${COMPILE_FLAGS},ignore_loc") - # ... %LOC is stripped from an actual argument - set (COMPILE_FLAGS "${COMPILE_FLAGS},alignments") - # ... data that is not naturally aligned - set (COMPILE_FLAGS "${COMPILE_FLAGS},unused") - # ... declared variables that are never used +set (COMPILE_FLAGS "${COMPILE_FLAGS} -warn") +# enables warnings ... +set (COMPILE_FLAGS "${COMPILE_FLAGS} declarations") +# ... any undeclared names (alternative name: -implicitnone) +set (COMPILE_FLAGS "${COMPILE_FLAGS},general") +# ... warning messages and informational messages are issued by the compiler +set (COMPILE_FLAGS "${COMPILE_FLAGS},usage") +# ... questionable programming practices +set (COMPILE_FLAGS "${COMPILE_FLAGS},interfaces") +# ... checks the interfaces of all SUBROUTINEs called and FUNCTIONs invoked in your compilation against an external set of interface blocks +set (COMPILE_FLAGS "${COMPILE_FLAGS},ignore_loc") +# ... %LOC is stripped from an actual argument +set (COMPILE_FLAGS "${COMPILE_FLAGS},alignments") +# ... data that is not naturally aligned +set (COMPILE_FLAGS "${COMPILE_FLAGS},unused") +# ... declared variables that are never used - # Additional options - # -warn: enables warnings, where - # truncated_source: Determines whether warnings occur when source exceeds the maximum column width in fixed-format files. - # (too many warnings because we have comments beyond character 132) - # uncalled: Determines whether warnings occur when a statement function is never called - # all: - # -name as_is: case sensitive Fortran! +# Additional options +# -warn: enables warnings, where +# truncated_source: Determines whether warnings occur when source exceeds the maximum column width in fixed-format files. +# (too many warnings because we have comments beyond character 132) +# uncalled: Determines whether warnings occur when a statement function is never called +# all: +# -name as_is: case sensitive Fortran! #------------------------------------------------------------------------------------------------ # Runtime debugging - set (DEBUG_FLAGS "${DEBUG_FLAGS} -g") - # Generate symbolic debugging information in the object file +set (DEBUG_FLAGS "${DEBUG_FLAGS} -g") +# Generate symbolic debugging information in the object file - set (DEBUG_FLAGS "${DEBUG_FLAGS} -traceback") - # Generate extra information in the object file to provide source file traceback information when a severe error occurs at run time +set (DEBUG_FLAGS "${DEBUG_FLAGS} -traceback") +# Generate extra information in the object file to provide source file traceback information when a severe error occurs at run time - set (DEBUG_FLAGS "${DEBUG_FLAGS} -gen-interfaces") - # Generate an interface block for each routine. 
http://software.intel.com/en-us/blogs/2012/01/05/doctor-fortran-gets-explicit-again/ +set (DEBUG_FLAGS "${DEBUG_FLAGS} -gen-interfaces") +# Generate an interface block for each routine. http://software.intel.com/en-us/blogs/2012/01/05/doctor-fortran-gets-explicit-again/ - set (DEBUG_FLAGS "${DEBUG_FLAGS} -fp-stack-check") - # Generate extra code after every function call to ensure that the floating-point (FP) stack is in the expected state +set (DEBUG_FLAGS "${DEBUG_FLAGS} -fp-stack-check") +# Generate extra code after every function call to ensure that the floating-point (FP) stack is in the expected state - set (DEBUG_FLAGS "${DEBUG_FLAGS} -fp-model strict") - # Trap uninitalized variables +set (DEBUG_FLAGS "${DEBUG_FLAGS} -fp-model strict") +# Trap uninitalized variables - set (DEBUG_FLAGS "${DEBUG_FLAGS} -check" ) - # Checks at runtime ... - set (DEBUG_FLAGS "${DEBUG_FLAGS} bounds") - # ... if an array index is too small (<1) or too large! - set (DEBUG_FLAGS "${DEBUG_FLAGS},format") - # ... for the data type of an item being formatted for output. - set (DEBUG_FLAGS "${DEBUG_FLAGS},output_conversion") - # ... for the fit of data items within a designated format descriptor field. - set (DEBUG_FLAGS "${DEBUG_FLAGS},pointers") - # ... for certain disassociated or uninitialized pointers or unallocated allocatable objects. - set (DEBUG_FLAGS "${DEBUG_FLAGS},uninit") - # ... for uninitialized variables. - set (DEBUG_FLAGS "${DEBUG_FLAGS} -ftrapuv") - # ... initializes stack local variables to an unusual value to aid error detection - set (DEBUG_FLAGS "${DEBUG_FLAGS} -fpe-all=0") - # ... capture all floating-point exceptions, sets -ftz automatically +set (DEBUG_FLAGS "${DEBUG_FLAGS} -check" ) +# Checks at runtime ... +set (DEBUG_FLAGS "${DEBUG_FLAGS} bounds") +# ... if an array index is too small (<1) or too large! +set (DEBUG_FLAGS "${DEBUG_FLAGS},format") +# ... for the data type of an item being formatted for output. +set (DEBUG_FLAGS "${DEBUG_FLAGS},output_conversion") +# ... for the fit of data items within a designated format descriptor field. +set (DEBUG_FLAGS "${DEBUG_FLAGS},pointers") +# ... for certain disassociated or uninitialized pointers or unallocated allocatable objects. +set (DEBUG_FLAGS "${DEBUG_FLAGS},uninit") +# ... for uninitialized variables. +set (DEBUG_FLAGS "${DEBUG_FLAGS} -ftrapuv") +# ... initializes stack local variables to an unusual value to aid error detection +set (DEBUG_FLAGS "${DEBUG_FLAGS} -fpe-all=0") +# ... capture all floating-point exceptions, sets -ftz automatically - set (DEBUG_FLAGS "${DEBUG_FLAGS} -warn") - # enables warnings ... - set (DEBUG_FLAGS "${DEBUG_FLAGS} errors") - # ... warnings are changed to errors - set (DEBUG_FLAGS "${DEBUG_FLAGS},stderrors") - # ... warnings about Fortran standard violations are changed to errors +set (DEBUG_FLAGS "${DEBUG_FLAGS} -warn") +# enables warnings ... +set (DEBUG_FLAGS "${DEBUG_FLAGS} errors") +# ... warnings are changed to errors +set (DEBUG_FLAGS "${DEBUG_FLAGS},stderrors") +# ... warnings about Fortran standard violations are changed to errors - set (DEBUG_FLAGS "${DEBUG_FLAGS} -debug-parameters all") - # generate debug information for parameters +set (DEBUG_FLAGS "${DEBUG_FLAGS} -debug-parameters all") +# generate debug information for parameters - # Additional options - # -heap-arrays: Should not be done for OpenMP, but set "ulimit -s unlimited" on shell. 
Probably it helps also to unlimit other limits - # -check: Checks at runtime, where - # arg_temp_created: will cause a lot of warnings because we create a bunch of temporary arrays (performance?) - # stack: +# Additional options +# -heap-arrays: Should not be done for OpenMP, but set "ulimit -s unlimited" on shell. Probably it helps also to unlimit other limits +# -check: Checks at runtime, where +# arg_temp_created: will cause a lot of warnings because we create a bunch of temporary arrays (performance?) +# stack: #------------------------------------------------------------------------------------------------ # precision settings - set (PRECISION_FLAGS "${PRECISION_FLAGS} -real-size 64") - # set precision for standard real to 32 | 64 | 128 (= 4 | 8 | 16 bytes, type pReal is always 8 bytes) +set (PRECISION_FLAGS "${PRECISION_FLAGS} -real-size 64") +# set precision for standard real to 32 | 64 | 128 (= 4 | 8 | 16 bytes, type pReal is always 8 bytes) diff --git a/cmake/Compiler-PGI.cmake b/cmake/Compiler-PGI.cmake index bca76f648..39d9b092f 100644 --- a/cmake/Compiler-PGI.cmake +++ b/cmake/Compiler-PGI.cmake @@ -1,25 +1,24 @@ ################################################################################################### # PGI Compiler ################################################################################################### -elseif(CMAKE_Fortran_COMPILER_ID STREQUAL "PGI") - if (OPTIMIZATION STREQUAL "OFF") - set (OPTIMIZATION_FLAGS "-O0" ) - elseif (OPTIMIZATION STREQUAL "DEFENSIVE") - set (OPTIMIZATION_FLAGS "-O2") - elseif (OPTIMIZATION STREQUAL "AGGRESSIVE") - set (OPTIMIZATION_FLAGS "-O3") - endif () +if (OPTIMIZATION STREQUAL "OFF") + set (OPTIMIZATION_FLAGS "-O0" ) +elseif (OPTIMIZATION STREQUAL "DEFENSIVE") + set (OPTIMIZATION_FLAGS "-O2") +elseif (OPTIMIZATION STREQUAL "AGGRESSIVE") + set (OPTIMIZATION_FLAGS "-O3") +endif () #------------------------------------------------------------------------------------------------ # Fine tuning compilation options - set (COMPILE_FLAGS "${COMPILE_FLAGS} -Mpreprocess") - # preprocessor +set (COMPILE_FLAGS "${COMPILE_FLAGS} -Mpreprocess") +# preprocessor - set (STANDARD_CHECK "-Mallocatable=03") +set (STANDARD_CHECK "-Mallocatable=03") #------------------------------------------------------------------------------------------------ # Runtime debugging - set (DEBUG_FLAGS "${DEBUG_FLAGS} -g") - # Includes debugging information in the object module; sets the optimization level to zero unless a -⁠O option is present on the command line +set (DEBUG_FLAGS "${DEBUG_FLAGS} -g") +# Includes debugging information in the object module; sets the optimization level to zero unless a -⁠O option is present on the command line diff --git a/env/DAMASK.csh b/env/DAMASK.csh index d223d885a..b1b9dfb98 100644 --- a/env/DAMASK.csh +++ b/env/DAMASK.csh @@ -7,12 +7,6 @@ set DAMASK_ROOT=`python -c "import os,sys; print(os.path.realpath(os.path.expand source $DAMASK_ROOT/CONFIG -# add BRANCH if DAMASK_ROOT is a git repository -cd $DAMASK_ROOT >/dev/null -set BRANCH = `git branch 2>/dev/null| grep -E '^\* ')` -cd - >/dev/null - -# if DAMASK_BIN is present set path = ($DAMASK_ROOT/bin $path) set SOLVER=`which DAMASK_spectral` @@ -21,20 +15,12 @@ if ( "x$DAMASK_NUM_THREADS" == "x" ) then set DAMASK_NUM_THREADS=1 endif -# currently, there is no information that unlimited causes problems +# currently, there is no information that unlimited stack size causes problems # still, http://software.intel.com/en-us/forums/topic/501500 suggest to fix it # more info 
https://jblevins.org/log/segfault # https://stackoverflow.com/questions/79923/what-and-where-are-the-stack-and-heap -# http://superuser.com/questions/220059/what-parameters-has-ulimit -limit datasize unlimited # maximum heap size (kB) +# http://superuser.com/questions/220059/what-parameters-has-ulimit limit stacksize unlimited # maximum stack size (kB) -endif -if ( `limit | grep memoryuse` != "" ) then - limit memoryuse unlimited # maximum physical memory size -endif -if ( `limit | grep vmemoryuse` != "" ) then - limit vmemoryuse unlimited # maximum virtual memory size -endif # disable output in case of scp if ( $?prompt ) then @@ -44,8 +30,8 @@ if ( $?prompt ) then echo https://damask.mpie.de echo echo Using environment with ... - echo "DAMASK $DAMASK_ROOT $BRANCH" - echo "Spectral Solver $SOLVER" + echo "DAMASK $DAMASK_ROOT" + echo "Grid Solver $SOLVER" echo "Post Processing $PROCESSING" if ( $?PETSC_DIR) then echo "PETSc location $PETSC_DIR" diff --git a/env/DAMASK.sh b/env/DAMASK.sh index 1b4bea86a..50760b76d 100644 --- a/env/DAMASK.sh +++ b/env/DAMASK.sh @@ -43,15 +43,12 @@ PROCESSING=$(type -p postResults || true 2>/dev/null) [ "x$DAMASK_NUM_THREADS" == "x" ] && DAMASK_NUM_THREADS=1 -# currently, there is no information that unlimited causes problems +# currently, there is no information that unlimited stack size causes problems # still, http://software.intel.com/en-us/forums/topic/501500 suggest to fix it # more info https://jblevins.org/log/segfault # https://stackoverflow.com/questions/79923/what-and-where-are-the-stack-and-heap -# http://superuser.com/questions/220059/what-parameters-has-ulimit -ulimit -d unlimited 2>/dev/null # maximum heap size (kB) +# http://superuser.com/questions/220059/what-parameters-has-ulimit ulimit -s unlimited 2>/dev/null # maximum stack size (kB) -ulimit -v unlimited 2>/dev/null # maximum virtual memory size -ulimit -m unlimited 2>/dev/null # maximum physical memory size # disable output in case of scp if [ ! -z "$PS1" ]; then @@ -62,7 +59,7 @@ if [ ! -z "$PS1" ]; then echo echo Using environment with ... 
echo "DAMASK $DAMASK_ROOT $BRANCH" - echo "Spectral Solver $SOLVER" + echo "Grid Solver $SOLVER" echo "Post Processing $PROCESSING" if [ "x$PETSC_DIR" != "x" ]; then echo -n "PETSc location " @@ -96,7 +93,7 @@ fi export DAMASK_NUM_THREADS export PYTHONPATH=$DAMASK_ROOT/python:$PYTHONPATH -for var in BASE STAT SOLVER PROCESSING FREE DAMASK_BIN BRANCH; do +for var in BASE STAT SOLVER PROCESSING BRANCH; do unset "${var}" done for var in DAMASK MSC; do diff --git a/env/DAMASK.zsh b/env/DAMASK.zsh index 5449007f9..066d56dd6 100644 --- a/env/DAMASK.zsh +++ b/env/DAMASK.zsh @@ -24,7 +24,6 @@ unset -f set # add BRANCH if DAMASK_ROOT is a git repository cd $DAMASK_ROOT >/dev/null; BRANCH=$(git branch 2>/dev/null| grep -E '^\* '); cd - >/dev/null -# add DAMASK_BIN if present PATH=${DAMASK_ROOT}/bin:$PATH SOLVER=$(which DAMASK_spectral || true 2>/dev/null) @@ -35,15 +34,12 @@ PROCESSING=$(which postResults || true 2>/dev/null) [[ "x$DAMASK_NUM_THREADS" == "x" ]] && DAMASK_NUM_THREADS=1 -# currently, there is no information that unlimited causes problems +# currently, there is no information that unlimited stack size causes problems # still, http://software.intel.com/en-us/forums/topic/501500 suggest to fix it # more info https://jblevins.org/log/segfault # https://stackoverflow.com/questions/79923/what-and-where-are-the-stack-and-heap -# http://superuser.com/questions/220059/what-parameters-has-ulimit -ulimit -d unlimited 2>/dev/null # maximum heap size (kB) +# http://superuser.com/questions/220059/what-parameters-has-ulimit ulimit -s unlimited 2>/dev/null # maximum stack size (kB) -ulimit -v unlimited 2>/dev/null # maximum virtual memory size -ulimit -m unlimited 2>/dev/null # maximum physical memory size # disable output in case of scp if [ ! -z "$PS1" ]; then @@ -54,7 +50,7 @@ if [ ! -z "$PS1" ]; then echo echo "Using environment with ..." 
echo "DAMASK $DAMASK_ROOT $BRANCH" - echo "Spectral Solver $SOLVER" + echo "Grid Solver $SOLVER" echo "Post Processing $PROCESSING" if [ "x$PETSC_DIR" != "x" ]; then echo -n "PETSc location " @@ -90,7 +86,7 @@ fi export DAMASK_NUM_THREADS export PYTHONPATH=$DAMASK_ROOT/python:$PYTHONPATH -for var in BASE STAT SOLVER PROCESSING FREE DAMASK_BIN BRANCH; do +for var in SOLVER PROCESSING BRANCH; do unset "${var}" done for var in DAMASK MSC; do diff --git a/examples/ConfigFiles/Homogenization_Thermal_Conduction.config b/examples/ConfigFiles/Homogenization_Thermal_Conduction.config index 48ad9ddc6..36fc7ea6e 100644 --- a/examples/ConfigFiles/Homogenization_Thermal_Conduction.config +++ b/examples/ConfigFiles/Homogenization_Thermal_Conduction.config @@ -1,3 +1,3 @@ thermal conduction -initialT 300.0 +t0 270.0 (output) temperature diff --git a/examples/ConfigFiles/Texture_Gauss_001.config b/examples/ConfigFiles/Texture_Gauss_001.config index 4711c4135..4fb519f08 100644 --- a/examples/ConfigFiles/Texture_Gauss_001.config +++ b/examples/ConfigFiles/Texture_Gauss_001.config @@ -1,2 +1,2 @@ [001] -(gauss) phi1 0.000 Phi 0.000 phi2 0.000 scatter 0.000 fraction 1.000 +(gauss) phi1 0.000 Phi 0.000 phi2 0.000 diff --git a/examples/ConfigFiles/Texture_Gauss_101.config b/examples/ConfigFiles/Texture_Gauss_101.config index 79457aeac..c6c1b5dbe 100644 --- a/examples/ConfigFiles/Texture_Gauss_101.config +++ b/examples/ConfigFiles/Texture_Gauss_101.config @@ -1,2 +1,2 @@ [101] -(gauss) phi1 0.000 Phi 45.000 phi2 90.000 scatter 0.000 fraction 1.000 +(gauss) phi1 0.000 Phi 45.000 phi2 90.000 diff --git a/examples/ConfigFiles/Texture_Gauss_111.config b/examples/ConfigFiles/Texture_Gauss_111.config index 8204bfb69..0d685a66e 100644 --- a/examples/ConfigFiles/Texture_Gauss_111.config +++ b/examples/ConfigFiles/Texture_Gauss_111.config @@ -1,2 +1,2 @@ [111] -(gauss) phi1 0.000 Phi 54.7356 phi2 45.000 scatter 0.000 fraction 1.000 +(gauss) phi1 0.000 Phi 54.7356 phi2 45.000 diff --git a/examples/ConfigFiles/Texture_Gauss_123.config b/examples/ConfigFiles/Texture_Gauss_123.config index 32d28442f..da4fa30ab 100644 --- a/examples/ConfigFiles/Texture_Gauss_123.config +++ b/examples/ConfigFiles/Texture_Gauss_123.config @@ -1,2 +1,2 @@ [123] -(gauss) phi1 209.805 Phi 29.206 phi2 63.435 scatter 0.000 fraction 1.000 +(gauss) phi1 209.805 Phi 29.206 phi2 63.435 diff --git a/examples/FEM/polyXtal/material.config b/examples/FEM/polyXtal/material.config index 51cf8c163..71e5350dc 100644 --- a/examples/FEM/polyXtal/material.config +++ b/examples/FEM/polyXtal/material.config @@ -4,14 +4,6 @@ [SX] mech none -#-------------------# - -#-------------------# - -[aLittleSomething] -(output) f -(output) p - #-------------------# #-------------------# @@ -50,408 +42,212 @@ interaction_twinslip 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 interaction_twintwin 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 atol_resistance 1 +(output) f +(output) p + #-------------------# #-------------------# [Grain001] -crystallite 1 (constituent) phase 1 texture 1 fraction 1.0 - [Grain002] -crystallite 1 (constituent) phase 1 texture 2 fraction 1.0 - [Grain003] -crystallite 1 (constituent) phase 1 texture 3 fraction 1.0 - [Grain004] -crystallite 1 (constituent) phase 1 texture 4 fraction 1.0 - [Grain005] -crystallite 1 (constituent) phase 1 texture 5 fraction 1.0 - [Grain006] -crystallite 1 (constituent) phase 1 texture 6 fraction 1.0 - [Grain007] -crystallite 1 (constituent) phase 1 texture 7 fraction 1.0 - [Grain008] -crystallite 1 (constituent) phase 1 texture 8 fraction 1.0 
- [Grain009] -crystallite 1 (constituent) phase 1 texture 9 fraction 1.0 - [Grain010] -crystallite 1 (constituent) phase 1 texture 10 fraction 1.0 - [Grain011] -crystallite 1 (constituent) phase 1 texture 11 fraction 1.0 - [Grain012] -crystallite 1 (constituent) phase 1 texture 12 fraction 1.0 - [Grain013] -crystallite 1 (constituent) phase 1 texture 13 fraction 1.0 - [Grain014] -crystallite 1 (constituent) phase 1 texture 14 fraction 1.0 - [Grain015] -crystallite 1 (constituent) phase 1 texture 15 fraction 1.0 - [Grain016] -crystallite 1 (constituent) phase 1 texture 16 fraction 1.0 - [Grain017] -crystallite 1 (constituent) phase 1 texture 17 fraction 1.0 - [Grain018] -crystallite 1 (constituent) phase 1 texture 18 fraction 1.0 - [Grain019] -crystallite 1 (constituent) phase 1 texture 19 fraction 1.0 - [Grain020] -crystallite 1 (constituent) phase 1 texture 20 fraction 1.0 - [Grain021] -crystallite 1 (constituent) phase 1 texture 21 fraction 1.0 - [Grain022] -crystallite 1 (constituent) phase 1 texture 22 fraction 1.0 - [Grain023] -crystallite 1 (constituent) phase 1 texture 23 fraction 1.0 - [Grain024] -crystallite 1 (constituent) phase 1 texture 24 fraction 1.0 - [Grain025] -crystallite 1 (constituent) phase 1 texture 25 fraction 1.0 - [Grain026] -crystallite 1 (constituent) phase 1 texture 26 fraction 1.0 - [Grain027] -crystallite 1 (constituent) phase 1 texture 27 fraction 1.0 - [Grain028] -crystallite 1 (constituent) phase 1 texture 28 fraction 1.0 - [Grain029] -crystallite 1 (constituent) phase 1 texture 29 fraction 1.0 - [Grain030] -crystallite 1 (constituent) phase 1 texture 30 fraction 1.0 - [Grain031] -crystallite 1 (constituent) phase 1 texture 31 fraction 1.0 - [Grain032] -crystallite 1 (constituent) phase 1 texture 32 fraction 1.0 - [Grain033] -crystallite 1 (constituent) phase 1 texture 33 fraction 1.0 - [Grain034] -crystallite 1 (constituent) phase 1 texture 34 fraction 1.0 - [Grain035] -crystallite 1 (constituent) phase 1 texture 35 fraction 1.0 - [Grain036] -crystallite 1 (constituent) phase 1 texture 36 fraction 1.0 - [Grain037] -crystallite 1 (constituent) phase 1 texture 37 fraction 1.0 - [Grain038] -crystallite 1 (constituent) phase 1 texture 38 fraction 1.0 - [Grain039] -crystallite 1 (constituent) phase 1 texture 39 fraction 1.0 - [Grain040] -crystallite 1 (constituent) phase 1 texture 40 fraction 1.0 - [Grain041] -crystallite 1 (constituent) phase 1 texture 41 fraction 1.0 - [Grain042] -crystallite 1 (constituent) phase 1 texture 42 fraction 1.0 - [Grain043] -crystallite 1 (constituent) phase 1 texture 43 fraction 1.0 - [Grain044] -crystallite 1 (constituent) phase 1 texture 44 fraction 1.0 - [Grain045] -crystallite 1 (constituent) phase 1 texture 45 fraction 1.0 - [Grain046] -crystallite 1 (constituent) phase 1 texture 46 fraction 1.0 - [Grain047] -crystallite 1 (constituent) phase 1 texture 47 fraction 1.0 - [Grain048] -crystallite 1 (constituent) phase 1 texture 48 fraction 1.0 - [Grain049] -crystallite 1 (constituent) phase 1 texture 49 fraction 1.0 - [Grain050] -crystallite 1 (constituent) phase 1 texture 50 fraction 1.0 - [Grain051] -crystallite 1 (constituent) phase 1 texture 51 fraction 1.0 - [Grain052] -crystallite 1 (constituent) phase 1 texture 52 fraction 1.0 - [Grain053] -crystallite 1 (constituent) phase 1 texture 53 fraction 1.0 - [Grain054] -crystallite 1 (constituent) phase 1 texture 54 fraction 1.0 - [Grain055] -crystallite 1 (constituent) phase 1 texture 55 fraction 1.0 - [Grain056] -crystallite 1 (constituent) phase 1 texture 56 fraction 1.0 - 
[Grain057] -crystallite 1 (constituent) phase 1 texture 57 fraction 1.0 - [Grain058] -crystallite 1 (constituent) phase 1 texture 58 fraction 1.0 - [Grain059] -crystallite 1 (constituent) phase 1 texture 59 fraction 1.0 - [Grain060] -crystallite 1 (constituent) phase 1 texture 60 fraction 1.0 - [Grain061] -crystallite 1 (constituent) phase 1 texture 61 fraction 1.0 - [Grain062] -crystallite 1 (constituent) phase 1 texture 62 fraction 1.0 - [Grain063] -crystallite 1 (constituent) phase 1 texture 63 fraction 1.0 - [Grain064] -crystallite 1 (constituent) phase 1 texture 64 fraction 1.0 - [Grain065] -crystallite 1 (constituent) phase 1 texture 65 fraction 1.0 - [Grain066] -crystallite 1 (constituent) phase 1 texture 66 fraction 1.0 - [Grain067] -crystallite 1 (constituent) phase 1 texture 67 fraction 1.0 - [Grain068] -crystallite 1 (constituent) phase 1 texture 68 fraction 1.0 - [Grain069] -crystallite 1 (constituent) phase 1 texture 69 fraction 1.0 - [Grain070] -crystallite 1 (constituent) phase 1 texture 70 fraction 1.0 - [Grain071] -crystallite 1 (constituent) phase 1 texture 71 fraction 1.0 - [Grain072] -crystallite 1 (constituent) phase 1 texture 72 fraction 1.0 - [Grain073] -crystallite 1 (constituent) phase 1 texture 73 fraction 1.0 - [Grain074] -crystallite 1 (constituent) phase 1 texture 74 fraction 1.0 - [Grain075] -crystallite 1 (constituent) phase 1 texture 75 fraction 1.0 - [Grain076] -crystallite 1 (constituent) phase 1 texture 76 fraction 1.0 - [Grain077] -crystallite 1 (constituent) phase 1 texture 77 fraction 1.0 - [Grain078] -crystallite 1 (constituent) phase 1 texture 78 fraction 1.0 - [Grain079] -crystallite 1 (constituent) phase 1 texture 79 fraction 1.0 - [Grain080] -crystallite 1 (constituent) phase 1 texture 80 fraction 1.0 - [Grain081] -crystallite 1 (constituent) phase 1 texture 81 fraction 1.0 - [Grain082] -crystallite 1 (constituent) phase 1 texture 82 fraction 1.0 - [Grain083] -crystallite 1 (constituent) phase 1 texture 83 fraction 1.0 - [Grain084] -crystallite 1 (constituent) phase 1 texture 84 fraction 1.0 - [Grain085] -crystallite 1 (constituent) phase 1 texture 85 fraction 1.0 - [Grain086] -crystallite 1 (constituent) phase 1 texture 86 fraction 1.0 - [Grain087] -crystallite 1 (constituent) phase 1 texture 87 fraction 1.0 - [Grain088] -crystallite 1 (constituent) phase 1 texture 88 fraction 1.0 - [Grain089] -crystallite 1 (constituent) phase 1 texture 89 fraction 1.0 - [Grain090] -crystallite 1 (constituent) phase 1 texture 90 fraction 1.0 - [Grain091] -crystallite 1 (constituent) phase 1 texture 91 fraction 1.0 - [Grain092] -crystallite 1 (constituent) phase 1 texture 92 fraction 1.0 - [Grain093] -crystallite 1 (constituent) phase 1 texture 93 fraction 1.0 - [Grain094] -crystallite 1 (constituent) phase 1 texture 94 fraction 1.0 - [Grain095] -crystallite 1 (constituent) phase 1 texture 95 fraction 1.0 - [Grain096] -crystallite 1 (constituent) phase 1 texture 96 fraction 1.0 - [Grain097] -crystallite 1 (constituent) phase 1 texture 97 fraction 1.0 - [Grain098] -crystallite 1 (constituent) phase 1 texture 98 fraction 1.0 - [Grain099] -crystallite 1 (constituent) phase 1 texture 99 fraction 1.0 - [Grain100] -crystallite 1 (constituent) phase 1 texture 100 fraction 1.0 #-------------------# @@ -459,301 +255,202 @@ crystallite 1 #-------------------# [Grain001] -(gauss) phi1 172.344 Phi 114.046 phi2 294.669 scatter 0.0 fraction 1.0 - +(gauss) phi1 172.344 Phi 114.046 phi2 294.669 [Grain002] -(gauss) phi1 186.013 Phi 94.7338 phi2 329.683 scatter 0.0 fraction 1.0 - 
+(gauss) phi1 186.013 Phi 94.7338 phi2 329.683 [Grain003] -(gauss) phi1 162.41 Phi 98.9455 phi2 130.322 scatter 0.0 fraction 1.0 - +(gauss) phi1 162.41 Phi 98.9455 phi2 130.322 [Grain004] -(gauss) phi1 355.272 Phi 140.621 phi2 125.567 scatter 0.0 fraction 1.0 - +(gauss) phi1 355.272 Phi 140.621 phi2 125.567 [Grain005] -(gauss) phi1 21.7641 Phi 143.388 phi2 240.373 scatter 0.0 fraction 1.0 - +(gauss) phi1 21.7641 Phi 143.388 phi2 240.373 [Grain006] -(gauss) phi1 88.1966 Phi 92.3358 phi2 194.78 scatter 0.0 fraction 1.0 - +(gauss) phi1 88.1966 Phi 92.3358 phi2 194.78 [Grain007] -(gauss) phi1 161.137 Phi 78.0062 phi2 111.948 scatter 0.0 fraction 1.0 - +(gauss) phi1 161.137 Phi 78.0062 phi2 111.948 [Grain008] -(gauss) phi1 169.792 Phi 89.5333 phi2 159.265 scatter 0.0 fraction 1.0 - +(gauss) phi1 169.792 Phi 89.5333 phi2 159.265 [Grain009] -(gauss) phi1 264.847 Phi 130.291 phi2 180.604 scatter 0.0 fraction 1.0 - +(gauss) phi1 264.847 Phi 130.291 phi2 180.604 [Grain010] -(gauss) phi1 70.6323 Phi 84.1754 phi2 341.162 scatter 0.0 fraction 1.0 - +(gauss) phi1 70.6323 Phi 84.1754 phi2 341.162 [Grain011] -(gauss) phi1 67.7751 Phi 36.1662 phi2 139.898 scatter 0.0 fraction 1.0 - +(gauss) phi1 67.7751 Phi 36.1662 phi2 139.898 [Grain012] -(gauss) phi1 111.621 Phi 19.1089 phi2 228.338 scatter 0.0 fraction 1.0 - +(gauss) phi1 111.621 Phi 19.1089 phi2 228.338 [Grain013] -(gauss) phi1 129.9 Phi 139.011 phi2 238.735 scatter 0.0 fraction 1.0 - +(gauss) phi1 129.9 Phi 139.011 phi2 238.735 [Grain014] -(gauss) phi1 221.405 Phi 129.743 phi2 99.6471 scatter 0.0 fraction 1.0 - +(gauss) phi1 221.405 Phi 129.743 phi2 99.6471 [Grain015] -(gauss) phi1 241.783 Phi 98.3729 phi2 260.615 scatter 0.0 fraction 1.0 - +(gauss) phi1 241.783 Phi 98.3729 phi2 260.615 [Grain016] -(gauss) phi1 72.5592 Phi 122.403 phi2 165.046 scatter 0.0 fraction 1.0 - +(gauss) phi1 72.5592 Phi 122.403 phi2 165.046 [Grain017] -(gauss) phi1 64.8818 Phi 82.6384 phi2 236.305 scatter 0.0 fraction 1.0 - +(gauss) phi1 64.8818 Phi 82.6384 phi2 236.305 [Grain018] -(gauss) phi1 201.096 Phi 65.9312 phi2 330.745 scatter 0.0 fraction 1.0 - +(gauss) phi1 201.096 Phi 65.9312 phi2 330.745 [Grain019] -(gauss) phi1 192.994 Phi 81.9371 phi2 239.326 scatter 0.0 fraction 1.0 - +(gauss) phi1 192.994 Phi 81.9371 phi2 239.326 [Grain020] -(gauss) phi1 125.335 Phi 90.4527 phi2 207.982 scatter 0.0 fraction 1.0 - +(gauss) phi1 125.335 Phi 90.4527 phi2 207.982 [Grain021] -(gauss) phi1 55.8848 Phi 26.4455 phi2 100.921 scatter 0.0 fraction 1.0 - +(gauss) phi1 55.8848 Phi 26.4455 phi2 100.921 [Grain022] -(gauss) phi1 40.722 Phi 95.6415 phi2 269.174 scatter 0.0 fraction 1.0 - +(gauss) phi1 40.722 Phi 95.6415 phi2 269.174 [Grain023] -(gauss) phi1 250.487 Phi 69.6035 phi2 201.732 scatter 0.0 fraction 1.0 - +(gauss) phi1 250.487 Phi 69.6035 phi2 201.732 [Grain024] -(gauss) phi1 204.199 Phi 84.983 phi2 20.3469 scatter 0.0 fraction 1.0 - +(gauss) phi1 204.199 Phi 84.983 phi2 20.3469 [Grain025] -(gauss) phi1 12.7416 Phi 128.589 phi2 271.553 scatter 0.0 fraction 1.0 - +(gauss) phi1 12.7416 Phi 128.589 phi2 271.553 [Grain026] -(gauss) phi1 299.704 Phi 85.3961 phi2 217.359 scatter 0.0 fraction 1.0 - +(gauss) phi1 299.704 Phi 85.3961 phi2 217.359 [Grain027] -(gauss) phi1 48.8232 Phi 83.6209 phi2 200.361 scatter 0.0 fraction 1.0 - +(gauss) phi1 48.8232 Phi 83.6209 phi2 200.361 [Grain028] -(gauss) phi1 336.395 Phi 97.3059 phi2 187.071 scatter 0.0 fraction 1.0 - +(gauss) phi1 336.395 Phi 97.3059 phi2 187.071 [Grain029] -(gauss) phi1 274.354 Phi 78.2424 phi2 320.308 scatter 0.0 fraction 1.0 - 
+(gauss) phi1 274.354 Phi 78.2424 phi2 320.308 [Grain030] -(gauss) phi1 320.776 Phi 149.72 phi2 163.862 scatter 0.0 fraction 1.0 - +(gauss) phi1 320.776 Phi 149.72 phi2 163.862 [Grain031] -(gauss) phi1 179.549 Phi 106.548 phi2 345.498 scatter 0.0 fraction 1.0 - +(gauss) phi1 179.549 Phi 106.548 phi2 345.498 [Grain032] -(gauss) phi1 163.508 Phi 24.4238 phi2 127.809 scatter 0.0 fraction 1.0 - +(gauss) phi1 163.508 Phi 24.4238 phi2 127.809 [Grain033] -(gauss) phi1 193.405 Phi 157.012 phi2 321.342 scatter 0.0 fraction 1.0 - +(gauss) phi1 193.405 Phi 157.012 phi2 321.342 [Grain034] -(gauss) phi1 9.09886 Phi 95.9453 phi2 102.32 scatter 0.0 fraction 1.0 - +(gauss) phi1 9.09886 Phi 95.9453 phi2 102.32 [Grain035] -(gauss) phi1 353.876 Phi 150.824 phi2 174.886 scatter 0.0 fraction 1.0 - +(gauss) phi1 353.876 Phi 150.824 phi2 174.886 [Grain036] -(gauss) phi1 138.914 Phi 76.5811 phi2 167.927 scatter 0.0 fraction 1.0 - +(gauss) phi1 138.914 Phi 76.5811 phi2 167.927 [Grain037] -(gauss) phi1 262.655 Phi 76.2738 phi2 12.4459 scatter 0.0 fraction 1.0 - +(gauss) phi1 262.655 Phi 76.2738 phi2 12.4459 [Grain038] -(gauss) phi1 121.849 Phi 65.5254 phi2 192.601 scatter 0.0 fraction 1.0 - +(gauss) phi1 121.849 Phi 65.5254 phi2 192.601 [Grain039] -(gauss) phi1 275.824 Phi 81.6788 phi2 164.228 scatter 0.0 fraction 1.0 - +(gauss) phi1 275.824 Phi 81.6788 phi2 164.228 [Grain040] -(gauss) phi1 68.9202 Phi 160.5 phi2 210.862 scatter 0.0 fraction 1.0 - +(gauss) phi1 68.9202 Phi 160.5 phi2 210.862 [Grain041] -(gauss) phi1 51.0398 Phi 82.7291 phi2 74.016 scatter 0.0 fraction 1.0 - +(gauss) phi1 51.0398 Phi 82.7291 phi2 74.016 [Grain042] -(gauss) phi1 338.746 Phi 62.7854 phi2 129.362 scatter 0.0 fraction 1.0 - +(gauss) phi1 338.746 Phi 62.7854 phi2 129.362 [Grain043] -(gauss) phi1 204.51 Phi 151.256 phi2 178.89 scatter 0.0 fraction 1.0 - +(gauss) phi1 204.51 Phi 151.256 phi2 178.89 [Grain044] -(gauss) phi1 122.098 Phi 104.003 phi2 323.04 scatter 0.0 fraction 1.0 - +(gauss) phi1 122.098 Phi 104.003 phi2 323.04 [Grain045] -(gauss) phi1 106.693 Phi 108.61 phi2 336.935 scatter 0.0 fraction 1.0 - +(gauss) phi1 106.693 Phi 108.61 phi2 336.935 [Grain046] -(gauss) phi1 118.856 Phi 160.992 phi2 316.152 scatter 0.0 fraction 1.0 - +(gauss) phi1 118.856 Phi 160.992 phi2 316.152 [Grain047] -(gauss) phi1 177.962 Phi 114.868 phi2 13.6918 scatter 0.0 fraction 1.0 - +(gauss) phi1 177.962 Phi 114.868 phi2 13.6918 [Grain048] -(gauss) phi1 330.273 Phi 174.495 phi2 231.249 scatter 0.0 fraction 1.0 - +(gauss) phi1 330.273 Phi 174.495 phi2 231.249 [Grain049] -(gauss) phi1 7.31937 Phi 94.7313 phi2 17.8461 scatter 0.0 fraction 1.0 - +(gauss) phi1 7.31937 Phi 94.7313 phi2 17.8461 [Grain050] -(gauss) phi1 74.3385 Phi 49.9546 phi2 286.482 scatter 0.0 fraction 1.0 - +(gauss) phi1 74.3385 Phi 49.9546 phi2 286.482 [Grain051] -(gauss) phi1 326.388 Phi 76.9547 phi2 214.758 scatter 0.0 fraction 1.0 - +(gauss) phi1 326.388 Phi 76.9547 phi2 214.758 [Grain052] -(gauss) phi1 276.024 Phi 72.1242 phi2 275.884 scatter 0.0 fraction 1.0 - +(gauss) phi1 276.024 Phi 72.1242 phi2 275.884 [Grain053] -(gauss) phi1 137.681 Phi 116.99 phi2 6.87047 scatter 0.0 fraction 1.0 - +(gauss) phi1 137.681 Phi 116.99 phi2 6.87047 [Grain054] -(gauss) phi1 200.213 Phi 123.618 phi2 268.84 scatter 0.0 fraction 1.0 - +(gauss) phi1 200.213 Phi 123.618 phi2 268.84 [Grain055] -(gauss) phi1 7.13702 Phi 56.2015 phi2 119.65 scatter 0.0 fraction 1.0 - +(gauss) phi1 7.13702 Phi 56.2015 phi2 119.65 [Grain056] -(gauss) phi1 72.1783 Phi 81.0906 phi2 6.06213 scatter 0.0 fraction 1.0 - +(gauss) phi1 
72.1783 Phi 81.0906 phi2 6.06213 [Grain057] -(gauss) phi1 184.565 Phi 110.01 phi2 239.546 scatter 0.0 fraction 1.0 - +(gauss) phi1 184.565 Phi 110.01 phi2 239.546 [Grain058] -(gauss) phi1 210.124 Phi 128.631 phi2 8.61611 scatter 0.0 fraction 1.0 - +(gauss) phi1 210.124 Phi 128.631 phi2 8.61611 [Grain059] -(gauss) phi1 290.326 Phi 170.412 phi2 144.269 scatter 0.0 fraction 1.0 - +(gauss) phi1 290.326 Phi 170.412 phi2 144.269 [Grain060] -(gauss) phi1 204.748 Phi 76.7343 phi2 200.385 scatter 0.0 fraction 1.0 - +(gauss) phi1 204.748 Phi 76.7343 phi2 200.385 [Grain061] -(gauss) phi1 54.3015 Phi 65.9143 phi2 117.373 scatter 0.0 fraction 1.0 - +(gauss) phi1 54.3015 Phi 65.9143 phi2 117.373 [Grain062] -(gauss) phi1 261.263 Phi 52.255 phi2 95.9146 scatter 0.0 fraction 1.0 - +(gauss) phi1 261.263 Phi 52.255 phi2 95.9146 [Grain063] -(gauss) phi1 328.054 Phi 51.0778 phi2 24.2782 scatter 0.0 fraction 1.0 - +(gauss) phi1 328.054 Phi 51.0778 phi2 24.2782 [Grain064] -(gauss) phi1 163.03 Phi 154.894 phi2 64.126 scatter 0.0 fraction 1.0 - +(gauss) phi1 163.03 Phi 154.894 phi2 64.126 [Grain065] -(gauss) phi1 183.87 Phi 80.1848 phi2 18.7438 scatter 0.0 fraction 1.0 - +(gauss) phi1 183.87 Phi 80.1848 phi2 18.7438 [Grain066] -(gauss) phi1 219.91 Phi 113.727 phi2 126.67 scatter 0.0 fraction 1.0 - +(gauss) phi1 219.91 Phi 113.727 phi2 126.67 [Grain067] -(gauss) phi1 1.43844 Phi 87.6365 phi2 217.342 scatter 0.0 fraction 1.0 - +(gauss) phi1 1.43844 Phi 87.6365 phi2 217.342 [Grain068] -(gauss) phi1 16.6245 Phi 162.07 phi2 43.7899 scatter 0.0 fraction 1.0 - +(gauss) phi1 16.6245 Phi 162.07 phi2 43.7899 [Grain069] -(gauss) phi1 16.86 Phi 53.8682 phi2 256.917 scatter 0.0 fraction 1.0 - +(gauss) phi1 16.86 Phi 53.8682 phi2 256.917 [Grain070] -(gauss) phi1 1.01921 Phi 118.449 phi2 307.223 scatter 0.0 fraction 1.0 - +(gauss) phi1 1.01921 Phi 118.449 phi2 307.223 [Grain071] -(gauss) phi1 19.0397 Phi 83.8885 phi2 262.687 scatter 0.0 fraction 1.0 - +(gauss) phi1 19.0397 Phi 83.8885 phi2 262.687 [Grain072] -(gauss) phi1 99.799 Phi 77.2307 phi2 84.9727 scatter 0.0 fraction 1.0 - +(gauss) phi1 99.799 Phi 77.2307 phi2 84.9727 [Grain073] -(gauss) phi1 234.292 Phi 63.5029 phi2 250.315 scatter 0.0 fraction 1.0 - +(gauss) phi1 234.292 Phi 63.5029 phi2 250.315 [Grain074] -(gauss) phi1 315.529 Phi 106.015 phi2 103.711 scatter 0.0 fraction 1.0 - +(gauss) phi1 315.529 Phi 106.015 phi2 103.711 [Grain075] -(gauss) phi1 235.595 Phi 110.152 phi2 210.277 scatter 0.0 fraction 1.0 - +(gauss) phi1 235.595 Phi 110.152 phi2 210.277 [Grain076] -(gauss) phi1 341.907 Phi 17.1839 phi2 332.75 scatter 0.0 fraction 1.0 - +(gauss) phi1 341.907 Phi 17.1839 phi2 332.75 [Grain077] -(gauss) phi1 352.166 Phi 88.6049 phi2 114.964 scatter 0.0 fraction 1.0 - +(gauss) phi1 352.166 Phi 88.6049 phi2 114.964 [Grain078] -(gauss) phi1 342.33 Phi 117.777 phi2 180.346 scatter 0.0 fraction 1.0 - +(gauss) phi1 342.33 Phi 117.777 phi2 180.346 [Grain079] -(gauss) phi1 224.952 Phi 70.5702 phi2 148.486 scatter 0.0 fraction 1.0 - +(gauss) phi1 224.952 Phi 70.5702 phi2 148.486 [Grain080] -(gauss) phi1 7.71702 Phi 23.6124 phi2 131.591 scatter 0.0 fraction 1.0 - +(gauss) phi1 7.71702 Phi 23.6124 phi2 131.591 [Grain081] -(gauss) phi1 65.1024 Phi 138.774 phi2 247.344 scatter 0.0 fraction 1.0 - +(gauss) phi1 65.1024 Phi 138.774 phi2 247.344 [Grain082] -(gauss) phi1 37.6181 Phi 51.5209 phi2 8.4169 scatter 0.0 fraction 1.0 - +(gauss) phi1 37.6181 Phi 51.5209 phi2 8.4169 [Grain083] -(gauss) phi1 245.335 Phi 53.4543 phi2 52.5205 scatter 0.0 fraction 1.0 - +(gauss) phi1 245.335 Phi 
53.4543 phi2 52.5205 [Grain084] -(gauss) phi1 259.572 Phi 87.7026 phi2 272.065 scatter 0.0 fraction 1.0 - +(gauss) phi1 259.572 Phi 87.7026 phi2 272.065 [Grain085] -(gauss) phi1 269.39 Phi 103.379 phi2 132.506 scatter 0.0 fraction 1.0 - +(gauss) phi1 269.39 Phi 103.379 phi2 132.506 [Grain086] -(gauss) phi1 175.156 Phi 119.338 phi2 355.51 scatter 0.0 fraction 1.0 - +(gauss) phi1 175.156 Phi 119.338 phi2 355.51 [Grain087] -(gauss) phi1 248.11 Phi 39.4772 phi2 310.371 scatter 0.0 fraction 1.0 - +(gauss) phi1 248.11 Phi 39.4772 phi2 310.371 [Grain088] -(gauss) phi1 121.809 Phi 141.465 phi2 10.0736 scatter 0.0 fraction 1.0 - +(gauss) phi1 121.809 Phi 141.465 phi2 10.0736 [Grain089] -(gauss) phi1 2.4357 Phi 47.118 phi2 274.654 scatter 0.0 fraction 1.0 - +(gauss) phi1 2.4357 Phi 47.118 phi2 274.654 [Grain090] -(gauss) phi1 314.188 Phi 134.146 phi2 250.673 scatter 0.0 fraction 1.0 - +(gauss) phi1 314.188 Phi 134.146 phi2 250.673 [Grain091] -(gauss) phi1 114.815 Phi 121.132 phi2 275.124 scatter 0.0 fraction 1.0 - +(gauss) phi1 114.815 Phi 121.132 phi2 275.124 [Grain092] -(gauss) phi1 126.699 Phi 99.0325 phi2 320.537 scatter 0.0 fraction 1.0 - +(gauss) phi1 126.699 Phi 99.0325 phi2 320.537 [Grain093] -(gauss) phi1 184.138 Phi 20.1663 phi2 159.314 scatter 0.0 fraction 1.0 - +(gauss) phi1 184.138 Phi 20.1663 phi2 159.314 [Grain094] -(gauss) phi1 296.502 Phi 15.2389 phi2 39.382 scatter 0.0 fraction 1.0 - +(gauss) phi1 296.502 Phi 15.2389 phi2 39.382 [Grain095] -(gauss) phi1 167.8 Phi 151.764 phi2 192.568 scatter 0.0 fraction 1.0 - +(gauss) phi1 167.8 Phi 151.764 phi2 192.568 [Grain096] -(gauss) phi1 257.822 Phi 133.446 phi2 257.108 scatter 0.0 fraction 1.0 - +(gauss) phi1 257.822 Phi 133.446 phi2 257.108 [Grain097] -(gauss) phi1 71.6923 Phi 74.5726 phi2 342.575 scatter 0.0 fraction 1.0 - +(gauss) phi1 71.6923 Phi 74.5726 phi2 342.575 [Grain098] -(gauss) phi1 176.748 Phi 28.39 phi2 327.375 scatter 0.0 fraction 1.0 - +(gauss) phi1 176.748 Phi 28.39 phi2 327.375 [Grain099] -(gauss) phi1 121.822 Phi 141.836 phi2 22.6349 scatter 0.0 fraction 1.0 - +(gauss) phi1 121.822 Phi 141.836 phi2 22.6349 [Grain100] -(gauss) phi1 180.151 Phi 109.246 phi2 146.177 scatter 0.0 fraction 1.0 +(gauss) phi1 180.151 Phi 109.246 phi2 146.177 diff --git a/examples/MSC.Marc/material.config b/examples/MSC.Marc/material.config index 92a3ed38e..46ea44367 100644 --- a/examples/MSC.Marc/material.config +++ b/examples/MSC.Marc/material.config @@ -9,307 +9,206 @@ #-------------------# [Grain001] -crystallite 1 (constituent) phase 1 texture 1 fraction 1.0 [Grain002] -crystallite 1 (constituent) phase 1 texture 2 fraction 1.0 [Grain003] -crystallite 1 (constituent) phase 1 texture 3 fraction 1.0 [Grain004] -crystallite 1 (constituent) phase 1 texture 4 fraction 1.0 [Grain005] -crystallite 1 (constituent) phase 1 texture 5 fraction 1.0 [Grain006] -crystallite 1 (constituent) phase 1 texture 6 fraction 1.0 [Grain007] -crystallite 1 (constituent) phase 1 texture 7 fraction 1.0 [Grain008] -crystallite 1 (constituent) phase 1 texture 8 fraction 1.0 [Grain009] -crystallite 1 (constituent) phase 1 texture 9 fraction 1.0 [Grain010] -crystallite 1 (constituent) phase 1 texture 10 fraction 1.0 [Grain011] -crystallite 1 (constituent) phase 1 texture 11 fraction 1.0 [Grain012] -crystallite 1 (constituent) phase 1 texture 12 fraction 1.0 [Grain013] -crystallite 1 (constituent) phase 1 texture 13 fraction 1.0 [Grain014] -crystallite 1 (constituent) phase 1 texture 14 fraction 1.0 [Grain015] -crystallite 1 (constituent) phase 1 texture 15 fraction 1.0 
[Grain016] -crystallite 1 (constituent) phase 1 texture 16 fraction 1.0 [Grain017] -crystallite 1 (constituent) phase 1 texture 17 fraction 1.0 [Grain018] -crystallite 1 (constituent) phase 1 texture 18 fraction 1.0 [Grain019] -crystallite 1 (constituent) phase 1 texture 19 fraction 1.0 [Grain020] -crystallite 1 (constituent) phase 1 texture 20 fraction 1.0 [Grain021] -crystallite 1 (constituent) phase 1 texture 21 fraction 1.0 [Grain022] -crystallite 1 (constituent) phase 1 texture 22 fraction 1.0 [Grain023] -crystallite 1 (constituent) phase 1 texture 23 fraction 1.0 [Grain024] -crystallite 1 (constituent) phase 1 texture 24 fraction 1.0 [Grain025] -crystallite 1 (constituent) phase 1 texture 25 fraction 1.0 [Grain026] -crystallite 1 (constituent) phase 1 texture 26 fraction 1.0 [Grain027] -crystallite 1 (constituent) phase 1 texture 27 fraction 1.0 [Grain028] -crystallite 1 (constituent) phase 1 texture 28 fraction 1.0 [Grain029] -crystallite 1 (constituent) phase 1 texture 29 fraction 1.0 [Grain030] -crystallite 1 (constituent) phase 1 texture 30 fraction 1.0 [Grain031] -crystallite 1 (constituent) phase 1 texture 31 fraction 1.0 [Grain032] -crystallite 1 (constituent) phase 1 texture 32 fraction 1.0 [Grain033] -crystallite 1 (constituent) phase 1 texture 33 fraction 1.0 [Grain034] -crystallite 1 (constituent) phase 1 texture 34 fraction 1.0 [Grain035] -crystallite 1 (constituent) phase 1 texture 35 fraction 1.0 [Grain036] -crystallite 1 (constituent) phase 1 texture 36 fraction 1.0 [Grain037] -crystallite 1 (constituent) phase 1 texture 37 fraction 1.0 [Grain038] -crystallite 1 (constituent) phase 1 texture 38 fraction 1.0 [Grain039] -crystallite 1 (constituent) phase 1 texture 39 fraction 1.0 [Grain040] -crystallite 1 (constituent) phase 1 texture 40 fraction 1.0 [Grain041] -crystallite 1 (constituent) phase 1 texture 41 fraction 1.0 [Grain042] -crystallite 1 (constituent) phase 1 texture 42 fraction 1.0 [Grain043] -crystallite 1 (constituent) phase 1 texture 43 fraction 1.0 [Grain044] -crystallite 1 (constituent) phase 1 texture 44 fraction 1.0 [Grain045] -crystallite 1 (constituent) phase 1 texture 45 fraction 1.0 [Grain046] -crystallite 1 (constituent) phase 1 texture 46 fraction 1.0 [Grain047] -crystallite 1 (constituent) phase 1 texture 47 fraction 1.0 [Grain048] -crystallite 1 (constituent) phase 1 texture 48 fraction 1.0 [Grain049] -crystallite 1 (constituent) phase 1 texture 49 fraction 1.0 [Grain050] -crystallite 1 (constituent) phase 1 texture 50 fraction 1.0 [Grain051] -crystallite 1 (constituent) phase 1 texture 51 fraction 1.0 [Grain052] -crystallite 1 (constituent) phase 1 texture 52 fraction 1.0 [Grain053] -crystallite 1 (constituent) phase 1 texture 53 fraction 1.0 [Grain054] -crystallite 1 (constituent) phase 1 texture 54 fraction 1.0 [Grain055] -crystallite 1 (constituent) phase 1 texture 55 fraction 1.0 [Grain056] -crystallite 1 (constituent) phase 1 texture 56 fraction 1.0 [Grain057] -crystallite 1 (constituent) phase 1 texture 57 fraction 1.0 [Grain058] -crystallite 1 (constituent) phase 1 texture 58 fraction 1.0 [Grain059] -crystallite 1 (constituent) phase 1 texture 59 fraction 1.0 [Grain060] -crystallite 1 (constituent) phase 1 texture 60 fraction 1.0 [Grain061] -crystallite 1 (constituent) phase 1 texture 61 fraction 1.0 [Grain062] -crystallite 1 (constituent) phase 1 texture 62 fraction 1.0 [Grain063] -crystallite 1 (constituent) phase 1 texture 63 fraction 1.0 [Grain064] -crystallite 1 (constituent) phase 1 texture 64 fraction 1.0 [Grain065] -crystallite 1 
(constituent) phase 1 texture 65 fraction 1.0 [Grain066] -crystallite 1 (constituent) phase 1 texture 66 fraction 1.0 [Grain067] -crystallite 1 (constituent) phase 1 texture 67 fraction 1.0 [Grain068] -crystallite 1 (constituent) phase 1 texture 68 fraction 1.0 [Grain069] -crystallite 1 (constituent) phase 1 texture 69 fraction 1.0 [Grain070] -crystallite 1 (constituent) phase 1 texture 70 fraction 1.0 [Grain071] -crystallite 1 (constituent) phase 1 texture 71 fraction 1.0 [Grain072] -crystallite 1 (constituent) phase 1 texture 72 fraction 1.0 [Grain073] -crystallite 1 (constituent) phase 1 texture 73 fraction 1.0 [Grain074] -crystallite 1 (constituent) phase 1 texture 74 fraction 1.0 [Grain075] -crystallite 1 (constituent) phase 1 texture 75 fraction 1.0 [Grain076] -crystallite 1 (constituent) phase 1 texture 76 fraction 1.0 [Grain077] -crystallite 1 (constituent) phase 1 texture 77 fraction 1.0 [Grain078] -crystallite 1 (constituent) phase 1 texture 78 fraction 1.0 [Grain079] -crystallite 1 (constituent) phase 1 texture 79 fraction 1.0 [Grain080] -crystallite 1 (constituent) phase 1 texture 80 fraction 1.0 [Grain081] -crystallite 1 (constituent) phase 1 texture 81 fraction 1.0 [Grain082] -crystallite 1 (constituent) phase 1 texture 82 fraction 1.0 [Grain083] -crystallite 1 (constituent) phase 1 texture 83 fraction 1.0 [Grain084] -crystallite 1 (constituent) phase 1 texture 84 fraction 1.0 [Grain085] -crystallite 1 (constituent) phase 1 texture 85 fraction 1.0 [Grain086] -crystallite 1 (constituent) phase 1 texture 86 fraction 1.0 [Grain087] -crystallite 1 (constituent) phase 1 texture 87 fraction 1.0 [Grain088] -crystallite 1 (constituent) phase 1 texture 88 fraction 1.0 [Grain089] -crystallite 1 (constituent) phase 1 texture 89 fraction 1.0 [Grain090] -crystallite 1 (constituent) phase 1 texture 90 fraction 1.0 [Grain091] -crystallite 1 (constituent) phase 1 texture 91 fraction 1.0 [Grain092] -crystallite 1 (constituent) phase 1 texture 92 fraction 1.0 [Grain093] -crystallite 1 (constituent) phase 1 texture 93 fraction 1.0 [Grain094] -crystallite 1 (constituent) phase 1 texture 94 fraction 1.0 [Grain095] -crystallite 1 (constituent) phase 1 texture 95 fraction 1.0 [Grain096] -crystallite 1 (constituent) phase 1 texture 96 fraction 1.0 [Grain097] -crystallite 1 (constituent) phase 1 texture 97 fraction 1.0 [Grain098] -crystallite 1 (constituent) phase 1 texture 98 fraction 1.0 [Grain099] -crystallite 1 (constituent) phase 1 texture 99 fraction 1.0 [Grain100] -crystallite 1 (constituent) phase 1 texture 100 fraction 1.0 [cubeGrain] -crystallite 1 (constituent) phase 1 texture 101 fraction 1.0 #-------------------# @@ -317,214 +216,209 @@ crystallite 1 #-------------------# [Grain001] -(gauss) phi1 359.121452 Phi 82.319471 Phi2 347.729535 scatter 0 fraction 1 +(gauss) phi1 359.121452 Phi 82.319471 Phi2 347.729535 [Grain002] -(gauss) phi1 269.253967 Phi 105.379919 Phi2 173.029284 scatter 0 fraction 1 +(gauss) phi1 269.253967 Phi 105.379919 Phi2 173.029284 [Grain003] -(gauss) phi1 26.551535 Phi 171.606752 Phi2 124.949264 scatter 0 fraction 1 +(gauss) phi1 26.551535 Phi 171.606752 Phi2 124.949264 [Grain004] -(gauss) phi1 123.207774 Phi 124.339577 Phi2 47.937748 scatter 0 fraction 1 +(gauss) phi1 123.207774 Phi 124.339577 Phi2 47.937748 [Grain005] -(gauss) phi1 324.188825 Phi 103.089216 Phi2 160.373624 scatter 0 fraction 1 +(gauss) phi1 324.188825 Phi 103.089216 Phi2 160.373624 [Grain006] -(gauss) phi1 238.295585 Phi 165.416882 Phi2 234.307741 scatter 0 fraction 1 +(gauss) phi1 238.295585 Phi 
165.416882 Phi2 234.307741 [Grain007] -(gauss) phi1 232.707177 Phi 110.733726 Phi2 308.049265 scatter 0 fraction 1 +(gauss) phi1 232.707177 Phi 110.733726 Phi2 308.049265 [Grain008] -(gauss) phi1 144.463291 Phi 125.891441 Phi2 348.674207 scatter 0 fraction 1 +(gauss) phi1 144.463291 Phi 125.891441 Phi2 348.674207 [Grain009] -(gauss) phi1 215.423832 Phi 69.759502 Phi2 164.477632 scatter 0 fraction 1 +(gauss) phi1 215.423832 Phi 69.759502 Phi2 164.477632 [Grain010] -(gauss) phi1 118.805444 Phi 143.057031 Phi2 271.963190 scatter 0 fraction 1 +(gauss) phi1 118.805444 Phi 143.057031 Phi2 271.963190 [Grain011] -(gauss) phi1 218.049576 Phi 64.017550 Phi2 323.040457 scatter 0 fraction 1 +(gauss) phi1 218.049576 Phi 64.017550 Phi2 323.040457 [Grain012] -(gauss) phi1 236.962483 Phi 134.312093 Phi2 220.433366 scatter 0 fraction 1 +(gauss) phi1 236.962483 Phi 134.312093 Phi2 220.433366 [Grain013] -(gauss) phi1 352.317686 Phi 3.356527 Phi2 92.447275 scatter 0 fraction 1 +(gauss) phi1 352.317686 Phi 3.356527 Phi2 92.447275 [Grain014] -(gauss) phi1 198.311545 Phi 71.452240 Phi2 199.441849 scatter 0 fraction 1 +(gauss) phi1 198.311545 Phi 71.452240 Phi2 199.441849 [Grain015] -(gauss) phi1 351.993635 Phi 36.500987 Phi2 236.852886 scatter 0 fraction 1 +(gauss) phi1 351.993635 Phi 36.500987 Phi2 236.852886 [Grain016] -(gauss) phi1 262.389063 Phi 101.249950 Phi2 334.305959 scatter 0 fraction 1 +(gauss) phi1 262.389063 Phi 101.249950 Phi2 334.305959 [Grain017] -(gauss) phi1 53.220668 Phi 69.570254 Phi2 277.061151 scatter 0 fraction 1 +(gauss) phi1 53.220668 Phi 69.570254 Phi2 277.061151 [Grain018] -(gauss) phi1 122.156119 Phi 140.207051 Phi2 221.172906 scatter 0 fraction 1 +(gauss) phi1 122.156119 Phi 140.207051 Phi2 221.172906 [Grain019] -(gauss) phi1 295.422170 Phi 26.595511 Phi2 263.206315 scatter 0 fraction 1 +(gauss) phi1 295.422170 Phi 26.595511 Phi2 263.206315 [Grain020] -(gauss) phi1 179.137406 Phi 104.500977 Phi2 151.742108 scatter 0 fraction 1 +(gauss) phi1 179.137406 Phi 104.500977 Phi2 151.742108 [Grain021] -(gauss) phi1 199.045094 Phi 5.228899 Phi2 356.542109 scatter 0 fraction 1 +(gauss) phi1 199.045094 Phi 5.228899 Phi2 356.542109 [Grain022] -(gauss) phi1 268.671476 Phi 24.835403 Phi2 33.578889 scatter 0 fraction 1 +(gauss) phi1 268.671476 Phi 24.835403 Phi2 33.578889 [Grain023] -(gauss) phi1 264.248527 Phi 59.766630 Phi2 340.865462 scatter 0 fraction 1 +(gauss) phi1 264.248527 Phi 59.766630 Phi2 340.865462 [Grain024] -(gauss) phi1 254.223491 Phi 51.125301 Phi2 201.094027 scatter 0 fraction 1 +(gauss) phi1 254.223491 Phi 51.125301 Phi2 201.094027 [Grain025] -(gauss) phi1 22.214008 Phi 92.248774 Phi2 215.168318 scatter 0 fraction 1 +(gauss) phi1 22.214008 Phi 92.248774 Phi2 215.168318 [Grain026] -(gauss) phi1 49.511491 Phi 79.933539 Phi2 187.188575 scatter 0 fraction 1 +(gauss) phi1 49.511491 Phi 79.933539 Phi2 187.188575 [Grain027] -(gauss) phi1 318.916204 Phi 113.102650 Phi2 241.076629 scatter 0 fraction 1 +(gauss) phi1 318.916204 Phi 113.102650 Phi2 241.076629 [Grain028] -(gauss) phi1 239.378433 Phi 89.578655 Phi2 94.167043 scatter 0 fraction 1 +(gauss) phi1 239.378433 Phi 89.578655 Phi2 94.167043 [Grain029] -(gauss) phi1 27.561421 Phi 142.892093 Phi2 197.735666 scatter 0 fraction 1 +(gauss) phi1 27.561421 Phi 142.892093 Phi2 197.735666 [Grain030] -(gauss) phi1 135.210581 Phi 165.859834 Phi2 285.449561 scatter 0 fraction 1 +(gauss) phi1 135.210581 Phi 165.859834 Phi2 285.449561 [Grain031] -(gauss) phi1 223.515916 Phi 56.824378 Phi2 343.289074 scatter 0 fraction 1 +(gauss) phi1 223.515916 Phi 
56.824378 Phi2 343.289074 [Grain032] -(gauss) phi1 41.127974 Phi 111.289145 Phi2 214.855145 scatter 0 fraction 1 +(gauss) phi1 41.127974 Phi 111.289145 Phi2 214.855145 [Grain033] -(gauss) phi1 17.335045 Phi 140.496745 Phi2 77.747371 scatter 0 fraction 1 +(gauss) phi1 17.335045 Phi 140.496745 Phi2 77.747371 [Grain034] -(gauss) phi1 36.206421 Phi 148.574232 Phi2 88.870226 scatter 0 fraction 1 +(gauss) phi1 36.206421 Phi 148.574232 Phi2 88.870226 [Grain035] -(gauss) phi1 159.618336 Phi 125.680504 Phi2 204.119403 scatter 0 fraction 1 +(gauss) phi1 159.618336 Phi 125.680504 Phi2 204.119403 [Grain036] -(gauss) phi1 8.752464 Phi 99.173166 Phi2 143.227089 scatter 0 fraction 1 +(gauss) phi1 8.752464 Phi 99.173166 Phi2 143.227089 [Grain037] -(gauss) phi1 351.570753 Phi 67.343218 Phi2 1.779612 scatter 0 fraction 1 +(gauss) phi1 351.570753 Phi 67.343218 Phi2 1.779612 [Grain038] -(gauss) phi1 46.771572 Phi 155.018674 Phi2 302.319987 scatter 0 fraction 1 +(gauss) phi1 46.771572 Phi 155.018674 Phi2 302.319987 [Grain039] -(gauss) phi1 244.255976 Phi 80.566566 Phi2 264.069331 scatter 0 fraction 1 +(gauss) phi1 244.255976 Phi 80.566566 Phi2 264.069331 [Grain040] -(gauss) phi1 41.775388 Phi 47.109507 Phi2 300.598550 scatter 0 fraction 1 +(gauss) phi1 41.775388 Phi 47.109507 Phi2 300.598550 [Grain041] -(gauss) phi1 268.753103 Phi 46.654050 Phi2 190.382041 scatter 0 fraction 1 +(gauss) phi1 268.753103 Phi 46.654050 Phi2 190.382041 [Grain042] -(gauss) phi1 239.574480 Phi 62.517793 Phi2 147.817535 scatter 0 fraction 1 +(gauss) phi1 239.574480 Phi 62.517793 Phi2 147.817535 [Grain043] -(gauss) phi1 128.059775 Phi 61.916743 Phi2 169.674359 scatter 0 fraction 1 +(gauss) phi1 128.059775 Phi 61.916743 Phi2 169.674359 [Grain044] -(gauss) phi1 166.545156 Phi 58.709099 Phi2 252.885391 scatter 0 fraction 1 +(gauss) phi1 166.545156 Phi 58.709099 Phi2 252.885391 [Grain045] -(gauss) phi1 92.867691 Phi 28.906456 Phi2 164.197290 scatter 0 fraction 1 +(gauss) phi1 92.867691 Phi 28.906456 Phi2 164.197290 [Grain046] -(gauss) phi1 291.056147 Phi 35.145174 Phi2 250.155599 scatter 0 fraction 1 +(gauss) phi1 291.056147 Phi 35.145174 Phi2 250.155599 [Grain047] -(gauss) phi1 79.015862 Phi 44.772479 Phi2 267.982808 scatter 0 fraction 1 +(gauss) phi1 79.015862 Phi 44.772479 Phi2 267.982808 [Grain048] -(gauss) phi1 108.400702 Phi 69.883075 Phi2 222.737053 scatter 0 fraction 1 +(gauss) phi1 108.400702 Phi 69.883075 Phi2 222.737053 [Grain049] -(gauss) phi1 348.326500 Phi 11.339714 Phi2 121.682346 scatter 0 fraction 1 +(gauss) phi1 348.326500 Phi 11.339714 Phi2 121.682346 [Grain050] -(gauss) phi1 331.476209 Phi 108.775043 Phi2 335.139671 scatter 0 fraction 1 +(gauss) phi1 331.476209 Phi 108.775043 Phi2 335.139671 [Grain051] -(gauss) phi1 196.750278 Phi 93.955106 Phi2 63.689075 scatter 0 fraction 1 +(gauss) phi1 196.750278 Phi 93.955106 Phi2 63.689075 [Grain052] -(gauss) phi1 136.077875 Phi 130.508342 Phi2 128.468976 scatter 0 fraction 1 +(gauss) phi1 136.077875 Phi 130.508342 Phi2 128.468976 [Grain053] -(gauss) phi1 239.643513 Phi 76.284643 Phi2 168.821008 scatter 0 fraction 1 +(gauss) phi1 239.643513 Phi 76.284643 Phi2 168.821008 [Grain054] -(gauss) phi1 113.850670 Phi 117.531757 Phi2 71.971648 scatter 0 fraction 1 +(gauss) phi1 113.850670 Phi 117.531757 Phi2 71.971648 [Grain055] -(gauss) phi1 149.554071 Phi 16.543098 Phi2 195.556172 scatter 0 fraction 1 +(gauss) phi1 149.554071 Phi 16.543098 Phi2 195.556172 [Grain056] -(gauss) phi1 46.626579 Phi 52.447846 Phi2 304.495569 scatter 0 fraction 1 +(gauss) phi1 46.626579 Phi 52.447846 Phi2 
304.495569 [Grain057] -(gauss) phi1 255.251821 Phi 86.678048 Phi2 238.982712 scatter 0 fraction 1 +(gauss) phi1 255.251821 Phi 86.678048 Phi2 238.982712 [Grain058] -(gauss) phi1 324.266133 Phi 28.075458 Phi2 41.191295 scatter 0 fraction 1 +(gauss) phi1 324.266133 Phi 28.075458 Phi2 41.191295 [Grain059] -(gauss) phi1 312.000332 Phi 74.648725 Phi2 87.403581 scatter 0 fraction 1 +(gauss) phi1 312.000332 Phi 74.648725 Phi2 87.403581 [Grain060] -(gauss) phi1 57.742481 Phi 163.241519 Phi2 68.491438 scatter 0 fraction 1 +(gauss) phi1 57.742481 Phi 163.241519 Phi2 68.491438 [Grain061] -(gauss) phi1 112.442447 Phi 51.735320 Phi2 206.538656 scatter 0 fraction 1 +(gauss) phi1 112.442447 Phi 51.735320 Phi2 206.538656 [Grain062] -(gauss) phi1 297.453842 Phi 115.283041 Phi2 57.785319 scatter 0 fraction 1 +(gauss) phi1 297.453842 Phi 115.283041 Phi2 57.785319 [Grain063] -(gauss) phi1 119.132681 Phi 117.923565 Phi2 196.121206 scatter 0 fraction 1 +(gauss) phi1 119.132681 Phi 117.923565 Phi2 196.121206 [Grain064] -(gauss) phi1 199.267314 Phi 163.091476 Phi2 53.549301 scatter 0 fraction 1 +(gauss) phi1 199.267314 Phi 163.091476 Phi2 53.549301 [Grain065] -(gauss) phi1 37.765215 Phi 76.795488 Phi2 146.264753 scatter 0 fraction 1 +(gauss) phi1 37.765215 Phi 76.795488 Phi2 146.264753 [Grain066] -(gauss) phi1 324.550183 Phi 27.665150 Phi2 56.383148 scatter 0 fraction 1 +(gauss) phi1 324.550183 Phi 27.665150 Phi2 56.383148 [Grain067] -(gauss) phi1 337.305377 Phi 136.807151 Phi2 133.661586 scatter 0 fraction 1 +(gauss) phi1 337.305377 Phi 136.807151 Phi2 133.661586 [Grain068] -(gauss) phi1 115.744041 Phi 64.536978 Phi2 262.694800 scatter 0 fraction 1 +(gauss) phi1 115.744041 Phi 64.536978 Phi2 262.694800 [Grain069] -(gauss) phi1 136.293403 Phi 48.862462 Phi2 343.319175 scatter 0 fraction 1 +(gauss) phi1 136.293403 Phi 48.862462 Phi2 343.319175 [Grain070] -(gauss) phi1 111.030931 Phi 80.823213 Phi2 84.041594 scatter 0 fraction 1 +(gauss) phi1 111.030931 Phi 80.823213 Phi2 84.041594 [Grain071] -(gauss) phi1 303.985249 Phi 118.929631 Phi2 302.307709 scatter 0 fraction 1 +(gauss) phi1 303.985249 Phi 118.929631 Phi2 302.307709 [Grain072] -(gauss) phi1 193.556259 Phi 75.928015 Phi2 176.696899 scatter 0 fraction 1 +(gauss) phi1 193.556259 Phi 75.928015 Phi2 176.696899 [Grain073] -(gauss) phi1 102.543259 Phi 121.929923 Phi2 234.496773 scatter 0 fraction 1 +(gauss) phi1 102.543259 Phi 121.929923 Phi2 234.496773 [Grain074] -(gauss) phi1 218.581323 Phi 101.753894 Phi2 305.566089 scatter 0 fraction 1 +(gauss) phi1 218.581323 Phi 101.753894 Phi2 305.566089 [Grain075] -(gauss) phi1 229.542114 Phi 118.839215 Phi2 129.179156 scatter 0 fraction 1 +(gauss) phi1 229.542114 Phi 118.839215 Phi2 129.179156 [Grain076] -(gauss) phi1 202.258840 Phi 139.205956 Phi2 352.248979 scatter 0 fraction 1 +(gauss) phi1 202.258840 Phi 139.205956 Phi2 352.248979 [Grain077] -(gauss) phi1 137.954289 Phi 63.806918 Phi2 128.975049 scatter 0 fraction 1 +(gauss) phi1 137.954289 Phi 63.806918 Phi2 128.975049 [Grain078] -(gauss) phi1 327.557366 Phi 84.987420 Phi2 345.483143 scatter 0 fraction 1 +(gauss) phi1 327.557366 Phi 84.987420 Phi2 345.483143 [Grain079] -(gauss) phi1 334.610243 Phi 74.535474 Phi2 106.419231 scatter 0 fraction 1 +(gauss) phi1 334.610243 Phi 74.535474 Phi2 106.419231 [Grain080] -(gauss) phi1 62.906243 Phi 46.752029 Phi2 222.692276 scatter 0 fraction 1 +(gauss) phi1 62.906243 Phi 46.752029 Phi2 222.692276 [Grain081] -(gauss) phi1 254.121439 Phi 121.005485 Phi2 287.265977 scatter 0 fraction 1 +(gauss) phi1 254.121439 Phi 121.005485 Phi2 
287.265977 [Grain082] -(gauss) phi1 140.765045 Phi 141.268031 Phi2 271.327656 scatter 0 fraction 1 +(gauss) phi1 140.765045 Phi 141.268031 Phi2 271.327656 [Grain083] -(gauss) phi1 10.726984 Phi 66.339177 Phi2 189.073212 scatter 0 fraction 1 +(gauss) phi1 10.726984 Phi 66.339177 Phi2 189.073212 [Grain084] -(gauss) phi1 270.921536 Phi 72.821127 Phi2 313.590515 scatter 0 fraction 1 +(gauss) phi1 270.921536 Phi 72.821127 Phi2 313.590515 [Grain085] -(gauss) phi1 299.059668 Phi 23.884874 Phi2 80.016277 scatter 0 fraction 1 +(gauss) phi1 299.059668 Phi 23.884874 Phi2 80.016277 [Grain086] -(gauss) phi1 208.617406 Phi 11.031834 Phi2 302.388247 scatter 0 fraction 1 +(gauss) phi1 208.617406 Phi 11.031834 Phi2 302.388247 [Grain087] -(gauss) phi1 62.929967 Phi 65.223261 Phi2 108.558265 scatter 0 fraction 1 +(gauss) phi1 62.929967 Phi 65.223261 Phi2 108.558265 [Grain088] -(gauss) phi1 9.014959 Phi 33.542169 Phi2 247.970366 scatter 0 fraction 1 +(gauss) phi1 9.014959 Phi 33.542169 Phi2 247.970366 [Grain089] -(gauss) phi1 272.432808 Phi 30.065174 Phi2 19.803570 scatter 0 fraction 1 +(gauss) phi1 272.432808 Phi 30.065174 Phi2 19.803570 [Grain090] -(gauss) phi1 179.621980 Phi 151.763475 Phi2 61.871794 scatter 0 fraction 1 +(gauss) phi1 179.621980 Phi 151.763475 Phi2 61.871794 [Grain091] -(gauss) phi1 247.810321 Phi 112.752980 Phi2 264.668469 scatter 0 fraction 1 +(gauss) phi1 247.810321 Phi 112.752980 Phi2 264.668469 [Grain092] -(gauss) phi1 270.780630 Phi 102.037858 Phi2 31.602610 scatter 0 fraction 1 +(gauss) phi1 270.780630 Phi 102.037858 Phi2 31.602610 [Grain093] -(gauss) phi1 17.626672 Phi 56.032415 Phi2 245.079600 scatter 0 fraction 1 +(gauss) phi1 17.626672 Phi 56.032415 Phi2 245.079600 [Grain094] -(gauss) phi1 112.165186 Phi 87.390459 Phi2 182.086729 scatter 0 fraction 1 +(gauss) phi1 112.165186 Phi 87.390459 Phi2 182.086729 [Grain095] -(gauss) phi1 157.869381 Phi 79.905131 Phi2 107.037081 scatter 0 fraction 1 +(gauss) phi1 157.869381 Phi 79.905131 Phi2 107.037081 [Grain096] -(gauss) phi1 106.163846 Phi 148.477084 Phi2 350.980466 scatter 0 fraction 1 +(gauss) phi1 106.163846 Phi 148.477084 Phi2 350.980466 [Grain097] -(gauss) phi1 262.138550 Phi 58.923588 Phi2 111.303439 scatter 0 fraction 1 +(gauss) phi1 262.138550 Phi 58.923588 Phi2 111.303439 [Grain098] -(gauss) phi1 88.739397 Phi 119.092789 Phi2 222.502594 scatter 0 fraction 1 +(gauss) phi1 88.739397 Phi 119.092789 Phi2 222.502594 [Grain099] -(gauss) phi1 337.603765 Phi 10.145102 Phi2 80.934916 scatter 0 fraction 1 +(gauss) phi1 337.603765 Phi 10.145102 Phi2 80.934916 [Grain100] -(gauss) phi1 341.022242 Phi 45.927285 Phi2 252.045476 scatter 0 fraction 1 +(gauss) phi1 341.022242 Phi 45.927285 Phi2 252.045476 [cube] -(gauss) phi1 0 Phi 0 phi2 0 scatter 0 fraction 1 +(gauss) phi1 0 Phi 0 phi2 0 -#-------------------# - -#-------------------# - -{../ConfigFiles/Crystallite_All.config} #-------------------# diff --git a/examples/MSC.Marc/reference_postProc/rotation_90deg.txt b/examples/MSC.Marc/reference_postProc/rotation_90deg.txt deleted file mode 100644 index 27c80fbdb..000000000 --- a/examples/MSC.Marc/reference_postProc/rotation_90deg.txt +++ /dev/null @@ -1,94 +0,0 @@ -2 header -$Id: postResults 861 2011-05-06 10:00:27Z MPIE\c.kords $ -inc time elem node ip grain ip.x ip.y ip.z CauchyStress.intensity CauchyStress.t11 CauchyStress.t22 CauchyStress.t33 CauchyStress.t12 CauchyStress.t23 CauchyStress.t13 1_1_f 1_2_f 1_3_f 1_4_f 1_5_f 1_6_f 1_7_f 1_8_f 1_9_f 1_1_grainrotation 1_2_grainrotation 1_3_grainrotation 1_4_grainrotation 1_1_resistance_slip 
1_2_resistance_slip 1_3_resistance_slip 1_4_resistance_slip 1_5_resistance_slip 1_6_resistance_slip 1_7_resistance_slip 1_8_resistance_slip 1_9_resistance_slip 1_10_resistance_slip 1_11_resistance_slip 1_12_resistance_slip -0 0.0 1 5 1 1 0.5 0.5 0.5 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -1 1.0 1 5 1 1 0.5 0.5 0.5 0.024172998067 0.056046936661 0.0577092021704 0.0580734722316 0.0075496127829 0.00882737897336 0.00766104180366 1.0 1.1259596093e-13 1.12595994811e-13 1.55780499177e-13 0.999847710133 -0.0174524057657 1.55782193243e-13 0.0174524057657 0.999847710133 1.0 -1.23725617425e-12 1.23720879461e-12 1.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -2 2.0 1 5 1 1 0.5 0.5 0.5 0.0241432830571 0.0377263836563 0.0324090756476 0.033376660198 0.00727691268548 0.00865175202489 0.00764666078612 1.0 1.10501743118e-13 1.10501777e-13 1.50193822223e-13 0.99939084053 -0.034899495542 1.55527744546e-13 0.034899495542 0.99939084053 1.0 -6.45080505939e-13 5.68662738426e-13 2.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -3 3.0 1 5 1 1 0.5 0.5 0.5 0.0257848323757 0.0201567672193 0.00817993376404 0.00972554087639 0.00701188668609 0.00847246591002 0.00763081293553 1.0 1.08486407791e-13 1.08486434896e-13 1.447601533e-13 0.998629510403 -0.0523359552026 1.55243941075e-13 0.0523359552026 0.998629510403 1.0 -4.46705416797e-13 3.46547009838e-13 3.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -4 4.0 1 5 1 1 0.5 0.5 0.5 0.0286209738301 0.00342932180502 -0.0149000706151 -0.0128218811005 0.00675505120307 0.00828998535872 0.0076136472635 1.0 1.06547922069e-13 1.06547949174e-13 1.39501515266e-13 0.997564077377 -0.0697564706206 1.54932937682e-13 0.0697564706206 0.997564077377 1.0 -3.46813588048e-13 2.36204529243e-13 4.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -5 5.0 1 5 1 1 0.5 0.5 0.5 0.032148640163 -0.0124530605972 -0.0368128865957 -0.0341981202364 0.00650494545698 0.00810338370502 0.00759505899623 1.0 1.04684232745e-13 1.04684253074e-13 1.34370257434e-13 0.996194720268 -0.0871557444334 1.54588120733e-13 0.0871557444334 0.996194720268 1.0 -2.8629133569e-13 1.70304512138e-13 5.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -6 6.0 1 5 1 1 0.5 0.5 0.5 0.0360145416329 -0.027449907735 -0.0575108379126 -0.0543776340783 0.00626290449873 0.00791467912495 0.0075753852725 1.0 1.02893266281e-13 1.0289328661e-13 1.29417364412e-13 0.994521915913 -0.104528464377 1.54218746606e-13 0.104528464377 0.994521915913 1.0 -2.45509477548e-13 1.26874964528e-13 6.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -7 7.0 1 5 1 1 0.5 0.5 0.5 0.0399740663918 -0.0415176264942 -0.0769297853112 -0.0733039304614 0.00602708896622 0.00772315822542 0.00755509966984 1.0 1.01173003352e-13 1.01173016904e-13 1.24581768512e-13 0.992546141148 -0.121869340539 1.5383908611e-13 0.121869340539 0.992546141148 1.0 -2.1607593644e-13 9.60403970436e-14 7.0 31000000.0 31000000.0 
31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -8 8.0 1 5 1 1 0.5 0.5 0.5 0.0438656010832 -0.0547002702951 -0.0950862541795 -0.0910048484802 0.00579795939848 0.0075320713222 0.00753418169916 1.0 9.95214110774e-14 9.952142463e-14 1.19883541028e-13 0.990268051624 -0.139173105359 1.5344615769e-13 0.139173105359 0.990268051624 1.0 -1.93732616754e-13 7.31539937134e-14 8.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -9 9.0 1 5 1 1 0.5 0.5 0.5 0.0476265064906 -0.06693007797 -0.111963532865 -0.107425913215 0.00557443965226 0.00733647309244 0.00751292472705 1.0 9.79365107892e-14 9.79365175654e-14 1.15288543144e-13 0.987688362598 -0.156434461474 1.53048269045e-13 0.156434461474 0.987688362598 1.0 -1.7614966289e-13 5.54610577028e-14 9.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -10 10.0 1 5 1 1 0.5 0.5 0.5 0.0511619284052 -0.0781974568963 -0.127493560314 -0.122531078756 0.00535882124677 0.00714056473225 0.00749061629176 1.0 9.64163509231e-14 9.64163509231e-14 1.10881335851e-13 0.984807729721 -0.173648178577 1.52618409989e-13 0.173648178577 0.984807729721 1.0 -1.61827379296e-13 4.16502735892e-14 10.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -11 11.0 1 5 1 1 0.5 0.5 0.5 0.0544590124719 -0.088471762836 -0.141687169671 -0.136302277446 0.0051483400166 0.00694213900715 0.00746928341687 1.0 9.49590273492e-14 9.49590341255e-14 1.06568040796e-13 0.981627166271 -0.190808996558 1.52226335378e-13 0.190808996558 0.981627166271 1.0 -1.50064490456e-13 3.04205106743e-14 11.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -12 12.0 1 5 1 1 0.5 0.5 0.5 0.0574517639017 -0.0978478044271 -0.154580652714 -0.148810505867 0.00494291307405 0.00674560666084 0.00744635425508 1.0 9.35626969238e-14 9.35626969238e-14 1.02349369485e-13 0.978147625923 -0.207911685109 1.51779779608e-13 0.207911685109 0.978147625923 1.0 -1.40004354681e-13 2.11307767752e-14 12.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -13 13.0 1 5 1 1 0.5 0.5 0.5 0.0601600304963 -0.10622742027 -0.166131272912 -0.159981891513 0.0047429674305 0.00654464075342 0.00742361694574 1.0 9.22255571608e-14 9.22255503845e-14 9.82438888813e-14 0.974370062351 -0.224951043725 1.5134031181e-13 0.224951043725 0.974370062351 1.0 -1.31394718782e-13 1.33769905892e-14 13.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -14 14.0 1 5 1 1 0.5 0.5 0.5 0.0625560237246 -0.113614186645 -0.176321923733 -0.169804736972 0.00454916572198 0.00634495634586 0.00740135088563 1.0 9.09458597841e-14 9.09458530078e-14 9.42784397643e-14 0.970295727253 -0.241921886802 1.50916524286e-13 0.241921886802 0.970295727253 1.0 -1.23946358277e-13 6.88772167895e-15 14.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -15 15.0 1 5 1 1 0.5 0.5 0.5 0.064623152588 -0.120048590004 -0.185179919004 -0.17832493782 0.00435922853649 0.00614238297567 0.00737902149558 1.0 8.97219175041e-14 8.97219107278e-14 9.03759421358e-14 0.965925812721 -0.258819043636 
1.50488345743e-13 0.258819043636 0.965925812721 1.0 -1.17391732728e-13 1.26347923687e-15 15.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -16 16.0 1 5 1 1 0.5 0.5 0.5 0.0663746195376 -0.125523671508 -0.192728817463 -0.185538485646 0.00417540827766 0.00594279170036 0.00735600618646 1.0 8.85521107938e-14 8.85521040175e-14 8.66191951607e-14 0.961261689663 -0.275637358427 1.50032669123e-13 0.275637358427 0.961261689663 1.0 -1.11524369384e-13 -3.50626472131e-15 16.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -17 17.0 1 5 1 1 0.5 0.5 0.5 0.0677872073143 -0.130049750209 -0.198957800865 -0.191445931792 0.00399514567107 0.00573630817235 0.00733454944566 1.0 8.74348675601e-14 8.74348607838e-14 8.29210628655e-14 0.956304728985 -0.292371690273 1.49627706059e-13 0.292371690273 0.956304728985 1.0 -1.06359209905e-13 -7.7192983925e-15 17.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -18 18.0 1 5 1 1 0.5 0.5 0.5 0.0688726440667 -0.13366368413 -0.20391356945 -0.196102648973 0.00382082024589 0.00553295295686 0.00731354439631 1.0 8.63686902487e-14 8.63686766962e-14 7.93686337898e-14 0.951056540012 -0.309017002583 1.49233476596e-13 0.309017002583 0.951056540012 1.0 -1.01717394815e-13 -1.1326327212e-14 18.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -19 19.0 1 5 1 1 0.5 0.5 0.5 0.0696297180331 -0.136321663857 -0.207558274269 -0.199463963509 0.00364940264262 0.00533262779936 0.00729088904336 1.0 8.53521355156e-14 8.53521151868e-14 7.58580481416e-14 0.945518553257 -0.325568139553 1.48775008155e-13 0.325568139553 0.945518553257 1.0 -9.7403408581e-14 -1.45807929668e-14 19.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -20 20.0 1 5 1 1 0.5 0.5 0.5 0.0700644066665 -0.138097688556 -0.209973961115 -0.201613843441 0.00348059250973 0.00512766698375 0.00727100577205 1.0 8.43838074503e-14 8.43837938977e-14 7.23816555195e-14 0.939692616463 -0.342020124197 1.48407572039e-13 0.342020124197 0.939692616463 1.0 -9.35965172554e-14 -1.75459757942e-14 20.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -21 21.0 1 5 1 1 0.5 0.5 0.5 0.0701877711751 -0.138971403241 -0.211157605052 -0.20253777504 0.00331580708735 0.00492311827838 0.00725055858493 1.0 8.34624050102e-14 8.34623846814e-14 6.89923311081e-14 0.933580458164 -0.358367949724 1.4801277337e-13 0.358367949724 0.933580458164 1.0 -9.0061604821e-14 -2.01888507268e-14 21.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -22 22.0 1 5 1 1 0.5 0.5 0.5 0.0699866553715 -0.138959825039 -0.211107447743 -0.202245801687 0.00315649039112 0.00472341617569 0.00723067810759 1.0 8.25866610342e-14 8.25866339291e-14 6.57438174539e-14 0.927183866501 -0.374606579542 1.4762987382e-13 0.374606579542 0.927183866501 1.0 -8.68153950963e-14 -2.24807118179e-14 22.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -23 23.0 1 5 1 1 0.5 0.5 0.5 0.0694881714825 -0.138143435121 -0.209938883781 -0.200845211744 0.00299837184139 
0.00451778201386 0.00721126794815 1.0 8.17553761235e-14 8.17553490184e-14 6.24881345127e-14 0.920504868031 -0.39073112607 1.47254902499e-13 0.39073112607 0.920504868031 1.0 -8.38166477413e-14 -2.46553706542e-14 23.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -24 24.0 1 5 1 1 0.5 0.5 0.5 0.0687132541533 -0.136471450329 -0.20762142539 -0.198303565383 0.0028445108328 0.00431606685743 0.00719203986228 1.0 8.09674050896e-14 8.09673779845e-14 5.93358776826e-14 0.913545489311 -0.406736642122 1.46877193567e-13 0.406736642122 0.913545489311 1.0 -8.10227265054e-14 -2.65915626107e-14 24.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -25 25.0 1 5 1 1 0.5 0.5 0.5 0.0676169983635 -0.13403198123 -0.204196736217 -0.194667950273 0.00269203982316 0.00410861568525 0.00717331608757 1.0 8.02216705066e-14 8.02216434015e-14 5.61884252234e-14 0.906307816505 -0.422618240118 1.46507656809e-13 0.422618240118 0.906307816505 1.0 -7.84230400888e-14 -2.84337508508e-14 25.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -26 26.0 1 5 1 1 0.5 0.5 0.5 0.0662631331079 -0.130787238479 -0.199683398008 -0.189970225096 0.00254649785347 0.00390863511711 0.00715752178803 1.0 7.95171491586e-14 7.95171220536e-14 5.32437219585e-14 0.898794054985 -0.438371151686 1.46231442753e-13 0.438371151686 0.898794054985 1.0 -7.6093421657e-14 -2.99671074901e-14 26.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -27 27.0 1 5 1 1 0.5 0.5 0.5 0.0646436064666 -0.126818150282 -0.194168791175 -0.184269443154 0.00239934097044 0.00370743637905 0.00713936518878 1.0 7.88528788163e-14 7.8852844935e-14 5.02027178238e-14 0.891006529331 -0.453990489244 1.45861472314e-13 0.453990489244 0.891006529331 1.0 -7.37995886732e-14 -3.15536984562e-14 27.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -28 28.0 1 5 1 1 0.5 0.5 0.5 0.0627565607148 -0.122118026018 -0.18763422966 -0.177565723658 0.00225654477254 0.00350114423782 0.00712363282219 1.0 7.82279311316e-14 7.82279040265e-14 4.72786279527e-14 0.88294762373 -0.469471544027 1.45566271168e-13 0.469471544027 0.88294762373 1.0 -7.17171954418e-14 -3.29618534615e-14 28.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -29 29.0 1 5 1 1 0.5 0.5 0.5 0.0606526623375 -0.11671321094 -0.180158898234 -0.169930398464 0.00211710762233 0.003298870055 0.00710933981463 1.0 7.76414658477e-14 7.76414319664e-14 4.44384214485e-14 0.874619722366 -0.484809607267 1.45310806031e-13 0.484809607267 0.874619722366 1.0 -6.9789633057e-14 -3.42433838175e-14 29.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -30 30.0 1 5 1 1 0.5 0.5 0.5 0.0582944850272 -0.1107018888 -0.171804904938 -0.161417961121 0.00197660620324 0.00309796072543 0.00709319859743 1.0 7.7092669813e-14 7.70926291555e-14 4.15282669635e-14 0.866025388241 -0.5 1.44978782668e-13 0.5 0.866025388241 1.0 -6.78861535128e-14 -3.55643994614e-14 30.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -31 31.0 1 5 1 1 0.5 
0.5 0.5 0.0557503201598 -0.104059666395 -0.162606656551 -0.152081504464 0.00184033811092 0.00289721833542 0.00708150491118 1.0 7.65807840861e-14 7.65807502048e-14 3.87372832379e-14 0.857167303562 -0.515038073063 1.44792137264e-13 0.515038073063 0.857167303562 1.0 -6.62197551025e-14 -3.67385463685e-14 31.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -32 32.0 1 5 1 1 0.5 0.5 0.5 0.0529884126408 -0.0968004092574 -0.152546048164 -0.14190004766 0.00170668761712 0.0026978047099 0.00706525752321 1.0 7.6105117488e-14 7.61050768304e-14 3.60106096418e-14 0.848048090935 -0.529919266701 1.44433035952e-13 0.529919266701 0.848048090935 1.0 -6.44701645043e-14 -3.78307716371e-14 32.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -33 33.0 1 5 1 1 0.5 0.5 0.5 0.0500567272075 -0.0890314355493 -0.141778171062 -0.131015405059 0.00157438474707 0.00249168649316 0.00705360202119 1.0 7.56650127211e-14 7.56649788398e-14 3.33055252214e-14 0.838670551777 -0.544639050961 1.44223988221e-13 0.544639050961 0.838670551777 1.0 -6.29398605716e-14 -3.88876756317e-14 33.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -34 34.0 1 5 1 1 0.5 0.5 0.5 0.0469640629868 -0.0807560905814 -0.130312100053 -0.119427792728 0.00144245161209 0.00229033012874 0.00704180356115 1.0 7.52598734743e-14 7.52598328167e-14 3.05895784506e-14 0.829037606716 -0.559192895889 1.43997755885e-13 0.559192895889 0.829037606716 1.0 -6.14617270923e-14 -3.99417607007e-14 34.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -35 35.0 1 5 1 1 0.5 0.5 0.5 0.0437172172209 -0.071987785399 -0.118148125708 -0.107174038887 0.00131240475457 0.00208335206844 0.00703246705234 1.0 7.48891440939e-14 7.48891034364e-14 2.79183008092e-14 0.819152057171 -0.573576450348 1.43846116659e-13 0.573576450348 0.819152057171 1.0 -6.01114481542e-14 -4.09455862228e-14 35.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -36 36.0 1 5 1 1 0.5 0.5 0.5 0.0403788372113 -0.0627495497465 -0.105374902487 -0.0943066850305 0.00118379341438 0.00188469397835 0.00702259968966 1.0 7.45523163603e-14 7.45522757027e-14 2.5277750135e-14 0.809017002583 -0.587785243988 1.43663347277e-13 0.587785243988 0.809017002583 1.0 -5.87893923538e-14 -4.19154524999e-14 36.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -37 37.0 1 5 1 1 0.5 0.5 0.5 0.0369407713034 -0.053146019578 -0.0920672789216 -0.0809241756797 0.00105845439248 0.00168191338889 0.0070105525665 1.0 7.42489362636e-14 7.42488888298e-14 2.27346861768e-14 0.798635542393 -0.601814985275 1.43391131217e-13 0.601814985275 0.798635542393 1.0 -5.74447715958e-14 -4.27990704942e-14 37.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -38 38.0 1 5 1 1 0.5 0.5 0.5 0.0334600072977 -0.0431970842183 -0.0782782137394 -0.0670673400164 0.000933687435463 0.00148375425488 0.00700649619102 1.0 7.39785836756e-14 7.39785362418e-14 2.01957371047e-14 0.788010776043 -0.61566144228 1.43388285186e-13 0.61566144228 0.788010776043 1.0 -5.63700595804e-14 -4.36789107216e-14 38.0 31000000.0 31000000.0 
31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -39 39.0 1 5 1 1 0.5 0.5 0.5 0.0299732687575 -0.032885748893 -0.0640020221472 -0.0527336075902 0.000808531127404 0.00128437299281 0.00699805375189 1.0 7.37408926781e-14 7.37408452443e-14 1.76288596622e-14 0.777145981789 -0.629320383072 1.4321798413e-13 0.629320383072 0.777145981789 1.0 -5.52001275093e-14 -4.45814548201e-14 39.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -40 40.0 1 5 1 1 0.5 0.5 0.5 0.0265501119175 -0.0222893729806 -0.0493332147598 -0.0380037464201 0.000683732156176 0.00107720762026 0.00699263811111 1.0 7.35355447866e-14 7.35354973528e-14 1.50622549643e-14 0.766044437885 -0.642787575722 1.43141751165e-13 0.642787575722 0.766044437885 1.0 -5.41440536069e-14 -4.54841479964e-14 40.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -41 41.0 1 5 1 1 0.5 0.5 0.5 0.0232912298862 -0.0114479679614 -0.0343343839049 -0.0229646526277 0.000558626372367 0.000879483588506 0.00698518194258 1.0 7.33622553982e-14 7.33622079643e-14 1.24743245179e-14 0.754709601402 -0.656059026718 1.42980692932e-13 0.656059026718 0.754709601402 1.0 -5.30580997703e-14 -4.64043103802e-14 41.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -42 42.0 1 5 1 1 0.5 0.5 0.5 0.0203406476883 -0.000415417132899 -0.0190748237073 -0.00765737332404 0.000437716866145 0.000681487843394 0.00698295794427 1.0 7.32207941197e-14 7.32207466859e-14 1.00257809663e-14 0.7431448102 -0.669130623341 1.42991467191e-13 0.669130623341 0.7431448102 1.0 -5.21353590182e-14 -4.72217344437e-14 42.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -43 43.0 1 5 1 1 0.5 0.5 0.5 0.017873190849 0.0107409814373 -0.00360602373257 0.00782212708145 0.000314239208819 0.000479505921248 0.00697580305859 1.0 7.31109712159e-14 7.31109170058e-14 7.47973206403e-15 0.731353700161 -0.681998372078 1.42815406311e-13 0.681998372078 0.731353700161 1.0 -5.1103120623e-14 -4.81168585336e-14 43.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -44 44.0 1 5 1 1 0.5 0.5 0.5 0.0162081584588 0.0220755711198 0.0120714716613 0.0235084760934 0.000193880769075 0.000282061198959 0.00697334948927 1.0 7.30326240564e-14 7.30325698463e-14 5.03865299026e-15 0.71933978796 -0.694658339024 1.42792353462e-13 0.694658339024 0.71933978796 1.0 -5.02115777884e-14 -4.89405845223e-14 44.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -45 45.0 1 5 1 1 0.5 0.5 0.5 0.0156273554774 0.0334772281349 0.0278379991651 0.0392913781106 7.09175001248e-05 8.74613033375e-05 0.00697097880766 1.0 7.29856645498e-14 7.29856103397e-14 2.50234157871e-15 0.707106769085 -0.707106769085 1.42759393716e-13 0.707106769085 0.707106769085 1.0 -4.93375177139e-14 -4.98392356573e-14 45.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -46 46.0 1 5 1 1 0.5 0.5 0.5 0.0162480691786 0.0449191257358 0.0436743237078 0.0551158338785 -4.99888519698e-05 -0.000117406474601 0.00696983095258 1.0 7.29700181572e-14 7.29699571708e-14 
3.70651989336e-17 0.694658398628 -0.71933978796 1.42756737421e-13 0.71933978796 0.694658398628 1.0 -4.85075202933e-14 -5.06943662396e-14 46.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -47 47.0 1 5 1 1 0.5 0.5 0.5 0.017954318699 0.0563476122916 0.0594997182488 0.0709141045809 -0.000170402156073 -0.000321725587128 0.00697033200413 1.0 7.29856645498e-14 7.29856035634e-14 -2.41059012283e-15 0.681998372078 -0.731353700161 1.42799441434e-13 0.731353700161 0.681998372078 1.0 -4.77291918824e-14 -5.15456851735e-14 47.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -48 48.0 1 5 1 1 0.5 0.5 0.5 0.0204742447658 0.0677090287209 0.075221426785 0.0866304710507 -0.00029246028862 -0.000511626130901 0.00697235297412 1.0 7.30326240564e-14 7.303256307e-14 -4.91429161294e-15 0.669130623341 -0.7431448102 1.42883006316e-13 0.7431448102 0.669130623341 1.0 -4.69965219356e-14 -5.2443960226e-14 48.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -49 49.0 1 5 1 1 0.5 0.5 0.5 0.023524710328 0.0790234953165 0.0908864960074 0.102273575962 -0.000416969822254 -0.000719646865036 0.0069726947695 1.0 7.31109644396e-14 7.31109034533e-14 -7.50072916376e-15 0.656059026718 -0.754709541798 1.42894593727e-13 0.754709541798 0.656059026718 1.0 -4.62321492396e-14 -5.34057729138e-14 49.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -50 50.0 1 5 1 1 0.5 0.5 0.5 0.0268598238159 0.0902177020907 0.106357127428 0.1177008003 -0.000538720283657 -0.0009086750797 0.00697285402566 1.0 7.32207941197e-14 7.32207331333e-14 -9.98427042269e-15 0.642787635326 -0.766044437885 1.42887085627e-13 0.766044437885 0.642787635326 1.0 -4.54714797716e-14 -5.43082492497e-14 50.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -51 51.0 1 5 1 1 0.5 0.5 0.5 0.0303826202004 0.101213820279 0.121579430997 0.132867023349 -0.000660901481751 -0.00111284002196 0.00697690108791 1.0 7.33622553982e-14 7.33621876355e-14 -1.24756806177e-14 0.629320383072 -0.777145922184 1.43004138804e-13 0.777145922184 0.629320383072 1.0 -4.48062200948e-14 -5.52263923069e-14 51.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -52 52.0 1 5 1 1 0.5 0.5 0.5 0.0339949033569 0.11201544106 0.136535584927 0.147761180997 -0.000784714065958 -0.00131459208205 0.00697894394398 1.0 7.35355380104e-14 7.3535477024e-14 -1.50155309329e-14 0.615661501884 -0.788010716438 1.43037965912e-13 0.788010716438 0.615661501884 1.0 -4.40999640234e-14 -5.61864601069e-14 52.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -53 53.0 1 5 1 1 0.5 0.5 0.5 0.0376308280896 0.122612737119 0.15118843317 0.1623544842 -0.00091004971182 -0.00151794939302 0.00698458682746 1.0 7.37408859018e-14 7.37408249155e-14 -1.75981801289e-14 0.60181504488 -0.798635482788 1.43186406742e-13 0.798635482788 0.60181504488 1.0 -4.34776455289e-14 -5.71844546542e-14 53.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -54 54.0 1 5 1 1 0.5 0.5 0.5 0.0412246747871 0.132881388068 
0.165393546224 0.176486164331 -0.00103311298881 -0.00171284656972 0.00699377711862 1.0 7.39785768994e-14 7.39785091367e-14 -2.00878776232e-14 0.587785243988 -0.809017002583 1.4344772657e-13 0.809017002583 0.587785243988 1.0 -4.29343382678e-14 -5.81362689651e-14 54.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -55 55.0 1 5 1 1 0.5 0.5 0.5 0.0447415113323 0.142851829529 0.179176211357 0.190176308155 -0.00116179022007 -0.00190604047384 0.00699571380392 1.0 7.42489294873e-14 7.42488617247e-14 -2.27610458421e-14 0.573576450348 -0.819152057171 1.43440909649e-13 0.819152057171 0.573576450348 1.0 -4.22339470525e-14 -5.92136542165e-14 55.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -56 56.0 1 5 1 1 0.5 0.5 0.5 0.0481736416751 0.152466788888 0.192468911409 0.203372821212 -0.00128960516304 -0.00210840022191 0.00700309127569 1.0 7.4552309584e-14 7.45522418214e-14 -2.53872833536e-14 0.559192895889 -0.829037547112 1.43614029631e-13 0.829037547112 0.559192895889 1.0 -4.1651784696e-14 -6.02744647271e-14 56.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -57 57.0 1 5 1 1 0.5 0.5 0.5 0.0514957040887 0.16164894402 0.205184012651 0.215988025069 -0.00141807645559 -0.00230375886895 0.00701119331643 1.0 7.48891373177e-14 7.48890627788e-14 -2.80182744139e-14 0.544639050961 -0.838670551777 1.43800837665e-13 0.838670551777 0.544639050961 1.0 -4.10839371963e-14 -6.13515111652e-14 57.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -58 58.0 1 5 1 1 0.5 0.5 0.5 0.0546568840541 0.170429125428 0.217311233282 0.228001371026 -0.00154578115325 -0.00251251878217 0.00702094798908 1.0 7.5259866698e-14 7.52597921591e-14 -3.06018773689e-14 0.529919266701 -0.848048090935 1.44034266393e-13 0.848048090935 0.529919266701 1.0 -4.05486903006e-14 -6.24149440898e-14 58.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -59 59.0 1 5 1 1 0.5 0.5 0.5 0.057665178562 0.178753256798 0.228810757399 0.239380404353 -0.00167655316181 -0.0027051072102 0.00703151477501 1.0 7.56650059448e-14 7.56649314059e-14 -3.3271399958e-14 0.515038073063 -0.857167303562 1.44284839068e-13 0.857167303562 0.515038073063 1.0 -4.00271416218e-14 -6.35444252555e-14 59.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -60 60.0 1 5 1 1 0.5 0.5 0.5 0.0604909854062 0.186547547579 0.239586278796 0.250017344952 -0.00180981971789 -0.00291019980796 0.00704179238528 1.0 7.61051039354e-14 7.61050361728e-14 -3.6004626201e-14 0.5 -0.866025388241 1.44513903882e-13 0.866025388241 0.5 1.0 -3.94958825573e-14 -6.47265850943e-14 60.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -61 61.0 1 5 1 1 0.5 0.5 0.5 0.0631217688617 0.193791866302 0.249607756734 0.259886533022 -0.00194548477884 -0.00310831447132 0.00705294730142 1.0 7.65807705336e-14 7.65806959947e-14 -3.87961283108e-14 0.48480963707 -0.874619722366 1.44762877358e-13 0.874619722366 0.48480963707 1.0 -3.89781861843e-14 -6.59583268537e-14 61.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 
31000000.0 31000000.0 31000000.0 -62 62.0 1 5 1 1 0.5 0.5 0.5 0.0655460742438 0.20045940578 0.258834958076 0.268953949213 -0.00208330713212 -0.00330717721954 0.00706291478127 1.0 7.70926562605e-14 7.70925749453e-14 -4.16352980467e-14 0.46947157383 -0.882947564125 1.44959158609e-13 0.882947564125 0.46947157383 1.0 -3.84318262164e-14 -6.72338636046e-14 62.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -63 63.0 1 5 1 1 0.5 0.5 0.5 0.0677412166974 0.206590846181 0.267280697823 0.277241885662 -0.00222166883759 -0.00350732635707 0.00707424012944 1.0 7.76414522952e-14 7.764137098e-14 -4.44629108651e-14 0.453990519047 -0.891006529331 1.45192844835e-13 0.891006529331 0.453990519047 1.0 -3.79073942375e-14 -6.85204659939e-14 63.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -64 64.0 1 5 1 1 0.5 0.5 0.5 0.0697129410434 0.212010905147 0.274793446064 0.284571915865 -0.00236255140044 -0.00370562658645 0.00708921952173 1.0 7.82279175791e-14 7.82278430402e-14 -4.73467022966e-14 0.438371151686 -0.898794054985 1.45545264751e-13 0.898794054985 0.438371151686 1.0 -3.74487496135e-14 -6.98572872725e-14 64.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -65 65.0 1 5 1 1 0.5 0.5 0.5 0.0714181561332 0.216807678342 0.281412482262 0.290993452072 -0.00250276038423 -0.00390887679532 0.00710331602022 1.0 7.88528652638e-14 7.88527839486e-14 -5.01717130298e-14 0.42261826992 -0.906307756901 1.45856701825e-13 0.906307756901 0.42261826992 1.0 -3.69653241935e-14 -7.11814369383e-14 65.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -66 66.0 1 5 1 1 0.5 0.5 0.5 0.0728654392243 0.220906943083 0.287071973085 0.296444416046 -0.00265323766507 -0.00410525733605 0.00711599038914 1.0 7.95171356061e-14 7.95170542909e-14 -5.33217404692e-14 0.406736671925 -0.913545429707 1.46108494227e-13 0.913545429707 0.406736671925 1.0 -3.64467062487e-14 -7.27051272377e-14 66.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -67 67.0 1 5 1 1 0.5 0.5 0.5 0.0740421446261 0.224261745811 0.291689932346 0.300876945257 -0.00280131306499 -0.00430715922266 0.00713248644024 1.0 8.0221656954e-14 8.02215688626e-14 -5.63473692618e-14 0.390731155872 -0.920504868031 1.46486108291e-13 0.920504868031 0.390731155872 1.0 -3.59935876677e-14 -7.41815937562e-14 67.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -68 68.0 1 5 1 1 0.5 0.5 0.5 0.0749262839347 0.22694632411 0.295370638371 0.30431458354 -0.00294701498933 -0.00449755322188 0.00714947842062 1.0 8.09673915371e-14 8.09673034456e-14 -5.92471696161e-14 0.374606609344 -0.927183866501 1.46872639918e-13 0.927183866501 0.374606609344 1.0 -3.55405978356e-14 -7.56131336471e-14 68.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -69 69.0 1 5 1 1 0.5 0.5 0.5 0.0755447188712 0.228792086244 0.297914654016 0.306645393372 -0.00310259847902 -0.00470214849338 0.00716820033267 1.0 8.17553557947e-14 8.17552677033e-14 -6.24511022323e-14 0.358367979527 -0.93358039856 1.47312107716e-13 0.93358039856 0.358367979527 1.0 -3.51104440118e-14 
-7.7233013008e-14 69.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -70 70.0 1 5 1 1 0.5 0.5 0.5 0.0758345472717 0.229867011309 0.299378037453 0.307846367359 -0.00326239760034 -0.00490199634805 0.00718151498586 1.0 8.25866474817e-14 8.2586552614e-14 -6.57566788021e-14 0.342020153999 -0.939692616463 1.47553288489e-13 0.939692616463 0.342020153999 1.0 -3.45680752631e-14 -7.89318358395e-14 70.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -71 71.0 1 5 1 1 0.5 0.5 0.5 0.0758360864293 0.230157241225 0.299764603376 0.307952821255 -0.00342002091929 -0.00510233594105 0.00720483297482 1.0 8.34623914577e-14 8.34623033663e-14 -6.89357628598e-14 0.325568169355 -0.945518553257 1.48140492386e-13 0.945518553257 0.325568169355 1.0 -3.42024992312e-14 -8.05897232628e-14 71.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -72 72.0 1 5 1 1 0.5 0.5 0.5 0.075525141316 0.229602411389 0.298972666264 0.306899368763 -0.00358790112659 -0.0052975253202 0.00721790129319 1.0 8.43837938977e-14 8.438369903e-14 -7.24243798614e-14 0.309017002583 -0.951056480408 1.48359772276e-13 0.951056480408 0.309017002583 1.0 -3.36342078863e-14 -8.24389384882e-14 72.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -73 73.0 1 5 1 1 0.5 0.5 0.5 0.0749000000883 0.228239625692 0.29704400897 0.304708212614 -0.00375670660287 -0.00549271516502 0.00723527790979 1.0 8.5352121963e-14 8.53520203191e-14 -7.58904522341e-14 0.292371720076 -0.956304728985 1.48725527878e-13 0.956304728985 0.292371720076 1.0 -3.3134577032e-14 -8.43050130574e-14 73.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -74 74.0 1 5 1 1 0.5 0.5 0.5 0.0739484623657 0.225991547108 0.293922573328 0.301260143518 -0.00392613513395 -0.00569652067497 0.00725413672626 1.0 8.63686766962e-14 8.63685818285e-14 -7.93205019896e-14 0.275637388229 -0.961261689663 1.49138825747e-13 0.961261689663 0.275637388229 1.0 -3.26499251084e-14 -8.6183183257e-14 74.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -75 75.0 1 5 1 1 0.5 0.5 0.5 0.072654064954 0.22285503149 0.289548963308 0.296567112207 -0.00410118186846 -0.00588755588979 0.00727464165539 1.0 8.74348607838e-14 8.74347591399e-14 -8.2887859174e-14 0.258819073439 -0.965925812721 1.49606293066e-13 0.965925812721 0.258819073439 1.0 -3.21823527571e-14 -8.81655249567e-14 75.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -76 76.0 1 5 1 1 0.5 0.5 0.5 0.0710590306271 0.218880146742 0.284022808075 0.290706813335 -0.00428106123582 -0.00608264096081 0.00729729793966 1.0 8.85520972413e-14 8.85519955973e-14 -8.65618002082e-14 0.241921916604 -0.970295727253 1.50146700086e-13 0.970295727253 0.241921916604 1.0 -3.17401744536e-14 -9.02373878744e-14 76.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -77 77.0 1 5 1 1 0.5 0.5 0.5 0.0691430442849 0.214006558061 0.277257710695 0.283598601818 -0.00446529500186 -0.00628326507285 0.00731486734003 1.0 8.97219039515e-14 8.97218023076e-14 
-9.03223757722e-14 0.224951073527 -0.974370062351 1.5050570653e-13 0.974370062351 0.224951073527 1.0 -3.11913851952e-14 -9.23900916167e-14 77.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -78 78.0 1 5 1 1 0.5 0.5 0.5 0.0668996797354 0.208257958293 0.269263744354 0.275259256363 -0.00465713022277 -0.00647377641872 0.00733389845118 1.0 9.09458462315e-14 9.09457378113e-14 -9.42809808631e-14 0.207911714911 -0.978147566319 1.50915440084e-13 0.978147566319 0.207911714911 1.0 -3.06547254486e-14 -9.46824541513e-14 78.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -79 79.0 1 5 1 1 0.5 0.5 0.5 0.0643428410009 0.201603919268 0.260032474995 0.265651851892 -0.00485096639022 -0.00666582910344 0.00735527323559 1.0 9.22255503845e-14 9.2225435188e-14 -9.82367805808e-14 0.190809011459 -0.981627166271 1.51408264181e-13 0.981627166271 0.190809011459 1.0 -3.0145265624e-14 -9.70135769136e-14 79.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -80 80.0 1 5 1 1 0.5 0.5 0.5 0.0614645414137 0.194048047066 0.249545291066 0.254766076803 -0.00505235884339 -0.00686362525448 0.00737208453938 1.0 9.35626901476e-14 9.35625817274e-14 -1.02382444428e-13 0.17364820838 -0.984807729721 1.51741778322e-13 0.984807729721 0.17364820838 1.0 -2.95383529648e-14 -9.94839518924e-14 80.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -81 81.0 1 5 1 1 0.5 0.5 0.5 0.0582718432416 0.185640856624 0.237853914499 0.242651328444 -0.00525587564334 -0.00704948464409 0.00739240786061 1.0 9.49590273492e-14 9.49589053765e-14 -1.06522917657e-13 0.156434491277 -0.987688362598 1.52202360958e-13 0.987688362598 0.156434491277 1.0 -2.89785014561e-14 -1.01996719176e-13 81.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -82 82.0 1 5 1 1 0.5 0.5 0.5 0.0548411210712 0.176334485412 0.224956199527 0.229332342744 -0.00546766957268 -0.00723874382675 0.00740870647132 1.0 9.64163441469e-14 9.64162289504e-14 -1.10871842306e-13 0.13917312026 -0.990268051624 1.52525670045e-13 0.990268051624 0.13917312026 1.0 -2.833043316e-14 -1.04662663898e-13 82.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -83 83.0 1 5 1 1 0.5 0.5 0.5 0.0511313149972 0.166156679392 0.210826560855 0.214756399393 -0.00568466819823 -0.00742362486199 0.00743296789005 1.0 9.79365040129e-14 9.79363888164e-14 -1.15317233844e-13 0.121869370341 -0.992546141148 1.53136387577e-13 0.992546141148 0.121869370341 1.0 -2.78072717305e-14 -1.07427616607e-13 83.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -84 84.0 1 5 1 1 0.5 0.5 0.5 0.0472034591648 0.155100286007 0.195493191481 0.198953047395 -0.00591072347015 -0.00760667771101 0.00744895776734 1.0 9.95214110774e-14 9.95212891047e-14 -1.19991419145e-13 0.104528486729 -0.994521915913 1.53463518477e-13 0.994521915913 0.104528486729 1.0 -2.7119679183e-14 -1.10360986571e-13 84.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -85 85.0 1 5 1 1 0.5 0.5 0.5 0.0431223359335 0.143299892545 
0.179094478488 0.182078793645 -0.0061384961009 -0.00779581116512 0.00746519910172 1.0 1.01173003352e-13 1.01172881379e-13 -1.24631763785e-13 0.0871557667851 -0.996194720268 1.53809826204e-13 0.996194720268 0.0871557667851 1.0 -2.64190067528e-14 -1.13333658864e-13 85.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -86 86.0 1 5 1 1 0.5 0.5 0.5 0.0389558933784 0.130603894591 0.161492869258 0.163967981935 -0.00637407042086 -0.00797392893583 0.00748812500387 1.0 1.02893273058e-13 1.02893144309e-13 -1.29449253508e-13 0.069756500423 -0.997564077377 1.54404497543e-13 0.997564077377 0.069756500423 1.0 -2.58185688134e-14 -1.16454941393e-13 86.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -87 87.0 1 5 1 1 0.5 0.5 0.5 0.0348200893229 0.117140188813 0.142814710736 0.144770666957 -0.00661494443193 -0.00814818497747 0.0075002736412 1.0 1.04684239521e-13 1.04684103996e-13 -1.34351825997e-13 0.0523359812796 -0.998629510403 1.5463373854e-13 0.998629510403 0.0523359812796 1.0 -2.50090916129e-14 -1.19682055607e-13 87.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -88 88.0 1 5 1 1 0.5 0.5 0.5 0.0309165569603 0.102922193706 0.123093500733 0.124490439892 -0.00686429999769 -0.0083291567862 0.00751314265653 1.0 1.06547935622e-13 1.06547800097e-13 -1.39447996336e-13 0.0348995216191 -0.99939084053 1.54905873285e-13 0.99939084053 0.0348995216191 1.0 -2.41937732733e-14 -1.23072938559e-13 88.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -89 89.0 1 5 1 1 0.5 0.5 0.5 0.0275482477894 0.0879731550813 0.102374792099 0.103184834123 -0.00712391687557 -0.00850248057395 0.00752882473171 1.0 1.08486421344e-13 1.08486279042e-13 -1.44796799334e-13 0.0174524337053 -0.999847710133 1.55297460004e-13 0.999847710133 0.0174524337053 1.0 -2.34091547958e-14 -1.26660902361e-13 89.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 -90 90.0 1 5 1 1 0.5 0.5 0.5 0.0251206236511 0.0723270624876 0.0806358680129 0.0808931067586 -0.00738708348945 -0.00866825506091 0.00754283368587 1.0 1.10501756671e-13 1.10501614369e-13 -1.50157664081e-13 2.67917759089e-08 -1.0 1.55653268052e-13 1.0 2.67920814423e-08 1.0 -2.25758302298e-14 -1.30329706988e-13 90.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 31000000.0 diff --git a/examples/MSC.Marc/reference_postProc/rotation_90deg_inc90.txt b/examples/MSC.Marc/reference_postProc/rotation_90deg_inc90.txt deleted file mode 100644 index ebee6aed8..000000000 --- a/examples/MSC.Marc/reference_postProc/rotation_90deg_inc90.txt +++ /dev/null @@ -1,3 +0,0 @@ -1 header -inc elem node ip grain ip.x ip.y ip.z 1_1_p 1_2_p 1_3_p 1_4_p 1_5_p 1_6_p 1_7_p 1_8_p 1_9_p 1_1_div(p) 1_2_div(p) 1_3_div(p) 1_norm(div(p)) -90 1 5 1 1 0.5 0.5 0.5 0.0723270624876 0.00754283322021 0.00738708395511 -0.00738708348945 -0.00866825319827 -0.0806358680129 0.00754283368587 0.0808931067586 0.00866825692356 0.000000 0.000000 0.000000 0.000000 diff --git a/examples/SpectralMethod/EshelbyInclusion/material.config b/examples/SpectralMethod/EshelbyInclusion/material.config index e002584b0..008c44f4b 100644 --- 
a/examples/SpectralMethod/EshelbyInclusion/material.config +++ b/examples/SpectralMethod/EshelbyInclusion/material.config @@ -3,25 +3,12 @@ #-------------------# [direct] -mech none # isostrain 1 grain +mech none -thermal adiabatic # thermal strain (stress) induced mass transport -initialT 300.0 +thermal adiabatic +t0 330.0 (output) temperature -#-------------------# - -#-------------------# - -[aLittleSomething] - -(output) texture -(output) f -(output) p -(output) fe -(output) fi -(output) fp - #-------------------# #-------------------# @@ -34,6 +21,12 @@ plasticity none {config/elastic_isotropic.config} {config/thermal.config} +(output) f +(output) p +(output) fe +(output) fi +(output) fp + #................. [Ti matrix] @@ -43,6 +36,12 @@ plasticity none {config/elastic_Ti.config} {config/thermal.config} +(output) f +(output) p +(output) fe +(output) fi +(output) fp + #................. [isotropic inclusion] @@ -52,6 +51,12 @@ plasticity none {config/thermal.config} {config/thermalExpansion_isotropic.config} +(output) f +(output) p +(output) fe +(output) fi +(output) fp + #................. [anisotropic inclusion] @@ -61,6 +66,12 @@ plasticity none {config/thermal.config} {config/thermalExpansion_fullyAnisotropic.config} +(output) f +(output) p +(output) fe +(output) fi +(output) fp + #................. [Ti inclusion] @@ -71,32 +82,32 @@ plasticity none {config/thermal.config} {config/thermalExpansion_Ti.config} +(output) f +(output) p +(output) fe +(output) fi +(output) fp + #--------------------------# #--------------------------# [isotropic matrix] -crystallite 1 (constituent) phase 1 texture 1 fraction 1.0 [Ti matrix] -crystallite 1 (constituent) phase 2 texture 1 fraction 1.0 [isotropic inclusion] -crystallite 1 (constituent) phase 3 texture 1 fraction 1.0 [anisotropic inclusion] -crystallite 1 (constituent) phase 4 texture 1 fraction 1.0 [rotated inclusion] -crystallite 1 (constituent) phase 4 texture 2 fraction 1.0 [Ti inclusion] -crystallite 1 (constituent) phase 5 texture 1 fraction 1.0 #--------------------------# @@ -104,8 +115,8 @@ crystallite 1 #--------------------------# [cube] -(gauss) phi1 0.0 Phi 0.0 phi2 0.0 scatter 0.0 fraction 1.0 +(gauss) phi1 0.0 Phi 0.0 phi2 0.0 [rotated] -(gauss) phi1 0.0 Phi 45.0 phi2 0.0 scatter 0.0 fraction 1.0 +(gauss) phi1 0.0 Phi 45.0 phi2 0.0 diff --git a/examples/SpectralMethod/EshelbyInclusion/runAll.sh b/examples/SpectralMethod/EshelbyInclusion/runAll.sh deleted file mode 100755 index cef1128ef..000000000 --- a/examples/SpectralMethod/EshelbyInclusion/runAll.sh +++ /dev/null @@ -1,54 +0,0 @@ -#!/usr/bin/env bash - -for geom in $(ls geom/*.geom) -do - base=${geom%.geom} - base=${base#geom/} - name=${base}_thermal - vtr=${base}.vtr - - [[ -f ${name}.spectralOut ]] || \ - DAMASK_spectral \ - --workingdir ./ \ - --load thermal.load \ - --geom $geom \ - > ${name}.out - - if [ ! 
-f postProc/${name}_inc10.txt ] - then - postResults ${name}.spectralOut \ - --ho temperature \ - --cr f,fe,fi,fp,p \ - --split \ - --separation x,y,z \ - - addCauchy postProc/${name}_inc*.txt \ - - addDeviator postProc/${name}_inc*.txt \ - --spherical \ - --tensor p,Cauchy \ - - addDisplacement postProc/${name}_inc*.txt \ - --nodal \ - - fi - - geom_check ${geom} - - for inc in {00..10} - do - echo "generating postProc/${name}_inc${inc}.vtr" - cp geom/${vtr} postProc/${name}_inc${inc}.vtr - vtk_addRectilinearGridData \ - postProc/${name}_inc${inc}.txt \ - --vtk postProc/${name}_inc${inc}.vtr \ - --data 'sph(p)','sph(Cauchy)',temperature \ - --tensor f,fe,fi,fp,p,Cauchy \ - - vtk_addRectilinearGridData \ - postProc/${name}_inc${inc}_nodal.txt \ - --vtk postProc/${name}_inc${inc}.vtr \ - --data 'avg(f).pos','fluct(f).pos' \ - - done -done diff --git a/examples/SpectralMethod/Polycrystal/material.config b/examples/SpectralMethod/Polycrystal/material.config index e47c2142c..44e8b8c1d 100644 --- a/examples/SpectralMethod/Polycrystal/material.config +++ b/examples/SpectralMethod/Polycrystal/material.config @@ -51,64 +51,44 @@ atol_resistance 1 #-------------------# [Grain01] -crystallite 1 (constituent) phase 1 texture 01 fraction 1.0 [Grain02] -crystallite 1 (constituent) phase 1 texture 02 fraction 1.0 [Grain03] -crystallite 1 (constituent) phase 1 texture 03 fraction 1.0 [Grain04] -crystallite 1 (constituent) phase 1 texture 04 fraction 1.0 [Grain05] -crystallite 1 (constituent) phase 1 texture 05 fraction 1.0 [Grain06] -crystallite 1 (constituent) phase 1 texture 06 fraction 1.0 [Grain07] -crystallite 1 (constituent) phase 1 texture 07 fraction 1.0 [Grain08] -crystallite 1 (constituent) phase 1 texture 08 fraction 1.0 [Grain09] -crystallite 1 (constituent) phase 1 texture 09 fraction 1.0 [Grain10] -crystallite 1 (constituent) phase 1 texture 10 fraction 1.0 [Grain11] -crystallite 1 (constituent) phase 1 texture 11 fraction 1.0 [Grain12] -crystallite 1 (constituent) phase 1 texture 12 fraction 1.0 [Grain13] -crystallite 1 (constituent) phase 1 texture 13 fraction 1.0 [Grain14] -crystallite 1 (constituent) phase 1 texture 14 fraction 1.0 [Grain15] -crystallite 1 (constituent) phase 1 texture 15 fraction 1.0 [Grain16] -crystallite 1 (constituent) phase 1 texture 16 fraction 1.0 [Grain17] -crystallite 1 (constituent) phase 1 texture 17 fraction 1.0 [Grain18] -crystallite 1 (constituent) phase 1 texture 18 fraction 1.0 [Grain19] -crystallite 1 (constituent) phase 1 texture 19 fraction 1.0 [Grain20] -crystallite 1 (constituent) phase 1 texture 20 fraction 1.0 @@ -116,42 +96,42 @@ crystallite 1 #-------------------# [Grain01] -(gauss) phi1 0.0 Phi 0.0 phi2 0.0 scatter 0.0 fraction 1.0 +(gauss) phi1 0.0 Phi 0.0 phi2 0.0 [Grain02] -(gauss) phi1 257.468172 Phi 53.250534 phi2 157.331503 scatter 0.0 fraction 1.0 +(gauss) phi1 257.468172 Phi 53.250534 phi2 157.331503 [Grain03] -(gauss) phi1 216.994815 Phi 94.418518 phi2 251.147231 scatter 0.0 fraction 1.0 +(gauss) phi1 216.994815 Phi 94.418518 phi2 251.147231 [Grain04] -(gauss) phi1 196.157946 Phi 55.870978 phi2 21.68117 scatter 0.0 fraction 1.0 +(gauss) phi1 196.157946 Phi 55.870978 phi2 21.68117 [Grain05] -(gauss) phi1 152.515728 Phi 139.769395 phi2 240.036018 scatter 0.0 fraction 1.0 +(gauss) phi1 152.515728 Phi 139.769395 phi2 240.036018 [Grain06] -(gauss) phi1 232.521881 Phi 73.749222 phi2 241.429633 scatter 0.0 fraction 1.0 +(gauss) phi1 232.521881 Phi 73.749222 phi2 241.429633 [Grain07] -(gauss) phi1 157.531396 Phi 135.503513 phi2 75.737722 
scatter 0.0 fraction 1.0 +(gauss) phi1 157.531396 Phi 135.503513 phi2 75.737722 [Grain08] -(gauss) phi1 321.03828 Phi 27.209843 phi2 46.413467 scatter 0.0 fraction 1.0 +(gauss) phi1 321.03828 Phi 27.209843 phi2 46.413467 [Grain09] -(gauss) phi1 346.918594 Phi 87.495569 phi2 113.554206 scatter 0.0 fraction 1.0 +(gauss) phi1 346.918594 Phi 87.495569 phi2 113.554206 [Grain10] -(gauss) phi1 138.038947 Phi 99.827132 phi2 130.935878 scatter 0.0 fraction 1.0 +(gauss) phi1 138.038947 Phi 99.827132 phi2 130.935878 [Grain11] -(gauss) phi1 285.021014 Phi 118.092004 phi2 205.270837 scatter 0.0 fraction 1.0 +(gauss) phi1 285.021014 Phi 118.092004 phi2 205.270837 [Grain12] -(gauss) phi1 190.402171 Phi 56.738068 phi2 157.896545 scatter 0.0 fraction 1.0 +(gauss) phi1 190.402171 Phi 56.738068 phi2 157.896545 [Grain13] -(gauss) phi1 204.496042 Phi 95.031265 phi2 355.814582 scatter 0.0 fraction 1.0 +(gauss) phi1 204.496042 Phi 95.031265 phi2 355.814582 [Grain14] -(gauss) phi1 333.21479 Phi 82.133355 phi2 36.736132 scatter 0.0 fraction 1.0 +(gauss) phi1 333.21479 Phi 82.133355 phi2 36.736132 [Grain15] -(gauss) phi1 25.572981 Phi 164.242648 phi2 75.195632 scatter 0.0 fraction 1.0 +(gauss) phi1 25.572981 Phi 164.242648 phi2 75.195632 [Grain16] -(gauss) phi1 31.366548 Phi 76.392403 phi2 58.071426 scatter 0.0 fraction 1.0 +(gauss) phi1 31.366548 Phi 76.392403 phi2 58.071426 [Grain17] -(gauss) phi1 7.278623 Phi 77.044663 phi2 235.118997 scatter 0.0 fraction 1.0 +(gauss) phi1 7.278623 Phi 77.044663 phi2 235.118997 [Grain18] -(gauss) phi1 299.743144 Phi 76.475096 phi2 91.184977 scatter 0.0 fraction 1.0 +(gauss) phi1 299.743144 Phi 76.475096 phi2 91.184977 [Grain19] -(gauss) phi1 280.13643 Phi 27.439718 phi2 167.871878 scatter 0.0 fraction 1.0 +(gauss) phi1 280.13643 Phi 27.439718 phi2 167.871878 [Grain20] -(gauss) phi1 313.204373 Phi 68.676053 phi2 87.993213 scatter 0.0 fraction 1.0 +(gauss) phi1 313.204373 Phi 68.676053 phi2 87.993213 diff --git a/installation/mods_MarcMentat/2018.1/Marc_tools/include_linux64 b/installation/mods_MarcMentat/2018.1/Marc_tools/include_linux64 index 10a796e47..8adabaff1 100644 --- a/installation/mods_MarcMentat/2018.1/Marc_tools/include_linux64 +++ b/installation/mods_MarcMentat/2018.1/Marc_tools/include_linux64 @@ -99,14 +99,9 @@ else fi # DAMASK uses the HDF5 compiler wrapper around the Intel compiler -if test "$DAMASK_HDF5" = "ON";then - H5FC="$(h5fc -shlib -show)" - HDF5_LIB=${H5FC//ifort/} - FCOMP="$H5FC -DDAMASK_HDF5" - echo $FCOMP -else - FCOMP=ifort -fi +H5FC="$(h5fc -shlib -show)" +HDF5_LIB=${H5FC//ifort/} +FCOMP="$H5FC -DDAMASK_HDF5" # AEM if test "$MARCDLLOUTDIR" = ""; then diff --git a/installation/mods_MarcMentat/2018/Marc_tools/include_linux64 b/installation/mods_MarcMentat/2018/Marc_tools/include_linux64 index 694dccee3..c99313a30 100644 --- a/installation/mods_MarcMentat/2018/Marc_tools/include_linux64 +++ b/installation/mods_MarcMentat/2018/Marc_tools/include_linux64 @@ -99,14 +99,9 @@ else fi # DAMASK uses the HDF5 compiler wrapper around the Intel compiler -if test "$DAMASK_HDF5" = "ON";then - H5FC="$(h5fc -shlib -show)" - HDF5_LIB=${H5FC//ifort/} - FCOMP="$H5FC -DDAMASK_HDF5" - echo $FCOMP -else - FCOMP=ifort -fi +H5FC="$(h5fc -shlib -show)" +HDF5_LIB=${H5FC//ifort/} +FCOMP="$H5FC -DDAMASK_HDF5" # AEM if test "$MARCDLLOUTDIR" = ""; then diff --git a/installation/mods_MarcMentat/2019/Marc_tools/include_linux64 b/installation/mods_MarcMentat/2019/Marc_tools/include_linux64 index 6d630bd1d..2dba03961 100644 --- 
a/installation/mods_MarcMentat/2019/Marc_tools/include_linux64 +++ b/installation/mods_MarcMentat/2019/Marc_tools/include_linux64 @@ -100,11 +100,9 @@ else fi # DAMASK uses the HDF5 compiler wrapper around the Intel compiler -if test "$DAMASK_HDF5" = "ON";then - H5FC="$(h5fc -shlib -show)" - HDF5_LIB=${H5FC//ifort/} - FCOMP="$H5FC -DDAMASK_HDF5" -fi +H5FC="$(h5fc -shlib -show)" +HDF5_LIB=${H5FC//ifort/} +FCOMP="$H5FC -DDAMASK_HDF5" # AEM if test "$MARCDLLOUTDIR" = ""; then diff --git a/installation/patch/README.md b/installation/patch/README.md index 0b8251510..95f377691 100644 --- a/installation/patch/README.md +++ b/installation/patch/README.md @@ -9,14 +9,6 @@ cd DAMASK_ROOT patch -p1 < installation/patch/nameOfPatch ``` -## Available patches - - * **disable_HDF5** disables all HDF5 output. - HDF5 output is an experimental feature. Also, some routines not present in HDF5 1.8.x are removed to allow compilation of DAMASK with HDF5 < 1.10.x - - * **disable_old_output** disables all non-HDF5 output. - Saves some memory when using only HDF5 output - ## Create patch commit your changes diff --git a/installation/patch/disable_HDF5 b/installation/patch/disable_HDF5 deleted file mode 100644 index bbba30c4a..000000000 --- a/installation/patch/disable_HDF5 +++ /dev/null @@ -1,57 +0,0 @@ -diff --git a/src/DAMASK_grid.f90 b/src/DAMASK_grid.f90 -index 496bfd0d..7b0f499c 100644 ---- a/src/DAMASK_grid.f90 -+++ b/src/DAMASK_grid.f90 -@@ -75,7 +75,6 @@ program DAMASK_spectral - use grid_mech_spectral_polarisation - use grid_damage_spectral - use grid_thermal_spectral -- use results - - implicit none - -@@ -153,8 +152,6 @@ program DAMASK_spectral - write(6,'(/,a)') ' Shanthraj et al., Handbook of Mechanics of Materials, 2019' - write(6,'(a)') ' https://doi.org/10.1007/978-981-10-6855-3_80' - -- call results_openJobFile() -- call results_closeJobFile() - !-------------------------------------------------------------------------------------------------- - ! initialize field solver information - nActiveFields = 1 -@@ -595,7 +592,6 @@ program DAMASK_spectral - if(ierr /=0_pInt) call IO_error(894_pInt, ext_msg='MPI_file_write') - enddo - fileOffset = fileOffset + sum(outputSize) ! forward to current file position -- call CPFEM_results(totalIncsCounter,time) - endif - if ( loadCases(currentLoadCase)%restartFrequency > 0_pInt & ! writing of restart info requested ... - .and. mod(inc,loadCases(currentLoadCase)%restartFrequency) == 0_pInt) then ! ... and at frequency of writing restart information -diff --git a/src/HDF5_utilities.f90 b/src/HDF5_utilities.f90 -index a81aaee0..3d3cdee3 100644 ---- a/src/HDF5_utilities.f90 -+++ b/src/HDF5_utilities.f90 -@@ -197,7 +197,6 @@ integer(HID_T) function HDF5_addGroup(fileHandle,groupName) - !------------------------------------------------------------------------------------------------- - ! setting I/O mode to collective - #ifdef PETSc -- call h5pset_all_coll_metadata_ops_f(aplist_id, .true., hdferr) - if (hdferr < 0) call IO_error(1_pInt,ext_msg = 'HDF5_addGroup: h5pset_all_coll_metadata_ops_f ('//trim(groupName)//')') - #endif - -@@ -232,7 +231,6 @@ integer(HID_T) function HDF5_openGroup(fileHandle,groupName) - !------------------------------------------------------------------------------------------------- - ! 
setting I/O mode to collective - #ifdef PETSc -- call h5pget_all_coll_metadata_ops_f(aplist_id, is_collective, hdferr) - if (hdferr < 0) call IO_error(1_pInt,ext_msg = 'HDF5_openGroup: h5pset_all_coll_metadata_ops_f ('//trim(groupName)//')') - #endif - -@@ -1646,7 +1644,6 @@ subroutine initialize_read(dset_id, filespace_id, memspace_id, plist_id, aplist_ - call h5pcreate_f(H5P_DATASET_ACCESS_F, aplist_id, hdferr) - if (hdferr < 0) call IO_error(1_pInt,ext_msg='initialize_read: h5pcreate_f') - #ifdef PETSc -- call h5pset_all_coll_metadata_ops_f(aplist_id, .true., hdferr) - if (hdferr < 0) call IO_error(1_pInt,ext_msg='initialize_read: h5pset_all_coll_metadata_ops_f') - #endif - diff --git a/installation/patch/disable_old_output b/installation/patch/disable_old_output deleted file mode 100644 index 732dfc83e..000000000 --- a/installation/patch/disable_old_output +++ /dev/null @@ -1,178 +0,0 @@ -From 6dbd904a4cfc28add3c39bb2a4ec9e2dbb2442b6 Mon Sep 17 00:00:00 2001 -From: Martin Diehl -Date: Thu, 18 Apr 2019 18:25:32 +0200 -Subject: [PATCH] to create patch - ---- - src/DAMASK_grid.f90 | 81 +----------------------------------------- - src/homogenization.f90 | 2 ++ - 2 files changed, 3 insertions(+), 80 deletions(-) - -diff --git a/src/DAMASK_grid.f90 b/src/DAMASK_grid.f90 -index f2f52bb2..a7543f4d 100644 ---- a/src/DAMASK_grid.f90 -+++ b/src/DAMASK_grid.f90 -@@ -18,7 +18,6 @@ program DAMASK_spectral - use DAMASK_interface, only: & - DAMASK_interface_init, & - loadCaseFile, & -- geometryFile, & - getSolverJobName, & - interface_restartInc - use IO, only: & -@@ -49,14 +48,9 @@ program DAMASK_spectral - restartInc - use numerics, only: & - worldrank, & -- worldsize, & - stagItMax, & - maxCutBack, & - continueCalculation -- use homogenization, only: & -- materialpoint_sizeResults, & -- materialpoint_results, & -- materialpoint_postResults - use material, only: & - thermal_type, & - damage_type, & -@@ -131,12 +125,6 @@ program DAMASK_spectral - type(tLoadCase), allocatable, dimension(:) :: loadCases !< array of all load cases - type(tLoadCase) :: newLoadCase - type(tSolutionState), allocatable, dimension(:) :: solres -- integer(MPI_OFFSET_KIND) :: fileOffset -- integer(MPI_OFFSET_KIND), dimension(:), allocatable :: outputSize -- integer(pInt), parameter :: maxByteOut = 2147483647-4096 !< limit of one file output write https://trac.mpich.org/projects/mpich/ticket/1742 -- integer(pInt), parameter :: maxRealOut = maxByteOut/pReal -- integer(pLongInt), dimension(2) :: outputIndex -- PetscErrorCode :: ierr - procedure(grid_mech_spectral_basic_init), pointer :: & - mech_init - procedure(grid_mech_spectral_basic_forward), pointer :: & -@@ -384,22 +372,6 @@ program DAMASK_spectral - ! write header of output file - if (worldrank == 0) then - writeHeader: if (interface_restartInc < 1_pInt) then -- open(newunit=fileUnit,file=trim(getSolverJobName())//& -- '.spectralOut',form='UNFORMATTED',status='REPLACE') -- write(fileUnit) 'load:', trim(loadCaseFile) ! ... and write header -- write(fileUnit) 'workingdir:', 'n/a' -- write(fileUnit) 'geometry:', trim(geometryFile) -- write(fileUnit) 'grid:', grid -- write(fileUnit) 'size:', geomSize -- write(fileUnit) 'materialpoint_sizeResults:', materialpoint_sizeResults -- write(fileUnit) 'loadcases:', size(loadCases) -- write(fileUnit) 'frequencies:', loadCases%outputfrequency ! one entry per LoadCase -- write(fileUnit) 'times:', loadCases%time ! one entry per LoadCase -- write(fileUnit) 'logscales:', loadCases%logscale -- write(fileUnit) 'increments:', loadCases%incs ! 
one entry per LoadCase -- write(fileUnit) 'startingIncrement:', restartInc ! start with writing out the previous inc -- write(fileUnit) 'eoh' -- close(fileUnit) ! end of header - open(newunit=statUnit,file=trim(getSolverJobName())//& - '.sta',form='FORMATTED',status='REPLACE') - write(statUnit,'(a)') 'Increment Time CutbackLevel Converged IterationsNeeded' ! statistics file -@@ -412,39 +384,6 @@ program DAMASK_spectral - endif writeHeader - endif - --!-------------------------------------------------------------------------------------------------- --! prepare MPI parallel out (including opening of file) -- allocate(outputSize(worldsize), source = 0_MPI_OFFSET_KIND) -- outputSize(worldrank+1) = size(materialpoint_results,kind=MPI_OFFSET_KIND)*int(pReal,MPI_OFFSET_KIND) -- call MPI_allreduce(MPI_IN_PLACE,outputSize,worldsize,MPI_LONG,MPI_SUM,PETSC_COMM_WORLD,ierr) ! get total output size over each process -- if (ierr /= 0_pInt) call IO_error(error_ID=894_pInt, ext_msg='MPI_allreduce') -- call MPI_file_open(PETSC_COMM_WORLD, trim(getSolverJobName())//'.spectralOut', & -- MPI_MODE_WRONLY + MPI_MODE_APPEND, & -- MPI_INFO_NULL, & -- fileUnit, & -- ierr) -- if (ierr /= 0_pInt) call IO_error(error_ID=894_pInt, ext_msg='MPI_file_open') -- call MPI_file_get_position(fileUnit,fileOffset,ierr) ! get offset from header -- if (ierr /= 0_pInt) call IO_error(error_ID=894_pInt, ext_msg='MPI_file_get_position') -- fileOffset = fileOffset + sum(outputSize(1:worldrank)) ! offset of my process in file (header + processes before me) -- call MPI_file_seek (fileUnit,fileOffset,MPI_SEEK_SET,ierr) -- if (ierr /= 0_pInt) call IO_error(error_ID=894_pInt, ext_msg='MPI_file_seek') -- -- writeUndeformed: if (interface_restartInc < 1_pInt) then -- write(6,'(1/,a)') ' ... writing initial configuration to file ........................' -- call CPFEM_results(0_pInt,0.0_pReal) -- do i = 1, size(materialpoint_results,3)/(maxByteOut/(materialpoint_sizeResults*pReal))+1 ! slice the output of my process in chunks not exceeding the limit for one output -- outputIndex = int([(i-1_pInt)*((maxRealOut)/materialpoint_sizeResults)+1_pInt, & ! QUESTION: why not starting i at 0 instead of murky 1? -- min(i*((maxRealOut)/materialpoint_sizeResults),size(materialpoint_results,3))],pLongInt) -- call MPI_file_write(fileUnit,reshape(materialpoint_results(:,:,outputIndex(1):outputIndex(2)), & -- [(outputIndex(2)-outputIndex(1)+1)*int(materialpoint_sizeResults,pLongInt)]), & -- int((outputIndex(2)-outputIndex(1)+1)*int(materialpoint_sizeResults,pLongInt)), & -- MPI_DOUBLE, MPI_STATUS_IGNORE, ierr) -- if (ierr /= 0_pInt) call IO_error(error_ID=894_pInt, ext_msg='MPI_file_write') -- enddo -- fileOffset = fileOffset + sum(outputSize) ! forward to current file position -- endif writeUndeformed -- - - loadCaseLooping: do currentLoadCase = 1_pInt, size(loadCases) - time0 = time ! load case start time -@@ -574,7 +513,6 @@ program DAMASK_spectral - write(6,'(/,a)') ' cutting back ' - else ! no more options to continue - call IO_warning(850_pInt) -- call MPI_file_close(fileUnit,ierr) - close(statUnit) - call quit(-1_pInt*(lastRestartWritten+1_pInt)) ! quit and provide information about last restart inc written - endif -@@ -593,24 +531,8 @@ program DAMASK_spectral - ' increment ', totalIncsCounter, ' NOT converged' - endif; flush(6) - -- if (mod(inc,loadCases(currentLoadCase)%outputFrequency) == 0_pInt) then ! at output frequency -- write(6,'(1/,a)') ' ... writing results to file ......................................' 
-- flush(6) -- call materialpoint_postResults() -- call MPI_file_seek (fileUnit,fileOffset,MPI_SEEK_SET,ierr) -- if (ierr /= 0_pInt) call IO_error(894_pInt, ext_msg='MPI_file_seek') -- do i=1, size(materialpoint_results,3)/(maxByteOut/(materialpoint_sizeResults*pReal))+1 ! slice the output of my process in chunks not exceeding the limit for one output -- outputIndex=int([(i-1_pInt)*((maxRealOut)/materialpoint_sizeResults)+1_pInt, & -- min(i*((maxRealOut)/materialpoint_sizeResults),size(materialpoint_results,3))],pLongInt) -- call MPI_file_write(fileUnit,reshape(materialpoint_results(:,:,outputIndex(1):outputIndex(2)),& -- [(outputIndex(2)-outputIndex(1)+1)*int(materialpoint_sizeResults,pLongInt)]), & -- int((outputIndex(2)-outputIndex(1)+1)*int(materialpoint_sizeResults,pLongInt)),& -- MPI_DOUBLE, MPI_STATUS_IGNORE, ierr) -- if(ierr /=0_pInt) call IO_error(894_pInt, ext_msg='MPI_file_write') -- enddo -- fileOffset = fileOffset + sum(outputSize) ! forward to current file position -+ if (mod(inc,loadCases(currentLoadCase)%outputFrequency) == 0_pInt) & ! at output frequency - call CPFEM_results(totalIncsCounter,time) -- endif - if ( loadCases(currentLoadCase)%restartFrequency > 0_pInt & ! writing of restart info requested ... - .and. mod(inc,loadCases(currentLoadCase)%restartFrequency) == 0_pInt) then ! ... and at frequency of writing restart information - restartWrite = .true. ! set restart parameter for FEsolving -@@ -633,7 +555,6 @@ program DAMASK_spectral - real(convergedCounter, pReal)/& - real(notConvergedCounter + convergedCounter,pReal)*100.0_pReal, ' %) increments converged!' - flush(6) -- call MPI_file_close(fileUnit,ierr) - close(statUnit) - - if (notConvergedCounter > 0_pInt) call quit(2_pInt) ! error if some are not converged -diff --git a/src/homogenization.f90 b/src/homogenization.f90 -index 06da6ab2..0743d545 100644 ---- a/src/homogenization.f90 -+++ b/src/homogenization.f90 -@@ -269,6 +269,7 @@ subroutine homogenization_init - + homogenization_maxNgrains * (1 + crystallite_maxSizePostResults & ! crystallite size & crystallite results - + 1 + constitutive_plasticity_maxSizePostResults & ! constitutive size & constitutive results - + constitutive_source_maxSizePostResults) -+ materialpoint_sizeResults = 0 - allocate(materialpoint_results(materialpoint_sizeResults,theMesh%elem%nIPs,theMesh%nElems)) - - write(6,'(/,a)') ' <<<+- homogenization init -+>>>' -@@ -682,6 +683,7 @@ subroutine materialpoint_postResults - i, & !< integration point number - e !< element number - -+ return - !$OMP PARALLEL DO PRIVATE(myNgrains,myCrystallite,thePos,theSize) - elementLooping: do e = FEsolving_execElem(1),FEsolving_execElem(2) - myNgrains = homogenization_Ngrains(mesh_element(3,e)) --- -2.21.0 - diff --git a/installation/patch/python2to3.sh b/installation/patch/python2to3.sh deleted file mode 100755 index 255e62781..000000000 --- a/installation/patch/python2to3.sh +++ /dev/null @@ -1,8 +0,0 @@ -#! /usr/bin/env bash -if [ $1x != 3to2x ]; then - echo 'python2.7 to python3' - find . -name '*.py' -type f | xargs sed -i 's/usr\/bin\/env python2.7/usr\/bin\/env python3/g' -else - echo 'python3 to python2.7' - find . 
-name '*.py' -type f | xargs sed -i 's/usr\/bin\/env python3/usr\/bin\/env python2.7/g' -fi diff --git a/processing/misc/ang_toTable.py b/processing/misc/ang_toTable.py index 19fdcd55b..5579f2466 100755 --- a/processing/misc/ang_toTable.py +++ b/processing/misc/ang_toTable.py @@ -1,8 +1,10 @@ -#!/usr/bin/env python2.7 -# -*- coding: UTF-8 no BOM -*- +#!/usr/bin/env python3 import os +import sys +from io import StringIO from optparse import OptionParser + import damask scriptName = os.path.splitext(os.path.basename(__file__))[0] @@ -19,47 +21,10 @@ Convert TSL/EDAX *.ang file to ASCIItable """, version = scriptID) (options, filenames) = parser.parse_args() - -# --- loop over input files ------------------------------------------------------------------------- - if filenames == []: filenames = [None] for name in filenames: - try: - table = damask.ASCIItable(name = name, - outname = os.path.splitext(name)[0]+'.txt' if name else name, - buffered = False, labeled = False) - except: continue - damask.util.report(scriptName,name) + damask.util.report(scriptName,name) -# --- interpret header ----------------------------------------------------------------------------- - - table.head_read() - -# --- read comments -------------------------------------------------------------------------------- - - table.info_clear() - while table.data_read(advance = False) and table.line.startswith('#'): # cautiously (non-progressing) read header - table.info_append(table.line) # add comment to info part - table.data_read() # wind forward - - table.labels_clear() - table.labels_append(['1_Euler','2_Euler','3_Euler', - '1_pos','2_pos', - 'IQ','CI','PhaseID','Intensity','Fit', - ], # OIM Analysis 7.2 Manual, p 403 (of 517) - reset = True) - -# ------------------------------------------ assemble header --------------------------------------- - - table.head_write() - -#--- write remainder of data file ------------------------------------------------------------------ - - outputAlive = True - while outputAlive and table.data_read(): - outputAlive = table.data_write() - -# ------------------------------------------ finalize output --------------------------------------- - - table.close() + table = damask.Table.from_ang(StringIO(''.join(sys.stdin.read())) if name is None else name) + table.to_ASCII(sys.stdout if name is None else os.path.splitext(name)[0]+'.txt') diff --git a/processing/misc/yieldSurface.py b/processing/misc/yieldSurface.py deleted file mode 100755 index 28f52062f..000000000 --- a/processing/misc/yieldSurface.py +++ /dev/null @@ -1,1431 +0,0 @@ -#!/usr/bin/env python2.7 -# -*- coding: UTF-8 no BOM -*- - -import threading,time,os -import numpy as np -from optparse import OptionParser -import damask -from damask.util import leastsqBound - -scriptName = os.path.splitext(os.path.basename(__file__))[0] -scriptID = ' '.join([scriptName,damask.version]) - -def runFit(exponent, eqStress, dimension, criterion): - global threads, myFit, myLoad - global fitResidual - global Guess, dDim - - dDim = dimension - 3 - nParas = len(fitCriteria[criterion]['bound'][dDim]) - nExpo = fitCriteria[criterion]['nExpo'] - - if exponent > 0.0: # User defined exponents - nParas = nParas-nExpo - fitCriteria[criterion]['bound'][dDim] = fitCriteria[criterion]['bound'][dDim][:nParas] - - for i in range(nParas): - temp = fitCriteria[criterion]['bound'][dDim][i] - if fitCriteria[criterion]['bound'][dDim][i] == (None,None): - Guess.append(1.0) - else: - g = (temp[0]+temp[1])/2.0 - if g == 0: g = temp[1]*0.5 - Guess.append(g) - - myLoad = 
Loadcase(options.load[0],options.load[1],options.load[2], - nSet = 10, dimension = dimension, vegter = options.criterion=='vegter') - - - myFit = Criterion(exponent,eqStress, dimension, criterion) - for t in range(options.threads): - threads.append(myThread(t)) - threads[t].start() - - for t in range(options.threads): - threads[t].join() - damask.util.croak('Residuals') - damask.util.croak(fitResidual) - -def principalStresses(sigmas): - """ - Computes principal stresses (i.e. eigenvalues) for a set of Cauchy stresses. - - sorted in descending order. - """ - lambdas=np.zeros(0,'d') - for i in range(np.shape(sigmas)[1]): - eigenvalues = np.linalg.eigvalsh(sym6toT33(sigmas[:,i])) - lambdas = np.append(lambdas,np.sort(eigenvalues)[::-1]) #append eigenvalues in descending order - lambdas = np.transpose(lambdas.reshape(np.shape(sigmas)[1],3)) - return lambdas - -def principalStress(p): - I = invariant(p) - - I1s3I2= (I[0]**2 - 3.0*I[1])**0.5 - numer = 2.0*I[0]**3 - 9.0*I[0]*I[1] + 27.0*I[2] - denom = 2.0*I1s3I2**3 - cs = numer/denom - - phi = np.arccos(cs)/3.0 - t1 = I[0]/3.0; t2 = 2.0/3.0*I1s3I2 - return np.array( [t1 + t2*np.cos(phi), - t1 + t2*np.cos(phi+np.pi*2.0/3.0), - t1 + t2*np.cos(phi+np.pi*4.0/3.0)]) - -def principalStrs_Der(p, s, dim, Karafillis=False): - """Derivative of principal stress with respect to stress""" - third = 1.0/3.0 - third2 = 2.0*third - - I = invariant(p) - I1s3I2= np.sqrt(I[0]**2 - 3.0*I[1]) - numer = 2.0*I[0]**3 - 9.0*I[0]*I[1] + 27.0*I[2] - denom = 2.0*I1s3I2**3 - cs = numer/denom - phi = np.arccos(cs)/3.0 - - dphidcs = -third/np.sqrt(1.0 - cs**2) - dcsddenom = 0.5*numer*(-1.5)*I1s3I2**(-5.0) - dcsdI1 = (6.0*I[0]**2 - 9.0*I[1])*denom + dcsddenom*(2.0*I[0]) - dcsdI2 = ( - 9.0*I[0])*denom + dcsddenom*(-3.0) - dcsdI3 = 27.0*denom - dphidI1, dphidI2, dphidI3 = dphidcs*dcsdI1, dphidcs*dcsdI2, dphidcs*dcsdI3 - - dI1s3I2dI1 = I[0]/I1s3I2 - dI1s3I2dI2 = -1.5/I1s3I2 - tcoeff = third2*I1s3I2 - - dSidIj = lambda theta : ( tcoeff*(-np.sin(theta))*dphidI1 + third2*dI1s3I2dI1*np.cos(theta) + third, - tcoeff*(-np.sin(theta))*dphidI2 + third2*dI1s3I2dI2*np.cos(theta), - tcoeff*(-np.sin(theta))*dphidI3) - dSdI = np.array([dSidIj(phi),dSidIj(phi+np.pi*2.0/3.0),dSidIj(phi+np.pi*4.0/3.0)]) # i=1,2,3; j=1,2,3 - -# calculate the derivation of principal stress with regards to the anisotropic coefficients - one = np.ones_like(s); zero = np.zeros_like(s); num = len(s) - dIdp = np.array([[one, one, one, zero, zero, zero], - [p[1]+p[2], p[2]+p[0], p[0]+p[1], -2.0*p[3], -2.0*p[4], -2.0*p[5]], - [p[1]*p[2]-p[4]**2, p[2]*p[0]-p[5]**2, p[0]*p[1]-p[3]**2, - -2.0*p[3]*p[2]+2.0*p[4]*p[5], -2.0*p[4]*p[0]+2.0*p[5]*p[3], -2.0*p[5]*p[1]+2.0*p[3]*p[4]] ]) - if Karafillis: - dpdc = np.array([[zero,s[0]-s[2],s[0]-s[1]], [s[1]-s[2],zero,s[1]-s[0]], [s[2]-s[1],s[2]-s[0],zero]])/3.0 - dSdp = np.array([np.dot(dSdI[:,:,i],dIdp[:,:,i]).T for i in range(num)]).T - if dim == 2: - temp = np.vstack([dSdp[:,3]*s[3]]).T.reshape(num,1,3).T - else: - temp = np.vstack([dSdp[:,3]*s[3],dSdp[:,4]*s[4],dSdp[:,5]*s[5]]).T.reshape(num,3,3).T - - return np.concatenate((np.array([np.dot(dSdp[:,0:3,i], dpdc[:,:,i]).T for i in range(num)]).T, - temp), axis=1) - else: - if dim == 2: - dIdc=np.array([[-dIdp[i,0]*s[1], -dIdp[i,1]*s[0], -dIdp[i,1]*s[2], - -dIdp[i,2]*s[1], -dIdp[i,2]*s[0], -dIdp[i,0]*s[2], - dIdp[i,3]*s[3] ] for i in range(3)]) - else: - dIdc=np.array([[-dIdp[i,0]*s[1], -dIdp[i,1]*s[0], -dIdp[i,1]*s[2], - -dIdp[i,2]*s[1], -dIdp[i,2]*s[0], -dIdp[i,0]*s[2], - dIdp[i,3]*s[3], dIdp[i,4]*s[4], dIdp[i,5]*s[5] ] for i in 
range(3)]) - return np.array([np.dot(dSdI[:,:,i],dIdc[:,:,i]).T for i in range(num)]).T - -def invariant(sigmas): - I = np.zeros(3) - s11,s22,s33,s12,s23,s31 = sigmas - I[0] = s11 + s22 + s33 - I[1] = s11*s22 + s22*s33 + s33*s11 - s12**2 - s23**2 - s31**2 - I[2] = s11*s22*s33 + 2.0*s12*s23*s31 - s12**2*s33 - s23**2*s11 - s31**2*s22 - return I - -def math_ln(x): - return np.log(x + 1.0e-32) - -def sym6toT33(sym6): - """Shape the symmetric stress tensor(6) into (3,3)""" - return np.array([[sym6[0],sym6[3],sym6[5]], - [sym6[3],sym6[1],sym6[4]], - [sym6[5],sym6[4],sym6[2]]]) - -def t33toSym6(t33): - """Shape the stress tensor(3,3) into symmetric (6)""" - return np.array([ t33[0,0], - t33[1,1], - t33[2,2], - (t33[0,1] + t33[1,0])/2.0, # 0 3 5 - (t33[1,2] + t33[2,1])/2.0, # * 1 4 - (t33[2,0] + t33[0,2])/2.0,]) # * * 2 - -class Criteria(object): - def __init__(self, criterion, uniaxialStress,exponent, dimension): - self.stress0 = uniaxialStress - if exponent < 0.0: # Fitting exponent m - self.mFix = [False, exponent] - else: # fixed exponent m - self.mFix = [True, exponent] - self.func = fitCriteria[criterion]['func'] - self.criteria = criterion - self.dim = dimension - def fun(self, paras, ydata, sigmas): - return self.func(self.stress0, paras, sigmas,self.mFix,self.criteria,self.dim) - def jac(self, paras, ydata, sigmas): - return self.func(self.stress0, paras, sigmas,self.mFix,self.criteria,self.dim,Jac=True) - -class Vegter(object): - """Vegter yield criterion""" - - def __init__(self, refPts, refNormals,nspace=11): - self.refPts, self.refNormals = self._getRefPointsNormals(refPts, refNormals) - self.hingePts = self._getHingePoints() - self.nspace = nspace - def _getRefPointsNormals(self,refPtsQtr,refNormalsQtr): - if len(refPtsQtr) == 12: - refPts = refPtsQtr - refNormals = refNormalsQtr - else: - refPts = np.empty([13,2]) - refNormals = np.empty([13,2]) - refPts[12] = refPtsQtr[0] - refNormals[12] = refNormalsQtr[0] - for i in range(3): - refPts[i] = refPtsQtr[i] - refPts[i+3] = refPtsQtr[3-i][::-1] - refPts[i+6] =-refPtsQtr[i] - refPts[i+9] =-refPtsQtr[3-i][::-1] - refNormals[i] = refNormalsQtr[i] - refNormals[i+3] = refNormalsQtr[3-i][::-1] - refNormals[i+6] =-refNormalsQtr[i] - refNormals[i+9] =-refNormalsQtr[3-i][::-1] - return refPts,refNormals - - def _getHingePoints(self): - """ - Calculate the hinge point B according to the reference points A,C and the normals n,m - - refPoints = np.array([[p1_x, p1_y], [p2_x, p2_y]]); - refNormals = np.array([[n1_x, n1_y], [n2_x, n2_y]]) - """ - def hingPoint(points, normals): - A1 = points[0][0]; A2 = points[0][1] - C1 = points[1][0]; C2 = points[1][1] - n1 = normals[0][0]; n2 = normals[0][1] - m1 = normals[1][0]; m2 = normals[1][1] - B1 = (m2*(n1*A1 + n2*A2) - n2*(m1*C1 + m2*C2))/(n1*m2-m1*n2) - B2 = (n1*(m1*C1 + m2*C2) - m1*(n1*A1 + n2*A2))/(n1*m2-m1*n2) - return np.array([B1,B2]) - return np.array([hingPoint(self.refPts[i:i+2],self.refNormals[i:i+2]) for i in range(len(self.refPts)-1)]) - - def getBezier(self): - def bezier(R,H): - b = [] - for mu in np.linspace(0.0,1.0,self.nspace): - b.append(np.array(R[0]*np.ones_like(mu) + 2.0*mu*(H - R[0]) + mu**2*(R[0]+R[1] - 2.0*H))) - return b - return np.array([bezier(self.refPts[i:i+2],self.hingePts[i]) for i in range(len(self.refPts)-1)]) - -def VetgerCriterion(stress,lankford, rhoBi0, theta=0.0): - """0-pure shear; 1-uniaxial; 2-plane strain; 3-equi-biaxial""" - def getFourierParas(r): - # get the value after Fourier transformation - nset = len(r) - lmatrix = np.empty([nset,nset]) - theta = 
np.linspace(0.0,np.pi/2,nset) - for i,th in enumerate(theta): - lmatrix[i] = np.array([np.cos(2*j*th) for j in range(nset)]) - return np.linalg.solve(lmatrix, r) - - nps = len(stress) - if nps%4 != 0: - damask.util.croak('Warning: the number of stress points is uncorrect, stress points of %s are missing in set %i'%( - ['eq-biaxial, plane strain & uniaxial', 'eq-biaxial & plane strain','eq-biaxial'][nps%4-1],nps/4+1)) - else: - nset = nps/4 - strsSet = stress.reshape(nset,4,2) - refPts = np.empty([4,2]) - - fouriercoeffs = np.array([np.cos(2.0*i*theta) for i in range(nset)]) - for i in range(2): - refPts[3,i] = sum(strsSet[:,3,i])/nset - for j in range(3): - refPts[j,i] = np.dot(getFourierParas(strsSet[:,j,i]), fouriercoeffs) - - -def Tresca(eqStress=None, #not needed/supported - paras=None, - sigmas=None, - mFix=None, #not needed/supported - criteria=None, #not needed/supported - dim=3, - Jac=False): - """ - Tresca yield criterion - - the fitted parameter is paras(sigma0) - """ - if not Jac: - lambdas = principalStresses(sigmas) - r = np.amax(np.array([abs(lambdas[2,:]-lambdas[1,:]),\ - abs(lambdas[1,:]-lambdas[0,:]),\ - abs(lambdas[0,:]-lambdas[2,:])]),0) - paras - return r.ravel() - else: - return -np.ones(len(sigmas)) - -def Cazacu_Barlat(eqStress=None, - paras=None, - sigmas=None, - mFix=None,#not needed/supported - criteria=None, - dim=3, #2D also possible - Jac=False): - """ - Cazacu-Barlat (CB) yield criterion - - the fitted parameters are: - a1,a2,a3,a6; b1,b2,b3,b4,b5,b10; c for plane stress - a1,a2,a3,a4,a5,a6; b1,b2,b3,b4,b5,b6,b7,b8,b9,b10,b11; c: for general case - mFix is ignored - """ - s11,s22,s33,s12,s23,s31 = sigmas - if dim == 2: - (a1,a2,a3,a4), (b1,b2,b3,b4,b5,b10), c = paras[0:4],paras[4:10],paras[10] - a5 = a6 = b6 = b7 = b8 = b9 = b11 = 0.0 - s33 = s23 = s31 = np.zeros_like(s11) - else: - (a1,a2,a3,a4,a5,a6), (b1,b2,b3,b4,b5,b6,b7,b8,b9,b10,b11), c = paras[0:6],paras[6:17],paras[17] - - s1_2, s2_2, s3_2, s12_2, s23_2, s31_2 = np.array([s11,s22,s33,s12,s23,s31])**2 - s1_3, s2_3, s3_3, s123, s321 = s11*s1_2, s22*s2_2, s33*s3_2,s11*s22*s33, s12*s23*s31 - d12_2,d23_2,d31_2 = (s11-s22)**2, (s22-s33)**2, (s33-s11)**2 - - J20 = ( a1*d12_2 + a2*d23_2 + a3*d31_2 )/6.0 + a4*s12_2 + a5*s23_2 + a6*s31_2 - J30 = ( (b1 +b2 )*s1_3 + (b3 +b4 )*s2_3 + ( b1+b4-b2 + b1+b4-b3 )*s3_3 )/27.0- \ - ( (b1*s22+b2*s33)*s1_2 + (b3*s33+b4*s11)*s2_2 + ((b1+b4-b2)*s11 + (b1+b4-b3)*s22)*s3_2 )/9.0 + \ - ( (b1+b4)*s123/9.0 + b11*s321 )*2.0 - \ - ( ( 2.0*b9 *s22 - b8*s33 - (2.0*b9 -b8)*s11 )*s31_2 + - ( 2.0*b10*s33 - b5*s22 - (2.0*b10-b5)*s11 )*s12_2 + - ( (b6+b7)*s11 - b6*s22 - b7*s33 )*s23_2 - )/3.0 - f0 = J20**3 - c*J30**2 - r = f0**(1.0/6.0)*np.sqrt(3.0)/eqStress - - if not Jac: - return (r - 1.0).ravel() - else: - drdf = r/f0/6.0 - dj2, dj3 = drdf*3.0*J20**2, -drdf*2.0*J30*c - jc = -drdf*J30**2 - - ja1,ja2,ja3 = dj2*d12_2/6.0, dj2*d23_2/6.0, dj2*d31_2/6.0 - ja4,ja5,ja6 = dj2*s12_2, dj2*s23_2, dj2*s31_2 - jb1 = dj3*( (s1_3 + 2.0*s3_3)/27.0 - s22*s1_2/9.0 - (s11+s22)*s3_2/9.0 + s123/4.5 ) - jb2 = dj3*( (s1_3 - s3_3)/27.0 - s33*s1_2/9.0 + s11 *s3_2/9.0 ) - jb3 = dj3*( (s2_3 - s3_3)/27.0 - s33*s2_2/9.0 + s22 *s3_2/9.0 ) - jb4 = dj3*( (s2_3 + 2.0*s3_3)/27.0 - s11*s2_2/9.0 - (s11+s22)*s3_2/9.0 + s123/4.5 ) - - jb5, jb10 = dj3*(s22 - s11)*s12_2/3.0, dj3*(s11 - s33)*s12_2/1.5 - jb6, jb7 = dj3*(s22 - s11)*s23_2/3.0, dj3*(s33 - s11)*s23_2/3.0 - jb8, jb9 = dj3*(s33 - s11)*s31_2/3.0, dj3*(s11 - s22)*s31_2/1.5 - jb11 = dj3*s321*2.0 - if dim == 2: - return 
np.vstack((ja1,ja2,ja3,ja4,jb1,jb2,jb3,jb4,jb5,jb10,jc)).T - else: - return np.vstack((ja1,ja2,ja3,ja4,ja5,ja6,jb1,jb2,jb3,jb4,jb5,jb6,jb7,jb8,jb9,jb10,jb11,jc)).T - -def Drucker(eqStress=None,#not needed/supported - paras=None, - sigmas=None, - mFix=None, #not needed/supported - criteria=None, - dim=3, - Jac=False): - """ - Drucker yield criterion - - the fitted parameters are - sigma0, C_D for Drucker(p=1); - sigma0, C_D, p for general Drucker - eqStress, mFix are invalid inputs - """ - if criteria == 'drucker': - sigma0, C_D= paras - p = 1.0 - else: - sigma0, C_D = paras[0:2] - if mFix[0]: p = mFix[1] - else: p = paras[-1] - I = invariant(sigmas) - J = np.zeros([3]) - J[1] = I[0]**2/3.0 - I[1] - J[2] = I[0]**3/13.5 - I[0]*I[1]/3.0 + I[2] - J2_3p = J[1]**(3.0*p) - J3_2p = J[2]**(2.0*p) - left = J2_3p - C_D*J3_2p - r = left**(1.0/(6.0*p))*3.0**0.5/sigma0 - - if not Jac: - return (r - 1.0).ravel() - else: - drdl = r/left/(6.0*p) - if criteria == 'drucker': - return np.vstack((-r/sigma0, -drdl*J3_2p)).T - else: - dldp = 3.0*J2_3p*math_ln(J[1]) - 2.0*C_D*J3_2p*math_ln(J[2]) - jp = drdl*dldp + r*math_ln(left)/(-6.0*p*p) - - if mFix[0]: return np.vstack((-r/sigma0, -drdl*J3_2p)).T - else: return np.vstack((-r/sigma0, -drdl*J3_2p, jp)).T - -def Hill1948(eqStress=None,#not needed/supported - paras=None, - sigmas=None, - mFix=None, #not needed/supported - criteria=None,#not needed/supported - dim=3, - Jac=False): - """ - Hill 1948 yield criterion - - the fitted parameters are: - F, G, H, L, M, N for 3D - F, G, H, N for 2D - """ - s11,s22,s33,s12,s23,s31 = sigmas - if dim == 2: # plane stress - jac = np.array([ s22**2, s11**2, (s11-s22)**2, 2.0*s12**2]) - else: # general case - jac = np.array([(s22-s33)**2,(s33-s11)**2,(s11-s22)**2, 2.0*s23**2,2.0*s31**2,2.0*s12**2]) - - if not Jac: - return (np.dot(paras,jac)/2.0-0.5).ravel() - else: - return jac.T - -def Hill1979(eqStress=None,#not needed/supported - paras=None, - sigmas=None, - mFix=None, - criteria=None,#not needed/supported - dim=3, - Jac=False): - """ - Hill 1979 yield criterion - - the fitted parameters are: f,g,h,a,b,c,m - """ - if mFix[0]: - m = mFix[1] - else: - m = paras[-1] - - coeff = paras[0:6] - s = principalStresses(sigmas) - diffs = np.array([s[1]-s[2], s[2]-s[0], s[0]-s[1],\ - 2.0*s[0]-s[1]-s[2], 2.0*s[1]-s[2]-s[0], 2.0*s[2]-s[0]-s[1]])**2 - - diffsm = diffs**(m/2.0) - left = np.dot(coeff,diffsm) - r = (0.5*left)**(1.0/m)/eqStress #left = base**mi - - if not Jac: - return (r-1.0).ravel() - else: - drdl, dldm = r/left/m, np.dot(coeff,diffsm*math_ln(diffs))*0.5 - jm = drdl*dldm + r*math_ln(0.5*left)*(-1.0/m/m) #/(-m**2) - - if mFix[0]: return np.vstack((drdl*diffsm)).T - else: return np.vstack((drdl*diffsm, jm)).T - -def Hosford(eqStress=None, - paras=None, - sigmas=None, - mFix=None, - criteria=None, - dim=3, - Jac=False): - """ - Hosford family criteria - - the fitted parameters are: - von Mises: sigma0 - Hershey: (1) sigma0, a, when a is not fixed; (2) sigma0, when a is fixed - general Hosford: (1) F,G,H, a, when a is not fixed; (2) F,G,H, when a is fixed - """ - if criteria == 'vonmises': - sigma0 = paras - coeff = np.ones(3) - a = 2.0 - elif criteria == 'hershey': - sigma0 = paras[0] - coeff = np.ones(3) - if mFix[0]: a = mFix[1] - else: a = paras[1] - else: - sigma0 = eqStress - coeff = paras[0:3] - if mFix[0]: a = mFix[1] - else: a = paras[3] - - s = principalStresses(sigmas) - diffs = np.array([s[1]-s[2], s[2]-s[0], s[0]-s[1]])**2 - diffsm = diffs**(a/2.0) - left = np.dot(coeff,diffsm) - r = (0.5*left)**(1.0/a)/sigma0 - - 
if not Jac: - return (r-1.0).ravel() - else: - if criteria == 'vonmises': # von Mises - return -r/sigma0 - else: - drdl, dlda = r/left/a, np.dot(coeff,diffsm*math_ln(diffs))*0.5 - ja = drdl*dlda + r*math_ln(0.5*left)*(-1.0/a/a) - if criteria == 'hershey': # Hershey - if mFix[0]: return -r/sigma0 - else: return np.vstack((-r/sigma0, ja)).T - else: # Anisotropic Hosford - if mFix[0]: return np.vstack((drdl*diffsm)).T - else: return np.vstack((drdl*diffsm, ja)).T - -def Barlat1989(eqStress=None, - paras=None, - sigmas=None, - mFix=None, - criteria=None, - dim=3, - Jac=False): - """ - Barlat-Lian 1989 yield criteria - - the fitted parameters are: - Anisotropic: a, h, p, m; m is optional - """ - a, h, p = paras[0:3] - if mFix[0]: m = mFix[1] - else: m = paras[-1] - - c = 2.0-a - s11,s22,s12 = sigmas[0], sigmas[1], sigmas[3] - k1,k2 = 0.5*(s11 + h*s22), (0.25*(s11 - h*s22)**2 + (p*s12)**2)**0.5 - fs = np.array([ (k1+k2)**2, (k1-k2)**2, 4.0*k2**2 ]); fm = fs**(m/2.0) - left = np.dot(np.array([a,a,c]),fm) - r = (0.5*left)**(1.0/m)/eqStress - - if not Jac: - return (r-1.0).ravel() - else: - dk1dh = 0.5*s22 - dk2dh, dk2dp = 0.25*(s11-h*s22)*(-s22)/k2, p*s12**2/k2 - dlda, dldc = fm[0]+fm[1], fm[2] - fm1 = fs**(m/2.0-1.0)*m - dldk1, dldk2 = a*fm1[0]*(k1+k2)+a*fm1[1]*(k1-k2), a*fm1[0]*(k1+k2)-a*fm1[1]*(k1-k2)+c*fm1[2]*k2*4.0 - drdl, drdm = r/m/left, r*math_ln(0.5*left)*(-1.0/m/m) - dldm = np.dot(np.array([a,a,c]),fm*math_ln(fs))*0.5 - - ja,jc = drdl*dlda, drdl*dldc - jh,jp = drdl*(dldk1*dk1dh + dldk2*dk2dh), drdl*dldk2*dk2dp - jm = drdl*dldm + drdm - - if mFix[0]: return np.vstack((ja,jc,jh,jp)).T - else: return np.vstack((ja,jc,jh,jp,jm)).T - -def Barlat1991(eqStress, paras, sigmas, mFix, criteria, dim, Jac=False): - """ - Barlat 1991 criteria - - the fitted parameters are: - Anisotropic: a, b, c, f, g, h, m for 3D - a, b, c, h, m for plane stress - m is optional - """ - if dim == 2: coeff = paras[0:4] # plane stress - else: coeff = paras[0:6] # general case - if mFix[0]: m = mFix[1] - else: m = paras[-1] - - s11,s22,s33,s12,s23,s31 = sigmas - if dim == 2: - dXdx = np.array([s22,-s11,s11-s22,s12]) - A,B,C,H = np.array(coeff)[:,None]*dXdx; F=G=0.0 - else: - dXdx = np.array([s22-s33,s33-s11,s11-s22,s23,s31,s12]) - A,B,C,F,G,H = np.array(coeff)[:,None]*dXdx - - I2 = (F*F + G*G + H*H)/3.0+ ((A-C)**2+(C-B)**2+(B-A)**2)/54.0 - I3 = (C-B)*(A-C)*(B-A)/54.0 + F*G*H - ((C-B)*F*F + (A-C)*G*G + (B-A)*H*H)/6.0 - phi1 = np.arccos(I3/I2**1.5)/3.0 + np.pi/6.0; absc1 = 2.0*np.abs(np.cos(phi1)) - phi2 = phi1 + np.pi/3.0; absc2 = 2.0*np.abs(np.cos(phi2)) - phi3 = phi2 + np.pi/3.0; absc3 = 2.0*np.abs(np.cos(phi3)) - left = ( absc1**m + absc2**m + absc3**m ) - r = (0.5*left)**(1.0/m)*np.sqrt(3.0*I2)/eqStress - - if not Jac: - return (r - 1.0).ravel() - else: - dfdl = r/left/m - jm = r*math_ln(0.5*left)*(-1.0/m/m) + dfdl*0.5*( - absc1**m*math_ln(absc1) + absc2**m*math_ln(absc2) + absc3**m*math_ln(absc3) ) - - da,db,dc = (2.0*A-B-C)/18.0, (2.0*B-C-A)/18.0, (2.0*C-A-B)/18.0 - if dim == 2: - dI2dx = np.array([da, db, dc, H])/1.5*dXdx - dI3dx = np.array([ da*(B-C) + (H**2-G**2)/2.0, - db*(C-A) + (F**2-H**2)/2.0, - dc*(A-B) + (G**2-F**2)/2.0, - (G*F + (A-B))*H ])/3.0*dXdx - else: - dI2dx = np.array([da, db, dc, F,G,H])/1.5*dXdx - dI3dx = np.array([ da*(B-C) + (H**2-G**2)/2.0, - db*(C-A) + (F**2-H**2)/2.0, - dc*(A-B) + (G**2-F**2)/2.0, - (H*G*3.0 + (B-C))*F, - (F*H*3.0 + (C-A))*G, - (G*F*3.0 + (A-B))*H ])/3.0*dXdx - darccos = -1.0/np.sqrt(1.0 - I3**2/I2**3) - - dfdcos = lambda phi : 
dfdl*m*(2.0*abs(np.cos(phi)))**(m-1.0)*np.sign(np.cos(phi))*(-np.sin(phi)/1.5) - - dfdthe= (dfdcos(phi1) + dfdcos(phi2) + dfdcos(phi3)) - dfdI2, dfdI3 = dfdthe*darccos*I3*(-1.5)*I2**(-2.5)+r/2.0/I2, dfdthe*darccos*I2**(-1.5) - - if mFix[0]: return np.vstack((dfdI2*dI2dx + dfdI3*dI3dx)).T - else: return np.vstack((dfdI2*dI2dx + dfdI3*dI3dx, jm)).T - -def BBC2000(eqStress, paras, sigmas, mFix, criteria, dim, Jac=False): - """ - BBC2000 yield criterion - - the fitted parameters are - d,e,f,g, b,c,a, k; k is optional - criteria are invalid input - """ - d,e,f,g, b,c,a= paras[0:7] - if mFix[0]: k = mFix[1] - else: k = paras[-1] - - s11,s22,s12 = sigmas[0], sigmas[1], sigmas[3] - k2 = 2.0*k; k1 = k - 1.0 - M,N,P,Q,R = d+e, e+f, (d-e)/2.0, (e-f)/2.0, g**2 - Gamma = M*s11 + N*s22 - Psi = ( (P*s11 + Q*s22)**2 + s12**2*R )**0.5 - - l1, l2, l3 = b*Gamma + c*Psi, b*Gamma - c*Psi, 2.0*c*Psi - l1s,l2s,l3s = l1**2, l2**2, l3**2 - - left = a*l1s**k + a*l2s**k + (1-a)*l3s**k - r = left**(1.0/k2)/eqStress - if not Jac: - return (r - 1.0).ravel() - else: - drdl,drdk = r/left/k2, r*math_ln(left)*(-1.0/k2/k) - dldl1,dldl2,dldl3 = a*k2*(l1s**k1)*l1, a*k2*(l2s**k1)*l2, (1-a)*k2*(l3s**k1)*l3 - dldGama, dldPsi = (dldl1 + dldl2)*b, (dldl1 - dldl2 + 2.0*dldl3)*c - temp = (P*s11 + Q*s22)/Psi - dPsidP, dPsidQ, dPsidR = temp*s11, temp*s22, 0.5*s12**2/Psi - dlda = l1s**k + l2s**k - l3s**k - dldb = dldl1*Gamma + dldl2*Gamma - dldc = dldl1*Psi - dldl2*Psi + dldl3*2.0*Psi - dldk = a*math_ln(l1s)*l1s**k + a*math_ln(l2s)*l2s**k + (1-a)*math_ln(l3s)*l3s**k - - J = drdl*np.array([dldGama*s11+dldPsi*dPsidP*0.5, dldGama*(s11+s22)+dldPsi*(-dPsidP+dPsidQ)*0.5, #jd,je - dldGama*s22-dldPsi*dPsidQ*0.5, dldPsi*dPsidR*2.0*g, #jf,jg - dldb, dldc, dlda]) #jb,jc,ja - if mFix[0]: return np.vstack(J).T - else: return np.vstack((J, drdl*dldk + drdk)).T - - -def BBC2003(eqStress, paras, sigmas, mFix, criteria, dim, Jac=False): - """ - BBC2003 yield criterion - - the fitted parameters are - M,N,P,Q,R,S,T,a, k; k is optional - criteria are invalid input - """ - M,N,P,Q,R,S,T,a = paras[0:8] - if mFix[0]: k = mFix[1] - else: k = paras[-1] - - s11,s22,s12 = sigmas[0], sigmas[1], sigmas[3] - k2 = 2.0*k; k1 = k - 1.0 - Gamma = 0.5 * (s11 + M*s22) - Psi = ( 0.25*(N*s11 - P*s22)**2 + Q*Q*s12**2 )**0.5 - Lambda = ( 0.25*(R*s11 - S*s22)**2 + T*T*s12**2 )**0.5 - - l1, l2, l3 = Gamma + Psi, Gamma - Psi, 2.0*Lambda - l1s,l2s,l3s = l1**2, l2**2, l3**2 - left = a*l1s**k + a*l2s**k + (1-a)*l3s**k - r = left**(1.0/k2)/eqStress - if not Jac: - return (r - 1.0).ravel() - else: - drdl,drdk = r/left/k2, r*math_ln(left)*(-1.0/k2/k) - dldl1,dldl2,dldl3 = a*k2*(l1s**k1)*l1, a*k2*(l2s**k1)*l2, (1-a)*k2*(l3s**k1)*l3 - - dldGamma, dldPsi, dldLambda = dldl1+dldl2, dldl1-dldl2, 2.0*dldl3 - temp = 0.25/Psi*(N*s11 - P*s22) - dPsidN, dPsidP, dPsidQ = s11*temp, -s22*temp, Q*s12**2/Psi - temp = 0.25/Lambda*(R*s11 - S*s22) - dLambdadR, dLambdadS, dLambdadT = s11*temp, -s22*temp, T*s12**2/Psi - dldk = a*math_ln(l1s)*l1s**k + a*math_ln(l2s)*l2s**k + (1-a)*math_ln(l3s)*l3s**k - - J = drdl * np.array([dldGamma*s22*0.5, #jM - dldPsi*dPsidN, dldPsi*dPsidP, dldPsi*dPsidQ, #jN, jP, jQ - dldLambda*dLambdadR, dldLambda*dLambdadS, dldLambda*dLambdadT, #jR, jS, jT - l1s**k + l2s**k - l3s**k ]) #ja - - if mFix[0]: return np.vstack(J).T - else : return np.vstack((J, drdl*dldk+drdk)).T - -def BBC2005(eqStress, paras, sigmas, mFix, criteria, dim, Jac=False): - """ - BBC2005 yield criterion - - the fitted parameters are - a, b, L ,M, N, P, Q, R, k k are optional - criteria is invalid input - 
""" - a,b,L, M, N, P, Q, R = paras[0:8] - if mFix[0]: k = mFix[1] - else: k = paras[-1] - - s11 = sigmas[0]; s22 = sigmas[1]; s12 = sigmas[3] - k2 = 2.0*k - Gamma = L*s11 + M*s22 - Lambda = ( (N*s11 - P*s22)**2 + s12**2 )**0.5 - Psi = ( (Q*s11 - R*s22)**2 + s12**2 )**0.5 - - l1 = Lambda + Gamma; l2 = Lambda - Gamma; l3 = Lambda + Psi; l4 = Lambda - Psi - l1s = l1**2; l2s = l2**2; l3s = l3**2; l4s = l4**2 - left = a*l1s**k + a*l2s**k + b*l3s**k + b*l4s**k - sBar = left**(1.0/k2); r = sBar/eqStress - 1.0 - if not Jac: - return r.ravel() - else: - ln = lambda x : np.log(x + 1.0e-32) - expo = 0.5/k; k1 = k-1.0 - - dsBardl = expo*sBar/left/eqStress - dsBarde = sBar*ln(left); dedk = expo/(-k) - dldl1 = a*k*(l1s**k1)*(2.0*l1) - dldl2 = a*k*(l2s**k1)*(2.0*l2) - dldl3 = b*k*(l3s**k1)*(2.0*l3) - dldl4 = b*k*(l4s**k1)*(2.0*l4) - - dldLambda = dldl1 + dldl2 + dldl3 + dldl4 - dldGama = dldl1 - dldl2 - dldPsi = dldl3 - dldl4 - temp = (N*s11 - P*s22)/Lambda - dLambdadN = s11*temp; dLambdadP = -s22*temp - temp = (Q*s11 - R*s22)/Psi - dPsidQ = s11*temp; dPsidR = -s22*temp - dldk = a*ln(l1s)*l1s**k + a*ln(l2s)*l2s**k + b*ln(l3s)*l3s**k + b*ln(l4s)*l4s**k - - J = dsBardl * np.array( [ - l1s**k+l2s**k, l3s**k+l4s**k,dldGama*s11,dldGama*s22,dldLambda*dLambdadN, - dldLambda*dLambdadP, dldPsi*dPsidQ, dldPsi*dPsidR]) - - if mFix[0]: return np.vstack(J).T - else : return np.vstack(J, dldk+dsBarde*dedk).T - -def Yld2000(eqStress, paras, sigmas, mFix, criteria, dim, Jac=False): - """ - Yld2000 yield criterion - - C: c11,c22,c66 c12=c21=1.0 JAC NOT PASS - D: d11,d12,d21,d22,d66 - """ - C,D = paras[0:3], paras[3:8] - if mFix[0]: m = mFix[1] - else: m = paras[-1] - - s11, s22, s12 = sigmas[0],sigmas[1],sigmas[3] - X = np.array([ 2.0*C[0]*s11-C[0]*s22, 2.0*C[1]*s22-C[1]*s11, 3.0*C[2]*s12 ])/3.0 # a1,a2,a7 - Y = np.array([ (8.0*D[2]-2.0*D[0]-2.0*D[3]+2.0*D[1])*s11 + (4.0*D[3]-4.0*D[1]-4.0*D[2]+ D[0])*s22, - (4.0*D[0]-4.0*D[2]-4.0*D[1]+ D[3])*s11 + (8.0*D[1]-2.0*D[3]-2.0*D[0]+2.0*D[2])*s22, - 9.0*D[4]*s12 ])/9.0 - - def priStrs(s): - temp = np.sqrt( (s[0]-s[1])**2 + 4.0*s[2]**2 ) - return 0.5*(s[0]+s[1] + temp), 0.5*(s[0]+s[1] - temp) - m2 = m/2.0; m21 = m2 - 1.0 - (X1,X2), (Y1,Y2) = priStrs(X), priStrs(Y) # Principal values of X, Y - phi1s, phi21s, phi22s = (X1-X2)**2, (2.0*Y2+Y1)**2, (2.0*Y1+Y2)**2 - phi1, phi21, phi22 = phi1s**m2, phi21s**m2, phi22s**m2 - left = phi1 + phi21 + phi22 - r = (0.5*left)**(1.0/m)/eqStress - - if not Jac: - return (r-1.0).ravel() - else: - drdl, drdm = r/m/left, r*math_ln(0.5*left)*(-1.0/m/m) #/(-m*m) - dldm = ( phi1*math_ln(phi1s) + phi21*math_ln(phi21s) + phi22*math_ln(phi22s) )*0.5 - zero = np.zeros_like(s11); num = len(s11) - def dPrincipalds(X): - """Derivative of principla with respect to stress""" - temp = 1.0/np.sqrt( (X[0]-X[1])**2 + 4.0*X[2]**2 ) - dP1dsi = 0.5*np.array([ 1.0+temp*(X[0]-X[1]), 1.0-temp*(X[0]-X[1]), temp*4.0*X[2]]) - dP2dsi = 0.5*np.array([ 1.0-temp*(X[0]-X[1]), 1.0+temp*(X[0]-X[1]), -temp*4.0*X[2]]) - return np.array([dP1dsi, dP2dsi]) - - dXdXi, dYdYi = dPrincipalds(X), dPrincipalds(Y) - dXidC = np.array([ [ 2.0*s11-s22, zero, zero ], #dX11dC - [ zero, 2.0*s22-s11, zero ], #dX22dC - [ zero, zero, 3.0*s12 ] ])/3.0 #dX12dC - dYidD = np.array([ [ -2.0*s11+ s22, 2.0*s11-4.0*s22, 8.0*s11-4.0*s22, -2.0*s11+4.0*s22, zero ], #dY11dD - [ 4.0*s11-2.0*s22, -4.0*s11+8.0*s22, -4.0*s11+2.0*s22, s11-2.0*s22, zero ], #dY22dD - [ zero, zero, zero, zero, 9.0*s12 ] ])/9.0 #dY12dD - - dXdC=np.array([np.dot(dXdXi[:,:,i], dXidC[:,:,i]).T for i in range(num)]).T - 
dYdD=np.array([np.dot(dYdYi[:,:,i], dYidD[:,:,i]).T for i in range(num)]).T - - dldX = m*np.array([ phi1s**m21*(X1-X2), phi1s**m21*(X2-X1)]) - dldY = m*np.array([phi21s**m21*(2.0*Y2+Y1) + 2.0*phi22s**m21*(2.0*Y1+Y2), \ - phi22s**m21*(2.0*Y1+Y2) + 2.0*phi21s**m21*(2.0*Y2+Y1) ]) - jC = drdl*np.array([np.dot(dldX[:,i], dXdC[:,:,i]) for i in range(num)]).T - jD = drdl*np.array([np.dot(dldY[:,i], dYdD[:,:,i]) for i in range(num)]).T - - jm = drdl*dldm + drdm - if mFix[0]: return np.vstack((jC,jD)).T - else: return np.vstack((jC,jD,jm)).T - -def Yld200418p(eqStress, paras, sigmas, mFix, criteria, dim, Jac=False): - """ - Yld2004-18p yield criterion - - the fitted parameters are - C: c12,c21,c23,c32,c31,c13,c44,c55,c66; D: d12,d21,d23,d32,d31,d13,d44,d55,d66 for 3D - C: c12,c21,c23,c32,c31,c13,c44; D: d12,d21,d23,d32,d31,d13,d44 for 2D - and m, m are optional - criteria is ignored - """ - if dim == 2: C,D = np.append(paras[0:7],[0.0,0.0]), np.append(paras[7:14],[0.0,0.0]) - else: C,D = paras[0:9], paras[9:18] - if mFix[0]: m = mFix[1] - else: m = paras[-1] - - sv = (sigmas[0] + sigmas[1] + sigmas[2])/3.0 - sdev = np.vstack((sigmas[0:3]-sv,sigmas[3:6])) - ys = lambda sdev, C: np.array([-C[0]*sdev[1]-C[5]*sdev[2], -C[1]*sdev[0]-C[2]*sdev[2], - -C[4]*sdev[0]-C[3]*sdev[1], C[6]*sdev[3], C[7]*sdev[4], C[8]*sdev[5]]) - p,q = ys(sdev, C), ys(sdev, D) - pLambdas, qLambdas = principalStress(p), principalStress(q) # no sort - - m2 = m/2.0; x3 = range(3); num = len(sv) - PiQj = np.array([(pLambdas[i,:]-qLambdas[j,:]) for i in x3 for j in x3]) - QiPj = np.array([(qLambdas[i,:]-pLambdas[j,:]) for i in x3 for j in x3]).reshape(3,3,num) - PiQjs = PiQj**2 - left = np.sum(PiQjs**m2,axis=0) - r = (0.25*left)**(1.0/m)/eqStress - - if not Jac: - return (r - 1.0).ravel() - else: - drdl, drdm = r/m/left, r*math_ln(0.25*left)*(-1.0/m/m) - dldm = np.sum(PiQjs**m2*math_ln(PiQjs),axis=0)*0.5 - dPdc, dQdd = principalStrs_Der(p, sdev, dim), principalStrs_Der(q, sdev, dim) - PiQjs3d = ( PiQjs**(m2-1.0) ).reshape(3,3,num) - dldP = -m*np.array([np.diag(np.dot(PiQjs3d[:,:,i], QiPj [:,:,i])) for i in range(num)]).T - dldQ = m*np.array([np.diag(np.dot(QiPj [:,:,i], PiQjs3d[:,:,i])) for i in range(num)]).T - - jm = drdl*dldm + drdm - jc = drdl*np.sum([dldP[i]*dPdc[i] for i in x3],axis=0) - jd = drdl*np.sum([dldQ[i]*dQdd[i] for i in x3],axis=0) - - if mFix[0]: return np.vstack((jc,jd)).T - else: return np.vstack((jc,jd,jm)).T - -def KarafillisBoyce(eqStress, paras, sigmas, mFix, criteria, dim, Jac=False): - """ - Karafillis-Boyce - - the fitted parameters are - c11,c12,c13,c14,c15,c16,c,m for 3D - c11,c12,c13,c14,c,m for plane stress - 0 1 and self.dimen == 2: - return fitCriteria[self.name]['labels'][1] - else: - return fitCriteria[self.name]['labels'][0] - - def report_name(self): - return fitCriteria[self.name]['name'] - - def fit(self,stress): - global fitResults; fitErrors; fitResidual - if options.exponent > 0.0: nExponent = options.exponent - else: nExponent = 0 - nameCriterion = self.name.lower() - criteria = Criteria(nameCriterion,self.uniaxial,self.expo, self.dimen) - bounds = fitCriteria[nameCriterion]['bound'][dDim] # Default bounds, no bound - guess0 = Guess # Default initial guess, depends on bounds - - if fitResults == []: - initialguess = guess0 - else: - initialguess = np.array(fitResults[-1]) - - ydata = np.zeros(np.shape(stress)[1]) - try: - popt, pcov, infodict, errmsg, ierr = \ - leastsqBound (criteria.fun, initialguess, args=(ydata,stress), - bounds=bounds, Dfun=criteria.jac, full_output=True) - if ierr not in 
[1, 2, 3, 4]: - raise RuntimeError("Optimal parameters not found: "+errmsg) - else: - residual = criteria.fun(popt, ydata, stress) - fitResidual.append(np.linalg.norm(residual)/np.sqrt(len(residual))) - if (len(ydata) > len(initialguess)) and pcov is not None: - s_sq = (criteria.fun(popt, *(ydata,stress))**2).sum()/(len(ydata)-len(initialguess)) - pcov = pcov * s_sq - perr = np.sqrt(np.diag(pcov)) - fitResults.append(popt.tolist()) - fitErrors .append(perr.tolist()) - - popt = np.concatenate((np.array(popt), np.repeat(options.exponent,nExponent))) - perr = np.concatenate((np.array(perr), np.repeat(0.0,nExponent))) - - damask.util.croak('Needed {} function calls for fitting'.format(infodict['nfev'])) - except Exception as detail: - damask.util.croak(detail) - pass - return popt - -#--------------------------------------------------------------------------------------------------- -class myThread (threading.Thread): - """Runner""" - - def __init__(self, threadID): - threading.Thread.__init__(self) - self.threadID = threadID - def run(self): - semaphore.acquire() - conv=converged() - semaphore.release() - while not conv: - doSim(self.name) - semaphore.acquire() - conv=converged() - semaphore.release() - -def doSim(thread): - semaphore.acquire() - global myLoad - loadNo=loadcaseNo() - if not os.path.isfile('%s.load'%loadNo): - damask.util.croak('Generating load case for simulation %s (%s)'%(loadNo,thread)) - f=open('%s.load'%loadNo,'w') - f.write(myLoad.getLoadcase(loadNo)) - f.close() - semaphore.release() - else: semaphore.release() - -# if spectralOut does not exist, run simulation - semaphore.acquire() - if not os.path.isfile('%s_%i.spectralOut'%(options.geometry,loadNo)): - damask.util.croak('Starting simulation %i (%s)'%(loadNo,thread)) - semaphore.release() - damask.util.execute('DAMASK_spectral -g %s -l %i'%(options.geometry,loadNo)) - else: semaphore.release() - -# if ASCII tables do not exist, run postprocessing - semaphore.acquire() - if not os.path.isfile('./postProc/%s_%i.txt'%(options.geometry,loadNo)): - damask.util.croak('Starting post processing for simulation %i (%s)'%(loadNo,thread)) - semaphore.release() - try: - damask.util.execute('postResults --cr f,p --co totalshear %s_%i.spectralOut'%(options.geometry,loadNo)) - except: - damask.util.execute('postResults --cr f,p %s_%i.spectralOut'%(options.geometry,loadNo)) - damask.util.execute('addCauchy ./postProc/%s_%i.txt'%(options.geometry,loadNo)) - damask.util.execute('addStrainTensors -0 -v ./postProc/%s_%i.txt'%(options.geometry,loadNo)) - damask.util.execute('addMises -s Cauchy -e ln(V) ./postProc/%s_%i.txt'%(options.geometry,loadNo)) - else: semaphore.release() - -# reading values from ASCII table (including linear interpolation between points) - semaphore.acquire() - damask.util.croak('Reading values from simulation %i (%s)'%(loadNo,thread)) - refFile = './postProc/%s_%i.txt'%(options.geometry,loadNo) - table = damask.ASCIItable(refFile,readonly=True) - table.head_read() - - thresholdKey = {'equivalentStrain':'Mises(ln(V))', - 'totalshear': 'totalshear', - }[options.fitting] - - for l in [thresholdKey,'1_Cauchy']: - if l not in table.labels(raw = True): damask.util.croak('%s not found'%l) - semaphore.release() - - table.data_readArray(['%i_Cauchy'%(i+1) for i in range(9)]+[thresholdKey]+['%i_ln(V)'%(i+1) for i in range(9)]) - - validity = np.zeros((int(options.yieldValue[2])), dtype=bool) # found data for desired threshold - yieldStress = np.empty((int(options.yieldValue[2]),6),'d') - deformationRate = 
np.empty((int(options.yieldValue[2]),6),'d') - - line = 0 - for i,threshold in enumerate(np.linspace(options.yieldValue[0],options.yieldValue[1],options.yieldValue[2])): - while line < np.shape(table.data)[0]: - if abs(table.data[line,9])>= threshold: - upper,lower = abs(table.data[line,9]),abs(table.data[line-1,9]) # values for linear interpolation - stress = np.array(table.data[line-1,0:9] * (upper-threshold)/(upper-lower) + \ - table.data[line ,0:9] * (threshold-lower)/(upper-lower)).reshape(3,3) # linear interpolation of stress values - yieldStress[i,:] = t33toSym6(stress) - - dstrain= np.array(table.data[line,10:] - table.data[line-1,10:]).reshape(3,3) - deformationRate[i,:] = t33toSym6(dstrain) - - validity[i] = True - break - else: - line+=1 - if not validity[i]: - semaphore.acquire() - damask.util.croak('The data of result %i at the threshold %f is invalid,'%(loadNo,threshold)\ - +'the fitting at this point is skipped') - semaphore.release() - -# do the actual fitting procedure and write results to file - semaphore.acquire() - global stressAll, strainAll - f=open(options.geometry+'_'+options.criterion+'_'+str(time.time())+'.txt','w') - f.write(' '.join([options.fitting]+myFit.report_labels())+'\n') - try: - for i,threshold in enumerate(np.linspace(options.yieldValue[0],options.yieldValue[1],options.yieldValue[2])): - if validity[i]: - stressAll[i]=np.append(stressAll[i], yieldStress[i]/stressUnit) - strainAll[i]=np.append(strainAll[i], deformationRate[i]) - f.write( str(threshold)+' '+ - ' '.join(map(str,myFit.fit(stressAll[i].reshape(len(stressAll[i])//6,6).transpose())))+'\n') - except Exception: - damask.util.croak('Could not fit results of simulation (%s)'%thread) - semaphore.release() - return - damask.util.croak('\n') - semaphore.release() - -def loadcaseNo(): - global N_simulations - N_simulations+=1 - return N_simulations - -def converged(): - global N_simulations; fitResidual - - if N_simulations < options.max: - if len(fitResidual) > 5 and N_simulations >= options.min: - residualList = np.array(fitResidual[len(fitResidual)-5:]) - if np.std(residualList)/np.max(residualList) < 0.05: - return True - return False - else: - return True - -# -------------------------------------------------------------------- -# MAIN -# -------------------------------------------------------------------- - -parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """ -Performs calculations with various loads on given geometry file and fits yield surface. - -""", version = scriptID) - -# maybe make an option to specifiy if 2D/3D fitting should be done? 
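For reference, the deleted doSim() above locates each yield point by scanning the post-processed table for the first row whose equivalent strain reaches the requested threshold and linearly interpolating the Cauchy stress between that row and the one before it. The following minimal, self-contained sketch of that interpolation uses synthetic data and hypothetical names; it is not part of the removed script.

import numpy as np

# synthetic table: column 0 = equivalent strain, columns 1..9 = Cauchy stress components
data = np.array([[0.000,  0.,  0., 0., 0.,  0., 0., 0., 0.,  0.],
                 [0.001, 40.,  1., 0., 1., 38., 0., 0., 0., 20.],
                 [0.003, 90.,  2., 0., 2., 85., 0., 0., 0., 45.]])

def stress_at_threshold(table, threshold):
    """Linearly interpolate the stress at the first crossing of the strain threshold."""
    strain = table[:, 0]
    idx = int(np.argmax(strain >= threshold))      # first row at or above the threshold
    if idx == 0:                                   # threshold not bracketed by the data
        return None
    w = (threshold - strain[idx-1]) / (strain[idx] - strain[idx-1])
    return (1.0 - w)*table[idx-1, 1:] + w*table[idx, 1:]

print(stress_at_threshold(data, 0.002))            # halfway between the last two rows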
- -parser.add_option('-l','--load' , dest='load', type='float', nargs=3, - help='load: final strain; increments; time %default', metavar='float int float') -parser.add_option('-g','--geometry', dest='geometry', type='string', - help='name of the geometry file [%default]', metavar='string') -parser.add_option('-c','--criterion', dest='criterion', choices=fitCriteria.keys(), - help='criterion for stopping simulations [%default]', metavar='string') -parser.add_option('-f','--fitting', dest='fitting', choices=thresholdParameter, - help='yield criterion [%default]', metavar='string') -parser.add_option('-y','--yieldvalue', dest='yieldValue', type='float', nargs=3, - help='yield points: start; end; count %default', metavar='float float int') -parser.add_option('--min', dest='min', type='int', - help='minimum number of simulations [%default]', metavar='int') -parser.add_option('--max', dest='max', type='int', - help='maximum number of iterations [%default]', metavar='int') -parser.add_option('-t','--threads', dest='threads', type='int', - help='number of parallel executions [%default]', metavar='int') -parser.add_option('-b','--bound', dest='bounds', type='float', nargs=2, - help='yield points: start; end; count %default', metavar='float float') -parser.add_option('-d','--dimension', dest='dimension', type='choice', choices=['2','3'], - help='dimension of the virtual test [%default]', metavar='int') -parser.add_option('-e', '--exponent', dest='exponent', type='float', - help='exponent of non-quadratic criteria', metavar='int') -parser.add_option('-u', '--uniaxial', dest='eqStress', type='float', - help='Equivalent stress', metavar='float') - -parser.set_defaults(min = 12, - max = 30, - threads = 4, - yieldValue = (0.002,0.004,2), - load = (0.010,100,100.0), - criterion = 'vonmises', - fitting = 'totalshear', - geometry = '20grains16x16x16', - bounds = None, - dimension = '3', - exponent = -1.0, - ) - -options = parser.parse_args()[0] - -if options.threads < 1: - parser.error('invalid number of threads {}'.format(options.threads)) -if options.min < 0: - parser.error('invalid minimum number of simulations {}'.format(options.min)) -if options.max < options.min: - parser.error('invalid maximum number of simulations (below minimum)') -if options.yieldValue[0] > options.yieldValue[1]: - parser.error('invalid yield start (below yield end)') -if options.yieldValue[2] != int(options.yieldValue[2]): - parser.error('count must be an integer') - -for check in [options.geometry+'.geom','numerics.config','material.config']: - if not os.path.isfile(check): - damask.util.croak('"{}" file not found'.format(check)) - -options.dimension = int(options.dimension) - -stressUnit = 1.0e9 if options.criterion == 'hill1948' else 1.0e6 - - -if options.dimension not in fitCriteria[options.criterion]['dimen']: - parser.error('invalid dimension for selected criterion') - -if options.criterion not in ['vonmises','tresca','drucker','hill1948'] and options.eqStress is None: - parser.error('please specify an equivalent stress (e.g. 
fitting to von Mises)') - -# global variables -fitResults = [] -fitErrors = [] -fitResidual = [] -stressAll= [np.zeros(0,'d').reshape(0,0) for i in range(int(options.yieldValue[2]))] -strainAll= [np.zeros(0,'d').reshape(0,0) for i in range(int(options.yieldValue[2]))] -N_simulations=0 -Guess = [] -threads=[] -semaphore=threading.Semaphore(1) -dDim = None -myLoad = None -myFit = None - -run = runFit(options.exponent, options.eqStress, options.dimension, options.criterion) - -damask.util.croak('Finished fitting to yield criteria') diff --git a/processing/misc/yieldSurfaceFast.py b/processing/misc/yieldSurfaceFast.py deleted file mode 100755 index c58dca733..000000000 --- a/processing/misc/yieldSurfaceFast.py +++ /dev/null @@ -1,1513 +0,0 @@ -#!/usr/bin/env python2.7 -# -*- coding: UTF-8 no BOM -*- - -import threading,time,os -import numpy as np -from optparse import OptionParser -import damask -from damask.util import leastsqBound -from scipy.optimize import nnls - -scriptName = os.path.splitext(os.path.basename(__file__))[0] -scriptID = ' '.join([scriptName,damask.version]) - -def runFit(exponent, eqStress, dimension, criterion): - global threads, myFit, myLoad - global fitResidual - global Guess, dDim - - if options.criterion!='facet': - dDim = dimension - 3 - nParas = len(fitCriteria[criterion]['bound'][dDim]) - nExpo = fitCriteria[criterion]['nExpo'] - - if exponent > 0.0: # User defined exponents - nParas = nParas-nExpo - fitCriteria[criterion]['bound'][dDim] = fitCriteria[criterion]['bound'][dDim][:nParas] - - for i in range(nParas): - temp = fitCriteria[criterion]['bound'][dDim][i] - if fitCriteria[criterion]['bound'][dDim][i] == (None,None): - Guess.append(1.0) - else: - g = (temp[0]+temp[1])/2.0 - if g == 0: g = temp[1]*0.5 - Guess.append(g) - - myLoad = Loadcase(options.load[0],options.load[1],options.load[2],options.flag,options.yieldValue, - nSet = 10, dimension = dimension, vegter = options.criterion=='vegter') - - - myFit = Criterion(exponent,eqStress, dimension, criterion) - for t in range(options.threads): - threads.append(myThread(t)) - threads[t].start() - - for t in range(options.threads): - threads[t].join() - - if options.criterion=='facet': - doFacetFit() - - damask.util.croak('Residuals') - damask.util.croak(fitResidual) - -def doFacetFit(): - n = options.order - Data = np.zeros((options.numpoints, 10)) - for i in range(options.numpoints): - fileName = options.geometry + '_' + str(i+1) + '.yield' - data_i = np.loadtxt(fileName) - - sv = (data_i[0,0] + data_i[1,1] + data_i[2,2])/3.0 - - #convert stress and strain form the 6D to 5D space - S1 = np.sqrt(2.0)*(data_i[0,0] - data_i[1,1])/2.0 - S2 = np.sqrt(6.0)*(data_i[0,0] + data_i[1,1] - 2.0*sv)/2.0 - S3 = np.sqrt(2.0)*data_i[1,2] - S4 = np.sqrt(2.0)*data_i[2,0] - S5 = np.sqrt(2.0)*data_i[0,1] - - E1 = np.sqrt(2.0)*(data_i[3,0]-data_i[4,1])/2.0 - E2 = np.sqrt(6.0)*(data_i[3,0]+data_i[4,1])/2.0 - E3 = np.sqrt(2.0)*data_i[4,2] - E4 = np.sqrt(2.0)*data_i[5,0] - E5 = np.sqrt(2.0)*data_i[3,1] - - Data[i,:] = [E1,E2,E3,E4,E5,S1,S2,S3,S4,S5] - - Data[:,5:] = Data[:,5:] / 100000000.0 - - path=os.path.join(os.getcwd(),'final.mmm') - np.savetxt(path, Data, header='', comments='', fmt='% 15.10f') - - if options.dimension == 2: - reducedIndices = [0,1,4,5,6,9] - elif options.dimension == 3: - reducedIndices = [i for i in range(10)] - - numDirections = Data.shape[0] - Indices = np.arange(numDirections) - sdPairs = Data[:,reducedIndices][Indices,:] - numPairs = sdPairs.shape[0] - dimensionality = sdPairs.shape[1] / 2 - ds = 
sdPairs[:,0:dimensionality] - s = sdPairs[:,dimensionality::] - - A = np.zeros((numPairs, numPairs)) - B = np.ones((numPairs,)) - for i in range(numPairs): - for j in range(numPairs): - lamb = 1.0 - s_i = s[i,:] - ds_j = ds[j,:] - A[i,j] = lamb * (np.dot(s_i.ravel(), ds_j.ravel()) ** n) - - lambdas, residuals = nnls(A, B) - nonZeroTerms = np.logical_not(np.isclose(lambdas, 0.)) - numNonZeroTerms = np.sum(nonZeroTerms) - dataOut = np.zeros((numNonZeroTerms, 6)) - - if options.dimension == 2: - dataOut[:,0] = lambdas[nonZeroTerms] - dataOut[:,1] = ds[nonZeroTerms,:][:,0] - dataOut[:,2] = ds[nonZeroTerms,:][:,1] - dataOut[:,5] = ds[nonZeroTerms,:][:,2] - elif options.dimension == 3: - dataOut[:,0] = lambdas[nonZeroTerms] - dataOut[:,1] = ds[nonZeroTerms,:][:,0] - dataOut[:,2] = ds[nonZeroTerms,:][:,1] - dataOut[:,3] = ds[nonZeroTerms,:][:,2] - dataOut[:,4] = ds[nonZeroTerms,:][:,3] - dataOut[:,5] = ds[nonZeroTerms,:][:,4] - - headerText = 'facet\n 1 \n F \n {0:<3d} \n {1:<3d} '.format(n, numNonZeroTerms) - path=os.path.join(os.getcwd(),'facet_o{0}.fac'.format(n)) - np.savetxt(path, dataOut, header=headerText, comments='', fmt='% 15.10f') - -def principalStresses(sigmas): - """ - Computes principal stresses (i.e. eigenvalues) for a set of Cauchy stresses. - - sorted in descending order. - """ - lambdas=np.zeros(0,'d') - for i in range(np.shape(sigmas)[1]): - eigenvalues = np.linalg.eigvalsh(sym6toT33(sigmas[:,i])) - lambdas = np.append(lambdas,np.sort(eigenvalues)[::-1]) #append eigenvalues in descending order - lambdas = np.transpose(lambdas.reshape(np.shape(sigmas)[1],3)) - return lambdas - -def principalStress(p): - I = invariant(p) - - I1s3I2= (I[0]**2 - 3.0*I[1])**0.5 - numer = 2.0*I[0]**3 - 9.0*I[0]*I[1] + 27.0*I[2] - denom = 2.0*I1s3I2**3 - cs = numer/denom - - phi = np.arccos(cs)/3.0 - t1 = I[0]/3.0; t2 = 2.0/3.0*I1s3I2 - return np.array( [t1 + t2*np.cos(phi), - t1 + t2*np.cos(phi+np.pi*2.0/3.0), - t1 + t2*np.cos(phi+np.pi*4.0/3.0)]) - -def principalStrs_Der(p, s, dim, Karafillis=False): - """Derivative of principal stress with respect to stress""" - third = 1.0/3.0 - third2 = 2.0*third - - I = invariant(p) - I1s3I2= np.sqrt(I[0]**2 - 3.0*I[1]) - numer = 2.0*I[0]**3 - 9.0*I[0]*I[1] + 27.0*I[2] - denom = 2.0*I1s3I2**3 - cs = numer/denom - phi = np.arccos(cs)/3.0 - - dphidcs = -third/np.sqrt(1.0 - cs**2) - dcsddenom = 0.5*numer*(-1.5)*I1s3I2**(-5.0) - dcsdI1 = (6.0*I[0]**2 - 9.0*I[1])*denom + dcsddenom*(2.0*I[0]) - dcsdI2 = ( - 9.0*I[0])*denom + dcsddenom*(-3.0) - dcsdI3 = 27.0*denom - dphidI1, dphidI2, dphidI3 = dphidcs*dcsdI1, dphidcs*dcsdI2, dphidcs*dcsdI3 - - dI1s3I2dI1 = I[0]/I1s3I2 - dI1s3I2dI2 = -1.5/I1s3I2 - tcoeff = third2*I1s3I2 - - dSidIj = lambda theta : ( tcoeff*(-np.sin(theta))*dphidI1 + third2*dI1s3I2dI1*np.cos(theta) + third, - tcoeff*(-np.sin(theta))*dphidI2 + third2*dI1s3I2dI2*np.cos(theta), - tcoeff*(-np.sin(theta))*dphidI3) - dSdI = np.array([dSidIj(phi),dSidIj(phi+np.pi*2.0/3.0),dSidIj(phi+np.pi*4.0/3.0)]) # i=1,2,3; j=1,2,3 - -# calculate the derivation of principal stress with regards to the anisotropic coefficients - one = np.ones_like(s); zero = np.zeros_like(s); num = len(s) - dIdp = np.array([[one, one, one, zero, zero, zero], - [p[1]+p[2], p[2]+p[0], p[0]+p[1], -2.0*p[3], -2.0*p[4], -2.0*p[5]], - [p[1]*p[2]-p[4]**2, p[2]*p[0]-p[5]**2, p[0]*p[1]-p[3]**2, - -2.0*p[3]*p[2]+2.0*p[4]*p[5], -2.0*p[4]*p[0]+2.0*p[5]*p[3], -2.0*p[5]*p[1]+2.0*p[3]*p[4]] ]) - if Karafillis: - dpdc = np.array([[zero,s[0]-s[2],s[0]-s[1]], [s[1]-s[2],zero,s[1]-s[0]], 
[s[2]-s[1],s[2]-s[0],zero]])/3.0 - dSdp = np.array([np.dot(dSdI[:,:,i],dIdp[:,:,i]).T for i in range(num)]).T - if dim == 2: - temp = np.vstack([dSdp[:,3]*s[3]]).T.reshape(num,1,3).T - else: - temp = np.vstack([dSdp[:,3]*s[3],dSdp[:,4]*s[4],dSdp[:,5]*s[5]]).T.reshape(num,3,3).T - - return np.concatenate((np.array([np.dot(dSdp[:,0:3,i], dpdc[:,:,i]).T for i in range(num)]).T, - temp), axis=1) - else: - if dim == 2: - dIdc=np.array([[-dIdp[i,0]*s[1], -dIdp[i,1]*s[0], -dIdp[i,1]*s[2], - -dIdp[i,2]*s[1], -dIdp[i,2]*s[0], -dIdp[i,0]*s[2], - dIdp[i,3]*s[3] ] for i in range(3)]) - else: - dIdc=np.array([[-dIdp[i,0]*s[1], -dIdp[i,1]*s[0], -dIdp[i,1]*s[2], - -dIdp[i,2]*s[1], -dIdp[i,2]*s[0], -dIdp[i,0]*s[2], - dIdp[i,3]*s[3], dIdp[i,4]*s[4], dIdp[i,5]*s[5] ] for i in range(3)]) - return np.array([np.dot(dSdI[:,:,i],dIdc[:,:,i]).T for i in range(num)]).T - -def invariant(sigmas): - I = np.zeros(3) - s11,s22,s33,s12,s23,s31 = sigmas - I[0] = s11 + s22 + s33 - I[1] = s11*s22 + s22*s33 + s33*s11 - s12**2 - s23**2 - s31**2 - I[2] = s11*s22*s33 + 2.0*s12*s23*s31 - s12**2*s33 - s23**2*s11 - s31**2*s22 - return I - -def math_ln(x): - return np.log(x + 1.0e-32) - -def sym6toT33(sym6): - """Shape the symmetric stress tensor(6) into (3,3)""" - return np.array([[sym6[0],sym6[3],sym6[5]], - [sym6[3],sym6[1],sym6[4]], - [sym6[5],sym6[4],sym6[2]]]) - -def t33toSym6(t33): - """Shape the stress tensor(3,3) into symmetric (6)""" - return np.array([ t33[0,0], - t33[1,1], - t33[2,2], - (t33[0,1] + t33[1,0])/2.0, # 0 3 5 - (t33[1,2] + t33[2,1])/2.0, # * 1 4 - (t33[2,0] + t33[0,2])/2.0,]) # * * 2 - -class Criteria(object): - def __init__(self, criterion, uniaxialStress,exponent, dimension): - self.stress0 = uniaxialStress - if exponent < 0.0: # Fitting exponent m - self.mFix = [False, exponent] - else: # fixed exponent m - self.mFix = [True, exponent] - self.func = fitCriteria[criterion]['func'] - self.criteria = criterion - self.dim = dimension - def fun(self, paras, ydata, sigmas): - return self.func(self.stress0, paras, sigmas,self.mFix,self.criteria,self.dim) - def jac(self, paras, ydata, sigmas): - return self.func(self.stress0, paras, sigmas,self.mFix,self.criteria,self.dim,Jac=True) - -class Vegter(object): - """Vegter yield criterion""" - - def __init__(self, refPts, refNormals,nspace=11): - self.refPts, self.refNormals = self._getRefPointsNormals(refPts, refNormals) - self.hingePts = self._getHingePoints() - self.nspace = nspace - def _getRefPointsNormals(self,refPtsQtr,refNormalsQtr): - if len(refPtsQtr) == 12: - refPts = refPtsQtr - refNormals = refNormalsQtr - else: - refPts = np.empty([13,2]) - refNormals = np.empty([13,2]) - refPts[12] = refPtsQtr[0] - refNormals[12] = refNormalsQtr[0] - for i in range(3): - refPts[i] = refPtsQtr[i] - refPts[i+3] = refPtsQtr[3-i][::-1] - refPts[i+6] =-refPtsQtr[i] - refPts[i+9] =-refPtsQtr[3-i][::-1] - refNormals[i] = refNormalsQtr[i] - refNormals[i+3] = refNormalsQtr[3-i][::-1] - refNormals[i+6] =-refNormalsQtr[i] - refNormals[i+9] =-refNormalsQtr[3-i][::-1] - return refPts,refNormals - - def _getHingePoints(self): - """ - Calculate the hinge point B according to the reference points A,C and the normals n,m - - refPoints = np.array([[p1_x, p1_y], [p2_x, p2_y]]); - refNormals = np.array([[n1_x, n1_y], [n2_x, n2_y]]) - """ - def hingPoint(points, normals): - A1 = points[0][0]; A2 = points[0][1] - C1 = points[1][0]; C2 = points[1][1] - n1 = normals[0][0]; n2 = normals[0][1] - m1 = normals[1][0]; m2 = normals[1][1] - B1 = (m2*(n1*A1 + n2*A2) - n2*(m1*C1 + 
m2*C2))/(n1*m2-m1*n2) - B2 = (n1*(m1*C1 + m2*C2) - m1*(n1*A1 + n2*A2))/(n1*m2-m1*n2) - return np.array([B1,B2]) - return np.array([hingPoint(self.refPts[i:i+2],self.refNormals[i:i+2]) for i in range(len(self.refPts)-1)]) - - def getBezier(self): - def bezier(R,H): - b = [] - for mu in np.linspace(0.0,1.0,self.nspace): - b.append(np.array(R[0]*np.ones_like(mu) + 2.0*mu*(H - R[0]) + mu**2*(R[0]+R[1] - 2.0*H))) - return b - return np.array([bezier(self.refPts[i:i+2],self.hingePts[i]) for i in range(len(self.refPts)-1)]) - -def VetgerCriterion(stress,lankford, rhoBi0, theta=0.0): - """0-pure shear; 1-uniaxial; 2-plane strain; 3-equi-biaxial""" - def getFourierParas(r): - # get the value after Fourier transformation - nset = len(r) - lmatrix = np.empty([nset,nset]) - theta = np.linspace(0.0,np.pi/2,nset) - for i,th in enumerate(theta): - lmatrix[i] = np.array([np.cos(2*j*th) for j in range(nset)]) - return np.linalg.solve(lmatrix, r) - - nps = len(stress) - if nps%4 != 0: - damask.util.croak('Warning: the number of stress points is uncorrect, stress points of %s are missing in set %i'%( - ['eq-biaxial, plane strain & uniaxial', 'eq-biaxial & plane strain','eq-biaxial'][nps%4-1],nps/4+1)) - else: - nset = nps/4 - strsSet = stress.reshape(nset,4,2) - refPts = np.empty([4,2]) - - fouriercoeffs = np.array([np.cos(2.0*i*theta) for i in range(nset)]) - for i in range(2): - refPts[3,i] = sum(strsSet[:,3,i])/nset - for j in range(3): - refPts[j,i] = np.dot(getFourierParas(strsSet[:,j,i]), fouriercoeffs) - - -def Tresca(eqStress=None, #not needed/supported - paras=None, - sigmas=None, - mFix=None, #not needed/supported - criteria=None, #not needed/supported - dim=3, - Jac=False): - """ - Tresca yield criterion - - the fitted parameter is paras(sigma0) - """ - if not Jac: - lambdas = principalStresses(sigmas) - r = np.amax(np.array([abs(lambdas[2,:]-lambdas[1,:]),\ - abs(lambdas[1,:]-lambdas[0,:]),\ - abs(lambdas[0,:]-lambdas[2,:])]),0) - paras - return r.ravel() - else: - return -np.ones(len(sigmas)) - -def Cazacu_Barlat(eqStress=None, - paras=None, - sigmas=None, - mFix=None,#not needed/supported - criteria=None, - dim=3, #2D also possible - Jac=False): - """ - Cazacu-Barlat (CB) yield criterion - - the fitted parameters are: - a1,a2,a3,a6; b1,b2,b3,b4,b5,b10; c for plane stress - a1,a2,a3,a4,a5,a6; b1,b2,b3,b4,b5,b6,b7,b8,b9,b10,b11; c: for general case - mFix is ignored - """ - s11,s22,s33,s12,s23,s31 = sigmas - if dim == 2: - (a1,a2,a3,a4), (b1,b2,b3,b4,b5,b10), c = paras[0:4],paras[4:10],paras[10] - a5 = a6 = b6 = b7 = b8 = b9 = b11 = 0.0 - s33 = s23 = s31 = np.zeros_like(s11) - else: - (a1,a2,a3,a4,a5,a6), (b1,b2,b3,b4,b5,b6,b7,b8,b9,b10,b11), c = paras[0:6],paras[6:17],paras[17] - - s1_2, s2_2, s3_2, s12_2, s23_2, s31_2 = np.array([s11,s22,s33,s12,s23,s31])**2 - s1_3, s2_3, s3_3, s123, s321 = s11*s1_2, s22*s2_2, s33*s3_2,s11*s22*s33, s12*s23*s31 - d12_2,d23_2,d31_2 = (s11-s22)**2, (s22-s33)**2, (s33-s11)**2 - - J20 = ( a1*d12_2 + a2*d23_2 + a3*d31_2 )/6.0 + a4*s12_2 + a5*s23_2 + a6*s31_2 - J30 = ( (b1 +b2 )*s1_3 + (b3 +b4 )*s2_3 + ( b1+b4-b2 + b1+b4-b3 )*s3_3 )/27.0- \ - ( (b1*s22+b2*s33)*s1_2 + (b3*s33+b4*s11)*s2_2 + ((b1+b4-b2)*s11 + (b1+b4-b3)*s22)*s3_2 )/9.0 + \ - ( (b1+b4)*s123/9.0 + b11*s321 )*2.0 - \ - ( ( 2.0*b9 *s22 - b8*s33 - (2.0*b9 -b8)*s11 )*s31_2 + - ( 2.0*b10*s33 - b5*s22 - (2.0*b10-b5)*s11 )*s12_2 + - ( (b6+b7)*s11 - b6*s22 - b7*s33 )*s23_2 - )/3.0 - f0 = J20**3 - c*J30**2 - r = f0**(1.0/6.0)*np.sqrt(3.0)/eqStress - - if not Jac: - return (r - 1.0).ravel() - else: - drdf = 
r/f0/6.0 - dj2, dj3 = drdf*3.0*J20**2, -drdf*2.0*J30*c - jc = -drdf*J30**2 - - ja1,ja2,ja3 = dj2*d12_2/6.0, dj2*d23_2/6.0, dj2*d31_2/6.0 - ja4,ja5,ja6 = dj2*s12_2, dj2*s23_2, dj2*s31_2 - jb1 = dj3*( (s1_3 + 2.0*s3_3)/27.0 - s22*s1_2/9.0 - (s11+s22)*s3_2/9.0 + s123/4.5 ) - jb2 = dj3*( (s1_3 - s3_3)/27.0 - s33*s1_2/9.0 + s11 *s3_2/9.0 ) - jb3 = dj3*( (s2_3 - s3_3)/27.0 - s33*s2_2/9.0 + s22 *s3_2/9.0 ) - jb4 = dj3*( (s2_3 + 2.0*s3_3)/27.0 - s11*s2_2/9.0 - (s11+s22)*s3_2/9.0 + s123/4.5 ) - - jb5, jb10 = dj3*(s22 - s11)*s12_2/3.0, dj3*(s11 - s33)*s12_2/1.5 - jb6, jb7 = dj3*(s22 - s11)*s23_2/3.0, dj3*(s33 - s11)*s23_2/3.0 - jb8, jb9 = dj3*(s33 - s11)*s31_2/3.0, dj3*(s11 - s22)*s31_2/1.5 - jb11 = dj3*s321*2.0 - if dim == 2: - return np.vstack((ja1,ja2,ja3,ja4,jb1,jb2,jb3,jb4,jb5,jb10,jc)).T - else: - return np.vstack((ja1,ja2,ja3,ja4,ja5,ja6,jb1,jb2,jb3,jb4,jb5,jb6,jb7,jb8,jb9,jb10,jb11,jc)).T - -def Drucker(eqStress=None,#not needed/supported - paras=None, - sigmas=None, - mFix=None, #not needed/supported - criteria=None, - dim=3, - Jac=False): - """ - Drucker yield criterion - - the fitted parameters are - sigma0, C_D for Drucker(p=1); - sigma0, C_D, p for general Drucker - eqStress, mFix are invalid inputs - """ - if criteria == 'drucker': - sigma0, C_D= paras - p = 1.0 - else: - sigma0, C_D = paras[0:2] - if mFix[0]: p = mFix[1] - else: p = paras[-1] - I = invariant(sigmas) - J = np.zeros([3]) - J[1] = I[0]**2/3.0 - I[1] - J[2] = I[0]**3/13.5 - I[0]*I[1]/3.0 + I[2] - J2_3p = J[1]**(3.0*p) - J3_2p = J[2]**(2.0*p) - left = J2_3p - C_D*J3_2p - r = left**(1.0/(6.0*p))*3.0**0.5/sigma0 - - if not Jac: - return (r - 1.0).ravel() - else: - drdl = r/left/(6.0*p) - if criteria == 'drucker': - return np.vstack((-r/sigma0, -drdl*J3_2p)).T - else: - dldp = 3.0*J2_3p*math_ln(J[1]) - 2.0*C_D*J3_2p*math_ln(J[2]) - jp = drdl*dldp + r*math_ln(left)/(-6.0*p*p) - - if mFix[0]: return np.vstack((-r/sigma0, -drdl*J3_2p)).T - else: return np.vstack((-r/sigma0, -drdl*J3_2p, jp)).T - -def Hill1948(eqStress=None,#not needed/supported - paras=None, - sigmas=None, - mFix=None, #not needed/supported - criteria=None,#not needed/supported - dim=3, - Jac=False): - """ - Hill 1948 yield criterion - - the fitted parameters are: - F, G, H, L, M, N for 3D - F, G, H, N for 2D - """ - s11,s22,s33,s12,s23,s31 = sigmas - if dim == 2: # plane stress - jac = np.array([ s22**2, s11**2, (s11-s22)**2, 2.0*s12**2]) - else: # general case - jac = np.array([(s22-s33)**2,(s33-s11)**2,(s11-s22)**2, 2.0*s23**2,2.0*s31**2,2.0*s12**2]) - - if not Jac: - return (np.dot(paras,jac)/2.0-0.5).ravel() - else: - return jac.T - -def Hill1979(eqStress=None,#not needed/supported - paras=None, - sigmas=None, - mFix=None, - criteria=None,#not needed/supported - dim=3, - Jac=False): - """ - Hill 1979 yield criterion - - the fitted parameters are: f,g,h,a,b,c,m - """ - if mFix[0]: - m = mFix[1] - else: - m = paras[-1] - - coeff = paras[0:6] - s = principalStresses(sigmas) - diffs = np.array([s[1]-s[2], s[2]-s[0], s[0]-s[1],\ - 2.0*s[0]-s[1]-s[2], 2.0*s[1]-s[2]-s[0], 2.0*s[2]-s[0]-s[1]])**2 - - diffsm = diffs**(m/2.0) - left = np.dot(coeff,diffsm) - r = (0.5*left)**(1.0/m)/eqStress #left = base**mi - - if not Jac: - return (r-1.0).ravel() - else: - drdl, dldm = r/left/m, np.dot(coeff,diffsm*math_ln(diffs))*0.5 - jm = drdl*dldm + r*math_ln(0.5*left)*(-1.0/m/m) #/(-m**2) - - if mFix[0]: return np.vstack((drdl*diffsm)).T - else: return np.vstack((drdl*diffsm, jm)).T - -def Hosford(eqStress=None, - paras=None, - sigmas=None, - mFix=None, - criteria=None, - dim=3, 
- Jac=False): - """ - Hosford family criteria - - the fitted parameters are: - von Mises: sigma0 - Hershey: (1) sigma0, a, when a is not fixed; (2) sigma0, when a is fixed - general Hosford: (1) F,G,H, a, when a is not fixed; (2) F,G,H, when a is fixed - """ - if criteria == 'vonmises': - sigma0 = paras - coeff = np.ones(3) - a = 2.0 - elif criteria == 'hershey': - sigma0 = paras[0] - coeff = np.ones(3) - if mFix[0]: a = mFix[1] - else: a = paras[1] - else: - sigma0 = eqStress - coeff = paras[0:3] - if mFix[0]: a = mFix[1] - else: a = paras[3] - - s = principalStresses(sigmas) - diffs = np.array([s[1]-s[2], s[2]-s[0], s[0]-s[1]])**2 - diffsm = diffs**(a/2.0) - left = np.dot(coeff,diffsm) - r = (0.5*left)**(1.0/a)/sigma0 - - if not Jac: - return (r-1.0).ravel() - else: - if criteria == 'vonmises': # von Mises - return -r/sigma0 - else: - drdl, dlda = r/left/a, np.dot(coeff,diffsm*math_ln(diffs))*0.5 - ja = drdl*dlda + r*math_ln(0.5*left)*(-1.0/a/a) - if criteria == 'hershey': # Hershey - if mFix[0]: return -r/sigma0 - else: return np.vstack((-r/sigma0, ja)).T - else: # Anisotropic Hosford - if mFix[0]: return np.vstack((drdl*diffsm)).T - else: return np.vstack((drdl*diffsm, ja)).T - -def Barlat1989(eqStress=None, - paras=None, - sigmas=None, - mFix=None, - criteria=None, - dim=3, - Jac=False): - """ - Barlat-Lian 1989 yield criteria - - the fitted parameters are: - Anisotropic: a, h, p, m; m is optional - """ - a, h, p = paras[0:3] - if mFix[0]: m = mFix[1] - else: m = paras[-1] - - c = 2.0-a - s11,s22,s12 = sigmas[0], sigmas[1], sigmas[3] - k1,k2 = 0.5*(s11 + h*s22), (0.25*(s11 - h*s22)**2 + (p*s12)**2)**0.5 - fs = np.array([ (k1+k2)**2, (k1-k2)**2, 4.0*k2**2 ]); fm = fs**(m/2.0) - left = np.dot(np.array([a,a,c]),fm) - r = (0.5*left)**(1.0/m)/eqStress - - if not Jac: - return (r-1.0).ravel() - else: - dk1dh = 0.5*s22 - dk2dh, dk2dp = 0.25*(s11-h*s22)*(-s22)/k2, p*s12**2/k2 - dlda, dldc = fm[0]+fm[1], fm[2] - fm1 = fs**(m/2.0-1.0)*m - dldk1, dldk2 = a*fm1[0]*(k1+k2)+a*fm1[1]*(k1-k2), a*fm1[0]*(k1+k2)-a*fm1[1]*(k1-k2)+c*fm1[2]*k2*4.0 - drdl, drdm = r/m/left, r*math_ln(0.5*left)*(-1.0/m/m) - dldm = np.dot(np.array([a,a,c]),fm*math_ln(fs))*0.5 - - ja,jc = drdl*dlda, drdl*dldc - jh,jp = drdl*(dldk1*dk1dh + dldk2*dk2dh), drdl*dldk2*dk2dp - jm = drdl*dldm + drdm - - if mFix[0]: return np.vstack((ja,jc,jh,jp)).T - else: return np.vstack((ja,jc,jh,jp,jm)).T - -def Barlat1991(eqStress, paras, sigmas, mFix, criteria, dim, Jac=False): - """ - Barlat 1991 criteria - - the fitted parameters are: - Anisotropic: a, b, c, f, g, h, m for 3D - a, b, c, h, m for plane stress - m is optional - """ - if dim == 2: coeff = paras[0:4] # plane stress - else: coeff = paras[0:6] # general case - if mFix[0]: m = mFix[1] - else: m = paras[-1] - - s11,s22,s33,s12,s23,s31 = sigmas - if dim == 2: - dXdx = np.array([s22,-s11,s11-s22,s12]) - A,B,C,H = np.array(coeff)[:,None]*dXdx; F=G=0.0 - else: - dXdx = np.array([s22-s33,s33-s11,s11-s22,s23,s31,s12]) - A,B,C,F,G,H = np.array(coeff)[:,None]*dXdx - - I2 = (F*F + G*G + H*H)/3.0+ ((A-C)**2+(C-B)**2+(B-A)**2)/54.0 - I3 = (C-B)*(A-C)*(B-A)/54.0 + F*G*H - ((C-B)*F*F + (A-C)*G*G + (B-A)*H*H)/6.0 - phi1 = np.arccos(I3/I2**1.5)/3.0 + np.pi/6.0; absc1 = 2.0*np.abs(np.cos(phi1)) - phi2 = phi1 + np.pi/3.0; absc2 = 2.0*np.abs(np.cos(phi2)) - phi3 = phi2 + np.pi/3.0; absc3 = 2.0*np.abs(np.cos(phi3)) - left = ( absc1**m + absc2**m + absc3**m ) - r = (0.5*left)**(1.0/m)*np.sqrt(3.0*I2)/eqStress - - if not Jac: - return (r - 1.0).ravel() - else: - dfdl = r/left/m - jm = 
r*math_ln(0.5*left)*(-1.0/m/m) + dfdl*0.5*( - absc1**m*math_ln(absc1) + absc2**m*math_ln(absc2) + absc3**m*math_ln(absc3) ) - - da,db,dc = (2.0*A-B-C)/18.0, (2.0*B-C-A)/18.0, (2.0*C-A-B)/18.0 - if dim == 2: - dI2dx = np.array([da, db, dc, H])/1.5*dXdx - dI3dx = np.array([ da*(B-C) + (H**2-G**2)/2.0, - db*(C-A) + (F**2-H**2)/2.0, - dc*(A-B) + (G**2-F**2)/2.0, - (G*F + (A-B))*H ])/3.0*dXdx - else: - dI2dx = np.array([da, db, dc, F,G,H])/1.5*dXdx - dI3dx = np.array([ da*(B-C) + (H**2-G**2)/2.0, - db*(C-A) + (F**2-H**2)/2.0, - dc*(A-B) + (G**2-F**2)/2.0, - (H*G*3.0 + (B-C))*F, - (F*H*3.0 + (C-A))*G, - (G*F*3.0 + (A-B))*H ])/3.0*dXdx - darccos = -1.0/np.sqrt(1.0 - I3**2/I2**3) - - dfdcos = lambda phi : dfdl*m*(2.0*abs(np.cos(phi)))**(m-1.0)*np.sign(np.cos(phi))*(-np.sin(phi)/1.5) - - dfdthe= (dfdcos(phi1) + dfdcos(phi2) + dfdcos(phi3)) - dfdI2, dfdI3 = dfdthe*darccos*I3*(-1.5)*I2**(-2.5)+r/2.0/I2, dfdthe*darccos*I2**(-1.5) - - if mFix[0]: return np.vstack((dfdI2*dI2dx + dfdI3*dI3dx)).T - else: return np.vstack((dfdI2*dI2dx + dfdI3*dI3dx, jm)).T - -def BBC2000(eqStress, paras, sigmas, mFix, criteria, dim, Jac=False): - """ - BBC2000 yield criterion - - the fitted parameters are - d,e,f,g, b,c,a, k; k is optional - criteria are invalid input - """ - d,e,f,g, b,c,a= paras[0:7] - if mFix[0]: k = mFix[1] - else: k = paras[-1] - - s11,s22,s12 = sigmas[0], sigmas[1], sigmas[3] - k2 = 2.0*k; k1 = k - 1.0 - M,N,P,Q,R = d+e, e+f, (d-e)/2.0, (e-f)/2.0, g**2 - Gamma = M*s11 + N*s22 - Psi = ( (P*s11 + Q*s22)**2 + s12**2*R )**0.5 - - l1, l2, l3 = b*Gamma + c*Psi, b*Gamma - c*Psi, 2.0*c*Psi - l1s,l2s,l3s = l1**2, l2**2, l3**2 - - left = a*l1s**k + a*l2s**k + (1-a)*l3s**k - r = left**(1.0/k2)/eqStress - if not Jac: - return (r - 1.0).ravel() - else: - drdl,drdk = r/left/k2, r*math_ln(left)*(-1.0/k2/k) - dldl1,dldl2,dldl3 = a*k2*(l1s**k1)*l1, a*k2*(l2s**k1)*l2, (1-a)*k2*(l3s**k1)*l3 - dldGama, dldPsi = (dldl1 + dldl2)*b, (dldl1 - dldl2 + 2.0*dldl3)*c - temp = (P*s11 + Q*s22)/Psi - dPsidP, dPsidQ, dPsidR = temp*s11, temp*s22, 0.5*s12**2/Psi - dlda = l1s**k + l2s**k - l3s**k - dldb = dldl1*Gamma + dldl2*Gamma - dldc = dldl1*Psi - dldl2*Psi + dldl3*2.0*Psi - dldk = a*math_ln(l1s)*l1s**k + a*math_ln(l2s)*l2s**k + (1-a)*math_ln(l3s)*l3s**k - - J = drdl*np.array([dldGama*s11+dldPsi*dPsidP*0.5, dldGama*(s11+s22)+dldPsi*(-dPsidP+dPsidQ)*0.5, #jd,je - dldGama*s22-dldPsi*dPsidQ*0.5, dldPsi*dPsidR*2.0*g, #jf,jg - dldb, dldc, dlda]) #jb,jc,ja - if mFix[0]: return np.vstack(J).T - else: return np.vstack((J, drdl*dldk + drdk)).T - - -def BBC2003(eqStress, paras, sigmas, mFix, criteria, dim, Jac=False): - """ - BBC2003 yield criterion - - the fitted parameters are - M,N,P,Q,R,S,T,a, k; k is optional - criteria are invalid input - """ - M,N,P,Q,R,S,T,a = paras[0:8] - if mFix[0]: k = mFix[1] - else: k = paras[-1] - - s11,s22,s12 = sigmas[0], sigmas[1], sigmas[3] - k2 = 2.0*k; k1 = k - 1.0 - Gamma = 0.5 * (s11 + M*s22) - Psi = ( 0.25*(N*s11 - P*s22)**2 + Q*Q*s12**2 )**0.5 - Lambda = ( 0.25*(R*s11 - S*s22)**2 + T*T*s12**2 )**0.5 - - l1, l2, l3 = Gamma + Psi, Gamma - Psi, 2.0*Lambda - l1s,l2s,l3s = l1**2, l2**2, l3**2 - left = a*l1s**k + a*l2s**k + (1-a)*l3s**k - r = left**(1.0/k2)/eqStress - if not Jac: - return (r - 1.0).ravel() - else: - drdl,drdk = r/left/k2, r*math_ln(left)*(-1.0/k2/k) - dldl1,dldl2,dldl3 = a*k2*(l1s**k1)*l1, a*k2*(l2s**k1)*l2, (1-a)*k2*(l3s**k1)*l3 - - dldGamma, dldPsi, dldLambda = dldl1+dldl2, dldl1-dldl2, 2.0*dldl3 - temp = 0.25/Psi*(N*s11 - P*s22) - dPsidN, dPsidP, dPsidQ = s11*temp, -s22*temp, 
Q*s12**2/Psi - temp = 0.25/Lambda*(R*s11 - S*s22) - dLambdadR, dLambdadS, dLambdadT = s11*temp, -s22*temp, T*s12**2/Psi - dldk = a*math_ln(l1s)*l1s**k + a*math_ln(l2s)*l2s**k + (1-a)*math_ln(l3s)*l3s**k - - J = drdl * np.array([dldGamma*s22*0.5, #jM - dldPsi*dPsidN, dldPsi*dPsidP, dldPsi*dPsidQ, #jN, jP, jQ - dldLambda*dLambdadR, dldLambda*dLambdadS, dldLambda*dLambdadT, #jR, jS, jT - l1s**k + l2s**k - l3s**k ]) #ja - - if mFix[0]: return np.vstack(J).T - else : return np.vstack((J, drdl*dldk+drdk)).T - -def BBC2005(eqStress, paras, sigmas, mFix, criteria, dim, Jac=False): - """ - BBC2005 yield criterion - - the fitted parameters are - a, b, L ,M, N, P, Q, R, k k are optional - criteria is invalid input - """ - a,b,L, M, N, P, Q, R = paras[0:8] - if mFix[0]: k = mFix[1] - else: k = paras[-1] - - s11 = sigmas[0]; s22 = sigmas[1]; s12 = sigmas[3] - k2 = 2.0*k - Gamma = L*s11 + M*s22 - Lambda = ( (N*s11 - P*s22)**2 + s12**2 )**0.5 - Psi = ( (Q*s11 - R*s22)**2 + s12**2 )**0.5 - - l1 = Lambda + Gamma; l2 = Lambda - Gamma; l3 = Lambda + Psi; l4 = Lambda - Psi - l1s = l1**2; l2s = l2**2; l3s = l3**2; l4s = l4**2 - left = a*l1s**k + a*l2s**k + b*l3s**k + b*l4s**k - sBar = left**(1.0/k2); r = sBar/eqStress - 1.0 - if not Jac: - return r.ravel() - else: - ln = lambda x : np.log(x + 1.0e-32) - expo = 0.5/k; k1 = k-1.0 - - dsBardl = expo*sBar/left/eqStress - dsBarde = sBar*ln(left); dedk = expo/(-k) - dldl1 = a*k*(l1s**k1)*(2.0*l1) - dldl2 = a*k*(l2s**k1)*(2.0*l2) - dldl3 = b*k*(l3s**k1)*(2.0*l3) - dldl4 = b*k*(l4s**k1)*(2.0*l4) - - dldLambda = dldl1 + dldl2 + dldl3 + dldl4 - dldGama = dldl1 - dldl2 - dldPsi = dldl3 - dldl4 - temp = (N*s11 - P*s22)/Lambda - dLambdadN = s11*temp; dLambdadP = -s22*temp - temp = (Q*s11 - R*s22)/Psi - dPsidQ = s11*temp; dPsidR = -s22*temp - dldk = a*ln(l1s)*l1s**k + a*ln(l2s)*l2s**k + b*ln(l3s)*l3s**k + b*ln(l4s)*l4s**k - - J = dsBardl * np.array( [ - l1s**k+l2s**k, l3s**k+l4s**k,dldGama*s11,dldGama*s22,dldLambda*dLambdadN, - dldLambda*dLambdadP, dldPsi*dPsidQ, dldPsi*dPsidR]) - - if mFix[0]: return np.vstack(J).T - else : return np.vstack(J, dldk+dsBarde*dedk).T - -def Yld2000(eqStress, paras, sigmas, mFix, criteria, dim, Jac=False): - """ - Yld2000 yield criterion - - C: c11,c22,c66 c12=c21=1.0 JAC NOT PASS - D: d11,d12,d21,d22,d66 - """ - C,D = paras[0:3], paras[3:8] - if mFix[0]: m = mFix[1] - else: m = paras[-1] - - s11, s22, s12 = sigmas[0],sigmas[1],sigmas[3] - X = np.array([ 2.0*C[0]*s11-C[0]*s22, 2.0*C[1]*s22-C[1]*s11, 3.0*C[2]*s12 ])/3.0 # a1,a2,a7 - Y = np.array([ (8.0*D[2]-2.0*D[0]-2.0*D[3]+2.0*D[1])*s11 + (4.0*D[3]-4.0*D[1]-4.0*D[2]+ D[0])*s22, - (4.0*D[0]-4.0*D[2]-4.0*D[1]+ D[3])*s11 + (8.0*D[1]-2.0*D[3]-2.0*D[0]+2.0*D[2])*s22, - 9.0*D[4]*s12 ])/9.0 - - def priStrs(s): - temp = np.sqrt( (s[0]-s[1])**2 + 4.0*s[2]**2 ) - return 0.5*(s[0]+s[1] + temp), 0.5*(s[0]+s[1] - temp) - m2 = m/2.0; m21 = m2 - 1.0 - (X1,X2), (Y1,Y2) = priStrs(X), priStrs(Y) # Principal values of X, Y - phi1s, phi21s, phi22s = (X1-X2)**2, (2.0*Y2+Y1)**2, (2.0*Y1+Y2)**2 - phi1, phi21, phi22 = phi1s**m2, phi21s**m2, phi22s**m2 - left = phi1 + phi21 + phi22 - r = (0.5*left)**(1.0/m)/eqStress - - if not Jac: - return (r-1.0).ravel() - else: - drdl, drdm = r/m/left, r*math_ln(0.5*left)*(-1.0/m/m) #/(-m*m) - dldm = ( phi1*math_ln(phi1s) + phi21*math_ln(phi21s) + phi22*math_ln(phi22s) )*0.5 - zero = np.zeros_like(s11); num = len(s11) - def dPrincipalds(X): - """Derivative of principla with respect to stress""" - temp = 1.0/np.sqrt( (X[0]-X[1])**2 + 4.0*X[2]**2 ) - dP1dsi = 0.5*np.array([ 
1.0+temp*(X[0]-X[1]), 1.0-temp*(X[0]-X[1]), temp*4.0*X[2]]) - dP2dsi = 0.5*np.array([ 1.0-temp*(X[0]-X[1]), 1.0+temp*(X[0]-X[1]), -temp*4.0*X[2]]) - return np.array([dP1dsi, dP2dsi]) - - dXdXi, dYdYi = dPrincipalds(X), dPrincipalds(Y) - dXidC = np.array([ [ 2.0*s11-s22, zero, zero ], #dX11dC - [ zero, 2.0*s22-s11, zero ], #dX22dC - [ zero, zero, 3.0*s12 ] ])/3.0 #dX12dC - dYidD = np.array([ [ -2.0*s11+ s22, 2.0*s11-4.0*s22, 8.0*s11-4.0*s22, -2.0*s11+4.0*s22, zero ], #dY11dD - [ 4.0*s11-2.0*s22, -4.0*s11+8.0*s22, -4.0*s11+2.0*s22, s11-2.0*s22, zero ], #dY22dD - [ zero, zero, zero, zero, 9.0*s12 ] ])/9.0 #dY12dD - - dXdC=np.array([np.dot(dXdXi[:,:,i], dXidC[:,:,i]).T for i in range(num)]).T - dYdD=np.array([np.dot(dYdYi[:,:,i], dYidD[:,:,i]).T for i in range(num)]).T - - dldX = m*np.array([ phi1s**m21*(X1-X2), phi1s**m21*(X2-X1)]) - dldY = m*np.array([phi21s**m21*(2.0*Y2+Y1) + 2.0*phi22s**m21*(2.0*Y1+Y2), \ - phi22s**m21*(2.0*Y1+Y2) + 2.0*phi21s**m21*(2.0*Y2+Y1) ]) - jC = drdl*np.array([np.dot(dldX[:,i], dXdC[:,:,i]) for i in range(num)]).T - jD = drdl*np.array([np.dot(dldY[:,i], dYdD[:,:,i]) for i in range(num)]).T - - jm = drdl*dldm + drdm - if mFix[0]: return np.vstack((jC,jD)).T - else: return np.vstack((jC,jD,jm)).T - -def Yld200418p(eqStress, paras, sigmas, mFix, criteria, dim, Jac=False): - """ - Yld2004-18p yield criterion - - the fitted parameters are - C: c12,c21,c23,c32,c31,c13,c44,c55,c66; D: d12,d21,d23,d32,d31,d13,d44,d55,d66 for 3D - C: c12,c21,c23,c32,c31,c13,c44; D: d12,d21,d23,d32,d31,d13,d44 for 2D - and m, m are optional - criteria is ignored - """ - if dim == 2: C,D = np.append(paras[0:7],[0.0,0.0]), np.append(paras[7:14],[0.0,0.0]) - else: C,D = paras[0:9], paras[9:18] - if mFix[0]: m = mFix[1] - else: m = paras[-1] - - sv = (sigmas[0] + sigmas[1] + sigmas[2])/3.0 - sdev = np.vstack((sigmas[0:3]-sv,sigmas[3:6])) - ys = lambda sdev, C: np.array([-C[0]*sdev[1]-C[5]*sdev[2], -C[1]*sdev[0]-C[2]*sdev[2], - -C[4]*sdev[0]-C[3]*sdev[1], C[6]*sdev[3], C[7]*sdev[4], C[8]*sdev[5]]) - p,q = ys(sdev, C), ys(sdev, D) - pLambdas, qLambdas = principalStress(p), principalStress(q) # no sort - - m2 = m/2.0; x3 = range(3); num = len(sv) - PiQj = np.array([(pLambdas[i,:]-qLambdas[j,:]) for i in x3 for j in x3]) - QiPj = np.array([(qLambdas[i,:]-pLambdas[j,:]) for i in x3 for j in x3]).reshape(3,3,num) - PiQjs = PiQj**2 - left = np.sum(PiQjs**m2,axis=0) - r = (0.25*left)**(1.0/m)/eqStress - - if not Jac: - return (r - 1.0).ravel() - else: - drdl, drdm = r/m/left, r*math_ln(0.25*left)*(-1.0/m/m) - dldm = np.sum(PiQjs**m2*math_ln(PiQjs),axis=0)*0.5 - dPdc, dQdd = principalStrs_Der(p, sdev, dim), principalStrs_Der(q, sdev, dim) - PiQjs3d = ( PiQjs**(m2-1.0) ).reshape(3,3,num) - dldP = -m*np.array([np.diag(np.dot(PiQjs3d[:,:,i], QiPj [:,:,i])) for i in range(num)]).T - dldQ = m*np.array([np.diag(np.dot(QiPj [:,:,i], PiQjs3d[:,:,i])) for i in range(num)]).T - - jm = drdl*dldm + drdm - jc = drdl*np.sum([dldP[i]*dPdc[i] for i in x3],axis=0) - jd = drdl*np.sum([dldQ[i]*dQdd[i] for i in x3],axis=0) - - if mFix[0]: return np.vstack((jc,jd)).T - else: return np.vstack((jc,jd,jm)).T - -def KarafillisBoyce(eqStress, paras, sigmas, mFix, criteria, dim, Jac=False): - """ - Karafillis-Boyce - - the fitted parameters are - c11,c12,c13,c14,c15,c16,c,m for 3D - c11,c12,c13,c14,c,m for plane stress - 0 1 and self.dimen == 2: - return fitCriteria[self.name]['labels'][1] - else: - return fitCriteria[self.name]['labels'][0] - - def report_name(self): - return fitCriteria[self.name]['name'] - - def 
fit(self,stress): - global fitResults; fitErrors; fitResidual - if options.exponent > 0.0: nExponent = options.exponent - else: nExponent = 0 - nameCriterion = self.name.lower() - criteria = Criteria(nameCriterion,self.uniaxial,self.expo, self.dimen) - bounds = fitCriteria[nameCriterion]['bound'][dDim] # Default bounds, no bound - guess0 = Guess # Default initial guess, depends on bounds - - if fitResults == []: - initialguess = guess0 - else: - initialguess = np.array(fitResults[-1]) - - ydata = np.zeros(np.shape(stress)[1]) - try: - popt, pcov, infodict, errmsg, ierr = \ - leastsqBound (criteria.fun, initialguess, args=(ydata,stress), - bounds=bounds, Dfun=criteria.jac, full_output=True) - if ierr not in [1, 2, 3, 4]: - raise RuntimeError("Optimal parameters not found: "+errmsg) - else: - residual = criteria.fun(popt, ydata, stress) - fitResidual.append(np.linalg.norm(residual)/np.sqrt(len(residual))) - if (len(ydata) > len(initialguess)) and pcov is not None: - s_sq = (criteria.fun(popt, *(ydata,stress))**2).sum()/(len(ydata)-len(initialguess)) - pcov = pcov * s_sq - perr = np.sqrt(np.diag(pcov)) - fitResults.append(popt.tolist()) - fitErrors .append(perr.tolist()) - - popt = np.concatenate((np.array(popt), np.repeat(options.exponent,nExponent))) - perr = np.concatenate((np.array(perr), np.repeat(0.0,nExponent))) - - damask.util.croak('Needed {} function calls for fitting'.format(infodict['nfev'])) - except Exception as detail: - damask.util.croak(detail) - pass - return popt - -#--------------------------------------------------------------------------------------------------- -class myThread (threading.Thread): - """Runner""" - - def __init__(self, threadID): - threading.Thread.__init__(self) - self.threadID = threadID - def run(self): - semaphore.acquire() - conv=converged() - semaphore.release() - while not conv: - if options.criterion=='facet': - doSimForFacet(self.name) - else: - doSim(self.name) - semaphore.acquire() - conv=converged() - semaphore.release() - -def doSim(thread): - semaphore.acquire() - global myLoad - loadNo=loadcaseNo() - if not os.path.isfile('%s.load'%loadNo): - damask.util.croak('Generating load case for simulation %s (%s)'%(loadNo,thread)) - f=open('%s.load'%loadNo,'w') - f.write(myLoad.getLoadcase(loadNo)) - f.close() - semaphore.release() - else: semaphore.release() - -# if spectralOut does not exist, run simulation - semaphore.acquire() - if not os.path.isfile('%s_%i.spectralOut'%(options.geometry,loadNo)): - damask.util.croak('Starting simulation %i (%s)'%(loadNo,thread)) - semaphore.release() - damask.util.execute('DAMASK_spectral -g %s -l %i'%(options.geometry,loadNo)) - else: semaphore.release() - -# reading values from ASCII file - semaphore.acquire() - damask.util.croak('Reading values from simulation %i (%s)'%(loadNo,thread)) - semaphore.release() - refFile = '%s_%i.yield'%(options.geometry,loadNo) - yieldStress = np.empty((6),'d') - if not os.path.isfile(refFile): - validity = False - else: - validity = True - yieldData = np.loadtxt(refFile) - stress = yieldData[:3] - yieldStress = t33toSym6(stress) -# do the actual fitting procedure and write results to file - semaphore.acquire() - global stressAll - f=open(options.geometry+'_'+options.criterion+'_'+str(time.time())+'.txt','w') - f.write(' '.join([options.fitting]+myFit.report_labels())+'\n') - try: - if validity: - stressAll=np.append(stressAll, yieldStress/stressUnit) - f.write(' '.join(map(str,myFit.fit(stressAll.reshape(len(stressAll)//6,6).transpose())))+'\n') - except Exception: - 
damask.util.croak('Could not fit results of simulation (%s)'%thread) - semaphore.release() - return - damask.util.croak('\n') - semaphore.release() - -def doSimForFacet(thread): - semaphore.acquire() - global myLoad - loadNo=loadcaseNo() - if not os.path.isfile('%s.load'%loadNo): - damask.util.croak('Generating load case for simulation %s (%s)'%(loadNo,thread)) - f=open('%s.load'%loadNo,'w') - f.write(myLoad.getLoadcase(loadNo)) - f.close() - semaphore.release() - else: semaphore.release() - -# if spectralOut does not exist, run simulation - semaphore.acquire() - if not os.path.isfile('%s_%i.spectralOut'%(options.geometry,loadNo)): - damask.util.croak('Starting simulation %i (%s)'%(loadNo,thread)) - semaphore.release() - damask.util.execute('DAMASK_spectral -g %s -l %i'%(options.geometry,loadNo)) - else: semaphore.release() - -def loadcaseNo(): - global N_simulations - N_simulations+=1 - return N_simulations - -def converged(): - global N_simulations; fitResidual - - if options.criterion=='facet': - if N_simulations == options.numpoints: - return True - else: - return False - else: - if N_simulations < options.max: - if len(fitResidual) > 5 and N_simulations >= options.min: - residualList = np.array(fitResidual[len(fitResidual)-5:]) - if np.std(residualList)/np.max(residualList) < 0.05: - return True - return False - else: - return True - -# -------------------------------------------------------------------- -# MAIN -# -------------------------------------------------------------------- - -parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """ -Performs calculations with various loads on given geometry file and fits yield surface. - -""", version = scriptID) - -# maybe make an option to specifiy if 2D/3D fitting should be done? 
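The converged() function above stops the virtual tests either when the simulation budget is exhausted or when the last five fit residuals scatter by less than 5 % of their maximum. A minimal sketch of that stopping rule, written with hypothetical arguments instead of the script's globals:

import numpy as np

def converged(residuals, n_done, n_min=12, n_max=20, window=5, rel_tol=0.05):
    """Stop once the budget is exhausted or the recent fit residuals have settled."""
    if n_done >= n_max:
        return True
    if n_done >= n_min and len(residuals) > window:
        recent = np.array(residuals[-window:])
        return np.std(recent)/np.max(recent) < rel_tol   # relative scatter of the window
    return False

print(converged([0.110, 0.102, 0.101, 0.100, 0.100, 0.099], n_done=15))   # True: residuals settled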
- -parser.add_option('-l','--load' , dest='load', type='float', nargs=3, - help='load: final strain; increments; time %default', metavar='float int float') -parser.add_option('-g','--geometry', dest='geometry', type='string', - help='name of the geometry file [%default]', metavar='string') -parser.add_option('-c','--criterion', dest='criterion', choices=fitCriteria.keys(), - help='criterion for stopping simulations [%default]', metavar='string') -parser.add_option('-f','--fitting', dest='fitting', choices=thresholdParameter, - help='yield criterion [%default]', metavar='string') -parser.add_option('-y','--yieldvalue', dest='yieldValue', type='float', - help='yield points %default', metavar='float') -parser.add_option('--min', dest='min', type='int', - help='minimum number of simulations [%default]', metavar='int') -parser.add_option('--max', dest='max', type='int', - help='maximum number of iterations [%default]', metavar='int') -parser.add_option('-t','--threads', dest='threads', type='int', - help='number of parallel executions [%default]', metavar='int') -parser.add_option('-b','--bound', dest='bounds', type='float', nargs=2, - help='yield points: start; end; count %default', metavar='float float') -parser.add_option('-d','--dimension', dest='dimension', type='choice', choices=['2','3'], - help='dimension of the virtual test [%default]', metavar='int') -parser.add_option('-e', '--exponent', dest='exponent', type='float', - help='exponent of non-quadratic criteria', metavar='int') -parser.add_option('-u', '--uniaxial', dest='eqStress', type='float', - help='Equivalent stress', metavar='float') -parser.add_option('--flag', dest='flag', type='string', - help='yield stop flag, totalStrain, plasticStrain or plasticWork', metavar='string') -parser.add_option('--numpoints', dest='numpoints', type='int', - help='number of yield points to fit facet potential [%default]', metavar='int') -parser.add_option('--order', dest='order', type='int', - help='order of facet potential [%default]', metavar='int') - -parser.set_defaults(min = 12, - max = 20, - threads = 4, - yieldValue = 0.002, - load = (0.010,100,100.0), - criterion = 'vonmises', - fitting = 'totalshear', - geometry = '20grains16x16x16', - bounds = None, - dimension = '3', - exponent = -1.0, - flag = 'totalStrain', - numpoints = 100, - order = 8 - ) - -options = parser.parse_args()[0] - -if options.threads < 1: - parser.error('invalid number of threads {}'.format(options.threads)) -if options.min < 0: - parser.error('invalid minimum number of simulations {}'.format(options.min)) -if options.max < options.min: - parser.error('invalid maximum number of simulations (below minimum)') - -for check in [options.geometry+'.geom','numerics.config','material.config']: - if not os.path.isfile(check): - damask.util.croak('"{}" file not found'.format(check)) - -options.dimension = int(options.dimension) - -stressUnit = 1.0e9 if options.criterion == 'hill1948' else 1.0e6 - - -if options.dimension not in fitCriteria[options.criterion]['dimen']: - parser.error('invalid dimension for selected criterion') - -if options.criterion not in ['vonmises','tresca','drucker','hill1948'] and options.eqStress is None: - parser.error('please specify an equivalent stress (e.g. 
fitting to von Mises)') - -# global variables -fitResults = [] -fitErrors = [] -fitResidual = [] -stressAll= np.zeros(0,'d').reshape(0,0) -N_simulations=0 -Guess = [] -threads=[] -semaphore=threading.Semaphore(1) -dDim = None -myLoad = None -myFit = None - -if options.criterion == 'facet': - run = runFit(options.exponent, options.eqStress, options.dimension, options.criterion) -else: - run = runFit(options.exponent, options.eqStress, options.dimension, options.criterion) - -damask.util.croak('Finished fitting to yield criteria') diff --git a/processing/post/DADF5_postResults.py b/processing/post/DADF5_postResults.py index 88e4d777a..a6dc0b34a 100755 --- a/processing/post/DADF5_postResults.py +++ b/processing/post/DADF5_postResults.py @@ -39,58 +39,36 @@ for filename in options.filenames: results = damask.DADF5(filename) if not results.structured: continue - delta = results.size/results.grid*0.5 - x, y, z = np.meshgrid(np.linspace(delta[2],results.size[2]-delta[2],results.grid[2]), - np.linspace(delta[1],results.size[1]-delta[1],results.grid[1]), - np.linspace(delta[0],results.size[0]-delta[0],results.grid[0]), - indexing = 'ij') - - coords = np.concatenate((z[:,:,:,None],y[:,:,:,None],x[:,:,:,None]),axis = 3) + if results.version_major == 0 and results.version_minor >= 5: + coords = damask.grid_filters.cell_coord0(results.grid,results.size,results.origin) + else: + coords = damask.grid_filters.cell_coord0(results.grid,results.size) + N_digits = int(np.floor(np.log10(int(results.increments[-1][3:]))))+1 + N_digits = 5 # hack to keep test intact for i,inc in enumerate(results.iter_visible('increments')): print('Output step {}/{}'.format(i+1,len(results.increments))) - header = '1 header\n' - - data = np.array([int(inc[3:]) for j in range(np.product(results.grid))]).reshape([np.product(results.grid),1]) - header+= 'inc' - - coords = coords.reshape([np.product(results.grid),3]) - data = np.concatenate((data,coords),1) - header+=' 1_pos 2_pos 3_pos' + table = damask.Table(np.ones(np.product(results.grid),dtype=int)*int(inc[3:]),{'inc':(1,)}) + table.add('pos',coords.reshape((-1,3))) results.set_visible('materialpoints',False) results.set_visible('constituents', True) for label in options.con: x = results.get_dataset_location(label) - if len(x) == 0: - continue - array = results.read_dataset(x,0,plain=True) - d = np.product(np.shape(array)[1:]) - data = np.concatenate((data,np.reshape(array,[np.product(results.grid),d])),1) - - if d>1: - header+= ''.join([' {}_{}'.format(j+1,label) for j in range(d)]) - else: - header+=' '+label + if len(x) != 0: + table.add(label,results.read_dataset(x,0,plain=True).reshape((results.grid.prod(),-1))) results.set_visible('constituents', False) results.set_visible('materialpoints',True) for label in options.mat: x = results.get_dataset_location(label) - if len(x) == 0: - continue - array = results.read_dataset(x,0,plain=True) - d = np.product(np.shape(array)[1:]) - data = np.concatenate((data,np.reshape(array,[np.product(results.grid),d])),1) - - if d>1: - header+= ''.join([' {}_{}'.format(j+1,label) for j in range(d)]) - else: - header+=' '+label + if len(x) != 0: + table.add(label,results.read_dataset(x,0,plain=True).reshape((results.grid.prod(),-1))) dirname = os.path.abspath(os.path.join(os.path.dirname(filename),options.dir)) if not os.path.isdir(dirname): os.mkdir(dirname,0o755) - file_out = '{}_{}.txt'.format(os.path.splitext(os.path.split(filename)[-1])[0],inc) - np.savetxt(os.path.join(dirname,file_out),data,header=header,comments='') + file_out = 
'{}_inc{}.txt'.format(os.path.splitext(os.path.split(filename)[-1])[0], + inc[3:].zfill(N_digits)) + table.to_ASCII(os.path.join(dirname,file_out)) diff --git a/processing/post/DADF5_vtk_cells.py b/processing/post/DADF5_vtk_cells.py deleted file mode 100755 index 9cd982e50..000000000 --- a/processing/post/DADF5_vtk_cells.py +++ /dev/null @@ -1,142 +0,0 @@ -#!/usr/bin/env python3 - -import os -import argparse - -import h5py -import numpy as np -import vtk -from vtk.util import numpy_support - -import damask - -scriptName = os.path.splitext(os.path.basename(__file__))[0] -scriptID = ' '.join([scriptName,damask.version]) - -# -------------------------------------------------------------------- -# MAIN -# -------------------------------------------------------------------- -parser = argparse.ArgumentParser() - -#ToDo: We need to decide on a way of handling arguments of variable lentght -#https://stackoverflow.com/questions/15459997/passing-integer-lists-to-python - -#parser.add_argument('--version', action='version', version='%(prog)s {}'.format(scriptID)) -parser.add_argument('filenames', nargs='+', - help='DADF5 files') -parser.add_argument('-d','--dir', dest='dir',default='postProc',metavar='string', - help='name of subdirectory relative to the location of the DADF5 file to hold output') -parser.add_argument('--mat', nargs='+', - help='labels for materialpoint',dest='mat') -parser.add_argument('--con', nargs='+', - help='labels for constituent',dest='con') - -options = parser.parse_args() - -if options.mat is None: options.mat=[] -if options.con is None: options.con=[] - -# --- loop over input files ------------------------------------------------------------------------ - -for filename in options.filenames: - results = damask.DADF5(filename) - - if results.structured: # for grid solvers use rectilinear grid - grid = vtk.vtkRectilinearGrid() - coordArray = [vtk.vtkDoubleArray(), - vtk.vtkDoubleArray(), - vtk.vtkDoubleArray(), - ] - - grid.SetDimensions(*(results.grid+1)) - for dim in [0,1,2]: - for c in np.linspace(0,results.size[dim],1+results.grid[dim]): - coordArray[dim].InsertNextValue(c) - - grid.SetXCoordinates(coordArray[0]) - grid.SetYCoordinates(coordArray[1]) - grid.SetZCoordinates(coordArray[2]) - else: - nodes = vtk.vtkPoints() - with h5py.File(filename) as f: - nodes.SetData(numpy_support.numpy_to_vtk(f['/geometry/x_n'][()],deep=True)) - grid = vtk.vtkUnstructuredGrid() - grid.SetPoints(nodes) - grid.Allocate(f['/geometry/T_c'].shape[0]) - for i in f['/geometry/T_c']: - grid.InsertNextCell(vtk.VTK_HEXAHEDRON,8,i-1) - - - for i,inc in enumerate(results.iter_visible('increments')): - print('Output step {}/{}'.format(i+1,len(results.increments))) - vtk_data = [] - - results.set_visible('materialpoints',False) - results.set_visible('constituents', True) - for label in options.con: - for p in results.iter_visible('con_physics'): - if p != 'generic': - for c in results.iter_visible('constituents'): - x = results.get_dataset_location(label) - if len(x) == 0: - continue - array = results.read_dataset(x,0) - shape = [array.shape[0],np.product(array.shape[1:])] - vtk_data.append(numpy_support.numpy_to_vtk(num_array=array.reshape(shape),deep=True,array_type= vtk.VTK_DOUBLE)) - vtk_data[-1].SetName('1_'+x[0].split('/',1)[1]) - grid.GetCellData().AddArray(vtk_data[-1]) - else: - x = results.get_dataset_location(label) - if len(x) == 0: - continue - array = results.read_dataset(x,0) - shape = [array.shape[0],np.product(array.shape[1:])] - 
vtk_data.append(numpy_support.numpy_to_vtk(num_array=array.reshape(shape),deep=True,array_type= vtk.VTK_DOUBLE)) - vtk_data[-1].SetName('1_'+x[0].split('/',1)[1]) - grid.GetCellData().AddArray(vtk_data[-1]) - - results.set_visible('constituents', False) - results.set_visible('materialpoints',True) - for label in options.mat: - for p in results.iter_visible('mat_physics'): - if p != 'generic': - for m in results.iter_visible('materialpoints'): - x = results.get_dataset_location(label) - if len(x) == 0: - continue - array = results.read_dataset(x,0) - shape = [array.shape[0],np.product(array.shape[1:])] - vtk_data.append(numpy_support.numpy_to_vtk(num_array=array.reshape(shape),deep=True,array_type= vtk.VTK_DOUBLE)) - vtk_data[-1].SetName('1_'+x[0].split('/',1)[1]) - grid.GetCellData().AddArray(vtk_data[-1]) - else: - x = results.get_dataset_location(label) - if len(x) == 0: - continue - array = results.read_dataset(x,0) - shape = [array.shape[0],np.product(array.shape[1:])] - vtk_data.append(numpy_support.numpy_to_vtk(num_array=array.reshape(shape),deep=True,array_type= vtk.VTK_DOUBLE)) - vtk_data[-1].SetName('1_'+x[0].split('/',1)[1]) - grid.GetCellData().AddArray(vtk_data[-1]) - - writer = vtk.vtkXMLRectilinearGridWriter() if results.structured else \ - vtk.vtkXMLUnstructuredGridWriter() - - results.set_visible('constituents', False) - results.set_visible('materialpoints',False) - x = results.get_dataset_location('u_n') - vtk_data.append(numpy_support.numpy_to_vtk(num_array=results.read_dataset(x,0),deep=True,array_type=vtk.VTK_DOUBLE)) - vtk_data[-1].SetName('u') - grid.GetPointData().AddArray(vtk_data[-1]) - - dirname = os.path.abspath(os.path.join(os.path.dirname(filename),options.dir)) - if not os.path.isdir(dirname): - os.mkdir(dirname,0o755) - file_out = '{}_{}.{}'.format(os.path.splitext(os.path.split(filename)[-1])[0],inc,writer.GetDefaultFileExtension()) - - writer.SetCompressorTypeToZLib() - writer.SetDataModeToBinary() - writer.SetFileName(os.path.join(dirname,file_out)) - writer.SetInputData(grid) - - writer.Write() diff --git a/processing/post/DADF5_vtk_points.py b/processing/post/DADF5_vtk_points.py deleted file mode 100755 index 87c1ad93e..000000000 --- a/processing/post/DADF5_vtk_points.py +++ /dev/null @@ -1,121 +0,0 @@ -#!/usr/bin/env python3 - -import os -import argparse - -import numpy as np -import vtk -from vtk.util import numpy_support - -import damask - -scriptName = os.path.splitext(os.path.basename(__file__))[0] -scriptID = ' '.join([scriptName,damask.version]) - -# -------------------------------------------------------------------- -# MAIN -# -------------------------------------------------------------------- -parser = argparse.ArgumentParser() - -#ToDo: We need to decide on a way of handling arguments of variable lentght -#https://stackoverflow.com/questions/15459997/passing-integer-lists-to-python - -#parser.add_argument('--version', action='version', version='%(prog)s {}'.format(scriptID)) -parser.add_argument('filenames', nargs='+', - help='DADF5 files') -parser.add_argument('-d','--dir', dest='dir',default='postProc',metavar='string', - help='name of subdirectory relative to the location of the DADF5 file to hold output') -parser.add_argument('--mat', nargs='+', - help='labels for materialpoint',dest='mat') -parser.add_argument('--con', nargs='+', - help='labels for constituent',dest='con') - -options = parser.parse_args() - -if options.mat is None: options.mat=[] -if options.con is None: options.con=[] - -# --- loop over input files 
------------------------------------------------------------------------ - -for filename in options.filenames: - results = damask.DADF5(filename) - - Points = vtk.vtkPoints() - Vertices = vtk.vtkCellArray() - for c in results.cell_coordinates(): - pointID = Points.InsertNextPoint(c) - Vertices.InsertNextCell(1) - Vertices.InsertCellPoint(pointID) - - Polydata = vtk.vtkPolyData() - Polydata.SetPoints(Points) - Polydata.SetVerts(Vertices) - Polydata.Modified() - - for i,inc in enumerate(results.iter_visible('increments')): - print('Output step {}/{}'.format(i+1,len(results.increments))) - vtk_data = [] - - results.set_visible('materialpoints',False) - results.set_visible('constituents', True) - for label in options.con: - - for p in results.iter_visible('con_physics'): - if p != 'generic': - for c in results.iter_visible('constituents'): - x = results.get_dataset_location(label) - if len(x) == 0: - continue - array = results.read_dataset(x,0) - shape = [array.shape[0],np.product(array.shape[1:])] - vtk_data.append(numpy_support.numpy_to_vtk(num_array=array.reshape(shape),deep=True,array_type= vtk.VTK_DOUBLE)) - vtk_data[-1].SetName('1_'+x[0].split('/',1)[1]) - Polydata.GetCellData().AddArray(vtk_data[-1]) - else: - x = results.get_dataset_location(label) - if len(x) == 0: - continue - array = results.read_dataset(x,0) - shape = [array.shape[0],np.product(array.shape[1:])] - vtk_data.append(numpy_support.numpy_to_vtk(num_array=array.reshape(shape),deep=True,array_type= vtk.VTK_DOUBLE)) - vtk_data[-1].SetName('1_'+x[0].split('/',1)[1]) - Polydata.GetCellData().AddArray(vtk_data[-1]) - - results.set_visible('constituents', False) - results.set_visible('materialpoints',True) - for label in options.mat: - for p in results.iter_visible('mat_physics'): - if p != 'generic': - for m in results.iter_visible('materialpoints'): - x = results.get_dataset_location(label) - if len(x) == 0: - continue - array = results.read_dataset(x,0) - shape = [array.shape[0],np.product(array.shape[1:])] - vtk_data.append(numpy_support.numpy_to_vtk(num_array=array.reshape(shape),deep=True,array_type= vtk.VTK_DOUBLE)) - vtk_data[-1].SetName('1_'+x[0].split('/',1)[1]) - Polydata.GetCellData().AddArray(vtk_data[-1]) - else: - x = results.get_dataset_location(label) - if len(x) == 0: - continue - array = results.read_dataset(x,0) - shape = [array.shape[0],np.product(array.shape[1:])] - vtk_data.append(numpy_support.numpy_to_vtk(num_array=array.reshape(shape),deep=True,array_type= vtk.VTK_DOUBLE)) - vtk_data[-1].SetName('1_'+x[0].split('/',1)[1]) - Polydata.GetCellData().AddArray(vtk_data[-1]) - - writer = vtk.vtkXMLPolyDataWriter() - - - dirname = os.path.abspath(os.path.join(os.path.dirname(filename),options.dir)) - if not os.path.isdir(dirname): - os.mkdir(dirname,0o755) - file_out = '{}_{}.{}'.format(os.path.splitext(os.path.split(filename)[-1])[0],inc,writer.GetDefaultFileExtension()) - - writer.SetCompressorTypeToZLib() - writer.SetDataModeToBinary() - writer.SetFileName(os.path.join(dirname,file_out)) - writer.SetInputData(Polydata) - - writer.Write() diff --git a/processing/post/DADF5toDREAM3D.py b/processing/post/DADF5toDREAM3D.py index 885545297..7ab04b934 100755 --- a/processing/post/DADF5toDREAM3D.py +++ b/processing/post/DADF5toDREAM3D.py @@ -49,7 +49,7 @@ Phase_types = {'Primary': 0} #further additions to these can be done by looking # -------------------------------------------------------------------- parser = argparse.ArgumentParser(description='Creating a file for DREAM3D from DAMASK data') 
parser.add_argument('filenames',nargs='+',help='HDF5 based output file') -parser.add_argument('--inc',nargs='+',help='Increment for which DREAM3D to be used, eg. 00025',type=int) +parser.add_argument('--inc',nargs='+',help='Increment for which DREAM3D to be used, eg. 25',type=int) parser.add_argument('-d','--dir', dest='dir',default='postProc',metavar='string', help='name of subdirectory to hold output') @@ -59,15 +59,13 @@ options = parser.parse_args() # loop over input files for filename in options.filenames: f = damask.DADF5(filename) #DAMASK output file - count = 0 - for increment in f.increments: - if int(increment[3:]) not in options.inc: - count = count + 1 + for increment in options.inc: + f.set_by_increment(increment,increment) + if len(f.visible['increments']) == 0: continue #-------output file creation------------------------------------- dirname = os.path.abspath(os.path.join(os.path.dirname(filename),options.dir)) - print(dirname) try: os.mkdir(dirname) except FileExistsError: @@ -90,11 +88,10 @@ for filename in options.filenames: # Phase information of DREAM.3D is constituent ID in DAMASK o[cell_data_label + '/Phases'] = f.get_constituent_ID().reshape(tuple(f.grid)+(1,)) # Data quaternions - DAMASK_quaternion = f.read_dataset(f.get_dataset_location('orientation'),0) - DREAM_3D_quaternion = np.empty((np.prod(f.grid),4),dtype=np.float32) + DAMASK_quaternion = f.read_dataset(f.get_dataset_location('orientation')) # Convert: DAMASK uses P = -1, DREAM.3D uses P = +1. Also change position of imagninary part DREAM_3D_quaternion = np.hstack((-DAMASK_quaternion['x'],-DAMASK_quaternion['y'],-DAMASK_quaternion['z'], - DAMASK_quaternion['w'])) + DAMASK_quaternion['w'])).astype(np.float32) o[cell_data_label + '/Quats'] = DREAM_3D_quaternion.reshape(tuple(f.grid)+(4,)) # Attributes to CellData group @@ -109,12 +106,14 @@ for filename in options.filenames: # phase attributes o[cell_data_label + '/Phases'].attrs['ComponentDimensions'] = np.array([1],np.uint64) o[cell_data_label + '/Phases'].attrs['ObjectType'] = 'DataArray' + o[cell_data_label + '/Phases'].attrs['TupleDimensions'] = f.grid.astype(np.uint64) # Quats attributes o[cell_data_label + '/Quats'].attrs['ComponentDimensions'] = np.array([4],np.uint64) o[cell_data_label + '/Quats'].attrs['ObjectType'] = 'DataArray' - - # Create EnsembleAttributeMatrix + o[cell_data_label + '/Quats'].attrs['TupleDimensions'] = f.grid.astype(np.uint64) + + # Create EnsembleAttributeMatrix ensemble_label = data_container_label + '/EnsembleAttributeMatrix' # Data CrystalStructures diff --git a/processing/post/addAPS34IDEstrainCoords.py b/processing/post/addAPS34IDEstrainCoords.py index fe834cf38..c14983799 100755 --- a/processing/post/addAPS34IDEstrainCoords.py +++ b/processing/post/addAPS34IDEstrainCoords.py @@ -2,6 +2,7 @@ import os import sys +from io import StringIO from optparse import OptionParser import numpy as np @@ -24,61 +25,33 @@ Transform X,Y,Z,F APS BeamLine 34 coordinates to x,y,z APS strain coordinates. 
parser.add_option('-f','--frame',dest='frame', metavar='string', help='label of APS X,Y,Z coords') -parser.add_option('--depth', dest='depth', metavar='string', +parser.add_option('--depth', dest='depth', metavar='string', help='depth') (options,filenames) = parser.parse_args() +if filenames == []: filenames = [None] if options.frame is None: parser.error('frame not specified') if options.depth is None: parser.error('depth not specified') -# --- loop over input files ------------------------------------------------------------------------ -if filenames == []: filenames = [None] +theta=-0.75*np.pi +RotMat2TSL=np.array([[1., 0., 0.], + [0., np.cos(theta), np.sin(theta)], # Orientation to account for -135 deg + [0., -np.sin(theta), np.cos(theta)]]) # rotation for TSL convention for name in filenames: - try: table = damask.ASCIItable(name = name, - buffered = False) - except: continue - damask.util.report(scriptName,name) + damask.util.report(scriptName,name) -# ------------------------------------------ read header ------------------------------------------ + table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name) + + coord = - table.get(options.frame) + coord[:,2] += table.get(options.depth)[:,0] - table.head_read() + table.add('coord', + np.einsum('ijk,ik->ij',np.broadcast_to(RotMat2TSL,(coord.shape[0],3,3)),coord), + scriptID+' '+' '.join(sys.argv[1:])) -# ------------------------------------------ sanity checks ----------------------------------------- - errors = [] - if table.label_dimension(options.frame) != 3: - errors.append('input {} does not have dimension 3.'.format(options.frame)) - if table.label_dimension(options.depth) != 1: - errors.append('input {} does not have dimension 1.'.format(options.depth)) - if errors != []: - damask.util.croak(errors) - table.close(dismiss = True) - continue - - table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:])) - -# ------------------------------------------ assemble header --------------------------------------- - table.labels_append(['%i_coord'%(i+1) for i in range(3)]) # extend ASCII header with new labels - table.head_write() - -# ------------------------------------------ process data ------------------------------------------ - theta=-0.75*np.pi - RotMat2TSL=np.array([[1., 0., 0.], - [0., np.cos(theta), np.sin(theta)], # Orientation to account for -135 deg - [0., -np.sin(theta), np.cos(theta)]]) # rotation for TSL convention - outputAlive = True - while outputAlive and table.data_read(): # read next data line of ASCII table - coord = list(map(float,table.data[table.label_index(options.frame):table.label_index(options.frame)+3])) - depth = float(table.data[table.label_index(options.depth)]) - - table.data_append(np.dot(RotMat2TSL,np.array([-coord[0],-coord[1],-coord[2]+depth]))) - - outputAlive = table.data_write() # output processed line - -# ------------------------------------------ output finalization ----------------------------------- - - table.close() # close ASCII tables + table.to_ASCII(sys.stdout if name is None else name) diff --git a/processing/post/addCalculation.py b/processing/post/addCalculation.py index db0428753..b1eed3c6d 100755 --- a/processing/post/addCalculation.py +++ b/processing/post/addCalculation.py @@ -4,7 +4,7 @@ import os import sys from optparse import OptionParser import re -import collections +from collections.abc import Iterable import math # noqa import scipy # noqa @@ -18,7 +18,7 @@ scriptName = os.path.splitext(os.path.basename(__file__))[0] scriptID = ' 
'.join([scriptName,damask.version]) def listify(x): - return x if isinstance(x, collections.Iterable) else [x] + return x if isinstance(x, Iterable) else [x] # -------------------------------------------------------------------- @@ -65,9 +65,10 @@ for i in range(len(options.formulas)): if filenames == []: filenames = [None] for name in filenames: - try: table = damask.ASCIItable(name = name, - buffered = False) - except: continue + try: + table = damask.ASCIItable(name = name, buffered = False) + except IOError: + continue damask.util.report(scriptName,name) # ------------------------------------------ read header ------------------------------------------- diff --git a/processing/post/addCauchy.py b/processing/post/addCauchy.py index 18c4ec215..afc5a57be 100755 --- a/processing/post/addCauchy.py +++ b/processing/post/addCauchy.py @@ -2,10 +2,9 @@ import os import sys +from io import StringIO from optparse import OptionParser -import numpy as np - import damask @@ -36,54 +35,15 @@ parser.set_defaults(defgrad = 'f', ) (options,filenames) = parser.parse_args() - -# --- loop over input files ------------------------------------------------------------------------- - if filenames == []: filenames = [None] for name in filenames: - try: - table = damask.ASCIItable(name = name, buffered = False) - except: - continue - damask.util.report(scriptName,name) + damask.util.report(scriptName,name) -# ------------------------------------------ read header ------------------------------------------ + table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name) + table.add('Cauchy', + damask.mechanics.Cauchy(table.get(options.defgrad).reshape(-1,3,3), + table.get(options.stress ).reshape(-1,3,3)).reshape(-1,9), + scriptID+' '+' '.join(sys.argv[1:])) - table.head_read() - -# ------------------------------------------ sanity checks ---------------------------------------- - - errors = [] - column = {} - - for tensor in [options.defgrad,options.stress]: - dim = table.label_dimension(tensor) - if dim < 0: errors.append('column {} not found.'.format(tensor)) - elif dim != 9: errors.append('column {} is not a tensor.'.format(tensor)) - else: - column[tensor] = table.label_index(tensor) - - if errors != []: - damask.util.croak(errors) - table.close(dismiss = True) - continue - -# ------------------------------------------ assemble header -------------------------------------- - - table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:])) - table.labels_append(['{}_Cauchy'.format(i+1) for i in range(9)]) # extend ASCII header with new labels - table.head_write() - -# ------------------------------------------ process data ------------------------------------------ - - outputAlive = True - while outputAlive and table.data_read(): # read next data line of ASCII table - F = np.array(list(map(float,table.data[column[options.defgrad]:column[options.defgrad]+9])),'d').reshape(3,3) - P = np.array(list(map(float,table.data[column[options.stress ]:column[options.stress ]+9])),'d').reshape(3,3) - table.data_append(list(1.0/np.linalg.det(F)*np.dot(P,F.T).reshape(9))) # [Cauchy] = (1/det(F)) * [P].[F_transpose] - outputAlive = table.data_write() # output processed line - -# ------------------------------------------ output finalization ----------------------------------- - - table.close() # close input ASCII table (works for stdin) + table.to_ASCII(sys.stdout if name is None else name) diff --git a/processing/post/addCompatibilityMismatch.py b/processing/post/addCompatibilityMismatch.py 
index 7556cb863..c7c5086ca 100755 --- a/processing/post/addCompatibilityMismatch.py +++ b/processing/post/addCompatibilityMismatch.py @@ -1,11 +1,11 @@ #!/usr/bin/env python3 import os -import math +import sys +from io import StringIO from optparse import OptionParser import numpy as np -import scipy.ndimage import damask @@ -13,78 +13,6 @@ import damask scriptName = os.path.splitext(os.path.basename(__file__))[0] scriptID = ' '.join([scriptName,damask.version]) -#-------------------------------------------------------------------------------------------------- -def cell2node(cellData,grid): - - nodeData = 0.0 - datalen = np.array(cellData.shape[3:]).prod() - - for i in range(datalen): - node = scipy.ndimage.convolve(cellData.reshape(tuple(grid[::-1])+(datalen,))[...,i], - np.ones((2,2,2))/8., # 2x2x2 neighborhood of cells - mode = 'wrap', - origin = -1, # offset to have cell origin as center - ) # now averaged at cell origins - node = np.append(node,node[np.newaxis,0,:,:,...],axis=0) # wrap along z - node = np.append(node,node[:,0,np.newaxis,:,...],axis=1) # wrap along y - node = np.append(node,node[:,:,0,np.newaxis,...],axis=2) # wrap along x - - nodeData = node[...,np.newaxis] if i==0 else np.concatenate((nodeData,node[...,np.newaxis]),axis=-1) - - return nodeData - -#-------------------------------------------------------------------------------------------------- -def deformationAvgFFT(F,grid,size,nodal=False,transformed=False): - """Calculate average cell center (or nodal) deformation for deformation gradient field specified in each grid cell""" - if nodal: - x, y, z = np.meshgrid(np.linspace(0,size[2],1+grid[2]), - np.linspace(0,size[1],1+grid[1]), - np.linspace(0,size[0],1+grid[0]), - indexing = 'ij') - else: - x, y, z = np.meshgrid(np.linspace(size[2]/grid[2]/2.,size[2]-size[2]/grid[2]/2.,grid[2]), - np.linspace(size[1]/grid[1]/2.,size[1]-size[1]/grid[1]/2.,grid[1]), - np.linspace(size[0]/grid[0]/2.,size[0]-size[0]/grid[0]/2.,grid[0]), - indexing = 'ij') - - origCoords = np.concatenate((z[:,:,:,None],y[:,:,:,None],x[:,:,:,None]),axis = 3) - - F_fourier = F if transformed else np.fft.rfftn(F,axes=(0,1,2)) # transform or use provided data - Favg = np.real(F_fourier[0,0,0,:,:])/grid.prod() # take zero freq for average - avgDeformation = np.einsum('ml,ijkl->ijkm',Favg,origCoords) # dX = Favg.X - - return avgDeformation - -#-------------------------------------------------------------------------------------------------- -def displacementFluctFFT(F,grid,size,nodal=False,transformed=False): - """Calculate cell center (or nodal) displacement for deformation gradient field specified in each grid cell""" - integrator = 0.5j * size / math.pi - - kk, kj, ki = np.meshgrid(np.where(np.arange(grid[2])>grid[2]//2,np.arange(grid[2])-grid[2],np.arange(grid[2])), - np.where(np.arange(grid[1])>grid[1]//2,np.arange(grid[1])-grid[1],np.arange(grid[1])), - np.arange(grid[0]//2+1), - indexing = 'ij') - k_s = np.concatenate((ki[:,:,:,None],kj[:,:,:,None],kk[:,:,:,None]),axis = 3) - k_sSquared = np.einsum('...l,...l',k_s,k_s) - k_sSquared[0,0,0] = 1.0 # ignore global average frequency - -#-------------------------------------------------------------------------------------------------- -# integration in Fourier space - - displacement_fourier = -np.einsum('ijkml,ijkl,l->ijkm', - F if transformed else np.fft.rfftn(F,axes=(0,1,2)), - k_s, - integrator, - ) / k_sSquared[...,np.newaxis] - -#-------------------------------------------------------------------------------------------------- -# backtransformation 
to real space - - displacement = np.fft.irfftn(displacement_fourier,grid[::-1],axes=(0,1,2)) - - return cell2node(displacement,grid) if nodal else displacement - - def volTetrahedron(coords): """ Return the volume of the tetrahedron with given vertices or sides. @@ -133,10 +61,10 @@ def volTetrahedron(coords): def volumeMismatch(size,F,nodes): """ - Calculates the volume mismatch + Calculates the volume mismatch. volume mismatch is defined as the difference between volume of reconstructed - (compatible) cube and determinant of defgrad at the FP + (compatible) cube and determinant of deformation gradient at Fourier point. """ coords = np.empty([8,3]) vMismatch = np.empty(grid[::-1]) @@ -169,11 +97,11 @@ def volumeMismatch(size,F,nodes): def shapeMismatch(size,F,nodes,centres): """ - Routine to calculate the shape mismatch + Routine to calculate the shape mismatch. shape mismatch is defined as difference between the vectors from the central point to the corners of reconstructed (combatible) volume element and the vectors calculated by deforming - the initial volume element with the current deformation gradient + the initial volume element with the current deformation gradient. """ coordsInitial = np.empty([8,3]) sMismatch = np.empty(grid[::-1]) @@ -241,92 +169,29 @@ parser.set_defaults(pos = 'pos', ) (options,filenames) = parser.parse_args() - -# --- loop over input files ------------------------------------------------------------------------- - if filenames == []: filenames = [None] + for name in filenames: - try: - table = damask.ASCIItable(name = name, - buffered = False) - except: continue damask.util.report(scriptName,name) - -# ------------------------------------------ read header ------------------------------------------ - - table.head_read() - -# ------------------------------------------ sanity checks ---------------------------------------- - - errors = [] - remarks = [] - if table.label_dimension(options.defgrad) != 9: - errors.append('deformation gradient "{}" is not a 3x3 tensor.'.format(options.defgrad)) + table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name) + grid,size,origin = damask.grid_filters.cell_coord0_gridSizeOrigin(table.get(options.pos)) - coordDim = table.label_dimension(options.pos) - if not 3 >= coordDim >= 1: - errors.append('coordinates "{}" need to have one, two, or three dimensions.'.format(options.pos)) - elif coordDim < 3: - remarks.append('appending {} dimension{} to coordinates "{}"...'.format(3-coordDim, - 's' if coordDim < 2 else '', - options.pos)) - - if remarks != []: damask.util.croak(remarks) - if errors != []: - damask.util.croak(errors) - table.close(dismiss=True) - continue - -# --------------- figure out size and grid --------------------------------------------------------- - - table.data_readArray([options.defgrad,options.pos]) - table.data_rewind() - - if table.data[:,9:].shape[1] < 3: - table.data = np.hstack((table.data, - np.zeros((table.data.shape[0], - 3-table.data[:,9:].shape[1]),dtype='f'))) # fill coords up to 3D with zeros - - grid,size = damask.util.coordGridAndSize(table.data[:,9:12]) - N = grid.prod() - - if N != len(table.data): errors.append('data count {} does not match grid {}x{}x{}.'.format(N,*grid)) - if errors != []: - damask.util.croak(errors) - table.close(dismiss = True) - continue + F = table.get(options.defgrad).reshape(grid[2],grid[1],grid[0],3,3) + nodes = damask.grid_filters.node_coord(size,F) -# -----------------------------process data and assemble header 
------------------------------------- - - F_fourier = np.fft.rfftn(table.data[:,:9].reshape(grid[2],grid[1],grid[0],3,3),axes=(0,1,2)) # perform transform only once... - nodes = displacementFluctFFT(F_fourier,grid,size,True,transformed=True)\ - + deformationAvgFFT (F_fourier,grid,size,True,transformed=True) - if options.shape: - table.labels_append(['shapeMismatch({})'.format(options.defgrad)]) - centres = displacementFluctFFT(F_fourier,grid,size,False,transformed=True)\ - + deformationAvgFFT (F_fourier,grid,size,False,transformed=True) - + centers = damask.grid_filters.cell_coord(size,F) + shapeMismatch = shapeMismatch( size,table.get(options.defgrad).reshape(grid[2],grid[1],grid[0],3,3),nodes,centers) + table.add('shapeMismatch(({}))'.format(options.defgrad), + shapeMismatch.reshape((-1,1)), + scriptID+' '+' '.join(sys.argv[1:])) + if options.volume: - table.labels_append(['volMismatch({})'.format(options.defgrad)]) + volumeMismatch = volumeMismatch(size,table.get(options.defgrad).reshape(grid[2],grid[1],grid[0],3,3),nodes) + table.add('volMismatch(({}))'.format(options.defgrad), + volumeMismatch.reshape((-1,1)), + scriptID+' '+' '.join(sys.argv[1:])) - table.head_write() - if options.shape: - shapeMismatch = shapeMismatch( size,table.data[:,:9].reshape(grid[2],grid[1],grid[0],3,3),nodes,centres) - if options.volume: - volumeMismatch = volumeMismatch(size,table.data[:,:9].reshape(grid[2],grid[1],grid[0],3,3),nodes) - -# ------------------------------------------ output data ------------------------------------------- - for i in range(grid[2]): - for j in range(grid[1]): - for k in range(grid[0]): - table.data_read() - if options.shape: table.data_append(shapeMismatch[i,j,k]) - if options.volume: table.data_append(volumeMismatch[i,j,k]) - table.data_write() - -# ------------------------------------------ output finalization ----------------------------------- - - table.close() # close ASCII tables + table.to_ASCII(sys.stdout if name is None else name) diff --git a/processing/post/addCumulative.py b/processing/post/addCumulative.py index b81a9d14f..958c6a70a 100755 --- a/processing/post/addCumulative.py +++ b/processing/post/addCumulative.py @@ -2,6 +2,7 @@ import os import sys +from io import StringIO from optparse import OptionParser import numpy as np @@ -22,79 +23,26 @@ Add cumulative (sum of first to current row) values for given label(s). 
""", version = scriptID) parser.add_option('-l','--label', - dest='label', + dest='labels', action = 'extend', metavar = '', help = 'columns to cumulate') - parser.add_option('-p','--product', dest='product', action = 'store_true', help = 'product of values instead of sum') (options,filenames) = parser.parse_args() - -if options.label is None: - parser.error('no data column(s) specified.') - -# --- loop over input files ------------------------------------------------------------------------- - if filenames == []: filenames = [None] +if options.labels is None: + parser.error('no data column(s) specified.') + for name in filenames: - try: - table = damask.ASCIItable(name = name, - buffered = False) - except IOError: continue - damask.util.report(scriptName,name) + damask.util.report(scriptName,name) -# ------------------------------------------ read header ------------------------------------------ + table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name) + for label in options.labels: + table.add('cum_{}({})'.format('prod' if options.product else 'sum',label), + np.cumprod(table.get(label),0) if options.product else np.cumsum(table.get(label),0), + scriptID+' '+' '.join(sys.argv[1:])) - table.head_read() - -# ------------------------------------------ sanity checks ---------------------------------------- - - errors = [] - remarks = [] - columns = [] - dims = [] - how = 'prod' if options.product else 'sum' - - for what in options.label: - dim = table.label_dimension(what) - if dim < 0: remarks.append('column {} not found...'.format(what)) - else: - dims.append(dim) - columns.append(table.label_index(what)) - table.labels_append('cum_{}({})'.format(how,what) if dim == 1 else - ['{}_cum_{}({})'.format(i+1,how,what) for i in range(dim)] ) # extend ASCII header with new labels - - if remarks != []: damask.util.croak(remarks) - if errors != []: - damask.util.croak(errors) - table.close(dismiss = True) - continue - -# ------------------------------------------ assemble header --------------------------------------- - - table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:])) - table.head_write() - -# ------------------------------------------ process data ------------------------------------------ - mask = [] - for col,dim in zip(columns,dims): mask += range(col,col+dim) # isolate data columns to cumulate - cumulated = np.ones(len(mask)) if options.product else np.zeros(len(mask)) # prepare output field - - outputAlive = True - while outputAlive and table.data_read(): # read next data line of ASCII table - if options.product: - for i,col in enumerate(mask): - cumulated[i] *= float(table.data[col]) # cumulate values (multiplication) - else: - for i,col in enumerate(mask): - cumulated[i] += float(table.data[col]) # cumulate values (addition) - table.data_append(cumulated) - - outputAlive = table.data_write() # output processed line - -# ------------------------------------------ output finalization ----------------------------------- - - table.close() # close ASCII tables + table.to_ASCII(sys.stdout if name is None else name) diff --git a/processing/post/addCurl.py b/processing/post/addCurl.py index 484af9677..f106054b3 100755 --- a/processing/post/addCurl.py +++ b/processing/post/addCurl.py @@ -2,6 +2,7 @@ import os import sys +from io import StringIO from optparse import OptionParser import numpy as np @@ -12,48 +13,6 @@ import damask scriptName = os.path.splitext(os.path.basename(__file__))[0] scriptID = ' '.join([scriptName,damask.version]) -def 
merge_dicts(*dict_args): - """Given any number of dicts, shallow copy and merge into a new dict, with precedence going to key value pairs in latter dicts.""" - result = {} - for dictionary in dict_args: - result.update(dictionary) - return result - -def curlFFT(geomdim,field): - """Calculate curl of a vector or tensor field by transforming into Fourier space.""" - shapeFFT = np.array(np.shape(field))[0:3] - grid = np.array(np.shape(field)[2::-1]) - N = grid.prod() # field size - n = np.array(np.shape(field)[3:]).prod() # data size - - field_fourier = np.fft.rfftn(field,axes=(0,1,2),s=shapeFFT) - curl_fourier = np.empty(field_fourier.shape,'c16') - - # differentiation in Fourier space - TWOPIIMG = 2.0j*np.pi - einsums = { - 3:'slm,ijkl,ijkm->ijks', # vector, 3 -> 3 - 9:'slm,ijkl,ijknm->ijksn', # tensor, 3x3 -> 3x3 - } - k_sk = np.where(np.arange(grid[2])>grid[2]//2,np.arange(grid[2])-grid[2],np.arange(grid[2]))/geomdim[0] - if grid[2]%2 == 0: k_sk[grid[2]//2] = 0 # Nyquist freq=0 for even grid (Johnson, MIT, 2011) - - k_sj = np.where(np.arange(grid[1])>grid[1]//2,np.arange(grid[1])-grid[1],np.arange(grid[1]))/geomdim[1] - if grid[1]%2 == 0: k_sj[grid[1]//2] = 0 # Nyquist freq=0 for even grid (Johnson, MIT, 2011) - - k_si = np.arange(grid[0]//2+1)/geomdim[2] - - kk, kj, ki = np.meshgrid(k_sk,k_sj,k_si,indexing = 'ij') - k_s = np.concatenate((ki[:,:,:,None],kj[:,:,:,None],kk[:,:,:,None]),axis = 3).astype('c16') - - e = np.zeros((3, 3, 3)) - e[0, 1, 2] = e[1, 2, 0] = e[2, 0, 1] = 1.0 # Levi-Civita symbols - e[0, 2, 1] = e[2, 1, 0] = e[1, 0, 2] = -1.0 - - curl_fourier = np.einsum(einsums[n],e,k_s,field_fourier)*TWOPIIMG - - return np.fft.irfftn(curl_fourier,axes=(0,1,2),s=shapeFFT).reshape([N,n]) - # -------------------------------------------------------------------- # MAIN @@ -61,8 +20,7 @@ def curlFFT(geomdim,field): parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """ Add column(s) containing curl of requested column(s). -Operates on periodic ordered three-dimensional data sets -of vector and tensor fields. +Operates on periodic ordered three-dimensional data sets of vector and tensor fields. 
""", version = scriptID) parser.add_option('-p','--pos','--periodiccellcenter', @@ -70,93 +28,30 @@ parser.add_option('-p','--pos','--periodiccellcenter', type = 'string', metavar = 'string', help = 'label of coordinates [%default]') parser.add_option('-l','--label', - dest = 'data', + dest = 'labels', action = 'extend', metavar = '', help = 'label(s) of field values') parser.set_defaults(pos = 'pos', ) - (options,filenames) = parser.parse_args() - -if options.data is None: parser.error('no data column specified.') - -# --- define possible data types ------------------------------------------------------------------- - -datatypes = { - 3: {'name': 'vector', - 'shape': [3], - }, - 9: {'name': 'tensor', - 'shape': [3,3], - }, - } - -# --- loop over input files ------------------------------------------------------------------------ - if filenames == []: filenames = [None] +if options.labels is None: parser.error('no data column specified.') + for name in filenames: - try: table = damask.ASCIItable(name = name,buffered = False) - except: continue - damask.util.report(scriptName,name) + damask.util.report(scriptName,name) -# --- interpret header ---------------------------------------------------------------------------- + table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name) + grid,size,origin = damask.grid_filters.cell_coord0_gridSizeOrigin(table.get(options.pos)) - table.head_read() - - remarks = [] - errors = [] - active = [] - - coordDim = table.label_dimension(options.pos) - if coordDim != 3: - errors.append('coordinates "{}" must be three-dimensional.'.format(options.pos)) - else: coordCol = table.label_index(options.pos) - - for me in options.data: - dim = table.label_dimension(me) - if dim in datatypes: - active.append(merge_dicts({'label':me},datatypes[dim])) - remarks.append('differentiating {} "{}"...'.format(datatypes[dim]['name'],me)) - else: - remarks.append('skipping "{}" of dimension {}...'.format(me,dim) if dim != -1 else \ - '"{}" not found...'.format(me) ) - - if remarks != []: damask.util.croak(remarks) - if errors != []: - damask.util.croak(errors) - table.close(dismiss = True) - continue - -# ------------------------------------------ assemble header -------------------------------------- - - table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:])) - for data in active: - table.labels_append(['{}_curlFFT({})'.format(i+1,data['label']) - for i in range(np.prod(np.array(data['shape'])))]) # extend ASCII header with new labels - table.head_write() - -# --------------- figure out size and grid --------------------------------------------------------- - - table.data_readArray() - grid,size = damask.util.coordGridAndSize(table.data[:,table.label_indexrange(options.pos)]) - -# ------------------------------------------ process value field ----------------------------------- - - stack = [table.data] - for data in active: - # we need to reverse order here, because x is fastest,ie rightmost, but leftmost in our x,y,z notation - stack.append(curlFFT(size[::-1], - table.data[:,table.label_indexrange(data['label'])]. 
- reshape(grid[::-1].tolist()+data['shape']))) - -# ------------------------------------------ output result ----------------------------------------- - - if len(stack) > 1: table.data = np.hstack(tuple(stack)) - table.data_writeArray('%.12g') - -# ------------------------------------------ output finalization ----------------------------------- - - table.close() # close input ASCII table (works for stdin) + for label in options.labels: + field = table.get(label) + shape = (3,) if np.prod(field.shape)//np.prod(grid) == 3 else (3,3) # vector or tensor + field = field.reshape(np.append(grid[::-1],shape)) + table.add('curlFFT({})'.format(label), + damask.grid_filters.curl(size[::-1],field).reshape((-1,np.prod(shape))), + scriptID+' '+' '.join(sys.argv[1:])) + + table.to_ASCII(sys.stdout if name is None else name) diff --git a/processing/post/addDerivative.py b/processing/post/addDerivative.py index 8ebfdf2da..4e9410794 100755 --- a/processing/post/addDerivative.py +++ b/processing/post/addDerivative.py @@ -2,6 +2,7 @@ import os import sys +from io import StringIO from optparse import OptionParser import numpy as np @@ -30,7 +31,7 @@ def derivative(coordinates,what): (coordinates[0] - coordinates[1]) result[-1,:] = (what[-1,:] - what[-2,:]) / \ (coordinates[-1] - coordinates[-2]) - + return result @@ -48,78 +49,26 @@ parser.add_option('-c','--coordinates', type = 'string', metavar='string', help = 'heading of coordinate column') parser.add_option('-l','--label', - dest = 'label', + dest = 'labels', action = 'extend', metavar = '', help = 'heading of column(s) to differentiate') (options,filenames) = parser.parse_args() - -if options.coordinates is None: - parser.error('no coordinate column specified.') -if options.label is None: - parser.error('no data column specified.') - -# --- loop over input files ------------------------------------------------------------------------- - if filenames == []: filenames = [None] +if options.coordinates is None: + parser.error('no coordinate column specified.') +if options.labels is None: + parser.error('no data column specified.') + for name in filenames: - try: table = damask.ASCIItable(name = name, - buffered = False) - except: continue - damask.util.report(scriptName,name) + damask.util.report(scriptName,name) -# ------------------------------------------ read header ------------------------------------------ + table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name) + for label in options.labels: + table.add('d({})/d({})'.format(label,options.coordinates), + derivative(table.get(options.coordinates),table.get(label)), + scriptID+' '+' '.join(sys.argv[1:])) - table.head_read() - -# ------------------------------------------ sanity checks ---------------------------------------- - - errors = [] - remarks = [] - columns = [] - dims = [] - - if table.label_dimension(options.coordinates) != 1: - errors.append('coordinate column {} is not scalar.'.format(options.coordinates)) - - for what in options.label: - dim = table.label_dimension(what) - if dim < 0: remarks.append('column {} not found...'.format(what)) - else: - dims.append(dim) - columns.append(table.label_index(what)) - table.labels_append('d({})/d({})'.format(what,options.coordinates) if dim == 1 else - ['{}_d({})/d({})'.format(i+1,what,options.coordinates) for i in range(dim)] ) # extend ASCII header with new labels - - if remarks != []: damask.util.croak(remarks) - if errors != []: - damask.util.croak(errors) - table.close(dismiss = True) - continue - -# 
------------------------------------------ assemble header -------------------------------------- - - table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:])) - table.head_write() - -# ------------------------------------------ process data ------------------------------------------ - - table.data_readArray() - - mask = [] - for col,dim in zip(columns,dims): mask += range(col,col+dim) # isolate data columns to differentiate - - differentiated = derivative(table.data[:,table.label_index(options.coordinates)].reshape((len(table.data),1)), - table.data[:,mask]) # calculate numerical derivative - - table.data = np.hstack((table.data,differentiated)) - -# ------------------------------------------ output result ----------------------------------------- - - table.data_writeArray() - -# ------------------------------------------ output finalization ----------------------------------- - - table.close() # close ASCII tables + table.to_ASCII(sys.stdout if name is None else name) diff --git a/processing/post/addDeterminant.py b/processing/post/addDeterminant.py index 14f0321be..f2368559d 100755 --- a/processing/post/addDeterminant.py +++ b/processing/post/addDeterminant.py @@ -2,22 +2,16 @@ import os import sys +from io import StringIO from optparse import OptionParser +import numpy as np + import damask - scriptName = os.path.splitext(os.path.basename(__file__))[0] scriptID = ' '.join([scriptName,damask.version]) -def determinant(m): - return +m[0]*m[4]*m[8] \ - +m[1]*m[5]*m[6] \ - +m[2]*m[3]*m[7] \ - -m[2]*m[4]*m[6] \ - -m[1]*m[3]*m[8] \ - -m[0]*m[5]*m[7] - # -------------------------------------------------------------------- # MAIN @@ -34,61 +28,18 @@ parser.add_option('-t','--tensor', help = 'heading of columns containing tensor field values') (options,filenames) = parser.parse_args() +if filenames == []: filenames = [None] if options.tensor is None: parser.error('no data column specified.') -# --- loop over input files ------------------------------------------------------------------------- - -if filenames == []: filenames = [None] - for name in filenames: - try: - table = damask.ASCIItable(name = name, - buffered = False) - except: continue - damask.util.report(scriptName,name) + damask.util.report(scriptName,name) -# ------------------------------------------ read header ------------------------------------------ + table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name) + for tensor in options.tensor: + table.add('det({})'.format(tensor), + np.linalg.det(table.get(tensor).reshape(-1,3,3)), + scriptID+' '+' '.join(sys.argv[1:])) - table.head_read() - -# ------------------------------------------ sanity checks ---------------------------------------- - - items = { - 'tensor': {'dim': 9, 'shape': [3,3], 'labels':options.tensor, 'column': []}, - } - errors = [] - remarks = [] - - for type, data in items.items(): - for what in data['labels']: - dim = table.label_dimension(what) - if dim != data['dim']: remarks.append('column {} is not a {}...'.format(what,type)) - else: - items[type]['column'].append(table.label_index(what)) - table.labels_append('det({})'.format(what)) # extend ASCII header with new labels - - if remarks != []: damask.util.croak(remarks) - if errors != []: - damask.util.croak(errors) - table.close(dismiss = True) - continue - -# ------------------------------------------ assemble header -------------------------------------- - - table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:])) - table.head_write() - -# 
------------------------------------------ process data ------------------------------------------ - - outputAlive = True - while outputAlive and table.data_read(): # read next data line of ASCII table - for type, data in items.items(): - for column in data['column']: - table.data_append(determinant(list(map(float,table.data[column: column+data['dim']])))) - outputAlive = table.data_write() # output processed line - -# ------------------------------------------ output finalization ----------------------------------- - - table.close() # close input ASCII table (works for stdin) + table.to_ASCII(sys.stdout if name is None else name) diff --git a/processing/post/addDeviator.py b/processing/post/addDeviator.py index c9aeaacfd..ca06034b3 100755 --- a/processing/post/addDeviator.py +++ b/processing/post/addDeviator.py @@ -2,6 +2,7 @@ import os import sys +from io import StringIO from optparse import OptionParser import damask @@ -9,17 +10,6 @@ import damask scriptName = os.path.splitext(os.path.basename(__file__))[0] scriptID = ' '.join([scriptName,damask.version]) -oneThird = 1.0/3.0 - -def deviator(m,spherical = False): # Careful, do not change the value of m, its intent(inout)! - sph = oneThird*(m[0]+m[4]+m[8]) - dev = [ - m[0]-sph, m[1], m[2], - m[3], m[4]-sph, m[5], - m[6], m[7], m[8]-sph, - ] - return dev,sph if spherical else dev - # -------------------------------------------------------------------- # MAIN @@ -40,67 +30,22 @@ parser.add_option('-s','--spherical', help = 'report spherical part of tensor (hydrostatic component, pressure)') (options,filenames) = parser.parse_args() - -if options.tensor is None: - parser.error('no data column specified...') - -# --- loop over input files ------------------------------------------------------------------------- - if filenames == []: filenames = [None] +if options.tensor is None: + parser.error('no data column specified...') + for name in filenames: - try: - table = damask.ASCIItable(name = name, buffered = False) - except: - continue - damask.util.report(scriptName,name) + damask.util.report(scriptName,name) + + table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name) + for tensor in options.tensor: + table.add('dev({})'.format(tensor), + damask.mechanics.deviatoric_part(table.get(tensor).reshape(-1,3,3)).reshape((-1,9)), + scriptID+' '+' '.join(sys.argv[1:])) + if options.spherical: + table.add('sph({})'.format(tensor), + damask.mechanics.spherical_part(table.get(tensor).reshape(-1,3,3)), + scriptID+' '+' '.join(sys.argv[1:])) -# ------------------------------------------ read header ------------------------------------------ - - table.head_read() - -# ------------------------------------------ sanity checks ---------------------------------------- - - items = { - 'tensor': {'dim': 9, 'shape': [3,3], 'labels':options.tensor, 'active':[], 'column': []}, - } - errors = [] - remarks = [] - column = {} - - for type, data in items.items(): - for what in data['labels']: - dim = table.label_dimension(what) - if dim != data['dim']: remarks.append('column {} is not a {}.'.format(what,type)) - else: - items[type]['active'].append(what) - items[type]['column'].append(table.label_index(what)) - - if remarks != []: damask.util.croak(remarks) - if errors != []: - damask.util.croak(errors) - table.close(dismiss = True) - continue - -# ------------------------------------------ assemble header -------------------------------------- - - table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:])) - for type, data in 
items.items(): - for label in data['active']: - table.labels_append(['{}_dev({})'.format(i+1,label) for i in range(data['dim'])] + \ - (['sph({})'.format(label)] if options.spherical else [])) # extend ASCII header with new labels - table.head_write() - -# ------------------------------------------ process data ------------------------------------------ - - outputAlive = True - while outputAlive and table.data_read(): # read next data line of ASCII table - for type, data in items.items(): - for column in data['column']: - table.data_append(deviator(list(map(float,table.data[column: - column+data['dim']])),options.spherical)) - outputAlive = table.data_write() # output processed line - -# ------------------------------------------ output finalization ----------------------------------- - - table.close() # close input ASCII table (works for stdin) + table.to_ASCII(sys.stdout if name is None else name) diff --git a/processing/post/addDisplacement.py b/processing/post/addDisplacement.py index 99d07fd18..faabc795f 100755 --- a/processing/post/addDisplacement.py +++ b/processing/post/addDisplacement.py @@ -2,10 +2,10 @@ import os import sys +from io import StringIO from optparse import OptionParser import numpy as np -import scipy.ndimage import damask @@ -14,79 +14,6 @@ scriptName = os.path.splitext(os.path.basename(__file__))[0] scriptID = ' '.join([scriptName,damask.version]) -#-------------------------------------------------------------------------------------------------- -def cell2node(cellData,grid): - - nodeData = 0.0 - datalen = np.array(cellData.shape[3:]).prod() - - for i in range(datalen): - node = scipy.ndimage.convolve(cellData.reshape(tuple(grid[::-1])+(datalen,))[...,i], - np.ones((2,2,2))/8., # 2x2x2 neighborhood of cells - mode = 'wrap', - origin = -1, # offset to have cell origin as center - ) # now averaged at cell origins - node = np.append(node,node[np.newaxis,0,:,:,...],axis=0) # wrap along z - node = np.append(node,node[:,0,np.newaxis,:,...],axis=1) # wrap along y - node = np.append(node,node[:,:,0,np.newaxis,...],axis=2) # wrap along x - - nodeData = node[...,np.newaxis] if i==0 else np.concatenate((nodeData,node[...,np.newaxis]),axis=-1) - - return nodeData - -#-------------------------------------------------------------------------------------------------- -def displacementAvgFFT(F,grid,size,nodal=False,transformed=False): - """Calculate average cell center (or nodal) displacement for deformation gradient field specified in each grid cell""" - if nodal: - x, y, z = np.meshgrid(np.linspace(0,size[2],1+grid[2]), - np.linspace(0,size[1],1+grid[1]), - np.linspace(0,size[0],1+grid[0]), - indexing = 'ij') - else: - delta = size/grid*0.5 - x, y, z = np.meshgrid(np.linspace(delta[2],size[2]-delta[2],grid[2]), - np.linspace(delta[1],size[1]-delta[1],grid[1]), - np.linspace(delta[0],size[0]-delta[0],grid[0]), - indexing = 'ij') - - origCoords = np.concatenate((z[:,:,:,None],y[:,:,:,None],x[:,:,:,None]),axis = 3) - - F_fourier = F if transformed else np.fft.rfftn(F,axes=(0,1,2)) # transform or use provided data - Favg = np.real(F_fourier[0,0,0,:,:])/grid.prod() # take zero freq for average - avgDisplacement = np.einsum('ml,ijkl->ijkm',Favg-np.eye(3),origCoords) # dX = Favg.X - - return avgDisplacement - -#-------------------------------------------------------------------------------------------------- -def displacementFluctFFT(F,grid,size,nodal=False,transformed=False): - """Calculate cell center (or nodal) displacement for deformation gradient field specified in each grid 
cell""" - integrator = 0.5j * size / np.pi - - kk, kj, ki = np.meshgrid(np.where(np.arange(grid[2])>grid[2]//2,np.arange(grid[2])-grid[2],np.arange(grid[2])), - np.where(np.arange(grid[1])>grid[1]//2,np.arange(grid[1])-grid[1],np.arange(grid[1])), - np.arange(grid[0]//2+1), - indexing = 'ij') - k_s = np.concatenate((ki[:,:,:,None],kj[:,:,:,None],kk[:,:,:,None]),axis = 3) - k_sSquared = np.einsum('...l,...l',k_s,k_s) - k_sSquared[0,0,0] = 1.0 # ignore global average frequency - -#-------------------------------------------------------------------------------------------------- -# integration in Fourier space - - displacement_fourier = -np.einsum('ijkml,ijkl,l->ijkm', - F if transformed else np.fft.rfftn(F,axes=(0,1,2)), - k_s, - integrator, - ) / k_sSquared[...,np.newaxis] - -#-------------------------------------------------------------------------------------------------- -# backtransformation to real space - - displacement = np.fft.irfftn(displacement_fourier,grid[::-1],axes=(0,1,2)) - - return cell2node(displacement,grid) if nodal else displacement - - # -------------------------------------------------------------------- # MAIN # -------------------------------------------------------------------- @@ -100,7 +27,7 @@ Outputs at cell centers or cell nodes (into separate file). parser.add_option('-f', '--defgrad', - dest = 'defgrad', + dest = 'f', metavar = 'string', help = 'label of deformation gradient [%default]') parser.add_option('-p', @@ -113,108 +40,34 @@ parser.add_option('--nodal', action = 'store_true', help = 'output nodal (instead of cell-centered) displacements') -parser.set_defaults(defgrad = 'f', - pos = 'pos', +parser.set_defaults(f = 'f', + pos = 'pos', ) (options,filenames) = parser.parse_args() -# --- loop over input files ------------------------------------------------------------------------- - -if filenames == []: filenames = [None] - for name in filenames: - outname = (os.path.splitext(name)[0] + - '_nodal' + - os.path.splitext(name)[1]) if (options.nodal and name) else None - try: table = damask.ASCIItable(name = name, - outname = outname, - buffered = False) - except: continue - damask.util.report(scriptName,'{}{}'.format(name if name else '', - ' --> {}'.format(outname) if outname else '')) + damask.util.report(scriptName,name) -# ------------------------------------------ read header ------------------------------------------ - - table.head_read() - -# ------------------------------------------ sanity checks ---------------------------------------- - - errors = [] - remarks = [] - - if table.label_dimension(options.defgrad) != 9: - errors.append('deformation gradient "{}" is not a 3x3 tensor.'.format(options.defgrad)) - - coordDim = table.label_dimension(options.pos) - if not 3 >= coordDim >= 1: - errors.append('coordinates "{}" need to have one, two, or three dimensions.'.format(options.pos)) - elif coordDim < 3: - remarks.append('appending {} dimension{} to coordinates "{}"...'.format(3-coordDim, - 's' if coordDim < 2 else '', - options.pos)) - - if remarks != []: damask.util.croak(remarks) - if errors != []: - damask.util.croak(errors) - table.close(dismiss=True) - continue - -# --------------- figure out size and grid --------------------------------------------------------- - - table.data_readArray([options.defgrad,options.pos]) - table.data_rewind() - - if len(table.data.shape) < 2: table.data.shape += (1,) # expand to 2D shape - if table.data[:,9:].shape[1] < 3: - table.data = np.hstack((table.data, - np.zeros((table.data.shape[0], - 
3-table.data[:,9:].shape[1]),dtype='f'))) # fill coords up to 3D with zeros - - grid,size = damask.util.coordGridAndSize(table.data[:,9:12]) - N = grid.prod() - - if N != len(table.data): errors.append('data count {} does not match grid {}x{}x{}.'.format(N,*grid)) - if errors != []: - damask.util.croak(errors) - table.close(dismiss = True) - continue - -# ------------------------------------------ process data ------------------------------------------ - - F_fourier = np.fft.rfftn(table.data[:,:9].reshape(grid[2],grid[1],grid[0],3,3),axes=(0,1,2)) # perform transform only once... - - fluctDisplacement = displacementFluctFFT(F_fourier,grid,size,options.nodal,transformed=True) - avgDisplacement = displacementAvgFFT (F_fourier,grid,size,options.nodal,transformed=True) - -# ------------------------------------------ assemble header --------------------------------------- - - if options.nodal: - table.info_clear() - table.labels_clear() - - table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:])) - table.labels_append((['{}_pos' .format(i+1) for i in range(3)] if options.nodal else []) + - ['{}_avg({}).{}' .format(i+1,options.defgrad,options.pos) for i in range(3)] + - ['{}_fluct({}).{}'.format(i+1,options.defgrad,options.pos) for i in range(3)] ) - table.head_write() - -# ------------------------------------------ output data ------------------------------------------- - - Zrange = np.linspace(0,size[2],1+grid[2]) if options.nodal else range(grid[2]) - Yrange = np.linspace(0,size[1],1+grid[1]) if options.nodal else range(grid[1]) - Xrange = np.linspace(0,size[0],1+grid[0]) if options.nodal else range(grid[0]) - - for i,z in enumerate(Zrange): - for j,y in enumerate(Yrange): - for k,x in enumerate(Xrange): - if options.nodal: table.data_clear() - else: table.data_read() - table.data_append([x,y,z] if options.nodal else []) - table.data_append(list( avgDisplacement[i,j,k,:])) - table.data_append(list(fluctDisplacement[i,j,k,:])) - table.data_write() - -# ------------------------------------------ output finalization ----------------------------------- - - table.close() # close ASCII tables + table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name) + grid,size,origin = damask.grid_filters.cell_coord0_gridSizeOrigin(table.get(options.pos)) + + F = table.get(options.f).reshape(np.append(grid[::-1],(3,3))) + if options.nodal: + table = damask.Table(damask.grid_filters.node_coord0(grid[::-1],size[::-1]).reshape((-1,3)), + {'pos':(3,)}) + table.add('avg({}).{}'.format(options.f,options.pos), + damask.grid_filters.node_displacement_avg(size[::-1],F).reshape((-1,3)), + scriptID+' '+' '.join(sys.argv[1:])) + table.add('fluct({}).{}'.format(options.f,options.pos), + damask.grid_filters.node_displacement_fluct(size[::-1],F).reshape((-1,3)), + scriptID+' '+' '.join(sys.argv[1:])) + table.to_ASCII(sys.stdout if name is None else os.path.splitext(name)[0]+'_nodal.txt') + else: + table.add('avg({}).{}'.format(options.f,options.pos), + damask.grid_filters.cell_displacement_avg(size[::-1],F).reshape((-1,3)), + scriptID+' '+' '.join(sys.argv[1:])) + table.add('fluct({}).{}'.format(options.f,options.pos), + damask.grid_filters.cell_displacement_fluct(size[::-1],F).reshape((-1,3)), + scriptID+' '+' '.join(sys.argv[1:])) + table.to_ASCII(sys.stdout if name is None else name) diff --git a/processing/post/addDivergence.py b/processing/post/addDivergence.py index 31a18f8e1..cb9486990 100755 --- a/processing/post/addDivergence.py +++ b/processing/post/addDivergence.py @@ -2,6 
+2,7 @@ import os import sys +from io import StringIO from optparse import OptionParser import numpy as np @@ -12,53 +13,14 @@ import damask scriptName = os.path.splitext(os.path.basename(__file__))[0] scriptID = ' '.join([scriptName,damask.version]) -def merge_dicts(*dict_args): - """Given any number of dicts, shallow copy and merge into a new dict, with precedence going to key value pairs in latter dicts.""" - result = {} - for dictionary in dict_args: - result.update(dictionary) - return result - -def divFFT(geomdim,field): - """Calculate divergence of a vector or tensor field by transforming into Fourier space.""" - shapeFFT = np.array(np.shape(field))[0:3] - grid = np.array(np.shape(field)[2::-1]) - N = grid.prod() # field size - n = np.array(np.shape(field)[3:]).prod() # data size - - field_fourier = np.fft.rfftn(field,axes=(0,1,2),s=shapeFFT) - div_fourier = np.empty(field_fourier.shape[0:len(np.shape(field))-1],'c16') - - # differentiation in Fourier space - TWOPIIMG = 2.0j*np.pi - einsums = { - 3:'ijkl,ijkl->ijk', # vector, 3 -> 1 - 9:'ijkm,ijklm->ijkl', # tensor, 3x3 -> 3 - } - k_sk = np.where(np.arange(grid[2])>grid[2]//2,np.arange(grid[2])-grid[2],np.arange(grid[2]))/geomdim[0] - if grid[2]%2 == 0: k_sk[grid[2]//2] = 0 # Nyquist freq=0 for even grid (Johnson, MIT, 2011) - - k_sj = np.where(np.arange(grid[1])>grid[1]//2,np.arange(grid[1])-grid[1],np.arange(grid[1]))/geomdim[1] - if grid[1]%2 == 0: k_sj[grid[1]//2] = 0 # Nyquist freq=0 for even grid (Johnson, MIT, 2011) - - k_si = np.arange(grid[0]//2+1)/geomdim[2] - - kk, kj, ki = np.meshgrid(k_sk,k_sj,k_si,indexing = 'ij') - k_s = np.concatenate((ki[:,:,:,None],kj[:,:,:,None],kk[:,:,:,None]),axis = 3).astype('c16') - - div_fourier = np.einsum(einsums[n],k_s,field_fourier)*TWOPIIMG - - return np.fft.irfftn(div_fourier,axes=(0,1,2),s=shapeFFT).reshape([N,n//3]) - # -------------------------------------------------------------------- # MAIN # -------------------------------------------------------------------- -parser = OptionParser(option_class=damask.extendableOption, usage='%prog option(s) [ASCIItable(s)]', description = """ -Add column(s) containing curl of requested column(s). -Operates on periodic ordered three-dimensional data sets -of vector and tensor fields. +parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """ +Add column(s) containing divergence of requested column(s). +Operates on periodic ordered three-dimensional data sets of vector and tensor fields. 
""", version = scriptID) parser.add_option('-p','--pos','--periodiccellcenter', @@ -66,95 +28,30 @@ parser.add_option('-p','--pos','--periodiccellcenter', type = 'string', metavar = 'string', help = 'label of coordinates [%default]') parser.add_option('-l','--label', - dest = 'data', + dest = 'labels', action = 'extend', metavar = '', help = 'label(s) of field values') parser.set_defaults(pos = 'pos', ) - (options,filenames) = parser.parse_args() - -if options.data is None: parser.error('no data column specified.') - -# --- define possible data types ------------------------------------------------------------------- - -datatypes = { - 3: {'name': 'vector', - 'shape': [3], - }, - 9: {'name': 'tensor', - 'shape': [3,3], - }, - } - -# --- loop over input files ------------------------------------------------------------------------ - if filenames == []: filenames = [None] +if options.labels is None: parser.error('no data column specified.') + for name in filenames: - try: table = damask.ASCIItable(name = name,buffered = False) - except: continue - damask.util.report(scriptName,name) + damask.util.report(scriptName,name) -# --- interpret header ---------------------------------------------------------------------------- + table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name) + grid,size,origin = damask.grid_filters.cell_coord0_gridSizeOrigin(table.get(options.pos)) - table.head_read() - - remarks = [] - errors = [] - active = [] - - coordDim = table.label_dimension(options.pos) - if coordDim != 3: - errors.append('coordinates "{}" must be three-dimensional.'.format(options.pos)) - else: coordCol = table.label_index(options.pos) - - for me in options.data: - dim = table.label_dimension(me) - if dim in datatypes: - active.append(merge_dicts({'label':me},datatypes[dim])) - remarks.append('differentiating {} "{}"...'.format(datatypes[dim]['name'],me)) - else: - remarks.append('skipping "{}" of dimension {}...'.format(me,dim) if dim != -1 else \ - '"{}" not found...'.format(me) ) - - if remarks != []: damask.util.croak(remarks) - if errors != []: - damask.util.croak(errors) - table.close(dismiss = True) - continue - -# ------------------------------------------ assemble header -------------------------------------- - - table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:])) - for data in active: - table.labels_append(['divFFT({})'.format(data['label']) if data['shape'] == [3] \ - else '{}_divFFT({})'.format(i+1,data['label']) - for i in range(np.prod(np.array(data['shape']))//3)]) # extend ASCII header with new labels - table.head_write() - -# --------------- figure out size and grid --------------------------------------------------------- - - table.data_readArray() - - grid,size = damask.util.coordGridAndSize(table.data[:,table.label_indexrange(options.pos)]) - -# ------------------------------------------ process value field ----------------------------------- - - stack = [table.data] - for data in active: - # we need to reverse order here, because x is fastest,ie rightmost, but leftmost in our x,y,z notation - stack.append(divFFT(size[::-1], - table.data[:,table.label_indexrange(data['label'])]. 
- reshape(grid[::-1].tolist()+data['shape']))) - -# ------------------------------------------ output result ----------------------------------------- - - if len(stack) > 1: table.data = np.hstack(tuple(stack)) - table.data_writeArray('%.12g') - -# ------------------------------------------ output finalization ----------------------------------- - - table.close() # close input ASCII table (works for stdin) + for label in options.labels: + field = table.get(label) + shape = (3,) if np.prod(field.shape)//np.prod(grid) == 3 else (3,3) # vector or tensor + field = field.reshape(np.append(grid[::-1],shape)) + table.add('divFFT({})'.format(label), + damask.grid_filters.divergence(size[::-1],field).reshape((-1,np.prod(shape)//3)), + scriptID+' '+' '.join(sys.argv[1:])) + + table.to_ASCII(sys.stdout if name is None else name) diff --git a/processing/post/addEuclideanDistance.py b/processing/post/addEuclideanDistance.py index 1ca2169f6..be820220a 100755 --- a/processing/post/addEuclideanDistance.py +++ b/processing/post/addEuclideanDistance.py @@ -2,6 +2,7 @@ import os import sys +from io import StringIO from optparse import OptionParser import itertools @@ -121,13 +122,14 @@ parser.set_defaults(pos = 'pos', ) (options,filenames) = parser.parse_args() +if filenames == []: filenames = [None] if options.type is None: - parser.error('no feature type selected.') + parser.error('no feature type selected.') if not set(options.type).issubset(set(list(itertools.chain(*map(lambda x: x['names'],features))))): - parser.error('type must be chosen from (%s).'%(', '.join(map(lambda x:'|'.join(x['names']),features))) ) + parser.error('type must be chosen from (%s).'%(', '.join(map(lambda x:'|'.join(x['names']),features))) ) if 'biplane' in options.type and 'boundary' in options.type: - parser.error('only one from aliases "biplane" and "boundary" possible.') + parser.error('only one from aliases "biplane" and "boundary" possible.') feature_list = [] for i,feature in enumerate(features): @@ -137,104 +139,49 @@ for i,feature in enumerate(features): feature_list.append(i) # remember valid features break -# --- loop over input files ------------------------------------------------------------------------- - -if filenames == []: filenames = [None] - for name in filenames: - try: table = damask.ASCIItable(name = name, buffered = False) - except: continue - damask.util.report(scriptName,name) + damask.util.report(scriptName,name) -# ------------------------------------------ read header ------------------------------------------ + table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name) + grid,size,origin = damask.grid_filters.cell_coord0_gridSizeOrigin(table.get(options.pos)) - table.head_read() + neighborhood = neighborhoods[options.neighborhood] + diffToNeighbor = np.empty(list(grid+2)+[len(neighborhood)],'i') + microstructure = periodic_3Dpad(table.get(options.id).astype('i').reshape(grid,order='F')) -# ------------------------------------------ sanity checks ---------------------------------------- - - errors = [] - remarks = [] - - if not 3 >= table.label_dimension(options.pos) >= 1: - errors.append('coordinates "{}" need to have one, two, or three dimensions.'.format(options.pos)) - - if table.label_dimension(options.id) != 1: errors.append('grain identifier {} not found.'.format(options.id)) - else: idCol = table.label_index(options.id) - - if remarks != []: - damask.util.croak(remarks) - remarks = [] - if errors != []: - damask.util.croak(errors) - table.close(dismiss = 
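A minimal sketch of the Fourier-space differentiation that the removed divFFT() helper in addDivergence.py performed and that damask.grid_filters.divergence is now used for: the divergence of a periodic vector field follows from FT(div v)(k) = 2*pi*i k . FT(v)(k). The (Nx,Ny,Nz,3) layout and the plain complex FFT are simplifications of my own, not DAMASK's Fortran-order storage, and the Nyquist-frequency treatment of the removed code is omitted:

import numpy as np

def div_fft(size, v):
    """Divergence of a periodic vector field v, shape (Nx,Ny,Nz,3), on a box with edge lengths size."""
    grid = np.array(v.shape[:3])
    freq = [np.fft.fftfreq(grid[i], d=size[i]/grid[i]) for i in range(3)]   # wave numbers in cycles/length
    k = np.stack(np.meshgrid(*freq, indexing='ij'), axis=-1)                # (Nx,Ny,Nz,3) wave vectors
    v_hat = np.fft.fftn(v, axes=(0, 1, 2))
    div_hat = 2j*np.pi*np.einsum('xyzi,xyzi->xyz', k, v_hat)                # i k . v_hat
    return np.real(np.fft.ifftn(div_hat, axes=(0, 1, 2)))

# check: v = (sin(2 pi x/L), 0, 0) gives div v = 2 pi/L cos(2 pi x/L)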
True) - continue - -# ------------------------------------------ assemble header --------------------------------------- - - table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:])) - for feature in feature_list: - table.labels_append('ED_{}({})'.format(features[feature]['names'][0],options.id)) # extend ASCII header with new labels - table.head_write() - -# --------------- figure out size and grid --------------------------------------------------------- - - table.data_readArray() - - grid,size = damask.util.coordGridAndSize(table.data[:,table.label_indexrange(options.pos)]) - N = grid.prod() - - if N != len(table.data): errors.append('data count {} does not match grid {}.'.format(N,'x'.join(map(str,grid)))) - else: remarks.append('grid: {}x{}x{}'.format(*grid)) - if remarks != []: damask.util.croak(remarks) - if errors != []: - damask.util.croak(errors) - table.close(dismiss = True) - continue - -# ------------------------------------------ process value field ----------------------------------- - - stack = [table.data] - - neighborhood = neighborhoods[options.neighborhood] - diffToNeighbor = np.empty(list(grid+2)+[len(neighborhood)],'i') - microstructure = periodic_3Dpad(table.data[:,idCol].astype('i').reshape(grid,order='F')) - - for i,p in enumerate(neighborhood): - stencil = np.zeros((3,3,3),'i') - stencil[1,1,1] = -1 - stencil[p[0]+1, - p[1]+1, - p[2]+1] = 1 - diffToNeighbor[:,:,:,i] = ndimage.convolve(microstructure,stencil) # compare ID at each point... + for i,p in enumerate(neighborhood): + stencil = np.zeros((3,3,3),'i') + stencil[1,1,1] = -1 + stencil[p[0]+1, + p[1]+1, + p[2]+1] = 1 + diffToNeighbor[:,:,:,i] = ndimage.convolve(microstructure,stencil) # compare ID at each point... # ...to every one in the specified neighborhood # for same IDs at both locations ==> 0 - diffToNeighbor = np.sort(diffToNeighbor) # sort diff such that number of changes in diff (steps)... + diffToNeighbor = np.sort(diffToNeighbor) # sort diff such that number of changes in diff (steps)... # ...reflects number of unique neighbors - uniques = np.where(diffToNeighbor[1:-1,1:-1,1:-1,0] != 0, 1,0) # initialize unique value counter (exclude myself [= 0]) + uniques = np.where(diffToNeighbor[1:-1,1:-1,1:-1,0] != 0, 1,0) # initialize unique value counter (exclude myself [= 0]) - for i in range(1,len(neighborhood)): # check remaining points in neighborhood - uniques += np.where(np.logical_and( - diffToNeighbor[1:-1,1:-1,1:-1,i] != 0, # not myself? - diffToNeighbor[1:-1,1:-1,1:-1,i] != diffToNeighbor[1:-1,1:-1,1:-1,i-1], - ), # flip of ID difference detected? - 1,0) # count that flip + for i in range(1,len(neighborhood)): # check remaining points in neighborhood + uniques += np.where(np.logical_and( + diffToNeighbor[1:-1,1:-1,1:-1,i] != 0, # not myself? + diffToNeighbor[1:-1,1:-1,1:-1,i] != diffToNeighbor[1:-1,1:-1,1:-1,i-1], + ), # flip of ID difference detected? 
+ 1,0) # count that flip - distance = np.ones((len(feature_list),grid[0],grid[1],grid[2]),'d') + distance = np.ones((len(feature_list),grid[0],grid[1],grid[2]),'d') - for i,feature_id in enumerate(feature_list): - distance[i,:,:,:] = np.where(uniques >= features[feature_id]['aliens'],0.0,1.0) # seed with 0.0 when enough unique neighbor IDs are present - distance[i,:,:,:] = ndimage.morphology.distance_transform_edt(distance[i,:,:,:])*[options.scale]*3 + for i,feature_id in enumerate(feature_list): + distance[i,:,:,:] = np.where(uniques >= features[feature_id]['aliens'],0.0,1.0) # seed with 0.0 when enough unique neighbor IDs are present + distance[i,:,:,:] = ndimage.morphology.distance_transform_edt(distance[i,:,:,:])*[options.scale]*3 - distance = distance.reshape([len(feature_list),grid.prod(),1],order='F') - for i in range(len(feature_list)): - stack.append(distance[i,:]) + distance = distance.reshape([len(feature_list),grid.prod(),1],order='F') -# ------------------------------------------ output result ----------------------------------------- - if len(stack) > 1: table.data = np.hstack(tuple(stack)) - table.data_writeArray('%.12g') + for i,feature in enumerate(feature_list): + table.add('ED_{}({})'.format(features[feature]['names'][0],options.id), + distance[i,:], + scriptID+' '+' '.join(sys.argv[1:])) -# ------------------------------------------ output finalization ----------------------------------- - - table.close() # close input ASCII table (works for stdin) + table.to_ASCII(sys.stdout if name is None else name) diff --git a/processing/post/addGaussian.py b/processing/post/addGaussian.py index 9b601a1dc..5f3ec5d60 100755 --- a/processing/post/addGaussian.py +++ b/processing/post/addGaussian.py @@ -2,9 +2,9 @@ import os import sys +from io import StringIO from optparse import OptionParser -import numpy as np from scipy import ndimage import damask @@ -30,7 +30,7 @@ parser.add_option('-p','--pos','--periodiccellcenter', type = 'string', metavar = 'string', help = 'label of coordinates [%default]') parser.add_option('-s','--scalar', - dest = 'scalar', + dest = 'labels', action = 'extend', metavar = '', help = 'label(s) of scalar field values') parser.add_option('-o','--order', @@ -56,78 +56,21 @@ parser.set_defaults(pos = 'pos', ) (options,filenames) = parser.parse_args() - -if options.scalar is None: - parser.error('no data column specified.') - -# --- loop over input files ------------------------------------------------------------------------ - if filenames == []: filenames = [None] +if options.labels is None: parser.error('no data column specified.') + for name in filenames: - try: table = damask.ASCIItable(name = name,buffered = False) - except: continue - damask.util.report(scriptName,name) + damask.util.report(scriptName,name) -# ------------------------------------------ read header ------------------------------------------ + table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name) + damask.grid_filters.coord0_check(table.get(options.pos)) - table.head_read() + for label in options.labels: + table.add('Gauss{}({})'.format(options.sigma,label), + ndimage.filters.gaussian_filter(table.get(label).reshape((-1)), + options.sigma,options.order, + mode = 'wrap' if options.periodic else 'nearest'), + scriptID+' '+' '.join(sys.argv[1:])) -# ------------------------------------------ sanity checks ---------------------------------------- - - items = { - 'scalar': {'dim': 1, 'shape': [1], 'labels':options.scalar, 'active':[], 'column': []}, - } - 
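The core of the addEuclideanDistance.py refactor above is unchanged: once the voxels sitting on a feature (grain boundary, triple line, quadruple point) are marked with 0 and everything else with 1, the distance map comes from scipy's exact Euclidean distance transform. A toy sketch with a made-up boundary criterion instead of the script's neighborhood stencil:

import numpy as np
from scipy import ndimage

ids = np.random.randint(1, 4, (8, 8, 8))                         # synthetic grain IDs on an 8x8x8 grid
boundary = np.where(ids != np.roll(ids, -1, axis=0), 0.0, 1.0)   # 0 where the +x neighbor has a different ID
distance = ndimage.distance_transform_edt(boundary)              # distance (in voxels) to the nearest 0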
errors = [] - remarks = [] - column = {} - - if table.label_dimension(options.pos) != 3: errors.append('coordinates {} are not a vector.'.format(options.pos)) - else: colCoord = table.label_index(options.pos) - - for type, data in items.items(): - for what in (data['labels'] if data['labels'] is not None else []): - dim = table.label_dimension(what) - if dim != data['dim']: remarks.append('column {} is not a {}.'.format(what,type)) - else: - items[type]['active'].append(what) - items[type]['column'].append(table.label_index(what)) - - if remarks != []: damask.util.croak(remarks) - if errors != []: - damask.util.croak(errors) - table.close(dismiss = True) - continue - -# ------------------------------------------ assemble header -------------------------------------- - - table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:])) - for type, data in items.items(): - for label in data['active']: - table.labels_append(['Gauss{}({})'.format(options.sigma,label)]) # extend ASCII header with new labels - table.head_write() - -# --------------- figure out size and grid --------------------------------------------------------- - - table.data_readArray() - - grid,size = damask.util.coordGridAndSize(table.data[:,table.label_indexrange(options.pos)]) - -# ------------------------------------------ process value field ----------------------------------- - - stack = [table.data] - for type, data in items.items(): - for i,label in enumerate(data['active']): - stack.append(ndimage.filters.gaussian_filter(table.data[:,data['column'][i]], - options.sigma,options.order, - mode = 'wrap' if options.periodic else 'nearest' - ).reshape([table.data.shape[0],1]) - ) - -# ------------------------------------------ output result ----------------------------------------- - if len(stack) > 1: table.data = np.hstack(tuple(stack)) - table.data_writeArray('%.12g') - -# ------------------------------------------ output finalization ----------------------------------- - - table.close() # close input ASCII table (works for stdin) + table.to_ASCII(sys.stdout if name is None else name) diff --git a/processing/post/addGradient.py b/processing/post/addGradient.py index bfadb578e..8620c123b 100755 --- a/processing/post/addGradient.py +++ b/processing/post/addGradient.py @@ -2,6 +2,7 @@ import os import sys +from io import StringIO from optparse import OptionParser import numpy as np @@ -12,44 +13,6 @@ import damask scriptName = os.path.splitext(os.path.basename(__file__))[0] scriptID = ' '.join([scriptName,damask.version]) -def merge_dicts(*dict_args): - """Given any number of dicts, shallow copy and merge into a new dict, with precedence going to key value pairs in latter dicts.""" - result = {} - for dictionary in dict_args: - result.update(dictionary) - return result - -def gradFFT(geomdim,field): - """Calculate gradient of a vector or scalar field by transforming into Fourier space.""" - shapeFFT = np.array(np.shape(field))[0:3] - grid = np.array(np.shape(field)[2::-1]) - N = grid.prod() # field size - n = np.array(np.shape(field)[3:]).prod() # data size - - field_fourier = np.fft.rfftn(field,axes=(0,1,2),s=shapeFFT) - grad_fourier = np.empty(field_fourier.shape+(3,),'c16') - - # differentiation in Fourier space - TWOPIIMG = 2.0j*np.pi - einsums = { - 1:'ijkl,ijkm->ijkm', # scalar, 1 -> 3 - 3:'ijkl,ijkm->ijklm', # vector, 3 -> 3x3 - } - - k_sk = np.where(np.arange(grid[2])>grid[2]//2,np.arange(grid[2])-grid[2],np.arange(grid[2]))/geomdim[0] - if grid[2]%2 == 0: k_sk[grid[2]//2] = 0 # Nyquist freq=0 for even grid (Johnson, 
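For addGaussian.py the smoothing itself is a single scipy call; the refactor only changes how the column is read and written back. A sketch of the filter step, with mode='wrap' for periodic data and 'nearest' otherwise, as selected by the --periodic switch:

import numpy as np
from scipy import ndimage

field = np.random.rand(16, 16, 16)                               # scalar field on a periodic 16x16x16 grid
smoothed = ndimage.gaussian_filter(field, sigma=2.0, order=0, mode='wrap')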
MIT, 2011) - - k_sj = np.where(np.arange(grid[1])>grid[1]//2,np.arange(grid[1])-grid[1],np.arange(grid[1]))/geomdim[1] - if grid[1]%2 == 0: k_sj[grid[1]//2] = 0 # Nyquist freq=0 for even grid (Johnson, MIT, 2011) - - k_si = np.arange(grid[0]//2+1)/geomdim[2] - - kk, kj, ki = np.meshgrid(k_sk,k_sj,k_si,indexing = 'ij') - k_s = np.concatenate((ki[:,:,:,None],kj[:,:,:,None],kk[:,:,:,None]),axis = 3).astype('c16') - grad_fourier = np.einsum(einsums[n],field_fourier,k_s)*TWOPIIMG - - return np.fft.irfftn(grad_fourier,axes=(0,1,2),s=shapeFFT).reshape([N,3*n]) - # -------------------------------------------------------------------- # MAIN @@ -57,9 +20,7 @@ def gradFFT(geomdim,field): parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """ Add column(s) containing gradient of requested column(s). -Operates on periodic ordered three-dimensional data sets -of vector and scalar fields. - +Operates on periodic ordered three-dimensional data sets of scalar and vector fields. """, version = scriptID) parser.add_option('-p','--pos','--periodiccellcenter', @@ -67,7 +28,7 @@ parser.add_option('-p','--pos','--periodiccellcenter', type = 'string', metavar = 'string', help = 'label of coordinates [%default]') parser.add_option('-l','--label', - dest = 'data', + dest = 'labels', action = 'extend', metavar = '', help = 'label(s) of field values') @@ -75,85 +36,22 @@ parser.set_defaults(pos = 'pos', ) (options,filenames) = parser.parse_args() - -if options.data is None: parser.error('no data column specified.') - -# --- define possible data types ------------------------------------------------------------------- - -datatypes = { - 1: {'name': 'scalar', - 'shape': [1], - }, - 3: {'name': 'vector', - 'shape': [3], - }, - } - -# --- loop over input files ------------------------------------------------------------------------ - if filenames == []: filenames = [None] +if options.labels is None: parser.error('no data column specified.') + for name in filenames: - try: table = damask.ASCIItable(name = name,buffered = False) - except: continue - damask.util.report(scriptName,name) + damask.util.report(scriptName,name) -# --- interpret header ---------------------------------------------------------------------------- + table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name) + grid,size,origin = damask.grid_filters.cell_coord0_gridSizeOrigin(table.get(options.pos)) - table.head_read() - - remarks = [] - errors = [] - active = [] - - coordDim = table.label_dimension(options.pos) - if coordDim != 3: - errors.append('coordinates "{}" must be three-dimensional.'.format(options.pos)) - else: coordCol = table.label_index(options.pos) - - for me in options.data: - dim = table.label_dimension(me) - if dim in datatypes: - active.append(merge_dicts({'label':me},datatypes[dim])) - remarks.append('differentiating {} "{}"...'.format(datatypes[dim]['name'],me)) - else: - remarks.append('skipping "{}" of dimension {}...'.format(me,dim) if dim != -1 else \ - '"{}" not found...'.format(me) ) - - if remarks != []: damask.util.croak(remarks) - if errors != []: - damask.util.croak(errors) - table.close(dismiss = True) - continue - -# ------------------------------------------ assemble header -------------------------------------- - - table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:])) - for data in active: - table.labels_append(['{}_gradFFT({})'.format(i+1,data['label']) - for i in range(coordDim*np.prod(np.array(data['shape'])))]) 
# extend ASCII header with new labels - table.head_write() - -# --------------- figure out size and grid --------------------------------------------------------- - - table.data_readArray() - - grid,size = damask.util.coordGridAndSize(table.data[:,table.label_indexrange(options.pos)]) - -# ------------------------------------------ process value field ----------------------------------- - - stack = [table.data] - for data in active: - # we need to reverse order here, because x is fastest,ie rightmost, but leftmost in our x,y,z notation - stack.append(gradFFT(size[::-1], - table.data[:,table.label_indexrange(data['label'])]. - reshape(grid[::-1].tolist()+data['shape']))) - -# ------------------------------------------ output result ----------------------------------------- - - if len(stack) > 1: table.data = np.hstack(tuple(stack)) - table.data_writeArray('%.12g') - -# ------------------------------------------ output finalization ----------------------------------- - - table.close() # close input ASCII table (works for stdin) + for label in options.labels: + field = table.get(label) + shape = (1,) if np.prod(field.shape)//np.prod(grid) == 1 else (3,) # scalar or vector + field = field.reshape(np.append(grid[::-1],shape)) + table.add('gradFFT({})'.format(label), + damask.grid_filters.gradient(size[::-1],field).reshape((-1,np.prod(shape)*3)), + scriptID+' '+' '.join(sys.argv[1:])) + + table.to_ASCII(sys.stdout if name is None else name) diff --git a/processing/post/addIPFcolor.py b/processing/post/addIPFcolor.py index 0149dd078..014b0147d 100755 --- a/processing/post/addIPFcolor.py +++ b/processing/post/addIPFcolor.py @@ -2,6 +2,7 @@ import os import sys +from io import StringIO from optparse import OptionParser import numpy as np @@ -43,54 +44,25 @@ parser.set_defaults(pole = (0.0,0.0,1.0), ) (options, filenames) = parser.parse_args() +if filenames == []: filenames = [None] # damask.Orientation requires Bravais lattice, but we are only interested in symmetry -symmetry2lattice={'cubic':'bcc','hexagonal':'hex','tetragonal':'bct'} +symmetry2lattice={'cubic':'fcc','hexagonal':'hex','tetragonal':'bct'} lattice = symmetry2lattice[options.symmetry] pole = np.array(options.pole) pole /= np.linalg.norm(pole) -# --- loop over input files ------------------------------------------------------------------------ - -if filenames == []: filenames = [None] - for name in filenames: - try: - table = damask.ASCIItable(name = name, - buffered = False) - except: continue - damask.util.report(scriptName,name) + damask.util.report(scriptName,name) -# ------------------------------------------ read header ------------------------------------------ - - table.head_read() - -# ------------------------------------------ sanity checks ---------------------------------------- - - if not table.label_dimension(options.quaternion) == 4: - damask.util.croak('input {} does not have dimension 4.'.format(options.quaternion)) - table.close(dismiss = True) # close ASCIItable and remove empty file - continue - - column = table.label_index(options.quaternion) - -# ------------------------------------------ assemble header --------------------------------------- - - table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:])) - table.labels_append(['{}_IPF_{:g}{:g}{:g}_{sym}'.format(i+1,*options.pole,sym = options.symmetry.lower()) for i in range(3)]) - table.head_write() - -# ------------------------------------------ process data ------------------------------------------ - - outputAlive = True - while outputAlive and 
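addGradient.py mirrors the divergence script: the removed gradFFT() helper is replaced by damask.grid_filters.gradient. Again a sketch under the same simplifying assumptions as above (plain complex FFT, C-order (Nx,Ny,Nz) layout), using FT(grad f)(k) = 2*pi*i k FT(f)(k) for a periodic scalar field:

import numpy as np

def grad_fft(size, f):
    """Gradient of a periodic scalar field f, shape (Nx,Ny,Nz), on a box with edge lengths size."""
    grid = np.array(f.shape)
    freq = [np.fft.fftfreq(grid[i], d=size[i]/grid[i]) for i in range(3)]
    k = np.stack(np.meshgrid(*freq, indexing='ij'), axis=-1)                # (Nx,Ny,Nz,3) wave vectors
    grad_hat = 2j*np.pi*k*np.fft.fftn(f)[..., None]
    return np.real(np.fft.ifftn(grad_hat, axes=(0, 1, 2)))                  # (Nx,Ny,Nz,3)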
table.data_read(): # read next data line of ASCII table - o = damask.Orientation(np.array(list(map(float,table.data[column:column+4]))), - lattice = lattice).reduced() - - table.data_append(o.IPFcolor(pole)) - outputAlive = table.data_write() # output processed line - -# ------------------------------------------ output finalization ----------------------------------- - - table.close() # close ASCII tables + table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name) + orientation = table.get(options.quaternion) + color = np.empty((orientation.shape[0],3)) + for i,o in enumerate(orientation): + color[i] = damask.Orientation(o,lattice = lattice).IPFcolor(pole) + + table.add('IPF_{:g}{:g}{:g}_{sym}'.format(*options.pole,sym = options.symmetry.lower()), + color, + scriptID+' '+' '.join(sys.argv[1:])) + table.to_ASCII(sys.stdout if name is None else name) diff --git a/processing/post/addInfo.py b/processing/post/addInfo.py index 2d8192cc1..5e32510db 100755 --- a/processing/post/addInfo.py +++ b/processing/post/addInfo.py @@ -1,6 +1,8 @@ #!/usr/bin/env python3 import os +import sys +from io import StringIO from optparse import OptionParser import damask @@ -24,35 +26,16 @@ parser.add_option('-i', dest = 'info', action = 'extend', metavar = '', help = 'items to add') - (options,filenames) = parser.parse_args() +if filenames == []: filenames = [None] if options.info is None: parser.error('no info specified.') -# --- loop over input files ------------------------------------------------------------------------ - -if filenames == []: filenames = [None] - for name in filenames: - try: table = damask.ASCIItable(name = name, - buffered = False) - except: continue - damask.util.report(scriptName,name) + damask.util.report(scriptName,name) -# ------------------------------------------ assemble header --------------------------------------- + table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name) + table.comments += options.info - table.head_read() - table.info_append(options.info) - table.head_write() - -# ------------------------------------------ pass through data ------------------------------------- - - outputAlive = True - - while outputAlive and table.data_read(): # read next data line of ASCII table - outputAlive = table.data_write() # output processed line - -# ------------------------------------------ output finalization ----------------------------------- - - table.close() # close ASCII tables + table.to_ASCII(sys.stdout if name is None else name) diff --git a/processing/post/addMises.py b/processing/post/addMises.py index be11b0f1c..0c2a6db50 100755 --- a/processing/post/addMises.py +++ b/processing/post/addMises.py @@ -2,10 +2,8 @@ import os import sys +from io import StringIO from optparse import OptionParser -from collections import OrderedDict - -import numpy as np import damask @@ -13,15 +11,6 @@ import damask scriptName = os.path.splitext(os.path.basename(__file__))[0] scriptID = ' '.join([scriptName,damask.version]) -def Mises(what,tensor): - - dev = tensor - np.trace(tensor)/3.0*np.eye(3) - symdev = 0.5*(dev+dev.T) - return np.sqrt(np.sum(symdev*symdev.T)* - { - 'stress': 3.0/2.0, - 'strain': 2.0/3.0, - }[what.lower()]) # -------------------------------------------------------------------- # MAIN @@ -47,62 +36,21 @@ parser.set_defaults(strain = [], (options,filenames) = parser.parse_args() if options.stress is [] and options.strain is []: - parser.error('no data column specified...') - -# --- loop over input 
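The per-row loop introduced in addIPFcolor.py reduces to one call per orientation. A tiny usage sketch with the calls exactly as they appear in that hunk; the scalar-first quaternion convention is my assumption:

import numpy as np
import damask

quaternion = np.array([1.0, 0.0, 0.0, 0.0])                      # assumed (w, x, y, z), identity orientation
pole = np.array([0.0, 0.0, 1.0])
rgb = damask.Orientation(quaternion, lattice='fcc').IPFcolor(pole)   # inverse pole figure color for [0 0 1]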
files ------------------------------------------------------------------------- + parser.error('no data column specified...') if filenames == []: filenames = [None] for name in filenames: - try: - table = damask.ASCIItable(name = name, - buffered = False) - except: continue - damask.util.report(scriptName,name) + damask.util.report(scriptName,name) -# ------------------------------------------ read header ------------------------------------------ - - table.head_read() - -# ------------------------------------------ sanity checks ---------------------------------------- - - items = OrderedDict([ - ('strain', {'dim': 9, 'shape': [3,3], 'labels':options.strain, 'active':[], 'column': []}), - ('stress', {'dim': 9, 'shape': [3,3], 'labels':options.stress, 'active':[], 'column': []}) - ]) - errors = [] - remarks = [] - - for type, data in items.items(): - for what in data['labels']: - dim = table.label_dimension(what) - if dim != data['dim']: remarks.append('column {} is not a {}...'.format(what,type)) - else: - items[type]['active'].append(what) - items[type]['column'].append(table.label_index(what)) - table.labels_append('Mises({})'.format(what)) # extend ASCII header with new labels - - if remarks != []: damask.util.croak(remarks) - if errors != []: - damask.util.croak(errors) - table.close(dismiss = True) - continue - -# ------------------------------------------ assemble header -------------------------------------- - - table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:])) - table.head_write() - -# ------------------------------------------ process data ------------------------------------------ - - outputAlive = True - while outputAlive and table.data_read(): # read next data line of ASCII table - for type, data in items.items(): - for column in data['column']: - table.data_append(Mises(type, - np.array(table.data[column:column+data['dim']],'d').reshape(data['shape']))) - outputAlive = table.data_write() # output processed line - -# ------------------------------------------ output finalization ----------------------------------- - - table.close() # close input ASCII table (works for stdin) + table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name) + for strain in options.strain: + table.add('Mises({})'.format(strain), + damask.mechanics.Mises_strain(damask.mechanics.symmetric(table.get(strain).reshape(-1,3,3))), + scriptID+' '+' '.join(sys.argv[1:])) + for stress in options.stress: + table.add('Mises({})'.format(stress), + damask.mechanics.Mises_stress(damask.mechanics.symmetric(table.get(stress).reshape(-1,3,3))), + scriptID+' '+' '.join(sys.argv[1:])) + + table.to_ASCII(sys.stdout if name is None else name) diff --git a/processing/post/addNorm.py b/processing/post/addNorm.py index c8c0b05bf..4ac2bf899 100755 --- a/processing/post/addNorm.py +++ b/processing/post/addNorm.py @@ -2,6 +2,7 @@ import os import sys +from io import StringIO from optparse import OptionParser import numpy as np @@ -42,7 +43,7 @@ parser.add_option('-n','--norm', type = 'choice', choices = normChoices, metavar='string', help = 'type of element-wise p-norm [frobenius] {%s}'%(','.join(map(str,normChoices)))) parser.add_option('-l','--label', - dest = 'label', + dest = 'labels', action = 'extend', metavar = '', help = 'heading of column(s) to calculate norm of') @@ -50,62 +51,25 @@ parser.set_defaults(norm = 'frobenius', ) (options,filenames) = parser.parse_args() - -if options.norm.lower() not in normChoices: - parser.error('invalid norm ({}) 
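The Mises() helper deleted from addMises.py documents what the new damask.mechanics.Mises_stress/Mises_strain calls are expected to compute: the von Mises equivalent of the symmetrized deviatoric tensor, with factor 3/2 for stress and 2/3 for strain. Kept here as a stand-alone sketch with a quick sanity check:

import numpy as np

def mises(what, tensor):
    dev = tensor - np.trace(tensor)/3.0*np.eye(3)                # deviatoric part
    symdev = 0.5*(dev + dev.T)                                   # symmetrize
    return np.sqrt(np.sum(symdev*symdev)*{'stress': 1.5, 'strain': 2.0/3.0}[what])

print(mises('stress', np.diag([100.0, 0.0, 0.0])))               # uniaxial stress -> 100.0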
specified.'.format(options.norm)) -if options.label is None: - parser.error('no data column specified.') - -# --- loop over input files ------------------------------------------------------------------------- - if filenames == []: filenames = [None] +if options.norm.lower() not in normChoices: + parser.error('invalid norm ({}) specified.'.format(options.norm)) +if options.labels is None: + parser.error('no data column specified.') + for name in filenames: - try: - table = damask.ASCIItable(name = name, - buffered = False) - except: continue - damask.util.report(scriptName,name) + damask.util.report(scriptName,name) -# ------------------------------------------ read header ------------------------------------------ + table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name) + for label in options.labels: + data = table.get(label) + data_norm = np.empty((data.shape[0],1)) + for i,d in enumerate(data): + data_norm[i] = norm(options.norm.capitalize(),d) - table.head_read() + table.add('norm{}({})'.format(options.norm.capitalize(),label), + data_norm, + scriptID+' '+' '.join(sys.argv[1:])) -# ------------------------------------------ sanity checks ---------------------------------------- - - errors = [] - remarks = [] - columns = [] - dims = [] - - for what in options.label: - dim = table.label_dimension(what) - if dim < 0: remarks.append('column {} not found...'.format(what)) - else: - dims.append(dim) - columns.append(table.label_index(what)) - table.labels_append('norm{}({})'.format(options.norm.capitalize(),what)) # extend ASCII header with new labels - - if remarks != []: damask.util.croak(remarks) - if errors != []: - damask.util.croak(errors) - table.close(dismiss = True) - continue - -# ------------------------------------------ assemble header -------------------------------------- - - table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:])) - table.head_write() - -# ------------------------------------------ process data ------------------------------------------ - - outputAlive = True - while outputAlive and table.data_read(): # read next data line of ASCII table - for column,dim in zip(columns,dims): - table.data_append(norm(options.norm.capitalize(), - map(float,table.data[column:column+dim]))) - outputAlive = table.data_write() # output processed line - -# ------------------------------------------ output finalization ----------------------------------- - - table.close() # close input ASCII table (works for stdin) + table.to_ASCII(sys.stdout if name is None else name) diff --git a/processing/post/addOrientations.py b/processing/post/addOrientations.py index 31ce6aeb3..2c46ee5ee 100755 --- a/processing/post/addOrientations.py +++ b/processing/post/addOrientations.py @@ -125,9 +125,10 @@ R = damask.Rotation.fromAxisAngle(np.array(options.labrotation),options.degrees, if filenames == []: filenames = [None] for name in filenames: - try: table = damask.ASCIItable(name = name, - buffered = False) - except Exception: continue + try: + table = damask.ASCIItable(name = name, buffered = False) + except IOError: + continue damask.util.report(scriptName,name) # ------------------------------------------ read header ------------------------------------------ diff --git a/processing/post/addPK2.py b/processing/post/addPK2.py index f38753619..185160d79 100755 --- a/processing/post/addPK2.py +++ b/processing/post/addPK2.py @@ -2,10 +2,9 @@ import os import sys +from io import StringIO from optparse import OptionParser -import numpy as np - import damask 
@@ -36,53 +35,16 @@ parser.set_defaults(defgrad = 'f', ) (options,filenames) = parser.parse_args() - -# --- loop over input files ------------------------------------------------------------------------- - if filenames == []: filenames = [None] for name in filenames: - try: - table = damask.ASCIItable(name = name, - buffered = False) - except: continue - damask.util.report(scriptName,name) + damask.util.report(scriptName,name) -# ------------------------------------------ read header ------------------------------------------ + table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name) - table.head_read() + table.add('S', + damask.mechanics.PK2(table.get(options.defgrad).reshape(-1,3,3), + table.get(options.stress ).reshape(-1,3,3)).reshape(-1,9), + scriptID+' '+' '.join(sys.argv[1:])) -# ------------------------------------------ sanity checks ---------------------------------------- - - errors = [] - column = {} - - for tensor in [options.defgrad,options.stress]: - dim = table.label_dimension(tensor) - if dim < 0: errors.append('column {} not found.'.format(tensor)) - elif dim != 9: errors.append('column {} is not a tensor.'.format(tensor)) - else: - column[tensor] = table.label_index(tensor) - - if errors != []: - damask.util.croak(errors) - table.close(dismiss = True) - continue - -# ------------------------------------------ assemble header -------------------------------------- - - table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:])) - table.labels_append(['{}_S'.format(i+1) for i in range(9)]) # extend ASCII header with new labels - table.head_write() - -# ------------------------------------------ process data ------------------------------------------ - outputAlive = True - while outputAlive and table.data_read(): # read next data line of ASCII table - F = np.array(list(map(float,table.data[column[options.defgrad]:column[options.defgrad]+9])),'d').reshape(3,3) - P = np.array(list(map(float,table.data[column[options.stress ]:column[options.stress ]+9])),'d').reshape(3,3) - table.data_append(list(np.dot(np.linalg.inv(F),P).reshape(9))) # [S] =[P].[F-1] - outputAlive = table.data_write() # output processed line - -# ------------------------------------------ output finalization ----------------------------------- - - table.close() # close input ASCII table (works for stdin) + table.to_ASCII(sys.stdout if name is None else name) diff --git a/processing/post/addPole.py b/processing/post/addPole.py index c8b83b106..58f9235dc 100755 --- a/processing/post/addPole.py +++ b/processing/post/addPole.py @@ -2,6 +2,7 @@ import os import sys +from io import StringIO from optparse import OptionParser import numpy as np @@ -42,52 +43,23 @@ parser.set_defaults(pole = (1.0,0.0,0.0), ) (options, filenames) = parser.parse_args() +if filenames == []: filenames = [None] pole = np.array(options.pole) pole /= np.linalg.norm(pole) -# --- loop over input files ------------------------------------------------------------------------- - -if filenames == []: filenames = [None] - for name in filenames: - try: - table = damask.ASCIItable(name = name, - buffered = False) - except: continue - damask.util.report(scriptName,name) + damask.util.report(scriptName,name) -# ------------------------------------------ read header ------------------------------------------ - - table.head_read() - -# ------------------------------------------ sanity checks ---------------------------------------- - - if not table.label_dimension(options.quaternion) == 4: - damask.util.croak('input 
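The removed per-line loop in addPK2.py spells out the relation that damask.mechanics.PK2 is now used for: with deformation gradient F and first Piola-Kirchhoff stress P, the second Piola-Kirchhoff stress is S = F^-1 . P. A worked numpy sketch:

import numpy as np

F = np.diag([1.1, 1.0, 1.0])                                     # 10% stretch along x
P = np.zeros((3, 3))
P[0, 0] = 200.0                                                  # uniaxial first Piola-Kirchhoff stress
S = np.linalg.inv(F) @ P                                         # S[0,0] = 200/1.1 ~ 181.8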
{} does not have dimension 4.'.format(options.quaternion)) - table.close(dismiss = True) # close ASCIItable and remove empty file - continue - - column = table.label_index(options.quaternion) - -# ------------------------------------------ assemble header --------------------------------------- - - table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:])) - table.labels_append(['{}_pole_{}{}{}'.format(i+1,*options.pole) for i in range(2)]) - table.head_write() - -# ------------------------------------------ process data ------------------------------------------ - outputAlive = True - while outputAlive and table.data_read(): # read next data line of ASCII table - o = damask.Rotation(np.array(list(map(float,table.data[column:column+4])))) - - rotatedPole = o*pole # rotate pole according to crystal orientation - (x,y) = rotatedPole[0:2]/(1.+abs(pole[2])) # stereographic projection - - table.data_append([np.sqrt(x*x+y*y),np.arctan2(y,x)] if options.polar else [x,y]) # cartesian coordinates - - outputAlive = table.data_write() # output processed line - -# ------------------------------------------ output finalization ----------------------------------- - - table.close() # close ASCII tables + table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name) + orientation = table.get(options.quaternion) + poles = np.empty((orientation.shape[0],2)) + for i,o in enumerate(orientation): + rotatedPole = damask.Rotation(o)*pole # rotate pole according to crystal orientation + (x,y) = rotatedPole[0:2]/(1.+abs(pole[2])) # stereographic projection + poles[i] = [np.sqrt(x*x+y*y),np.arctan2(y,x)] if options.polar else [x,y] # cartesian coordinates + + table.add('pole_{}{}{}'.format(*options.pole), + poles, + scriptID+' '+' '.join(sys.argv[1:])) + table.to_ASCII(sys.stdout if name is None else name) diff --git a/processing/post/addStrainTensors.py b/processing/post/addStrainTensors.py index 2aa206952..77015a91f 100755 --- a/processing/post/addStrainTensors.py +++ b/processing/post/addStrainTensors.py @@ -2,25 +2,24 @@ import os import sys +from io import StringIO from optparse import OptionParser -import numpy as np - import damask scriptName = os.path.splitext(os.path.basename(__file__))[0] scriptID = ' '.join([scriptName,damask.version]) -def operator(stretch,strain,eigenvalues): +def parameters(stretch,strain): """Albrecht Bertram: Elasticity and Plasticity of Large Deformations An Introduction (3rd Edition, 2012), p. 
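addPole.py keeps the same geometry as before: the requested pole is rotated into the crystal frame and then mapped onto the equatorial plane. A sketch of the projection step only (standard stereographic projection of a unit direction; the rotation itself is done with damask.Rotation in the hunk above):

import numpy as np

v = np.array([1.0, 1.0, 1.0])/np.sqrt(3.0)                       # rotated pole, assumed unit length
x, y = v[:2]/(1.0 + abs(v[2]))                                   # stereographic projection
r, phi = np.hypot(x, y), np.arctan2(y, x)                        # polar form, as with --polar above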
102.""" return { - 'V#ln': np.log(eigenvalues) , - 'U#ln': np.log(eigenvalues) , - 'V#Biot': ( np.ones(3,'d') - 1.0/eigenvalues ) , - 'U#Biot': ( eigenvalues - np.ones(3,'d') ) , - 'V#Green': ( np.ones(3,'d') - 1.0/eigenvalues/eigenvalues) *0.5, - 'U#Green': ( eigenvalues*eigenvalues - np.ones(3,'d')) *0.5, + 'V#ln': ('V',0.0), + 'U#ln': ('U',0.0), + 'V#Biot': ('V',-.5), + 'U#Biot': ('U',+.5), + 'V#Green': ('V',-1.), + 'U#Green': ('U',+1.), }[stretch+'#'+strain] @@ -64,9 +63,10 @@ parser.set_defaults( ) (options,filenames) = parser.parse_args() +if filenames == []: filenames = [None] if len(options.defgrad) > 1: - options.defgrad = options.defgrad[1:] + options.defgrad = options.defgrad[1:] stretches = [] strains = [] @@ -78,84 +78,21 @@ if options.biot: strains.append('Biot') if options.green: strains.append('Green') if options.defgrad is None: - parser.error('no data column specified.') - -# --- loop over input files ------------------------------------------------------------------------- - -if filenames == []: filenames = [None] + parser.error('no data column specified.') for name in filenames: - try: - table = damask.ASCIItable(name = name, - buffered = False) - except IOError: continue - damask.util.report(scriptName,name) + damask.util.report(scriptName,name) + + table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name) -# ------------------------------------------ read header ------------------------------------------ - - table.head_read() - -# ------------------------------------------ sanity checks ---------------------------------------- - - items = { - 'tensor': {'dim': 9, 'shape': [3,3], 'labels':options.defgrad, 'column': []}, - } - errors = [] - remarks = [] - - for type, data in items.items(): - for what in data['labels']: - dim = table.label_dimension(what) - if dim != data['dim']: remarks.append('column {} is not a {}...'.format(what,type)) - else: - items[type]['column'].append(table.label_index(what)) + for defgrad in options.defgrad: + F = table.get(defgrad).reshape((-1,3,3)) for theStretch in stretches: - for theStrain in strains: - table.labels_append(['{}_{}({}){}'.format(i+1, # extend ASCII header with new labels - theStrain, - theStretch, - what if what != 'f' else '') for i in range(9)]) + for theStrain in strains: + (t,m) = parameters(theStretch,theStrain) + label = '{}({}){}'.format(theStrain,theStretch,defgrad if defgrad != 'f' else '') + table.add(label, + damask.mechanics.strain_tensor(F,t,m).reshape((-1,9)), + scriptID+' '+' '.join(sys.argv[1:])) - if remarks != []: damask.util.croak(remarks) - if errors != []: - damask.util.croak(errors) - table.close(dismiss = True) - continue - -# ------------------------------------------ assemble header -------------------------------------- - - table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:])) - table.head_write() - -# ------------------------------------------ process data ------------------------------------------ - - stretch = {} - outputAlive = True - - while outputAlive and table.data_read(): # read next data line of ASCII table - for column in items['tensor']['column']: # loop over all requested defgrads - F = np.array(list(map(float,table.data[column:column+items['tensor']['dim']])),'d').reshape(items['tensor']['shape']) - (U,S,Vh) = np.linalg.svd(F) # singular value decomposition - R_inv = np.dot(U,Vh).T # rotation of polar decomposition - stretch['U'] = np.dot(R_inv,F) # F = RU - stretch['V'] = np.dot(F,R_inv) # F = VR - - for theStretch in stretches: - 
stretch[theStretch] = np.where(abs(stretch[theStretch]) < 1e-12, 0, stretch[theStretch]) # kill nasty noisy data - (D,V) = np.linalg.eigh((stretch[theStretch]+stretch[theStretch].T)*0.5) # eigen decomposition (of symmetric(ed) matrix) - neg = np.where(D < 0.0) # find negative eigenvalues ... - D[neg] *= -1. # ... flip value ... - V[:,neg] *= -1. # ... and vector - for theStrain in strains: - d = operator(theStretch,theStrain,D) # operate on eigenvalues of U or V - eps = np.dot(V,np.dot(np.diag(d),V.T)).reshape(9) # build tensor back from eigenvalue/vector basis - - table.data_append(list(eps)) - -# ------------------------------------------ output result ----------------------------------------- - - outputAlive = table.data_write() # output processed line - -# ------------------------------------------ output finalization ----------------------------------- - - table.close() # close ASCII tables + table.to_ASCII(sys.stdout if name is None else name) diff --git a/processing/post/addTable.py b/processing/post/addTable.py index 7af1dcf35..944214e69 100755 --- a/processing/post/addTable.py +++ b/processing/post/addTable.py @@ -2,6 +2,7 @@ import os import sys +from io import StringIO from optparse import OptionParser import damask @@ -25,56 +26,19 @@ parser.add_option('-a', '--add','--table', help = 'tables to add') (options,filenames) = parser.parse_args() - -if options.table is None: - parser.error('no table specified.') - - -# --- loop over input files ------------------------------------------------------------------------- - if filenames == []: filenames = [None] +if options.table is None: + parser.error('no table specified.') + for name in filenames: - try: table = damask.ASCIItable(name = name, - buffered = False) - except: continue + damask.util.report(scriptName,name) - damask.util.report(scriptName,name) + table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name) - tables = [] - for addTable in options.table: - try: tables.append(damask.ASCIItable(name = addTable, - buffered = False, - readonly = True) - ) - except: continue + for addTable in options.table: + table2 = damask.Table.from_ASCII(addTable) + table2.data = table2.data[:table.data.shape[0]] + table.join(table2) -# ------------------------------------------ read headers ------------------------------------------ - - table.head_read() - for addTable in tables: addTable.head_read() - -# ------------------------------------------ assemble header -------------------------------------- - - table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:])) - - for addTable in tables: table.labels_append(addTable.labels(raw = True)) # extend ASCII header with new labels - - table.head_write() - -# ------------------------------------------ process data ------------------------------------------ - - outputAlive = True - while outputAlive and table.data_read(): - for addTable in tables: - outputAlive = addTable.data_read() # read next table's data - if not outputAlive: break - table.data_append(addTable.data) # append to master table - if outputAlive: - outputAlive = table.data_write() # output processed line - -# ------------------------------------------ output finalization ----------------------------------- - - table.close() # close ASCII tables - for addTable in tables: - addTable.close() + table.to_ASCII(sys.stdout if name is None else name) diff --git a/processing/post/averageDown.py b/processing/post/averageDown.py index d94bc8dbd..0d3948251 100755 --- a/processing/post/averageDown.py +++ 
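The new parameters() helper in addStrainTensors.py maps each stretch/strain choice to a pair (t, m) for damask.mechanics.strain_tensor, which suggests the Seth-Hill family: E_m = (U^(2m) - I)/(2m) for m != 0 and E_0 = ln U, and analogously for the left stretch V. A sketch of that family (assumed definition), built from the right Cauchy-Green tensor C = F^T F:

import numpy as np

def seth_hill(F, m):
    """Seth-Hill strain of order m from the right stretch U, using C = F^T F = U^2."""
    w, V = np.linalg.eigh(F.T @ F)                               # eigenvalues of C are squared principal stretches
    d = 0.5*np.log(w) if m == 0.0 else (w**m - 1.0)/(2.0*m)      # strain measure on the eigenvalues
    return (V*d) @ V.T

E_green = seth_hill(np.diag([1.2, 1.0, 1.0]), 1.0)               # E[0,0] = (1.2**2 - 1)/2 = 0.22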
b/processing/post/averageDown.py @@ -2,6 +2,7 @@ import os import sys +from io import StringIO from optparse import OptionParser import numpy as np @@ -49,6 +50,7 @@ parser.set_defaults(pos = 'pos', ) (options,filenames) = parser.parse_args() +if filenames == []: filenames = [None] packing = np.array(options.packing,dtype = int) shift = np.array(options.shift, dtype = int) @@ -56,46 +58,14 @@ shift = np.array(options.shift, dtype = int) prefix = 'averagedDown{}x{}x{}_'.format(*packing) if any(shift != 0): prefix += 'shift{:+}{:+}{:+}_'.format(*shift) -# --- loop over input files ------------------------------------------------------------------------ - -if filenames == []: filenames = [None] for name in filenames: - try: table = damask.ASCIItable(name = name, - outname = os.path.join(os.path.dirname(name), - prefix+os.path.basename(name)) if name else name, - buffered = False) - except: continue damask.util.report(scriptName,name) - -# ------------------------------------------ read header ------------------------------------------ - - table.head_read() - -# ------------------------------------------ sanity checks ---------------------------------------- - - errors = [] - remarks = [] - if table.label_dimension(options.pos) != 3: errors.append('coordinates {} are not a vector.'.format(options.pos)) - - if remarks != []: damask.util.croak(remarks) - if errors != []: - damask.util.croak(errors) - table.close(dismiss = True) - continue - -# ------------------------------------------ assemble header --------------------------------------- - - table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:])) - table.head_write() - -# --------------- figure out size and grid --------------------------------------------------------- - - table.data_readArray() + table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name) if (options.grid is None or options.size is None): - grid,size = damask.util.coordGridAndSize(table.data[:,table.label_indexrange(options.pos)]) + grid,size,origin = damask.grid_filters.cell_coord0_gridSizeOrigin(table.get(options.pos)) else: grid = np.array(options.grid,'i') size = np.array(options.size,'d') @@ -104,37 +74,25 @@ for name in filenames: shift = np.where(grid == 1,0,shift) # reset shift to 0 where grid==1 packedGrid = np.maximum(np.ones(3,'i'),grid//packing) + data = table.data.values.reshape(tuple(grid)+(-1,),order = 'F') averagedDown = scipy.ndimage.filters.uniform_filter( \ np.roll( np.roll( - np.roll(table.data.reshape(list(grid)+[table.data.shape[1]],order = 'F'), + np.roll(data, -shift[0],axis = 0), -shift[1],axis = 1), -shift[2],axis = 2), size = list(packing) + [1], mode = 'wrap', origin = list(-(packing//2)) + [0])\ - [::packing[0],::packing[1],::packing[2],:].reshape((packedGrid.prod(),table.data.shape[1]),order = 'F') + [::packing[0],::packing[1],::packing[2],:].reshape((packedGrid.prod(),-1),order = 'F') - table.data = averagedDown + table = damask.Table(averagedDown,table.shapes,table.comments) -#--- generate grid -------------------------------------------------------------------------------- + coords = damask.grid_filters.cell_coord0(packedGrid,size,shift/packedGrid*size+origin) + table.set(options.pos, coords.reshape((-1,3))) - x = (0.5 + shift[0] + np.arange(packedGrid[0],dtype=float))/packedGrid[0]*size[0] - y = (0.5 + shift[1] + np.arange(packedGrid[1],dtype=float))/packedGrid[1]*size[1] - z = (0.5 + shift[2] + np.arange(packedGrid[2],dtype=float))/packedGrid[2]*size[2] - xx = np.tile( x, packedGrid[1]* packedGrid[2]) 
- yy = np.tile(np.repeat(y,packedGrid[0] ),packedGrid[2]) - zz = np.repeat(z,packedGrid[0]*packedGrid[1]) - - table.data[:,table.label_indexrange(options.pos)] = np.squeeze(np.dstack((xx,yy,zz))) - -# ------------------------------------------ output result ----------------------------------------- - - table.data_writeArray() - -# ------------------------------------------ output finalization ----------------------------------- - - table.close() # close ASCII tables + outname = os.path.join(os.path.dirname(name),prefix+os.path.basename(name)) + table.to_ASCII(sys.stdout if name is None else outname) diff --git a/processing/post/blowUp.py b/processing/post/blowUp.py index 3dccb1aaf..718858e1c 100755 --- a/processing/post/blowUp.py +++ b/processing/post/blowUp.py @@ -2,8 +2,10 @@ import os import sys +from io import StringIO from optparse import OptionParser +from scipy import ndimage import numpy as np import damask @@ -42,81 +44,29 @@ parser.set_defaults(pos = 'pos', ) (options,filenames) = parser.parse_args() +if filenames == []: filenames = [None] options.packing = np.array(options.packing) prefix = 'blowUp{}x{}x{}_'.format(*options.packing) -# --- loop over input files ------------------------------------------------------------------------- - -if filenames == []: filenames = [None] for name in filenames: - try: table = damask.ASCIItable(name = name, - outname = os.path.join(os.path.dirname(name), - prefix+os.path.basename(name)) if name else name, - buffered = False) - except: continue damask.util.report(scriptName,name) - -# ------------------------------------------ read header ------------------------------------------ - - table.head_read() - -# ------------------------------------------ sanity checks ---------------------------------------- - errors = [] - remarks = [] - - if table.label_dimension(options.pos) != 3: errors.append('coordinates "{}" are not a vector.'.format(options.pos)) - - colElem = table.label_index('elem') - - if remarks != []: damask.util.croak(remarks) - if errors != []: - damask.util.croak(errors) - table.close(dismiss = True) - continue - -# --------------- figure out size and grid --------------------------------------------------------- - - table.data_readArray(options.pos) - table.data_rewind() - - grid,size = damask.util.coordGridAndSize(table.data) + table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name) + grid,size,origin = damask.grid_filters.cell_coord0_gridSizeOrigin(table.get(options.pos)) packing = np.array(options.packing,'i') outSize = grid*packing -# ------------------------------------------ assemble header -------------------------------------- + data = table.data.values.reshape(tuple(grid)+(-1,)) + blownUp = ndimage.interpolation.zoom(data,tuple(packing)+(1,),order=0,mode='nearest').reshape((outSize.prod(),-1)) - table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:])) - table.head_write() + table = damask.Table(blownUp,table.shapes,table.comments) -# ------------------------------------------ process data ------------------------------------------- - - data = np.zeros(outSize.tolist()+[len(table.labels(raw = True))]) - p = np.zeros(3,'i') + coords = damask.grid_filters.cell_coord0(outSize,size,origin) + table.set(options.pos,coords.reshape((-1,3))) + table.set('elem',np.arange(1,outSize.prod()+1)) - for p[2] in range(grid[2]): - for p[1] in range(grid[1]): - for p[0] in range(grid[0]): - d = p*packing - table.data_read() - data[d[0]:d[0]+packing[0], - d[1]:d[1]+packing[1], - d[2]:d[2]+packing[2], - 
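For averageDown.py, the uniform_filter/striding combination above reduces, in the simple case of zero shift and a grid divisible by the packing, to plain block averaging. A simplified sketch of that idea (C-order layout instead of the script's Fortran order):

import numpy as np

data = np.random.rand(8, 8, 8, 3)                                # vector field on an 8x8x8 grid
p = 2                                                            # packing factor per direction
coarse = data.reshape(8//p, p, 8//p, p, 8//p, p, -1).mean(axis=(1, 3, 5))
print(coarse.shape)                                              # -> (4, 4, 4, 3)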
: ] = np.tile(np.array(table.data_asFloat(),'d'),packing.tolist()+[1]) # tile to match blowUp voxel size - elementSize = size/grid/packing - elem = 1 - for c in range(outSize[2]): - for b in range(outSize[1]): - for a in range(outSize[0]): - data[a,b,c,table.label_indexrange(options.pos)] = [a+0.5,b+0.5,c+0.5]*elementSize - if colElem != -1: data[a,b,c,colElem] = elem - table.data = data[a,b,c,:].tolist() - outputAlive = table.data_write() # output processed line - elem += 1 - -# ------------------------------------------ output finalization ----------------------------------- - - table.close() # close input ASCII table (works for stdin) + outname = os.path.join(os.path.dirname(name),prefix+os.path.basename(name)) + table.to_ASCII(sys.stdout if name is None else outname) diff --git a/processing/post/growTable.py b/processing/post/growTable.py index 361ea5764..1dbfa8423 100755 --- a/processing/post/growTable.py +++ b/processing/post/growTable.py @@ -2,10 +2,9 @@ import os import sys +from io import StringIO from optparse import OptionParser -import numpy as np - import damask scriptName = os.path.splitext(os.path.basename(__file__))[0] @@ -27,53 +26,18 @@ parser.add_option('-a', '--add','--table', help = 'tables to add') (options,filenames) = parser.parse_args() - -if options.table is None: - parser.error('no table specified.') - - -# --- loop over input files ------------------------------------------------------------------------- - if filenames == []: filenames = [None] +if options.table is None: + parser.error('no table specified.') + for name in filenames: - try: table = damask.ASCIItable(name = name, - buffered = False) - except: continue + damask.util.report(scriptName,name) - damask.util.report(scriptName,name) + table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name) - tables = [] - for addTable in options.table: - try: tables.append(damask.ASCIItable(name = addTable, - buffered = False, - readonly = True) - ) - except: continue + for growTable in options.table: + table2 = damask.Table.from_ASCII(growTable) + table.append(table2) -# ------------------------------------------ read headers ------------------------------------------ - - table.head_read() - for addTable in tables: addTable.head_read() - -# ------------------------------------------ assemble header -------------------------------------- - - table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:])) - - table.head_write() - -# ------------------------------------------ process data ------------------------------------------ - - table.data_readArray() - data = table.data - for addTable in tables: - addTable.data_readArray(table.labels(raw = True)) - data = np.vstack((data,addTable.data)) - table.data = data - table.data_writeArray() - -# ------------------------------------------ output finalization ----------------------------------- - - table.close() # close ASCII tables - for addTable in tables: - addTable.close() + table.to_ASCII(sys.stdout if name is None else name) diff --git a/processing/post/reLabel.py b/processing/post/reLabel.py index e7ad1f1e9..85d16acea 100755 --- a/processing/post/reLabel.py +++ b/processing/post/reLabel.py @@ -2,8 +2,8 @@ import os import sys +from io import StringIO from optparse import OptionParser -import re import damask @@ -35,62 +35,18 @@ parser.set_defaults(label = [], ) (options,filenames) = parser.parse_args() - -pattern = [re.compile('^()(.+)$'), # label pattern for scalar - re.compile('^(\d+_)?(.+)$'), # label pattern for multidimension - ] 
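blowUp.py goes the other way: every cell value is repeated packing times per direction by a zero-order (nearest-neighbour) zoom, and fresh cell-centre coordinates are generated for the finer grid afterwards. A sketch of the resampling call:

import numpy as np
from scipy import ndimage

data = np.arange(8.0).reshape(2, 2, 2, 1)                        # one scalar per cell on a 2x2x2 grid
packing = (2, 2, 2)
blown_up = ndimage.zoom(data, packing + (1,), order=0, mode='nearest')
print(blown_up.shape)                                            # -> (4, 4, 4, 1)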
- -# --- loop over input files ------------------------------------------------------------------------- - if filenames == []: filenames = [None] +if len(options.label) != len(options.substitute): + parser.error('number of column labels and substitutes do not match.') + for name in filenames: - try: table = damask.ASCIItable(name = name, - buffered = False) - except: continue - damask.util.report(scriptName,name) + damask.util.report(scriptName,name) -# ------------------------------------------ read header ------------------------------------------ + table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name) + for i,label in enumerate(options.label): + table.rename(label, + options.substitute[i], + scriptID+' '+' '.join(sys.argv[1:])) - table.head_read() - -# ------------------------------------------ process labels --------------------------------------- - - errors = [] - remarks = [] - - if len(options.label) == 0: - errors.append('no labels specified.') - elif len(options.label) != len(options.substitute): - errors.append('mismatch between number of labels ({}) and substitutes ({}).'.format(len(options.label), - len(options.substitute))) - else: - indices = table.label_index (options.label) - dimensions = table.label_dimension(options.label) - for i,index in enumerate(indices): - if index == -1: remarks.append('label "{}" not present...'.format(options.label[i])) - else: - m = pattern[int(dimensions[i]>1)].match(table.tags[index]) # isolate label name - for j in range(dimensions[i]): - table.tags[index+j] = table.tags[index+j].replace(m.group(2),options.substitute[i]) # replace name with substitute - - if remarks != []: damask.util.croak(remarks) - if errors != []: - damask.util.croak(errors) - table.close(dismiss = True) - continue - -# ------------------------------------------ assemble header --------------------------------------- - - table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:])) - table.head_write() - -# ------------------------------------------ process data ------------------------------------------ - - outputAlive = True - while outputAlive and table.data_read(): # read next data line of ASCII table - outputAlive = table.data_write() # output processed line - -# ------------------------------------------ output finalization ----------------------------------- - - table.close() # close ASCII tables + table.to_ASCII(sys.stdout if name is None else name) diff --git a/processing/post/scaleData.py b/processing/post/scaleData.py index 5b03f8e07..58f853251 100755 --- a/processing/post/scaleData.py +++ b/processing/post/scaleData.py @@ -2,10 +2,9 @@ import os import sys +from io import StringIO from optparse import OptionParser -import numpy as np - import damask @@ -23,7 +22,7 @@ Uniformly scale column values by given factor. 
""", version = scriptID) parser.add_option('-l','--label', - dest = 'label', + dest = 'labels', action = 'extend', metavar = '', help ='column(s) to scale') parser.add_option('-f','--factor', @@ -32,61 +31,21 @@ parser.add_option('-f','--factor', help = 'factor(s) per column') parser.set_defaults(label = [], - ) + factor = []) (options,filenames) = parser.parse_args() - -if len(options.label) != len(options.factor): - parser.error('number of column labels and factors do not match.') - -# --- loop over input files ------------------------------------------------------------------------- - if filenames == []: filenames = [None] +if len(options.labels) != len(options.factor): + parser.error('number of column labels and factors do not match.') + for name in filenames: - try: - table = damask.ASCIItable(name = name, - buffered = False) - except: continue - damask.util.report(scriptName,name) + damask.util.report(scriptName,name) -# ------------------------------------------ read header ------------------------------------------ + table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name) + for i,label in enumerate(options.labels): + table.set(label, + table.get(label)*float(options.factor[i]), + scriptID+' '+' '.join(sys.argv[1:])) - table.head_read() - - errors = [] - remarks = [] - columns = [] - dims = [] - factors = [] - - for what,factor in zip(options.label,options.factor): - col = table.label_index(what) - if col < 0: remarks.append('column {} not found...'.format(what,type)) - else: - columns.append(col) - factors.append(float(factor)) - dims.append(table.label_dimension(what)) - - if remarks != []: damask.util.croak(remarks) - if errors != []: - damask.util.croak(errors) - table.close(dismiss = True) - continue - -# ------------------------------------------ assemble header --------------------------------------- - - table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:])) - table.head_write() - -# ------------------------------------------ process data ------------------------------------------ - - outputAlive = True - while outputAlive and table.data_read(): # read next data line of ASCII table - for col,dim,factor in zip(columns,dims,factors): # loop over items - table.data[col:col+dim] = factor * np.array(table.data[col:col+dim],'d') - outputAlive = table.data_write() # output processed line - -# ------------------------------------------ output finalization ----------------------------------- - - table.close() # close ASCII tables + table.to_ASCII(sys.stdout if name is None else name) diff --git a/processing/post/shiftData.py b/processing/post/shiftData.py index 69a9696fa..57b20fbd0 100755 --- a/processing/post/shiftData.py +++ b/processing/post/shiftData.py @@ -2,10 +2,9 @@ import os import sys +from io import StringIO from optparse import OptionParser -import numpy as np - import damask @@ -23,7 +22,7 @@ Uniformly shift column values by given offset. 
""", version = scriptID) parser.add_option('-l','--label', - dest = 'label', + dest = 'labels', action = 'extend', metavar = '', help ='column(s) to shift') parser.add_option('-o','--offset', @@ -32,61 +31,21 @@ parser.add_option('-o','--offset', help = 'offset(s) per column') parser.set_defaults(label = [], - ) + offset = []) (options,filenames) = parser.parse_args() - -if len(options.label) != len(options.offset): - parser.error('number of column labels and offsets do not match.') - -# --- loop over input files ------------------------------------------------------------------------- - if filenames == []: filenames = [None] +if len(options.labels) != len(options.offset): + parser.error('number of column labels and offsets do not match.') + for name in filenames: - try: - table = damask.ASCIItable(name = name, - buffered = False) - except: continue - damask.util.report(scriptName,name) + damask.util.report(scriptName,name) -# ------------------------------------------ read header ------------------------------------------ + table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name) + for i,label in enumerate(options.labels): + table.set(label, + table.get(label)+float(options.offset[i]), + scriptID+' '+' '.join(sys.argv[1:])) - table.head_read() - - errors = [] - remarks = [] - columns = [] - dims = [] - offsets = [] - - for what,offset in zip(options.label,options.offset): - col = table.label_index(what) - if col < 0: remarks.append('column {} not found...'.format(what,type)) - else: - columns.append(col) - offsets.append(float(offset)) - dims.append(table.label_dimension(what)) - - if remarks != []: damask.util.croak(remarks) - if errors != []: - damask.util.croak(errors) - table.close(dismiss = True) - continue - -# ------------------------------------------ assemble header --------------------------------------- - - table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:])) - table.head_write() - -# ------------------------------------------ process data ------------------------------------------ - - outputAlive = True - while outputAlive and table.data_read(): # read next data line of ASCII table - for col,dim,offset in zip(columns,dims,offsets): # loop over items - table.data[col:col+dim] = offset + np.array(table.data[col:col+dim],'d') - outputAlive = table.data_write() # output processed line - -# ------------------------------------------ output finalization ----------------------------------- - - table.close() # close ASCII tables + table.to_ASCII(sys.stdout if name is None else name) diff --git a/processing/post/sortTable.py b/processing/post/sortTable.py index 53a357226..3a3738d18 100755 --- a/processing/post/sortTable.py +++ b/processing/post/sortTable.py @@ -2,10 +2,9 @@ import os import sys +from io import StringIO from optparse import OptionParser -import numpy as np - import damask @@ -26,7 +25,7 @@ With coordinates in columns "x", "y", and "z"; sorting with x slowest and z fast parser.add_option('-l','--label', - dest = 'keys', + dest = 'labels', action = 'extend', metavar = '', help = 'list of column labels (a,b,c,...)') parser.add_option('-r','--reverse', @@ -38,42 +37,14 @@ parser.set_defaults(reverse = False, ) (options,filenames) = parser.parse_args() - - -# --- loop over input files ------------------------------------------------------------------------- - if filenames == []: filenames = [None] +if options.labels is None: + parser.error('no labels specified.') for name in filenames: - try: table = damask.ASCIItable(name = name, 
- buffered = False) - except: continue - damask.util.report(scriptName,name) + damask.util.report(scriptName,name) -# ------------------------------------------ assemble header --------------------------------------- + table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name) + table.sort_by(options.labels,not options.reverse) - table.head_read() - table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:])) - table.head_write() - -# ------------------------------------------ process data --------------------------------------- - - table.data_readArray() - - keys = table.labels(raw = True)[::-1] if options.keys is None else options.keys[::-1] # numpy sorts with most significant column as last - - cols = [] - remarks = [] - for i,column in enumerate(table.label_index(keys)): - if column < 0: remarks.append('label "{}" not present...'.format(keys[i])) - else: cols += [table.data[:,column]] - if remarks != []: damask.util.croak(remarks) - - ind = np.lexsort(cols) if cols != [] else np.arange(table.data.shape[0]) - if options.reverse: ind = ind[::-1] - -# ------------------------------------------ output result --------------------------------------- - - table.data = table.data[ind] - table.data_writeArray() - table.close() # close ASCII table + table.to_ASCII(sys.stdout if name is None else name) diff --git a/processing/post/vtk_addGridData.py b/processing/post/vtk_addGridData.py index b9beb6abe..8e76cfca8 100755 --- a/processing/post/vtk_addGridData.py +++ b/processing/post/vtk_addGridData.py @@ -1,8 +1,9 @@ #!/usr/bin/env python3 import os +import sys +from io import StringIO from optparse import OptionParser -from collections import defaultdict import vtk from vtk.util import numpy_support @@ -18,11 +19,10 @@ scriptID = ' '.join([scriptName,damask.version]) # MAIN # -------------------------------------------------------------------- -msg = "Add scalars, vectors, and/or an RGB tuple from" -msg += "an ASCIItable to existing VTK grid (.vtr/.vtk/.vtu)." 
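
The sortTable.py hunk above shrinks to a single sort_by call. A sketch of the resulting behaviour, assuming the positional signature Table.sort_by(labels, ascending) implied by the call table.sort_by(options.labels, not options.reverse); the column names are examples only:

    import sys
    from io import StringIO
    import damask

    table = damask.Table.from_ASCII(StringIO(sys.stdin.read()))  # table piped in on stdin
    table.sort_by(['x','y','z'], True)                           # second argument: ascending order (assumed)
    table.to_ASCII(sys.stdout)                                    # emit the sorted table
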
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', - description = msg, + description = "Add scalars, vectors, tensors, and/or an RGB tuple from ASCIItable " + + "to existing VTK grid (.vtr/.vtk/.vtu).", version = scriptID) parser.add_option( '--vtk', @@ -49,10 +49,10 @@ parser.add_option('-c', '--color', parser.set_defaults(data = [], tensor = [], color = [], - render = False, ) (options, filenames) = parser.parse_args() +if filenames == []: filenames = [None] if not options.vtk: parser.error('No VTK file specified.') if not os.path.exists(options.vtk): parser.error('VTK file does not exist.') @@ -87,65 +87,28 @@ Ncells = rGrid.GetNumberOfCells() damask.util.croak('{}: {} points and {} cells...'.format(options.vtk,Npoints,Ncells)) -# --- loop over input files ------------------------------------------------------------------------- - -if filenames == []: filenames = [None] - for name in filenames: - try: table = damask.ASCIItable(name = name, - buffered = False, - readonly = True) - except: continue - damask.util.report(scriptName, name) + damask.util.report(scriptName,name) -# --- interpret header ---------------------------------------------------------------------------- - - table.head_read() - - remarks = [] - errors = [] + table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name) + VTKarray = {} - active = defaultdict(list) + for data in options.data: + VTKarray[data] = numpy_support.numpy_to_vtk(table.get(data).copy(), + deep=True,array_type=vtk.VTK_DOUBLE) + VTKarray[data].SetName(data) + + for color in options.color: + VTKarray[color] = numpy_support.numpy_to_vtk((table.get(color)*255).astype(int).copy(), + deep=True,array_type=vtk.VTK_UNSIGNED_CHAR) + VTKarray[color].SetName(color) - for datatype,dimension,label in [['data',99,options.data], - ['tensor',9,options.tensor], - ['color' ,3,options.color], - ]: - for i,dim in enumerate(table.label_dimension(label)): - me = label[i] - if dim == -1: remarks.append('{} "{}" not found...'.format(datatype,me)) - elif dim > dimension: remarks.append('"{}" not of dimension {}...'.format(me,dimension)) - else: - remarks.append('adding {} "{}"...'.format(datatype,me)) - active[datatype].append(me) + for tensor in options.tensor: + data = damask.mechanics.symmetric(table.get(tensor).reshape((-1,3,3))).reshape((-1,9)) + VTKarray[tensor] = numpy_support.numpy_to_vtk(data.copy(), + deep=True,array_type=vtk.VTK_DOUBLE) + VTKarray[tensor].SetName(tensor) - if remarks != []: damask.util.croak(remarks) - if errors != []: - damask.util.croak(errors) - table.close(dismiss = True) - continue - -# ------------------------------------------ process data --------------------------------------- - - table.data_readArray([item for sublist in active.values() for item in sublist]) # read all requested data - - for datatype,labels in active.items(): # loop over scalar,color - for me in labels: # loop over all requested items - VTKtype = vtk.VTK_DOUBLE - VTKdata = table.data[:, table.label_indexrange(me)].copy() # copy to force contiguous layout - - if datatype == 'color': - VTKtype = vtk.VTK_UNSIGNED_CHAR - VTKdata = (VTKdata*255).astype(int) # translate to 0..255 UCHAR - elif datatype == 'tensor': - VTKdata[:,1] = VTKdata[:,3] = 0.5*(VTKdata[:,1]+VTKdata[:,3]) - VTKdata[:,2] = VTKdata[:,6] = 0.5*(VTKdata[:,2]+VTKdata[:,6]) - VTKdata[:,5] = VTKdata[:,7] = 0.5*(VTKdata[:,5]+VTKdata[:,7]) - - VTKarray[me] = numpy_support.numpy_to_vtk(num_array=VTKdata,deep=True,array_type=VTKtype) 
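
Both VTK scripts now rely on the same two-line conversion from a numpy column to a named VTK data array. A self-contained illustration using only numpy and VTK (the array contents and name are made up for the example):

    import numpy as np
    import vtk
    from vtk.util import numpy_support

    data = np.random.rand(8,3)                           # e.g. one vector per point or cell
    array = numpy_support.numpy_to_vtk(data.copy(),      # .copy() guarantees a contiguous buffer
                                       deep=True,
                                       array_type=vtk.VTK_DOUBLE)
    array.SetName('displacement')                        # label under which ParaView lists the data
    # attach exactly as in the script, e.g. rGrid.GetCellData().AddArray(array)
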
- VTKarray[me].SetName(me) - - table.close() # close input ASCII table # ------------------------------------------ add data --------------------------------------- @@ -157,16 +120,10 @@ for name in filenames: damask.util.croak('{} mode...'.format(mode)) - for datatype,labels in active.items(): # loop over scalar,color - if datatype == 'color': - if mode == 'cell': rGrid.GetCellData().SetScalars(VTKarray[active['color'][0]]) - elif mode == 'point': rGrid.GetPointData().SetScalars(VTKarray[active['color'][0]]) - for me in labels: # loop over all requested items - if mode == 'cell': rGrid.GetCellData().AddArray(VTKarray[me]) - elif mode == 'point': rGrid.GetPointData().AddArray(VTKarray[me]) - + for data in VTKarray: + if mode == 'cell': rGrid.GetCellData().AddArray(VTKarray[data]) + elif mode == 'point': rGrid.GetPointData().AddArray(VTKarray[data]) rGrid.Modified() - if vtk.VTK_MAJOR_VERSION <= 5: rGrid.Update() # ------------------------------------------ output result --------------------------------------- @@ -184,7 +141,7 @@ if options.render: actor.SetMapper(mapper) # Create the graphics structure. The renderer renders into the -# render window. The render window interactor captures mouse events +# render window. The render window interactively captures mouse events # and will perform appropriate camera or actor manipulation # depending on the nature of the events. diff --git a/processing/post/vtk_addPointCloudData.py b/processing/post/vtk_addPointCloudData.py index 96bacae8a..833bfc88e 100755 --- a/processing/post/vtk_addPointCloudData.py +++ b/processing/post/vtk_addPointCloudData.py @@ -1,8 +1,9 @@ #!/usr/bin/env python3 import os +import sys +from io import StringIO from optparse import OptionParser -from collections import defaultdict import vtk from vtk.util import numpy_support @@ -20,7 +21,8 @@ scriptID = ' '.join([scriptName,damask.version]) parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', - description = """Add scalar and RGB tuples from ASCIItable to existing VTK point cloud (.vtp).""", + description = "Add scalars, vectors, tensors, and/or an RGB tuple from ASCIItable " + + "VTK point cloud (.vtp).", version = scriptID) parser.add_option( '--vtk', @@ -39,9 +41,10 @@ parser.add_option('-t', '--tensor', dest = 'tensor', action = 'extend', metavar = '', help = 'tensor (3x3) value label(s)') -parser.add_option('-c', '--color', dest='color', action='extend', - metavar ='', - help = 'RGB color tuples') +parser.add_option('-c', '--color', + dest = 'color', + action = 'extend', metavar = '', + help = 'RGB color tuple label') parser.set_defaults(data = [], tensor = [], @@ -49,8 +52,9 @@ parser.set_defaults(data = [], ) (options, filenames) = parser.parse_args() +if filenames == []: filenames = [None] -if not options.vtk: parser.error('no VTK file specified.') +if not options.vtk: parser.error('No VTK file specified.') if not os.path.exists(options.vtk): parser.error('VTK file does not exist.') vtk_file,vtk_ext = os.path.splitext(options.vtk) @@ -77,81 +81,35 @@ if Npoints != Ncells or Npoints != Nvertices: damask.util.croak('{}: {} points/vertices/cells...'.format(options.vtk,Npoints)) -# --- loop over input files ------------------------------------------------------------------------- - -if filenames == []: filenames = [None] - for name in filenames: - try: table = damask.ASCIItable(name = name, - buffered = False, - readonly = True) - except: continue - damask.util.report(scriptName, name) + damask.util.report(scriptName,name) -# 
--- interpret header ---------------------------------------------------------------------------- - - table.head_read() - - remarks = [] - errors = [] + table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name) + VTKarray = {} - active = defaultdict(list) + for data in options.data: + VTKarray[data] = numpy_support.numpy_to_vtk(table.get(data).copy(), + deep=True,array_type=vtk.VTK_DOUBLE) + VTKarray[data].SetName(data) + + for color in options.color: + VTKarray[color] = numpy_support.numpy_to_vtk((table.get(color)*255).astype(int).copy(), + deep=True,array_type=vtk.VTK_UNSIGNED_CHAR) + VTKarray[color].SetName(color) - for datatype,dimension,label in [['data',0,options.data], - ['tensor',9,options.tensor], - ['color' ,3,options.color], - ]: - for i,dim in enumerate(table.label_dimension(label)): - me = label[i] - if dim == -1: remarks.append('{} "{}" not found...'.format(datatype,me)) - elif dimension > 0 \ - and dim != dimension: remarks.append('"{}" not of dimension {}...'.format(me,dimension)) - else: - remarks.append('adding {}{} "{}"...'.format(datatype if dim > 1 else 'scalar', - '' if dimension > 0 or dim == 1 else '[{}]'.format(dim), - me)) - active[datatype].append(me) - - if remarks != []: damask.util.croak(remarks) - if errors != []: - damask.util.croak(errors) - table.close(dismiss = True) - continue - -# --------------------------------------- process and add data ----------------------------------- - - table.data_readArray([item for sublist in active.values() for item in sublist]) # read all requested data - - for datatype,labels in active.items(): # loop over scalar,color - for me in labels: # loop over all requested items - VTKtype = vtk.VTK_DOUBLE - VTKdata = table.data[:, table.label_indexrange(me)].copy() # copy to force contiguous layout - - if datatype == 'color': - VTKtype = vtk.VTK_UNSIGNED_CHAR - VTKdata = (VTKdata*255).astype(int) # translate to 0..255 UCHAR - elif datatype == 'tensor': - VTKdata[:,1] = VTKdata[:,3] = 0.5*(VTKdata[:,1]+VTKdata[:,3]) - VTKdata[:,2] = VTKdata[:,6] = 0.5*(VTKdata[:,2]+VTKdata[:,6]) - VTKdata[:,5] = VTKdata[:,7] = 0.5*(VTKdata[:,5]+VTKdata[:,7]) - - VTKarray[me] = numpy_support.numpy_to_vtk(num_array=VTKdata,deep=True,array_type=VTKtype) - VTKarray[me].SetName(me) - - if datatype == 'color': - Polydata.GetPointData().SetScalars(VTKarray[me]) - Polydata.GetCellData().SetScalars(VTKarray[me]) - else: - Polydata.GetPointData().AddArray(VTKarray[me]) - Polydata.GetCellData().AddArray(VTKarray[me]) + for tensor in options.tensor: + data = damask.mechanics.symmetric(table.get(tensor).reshape((-1,3,3))).reshape((-1,9)) + VTKarray[tensor] = numpy_support.numpy_to_vtk(data.copy(), + deep=True,array_type=vtk.VTK_DOUBLE) + VTKarray[tensor].SetName(tensor) - table.input_close() # close input ASCII table + for data in VTKarray: + Polydata.GetPointData().AddArray(VTKarray[data]) + Polydata.Modified() # ------------------------------------------ output result --------------------------------------- - Polydata.Modified() - writer = vtk.vtkXMLPolyDataWriter() writer.SetDataModeToBinary() writer.SetCompressorTypeToZLib() diff --git a/processing/post/vtk_addRectilinearGridData.py b/processing/post/vtk_addRectilinearGridData.py deleted file mode 100755 index 6f5e44e35..000000000 --- a/processing/post/vtk_addRectilinearGridData.py +++ /dev/null @@ -1,199 +0,0 @@ -#!/usr/bin/env python3 - -import os -from optparse import OptionParser - -from collections import defaultdict - -import vtk -from vtk.util import numpy_support 
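
Every converted script reads its input with the same idiom, Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name), i.e. it accepts either a file name or piped stdin. A sketch of that idiom as a helper, assuming from_ASCII takes both a path and a file-like object as the calls above imply; note that ''.join(...) is redundant, since sys.stdin.read() already returns a single string:

    import sys
    from io import StringIO
    import damask

    def read_table(name=None):
        """Read a damask.Table from the named file, or from stdin if no name is given."""
        return damask.Table.from_ASCII(StringIO(sys.stdin.read()) if name is None else name)
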
- -import damask - - -scriptName = os.path.splitext(os.path.basename(__file__))[0] -scriptID = ' '.join([scriptName,damask.version]) - - -# -------------------------------------------------------------------- -# MAIN -# -------------------------------------------------------------------- - -msg = "Add scalars, vectors, and/or an RGB tuple from" -msg += "an ASCIItable to existing VTK rectilinear grid (.vtr/.vtk)." -parser = OptionParser(option_class=damask.extendableOption, - usage='%prog options [file[s]]', - description = msg, - version = scriptID) - -parser.add_option( '--vtk', - dest = 'vtk', - type = 'string', metavar = 'string', - help = 'VTK file name') -parser.add_option('-r', '--render', - dest = 'render', - action = 'store_true', - help = 'open output in VTK render window') -parser.add_option('-d', '--data', - dest = 'data', - action = 'extend', metavar = '', - help = 'scalar/vector value(s) label(s)') -parser.add_option('-t', '--tensor', - dest = 'tensor', - action = 'extend', metavar = '', - help = 'tensor (3x3) value label(s)') -parser.add_option('-c', '--color', - dest = 'color', - action = 'extend', metavar = '', - help = 'RGB color tuple label') - -parser.set_defaults(data = [], - tensor = [], - color = [], - render = False, -) - -(options, filenames) = parser.parse_args() - -if not options.vtk: parser.error('no VTK file specified.') -if not os.path.exists(options.vtk): parser.error('VTK file does not exist.') - -vtk_file,vtk_ext = os.path.splitext(options.vtk) -if vtk_ext == '.vtr': - reader = vtk.vtkXMLRectilinearGridReader() - reader.SetFileName(options.vtk) - reader.Update() - rGrid = reader.GetOutput() -elif vtk_ext == '.vtk': - reader = vtk.vtkGenericDataObjectReader() - reader.SetFileName(options.vtk) - reader.Update() - rGrid = reader.GetRectilinearGridOutput() -else: - parser.error('unsupported VTK file type extension.') - -Npoints = rGrid.GetNumberOfPoints() -Ncells = rGrid.GetNumberOfCells() - -damask.util.croak('{}: {} points and {} cells...'.format(options.vtk,Npoints,Ncells)) - -# --- loop over input files ------------------------------------------------------------------------- - -if filenames == []: filenames = [None] - -for name in filenames: - try: table = damask.ASCIItable(name = name, - buffered = False, - readonly = True) - except: continue - damask.util.report(scriptName, name) - -# --- interpret header ---------------------------------------------------------------------------- - - table.head_read() - - remarks = [] - errors = [] - VTKarray = {} - active = defaultdict(list) - - for datatype,dimension,label in [['data',0,options.data], - ['tensor',9,options.tensor], - ['color' ,3,options.color], - ]: - for i,dim in enumerate(table.label_dimension(label)): - me = label[i] - if dim == -1: remarks.append('{} "{}" not found...'.format(datatype,me)) - elif dimension > 0 \ - and dim != dimension: remarks.append('"{}" not of dimension {}...'.format(me,dimension)) - else: - remarks.append('adding {}{} "{}"...'.format(datatype if dim > 1 else 'scalar', - '' if dimension > 0 or dim == 1 else '[{}]'.format(dim), - me)) - active[datatype].append(me) - - if remarks != []: damask.util.croak(remarks) - if errors != []: - damask.util.croak(errors) - table.close(dismiss = True) - continue - -# ------------------------------------------ process data --------------------------------------- - - table.data_readArray([item for sublist in active.values() for item in sublist]) # read all requested data - - for datatype,labels in active.items(): # loop over scalar,color - for 
me in labels: # loop over all requested items - VTKtype = vtk.VTK_DOUBLE - VTKdata = table.data[:, table.label_indexrange(me)].copy() # copy to force contiguous layout - - if datatype == 'color': - VTKtype = vtk.VTK_UNSIGNED_CHAR - VTKdata = (VTKdata*255).astype(int) # translate to 0..255 UCHAR - elif datatype == 'tensor': - VTKdata[:,1] = VTKdata[:,3] = 0.5*(VTKdata[:,1]+VTKdata[:,3]) - VTKdata[:,2] = VTKdata[:,6] = 0.5*(VTKdata[:,2]+VTKdata[:,6]) - VTKdata[:,5] = VTKdata[:,7] = 0.5*(VTKdata[:,5]+VTKdata[:,7]) - - VTKarray[me] = numpy_support.numpy_to_vtk(num_array=VTKdata,deep=True,array_type=VTKtype) - VTKarray[me].SetName(me) - - table.close() # close input ASCII table - -# ------------------------------------------ add data --------------------------------------- - - if len(table.data) == Npoints: mode = 'point' - elif len(table.data) == Ncells: mode = 'cell' - else: - damask.util.croak('data count is incompatible with grid...') - continue - - damask.util.croak('{} mode...'.format(mode)) - - for datatype,labels in active.items(): # loop over scalar,color - if datatype == 'color': - if mode == 'cell': rGrid.GetCellData().SetScalars(VTKarray[active['color'][0]]) - elif mode == 'point': rGrid.GetPointData().SetScalars(VTKarray[active['color'][0]]) - for me in labels: # loop over all requested items - if mode == 'cell': rGrid.GetCellData().AddArray(VTKarray[me]) - elif mode == 'point': rGrid.GetPointData().AddArray(VTKarray[me]) - - rGrid.Modified() - -# ------------------------------------------ output result --------------------------------------- - - writer = vtk.vtkXMLRectilinearGridWriter() - writer.SetDataModeToBinary() - writer.SetCompressorTypeToZLib() - writer.SetFileName(vtk_file+'.'+writer.GetDefaultFileExtension()) - writer.SetInputData(rGrid) - writer.Write() - -# ------------------------------------------ render result --------------------------------------- - -if options.render: - mapper = vtk.vtkDataSetMapper() - mapper.SetInputData(rGrid) - actor = vtk.vtkActor() - actor.SetMapper(mapper) - -# Create the graphics structure. The renderer renders into the -# render window. The render window interactor captures mouse events -# and will perform appropriate camera or actor manipulation -# depending on the nature of the events. 
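
The removed block above symmetrizes each row-flattened 3x3 tensor by averaging its off-diagonal pairs; the rewritten scripts call damask.mechanics.symmetric on the array reshaped to (-1,3,3) instead. A numpy-only check that the two formulations coincide (symmetric is assumed to compute 0.5*(T + T^T), as its use in this patch suggests):

    import numpy as np

    T = np.random.rand(5,9)                      # five row-flattened 3x3 tensors
    old = T.copy()                               # removed formulation: average index pairs in place
    old[:,1] = old[:,3] = 0.5*(T[:,1]+T[:,3])
    old[:,2] = old[:,6] = 0.5*(T[:,2]+T[:,6])
    old[:,5] = old[:,7] = 0.5*(T[:,5]+T[:,7])
    new = 0.5*(T.reshape(-1,3,3) + T.reshape(-1,3,3).transpose(0,2,1))   # symmetrize directly
    assert np.allclose(old, new.reshape(-1,9))
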
- - ren = vtk.vtkRenderer() - - renWin = vtk.vtkRenderWindow() - renWin.AddRenderer(ren) - - ren.AddActor(actor) - ren.SetBackground(1, 1, 1) - renWin.SetSize(200, 200) - - iren = vtk.vtkRenderWindowInteractor() - iren.SetRenderWindow(renWin) - - iren.Initialize() - renWin.Render() - iren.Start() diff --git a/processing/post/vtk_pointCloud.py b/processing/post/vtk_pointCloud.py index 06aad0aca..44f719267 100755 --- a/processing/post/vtk_pointCloud.py +++ b/processing/post/vtk_pointCloud.py @@ -2,10 +2,10 @@ import os import sys +from io import StringIO from optparse import OptionParser import vtk -import numpy as np import damask @@ -33,49 +33,20 @@ parser.set_defaults(pos = 'pos', ) (options, filenames) = parser.parse_args() - -# --- loop over input files ------------------------------------------------------------------------- - if filenames == []: filenames = [None] for name in filenames: - try: table = damask.ASCIItable(name = name, - buffered = False, - readonly = True) - except: continue damask.util.report(scriptName,name) -# --- interpret header ---------------------------------------------------------------------------- - - table.head_read() - - errors = [] - remarks = [] - coordDim = table.label_dimension(options.pos) - if not 3 >= coordDim >= 1: errors.append('coordinates "{}" need to have one, two, or three dimensions.'.format(options.pos)) - elif coordDim < 3: remarks.append('appending {} dimension{} to coordinates "{}"...'.format(3-coordDim, - 's' if coordDim < 2 else '', - options.pos)) - - if remarks != []: damask.util.croak(remarks) - if errors != []: - damask.util.croak(errors) - table.close(dismiss=True) - continue + table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name) # ------------------------------------------ process data --------------------------------------- - table.data_readArray(options.pos) - if table.data.shape[1] < 3: - table.data = np.hstack((table.data, - np.zeros((table.data.shape[0], - 3-table.data.shape[1]),dtype='f'))) # fill coords up to 3D with zeros - Polydata = vtk.vtkPolyData() Points = vtk.vtkPoints() Vertices = vtk.vtkCellArray() - for p in table.data: + for p in table.get(options.pos): pointID = Points.InsertNextPoint(p) Vertices.InsertNextCell(1) Vertices.InsertCellPoint(pointID) @@ -104,5 +75,3 @@ for name in filenames: writer.Write() if name is None: sys.stdout.write(writer.GetOutputString()) - - table.close() diff --git a/processing/post/vtk_rectilinearGrid.py b/processing/post/vtk_rectilinearGrid.py index bb29a5d4c..2ccad6319 100755 --- a/processing/post/vtk_rectilinearGrid.py +++ b/processing/post/vtk_rectilinearGrid.py @@ -2,6 +2,7 @@ import os import sys +from io import StringIO from optparse import OptionParser import vtk @@ -40,48 +41,14 @@ parser.set_defaults(mode = 'cell', ) (options, filenames) = parser.parse_args() - -# --- loop over input files ------------------------------------------------------------------------- - if filenames == []: filenames = [None] for name in filenames: - try: table = damask.ASCIItable(name = name, - buffered = False, - labeled = True, - readonly = True, - ) - except: continue damask.util.report(scriptName,name) -# --- interpret header ---------------------------------------------------------------------------- - - table.head_read() - - remarks = [] - errors = [] - coordDim = table.label_dimension(options.pos) - if not 3 >= coordDim >= 1: errors.append('coordinates "{}" need to have one, two, or three dimensions.'.format(options.pos)) - elif coordDim < 3: 
remarks.append('appending {} dimension{} to coordinates "{}"...'.format(3-coordDim, - 's' if coordDim < 2 else '', - options.pos)) - - if remarks != []: damask.util.croak(remarks) - if errors != []: - damask.util.croak(errors) - table.close(dismiss=True) - continue - -# --------------- figure out size and grid --------------------------------------------------------- - - table.data_readArray(options.pos) - if table.data.shape[1] < 3: - table.data = np.hstack((table.data, - np.zeros((table.data.shape[0], - 3-table.data.shape[1]),dtype='f'))) # fill coords up to 3D with zeros - - coords = [np.unique(table.data[:,i]) for i in range(3)] + table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name) + coords = [np.unique(table.get(options.pos)[:,i]) for i in range(3)] if options.mode == 'cell': coords = [0.5 * np.array([3.0 * coords[i][0] - coords[i][0 + int(len(coords[i]) > 1)]] + \ [coords[i][j-1] + coords[i][j] for j in range(1,len(coords[i]))] + \ @@ -90,13 +57,6 @@ for name in filenames: grid = np.array(list(map(len,coords)),'i') N = grid.prod() if options.mode == 'point' else (grid-1).prod() - if N != len(table.data): - errors.append('data count {} does not match grid {}x{}x{}.'.format(N,*(grid - (options.mode == 'cell')) )) - if errors != []: - damask.util.croak(errors) - table.close(dismiss = True) - continue - # ------------------------------------------ process data --------------------------------------- rGrid = vtk.vtkRectilinearGrid() @@ -135,5 +95,3 @@ for name in filenames: writer.Write() if name is None: sys.stdout.write(writer.GetOutputString()) - - table.close() diff --git a/processing/pre/geom_fromDREAM3D.py b/processing/pre/geom_fromDREAM3D.py index 159793cd8..7d5b1442d 100755 --- a/processing/pre/geom_fromDREAM3D.py +++ b/processing/pre/geom_fromDREAM3D.py @@ -145,7 +145,6 @@ for name in filenames: config_header += [''] for i in range(np.nanmax(microstructure)): config_header += ['[{}{}]'.format(label,i+1), - 'crystallite 1', '(constituent)\tphase {}\ttexture {}\tfraction 1.0'.format(phase[i],i+1), ] diff --git a/processing/pre/geom_fromOsteonGeometry.py b/processing/pre/geom_fromOsteonGeometry.py index 499a8867f..627d92728 100755 --- a/processing/pre/geom_fromOsteonGeometry.py +++ b/processing/pre/geom_fromOsteonGeometry.py @@ -126,15 +126,12 @@ for i in range(3,np.max(microstructure)): config_header = ['', '[canal]', - 'crystallite 1', '(constituent)\tphase 1\ttexture 1\tfraction 1.0', '[interstitial]', - 'crystallite 1', '(constituent)\tphase 2\ttexture 2\tfraction 1.0' ] for i in range(3,np.max(microstructure)): config_header += ['[Point{}]'.format(i-2), - 'crystallite 1', '(constituent)\tphase 3\ttexture {}\tfraction 1.0'.format(i) ] diff --git a/processing/pre/geom_fromTable.py b/processing/pre/geom_fromTable.py index f513c4834..0879f8812 100755 --- a/processing/pre/geom_fromTable.py +++ b/processing/pre/geom_fromTable.py @@ -78,36 +78,15 @@ for name in filenames: table = damask.ASCIItable(name = name,readonly=True) table.head_read() # read ASCII header info -# ------------------------------------------ sanity checks --------------------------------------- - - coordDim = table.label_dimension(options.pos) - - errors = [] - if not 3 >= coordDim >= 2: - errors.append('coordinates "{}" need to have two or three dimensions.'.format(options.pos)) - if not np.all(table.label_dimension(label) == dim): - errors.append('input "{}" needs to have dimension {}.'.format(label,dim)) - if options.phase and table.label_dimension(options.phase) != 1: 
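
In vtk_rectilinearGrid.py above, the 'cell' branch converts per-axis cell-centre coordinates into cell-boundary coordinates: interior boundaries are midpoints between neighbouring centres, and the outer boundaries are extrapolated by half a cell (the visible code handles the lower boundary; the upper boundary is assumed to mirror it). A numpy-only illustration with made-up, uniformly spaced centres:

    import numpy as np

    centers = np.array([0.5, 1.5, 2.5])                              # three cells of width 1
    bounds  = 0.5*np.concatenate(([3.0*centers[0]  - centers[1]],    # lower outer boundary
                                  centers[:-1] + centers[1:],        # interior midpoints
                                  [3.0*centers[-1] - centers[-2]]))  # upper outer boundary
    # bounds == [0., 1., 2., 3.]: four boundaries delimit the three cells of the rectilinear grid
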
- errors.append('phase column "{}" is not scalar.'.format(options.phase)) - - if errors != []: - damask.util.croak(errors) - continue table.data_readArray([options.pos] \ + (label if isinstance(label, list) else [label]) \ + ([options.phase] if options.phase else [])) - if coordDim == 2: - table.data = np.insert(table.data,2,np.zeros(len(table.data)),axis=1) # add zero z coordinate for two-dimensional input if options.phase is None: table.data = np.column_stack((table.data,np.ones(len(table.data)))) # add single phase if no phase column given - grid,size = damask.util.coordGridAndSize(table.data[:,0:3]) - coords = [np.unique(table.data[:,i]) for i in range(3)] - mincorner = np.array(list(map(min,coords))) - origin = mincorner - 0.5*size/grid # shift from cell center to corner - + grid,size,origin = damask.grid_filters.cell_coord0_gridSizeOrigin(table.data[:,0:3]) indices = np.lexsort((table.data[:,0],table.data[:,1],table.data[:,2])) # indices of position when sorting x fast, z slow microstructure = np.empty(grid,dtype = int) # initialize empty microstructure @@ -142,7 +121,6 @@ for name in filenames: config_header += [''] for i,data in enumerate(unique): config_header += ['[Grain{}]'.format(i+1), - 'crystallite 1', '(constituent)\tphase {}\ttexture {}\tfraction 1.0'.format(int(data[4]),i+1), ] diff --git a/processing/pre/geom_fromVoronoiTessellation.py b/processing/pre/geom_fromVoronoiTessellation.py index 28e215f85..d5ec43701 100755 --- a/processing/pre/geom_fromVoronoiTessellation.py +++ b/processing/pre/geom_fromVoronoiTessellation.py @@ -290,7 +290,6 @@ for name in filenames: config_header += [''] for ID in grainIDs: config_header += ['[Grain{}]'.format(ID), - 'crystallite 1', '(constituent)\tphase {}\ttexture {}\tfraction 1.0'.format(options.phase,ID) ] diff --git a/processing/pre/geom_toTable.py b/processing/pre/geom_toTable.py index ed33d9c85..494bbaffa 100755 --- a/processing/pre/geom_toTable.py +++ b/processing/pre/geom_toTable.py @@ -2,10 +2,8 @@ import os import sys -from optparse import OptionParser from io import StringIO - -import numpy as np +from optparse import OptionParser import damask @@ -24,38 +22,25 @@ Translate geom description into ASCIItable containing position and microstructur """, version = scriptID) (options, filenames) = parser.parse_args() - - if filenames == []: filenames = [None] for name in filenames: - damask.util.report(scriptName,name) + damask.util.report(scriptName,name) - geom = damask.Geom.from_file(StringIO(''.join(sys.stdin.read())) if name is None else name) + geom = damask.Geom.from_file(StringIO(''.join(sys.stdin.read())) if name is None else name) + damask.util.croak(geom) - damask.util.croak(geom) + coord0 = damask.grid_filters.cell_coord0(geom.grid,geom.size,geom.origin).reshape((-1,3)) -# --- generate grid -------------------------------------------------------------------------------- + comments = geom.comments \ + + [scriptID + ' ' + ' '.join(sys.argv[1:]), + "grid\ta {}\tb {}\tc {}".format(*geom.grid), + "size\tx {}\ty {}\tz {}".format(*geom.size), + "origin\tx {}\ty {}\tz {}".format(*geom.origin), + "homogenization\t{}".format(geom.homogenization)] - grid = geom.get_grid() - size = geom.get_size() - origin = geom.get_origin() + table = damask.Table(coord0,{'pos':(3,)},comments) + table.add('microstructure',geom.microstructure.reshape((-1,1),order='F')) - x = (0.5 + np.arange(grid[0],dtype=float))/grid[0]*size[0]+origin[0] - y = (0.5 + np.arange(grid[1],dtype=float))/grid[1]*size[1]+origin[1] - z = (0.5 + 
np.arange(grid[2],dtype=float))/grid[2]*size[2]+origin[2] - - xx = np.tile( x, grid[1]* grid[2]) - yy = np.tile(np.repeat(y,grid[0] ),grid[2]) - zz = np.repeat(z,grid[0]*grid[1]) - -# --- create ASCII table -------------------------------------------------------------------------- - - table = damask.ASCIItable(outname = os.path.splitext(name)[0]+'.txt' if name else name) - table.info_append(geom.get_comments() + [scriptID + '\t' + ' '.join(sys.argv[1:])]) - table.labels_append(['{}_{}'.format(1+i,'pos') for i in range(3)]+['microstructure']) - table.head_write() - table.output_flush() - table.data = np.squeeze(np.dstack((xx,yy,zz,geom.microstructure.flatten('F'))),axis=0) - table.data_writeArray() - table.close() + table.to_ASCII(sys.stdout if name is None else \ + os.path.splitext(name)[0]+'.txt') diff --git a/processing/pre/hybridIA_linODFsampling.py b/processing/pre/hybridIA_linODFsampling.py index cf1a473cf..80d82a458 100755 --- a/processing/pre/hybridIA_linODFsampling.py +++ b/processing/pre/hybridIA_linODFsampling.py @@ -19,7 +19,7 @@ def integerFactorization(i): return j def binAsBins(bin,intervals): - """Explode compound bin into 3D bins list""" + """Explode compound bin into 3D bins list.""" bins = [0]*3 bins[0] = (bin//(intervals[1] * intervals[2])) % intervals[0] bins[1] = (bin//intervals[2]) % intervals[1] @@ -27,17 +27,17 @@ def binAsBins(bin,intervals): return bins def binsAsBin(bins,intervals): - """Implode 3D bins into compound bin""" + """Implode 3D bins into compound bin.""" return (bins[0]*intervals[1] + bins[1])*intervals[2] + bins[2] def EulersAsBins(Eulers,intervals,deltas,center): - """Return list of Eulers translated into 3D bins list""" + """Return list of Eulers translated into 3D bins list.""" return [int((euler+(0.5-center)*delta)//delta)%interval \ for euler,delta,interval in zip(Eulers,deltas,intervals) \ ] def binAsEulers(bin,intervals,deltas,center): - """Compound bin number translated into list of Eulers""" + """Compound bin number translated into list of Eulers.""" Eulers = [0.0]*3 Eulers[2] = (bin%intervals[2] + center)*deltas[2] Eulers[1] = (bin//intervals[2]%intervals[1] + center)*deltas[1] @@ -45,7 +45,7 @@ def binAsEulers(bin,intervals,deltas,center): return Eulers def directInvRepetitions(probability,scale): - """Calculate number of samples drawn by direct inversion""" + """Calculate number of samples drawn by direct inversion.""" nDirectInv = 0 for bin in range(len(probability)): # loop over bins nDirectInv += int(round(probability[bin]*scale)) # calc repetition @@ -56,7 +56,7 @@ def directInvRepetitions(probability,scale): # ----- efficient algorithm --------- def directInversion (ODF,nSamples): - """ODF contains 'dV_V' (normalized to 1), 'center', 'intervals', 'limits' (in radians)""" + """ODF contains 'dV_V' (normalized to 1), 'center', 'intervals', 'limits' (in radians).""" nOptSamples = max(ODF['nNonZero'],nSamples) # random subsampling if too little samples requested nInvSamples = 0 @@ -118,7 +118,7 @@ def directInversion (ODF,nSamples): # ----- trial and error algorithms --------- def MonteCarloEulers (ODF,nSamples): - """ODF contains 'dV_V' (normalized to 1), 'center', 'intervals', 'limits' (in radians)""" + """ODF contains 'dV_V' (normalized to 1), 'center', 'intervals', 'limits' (in radians).""" countMC = 0 maxdV_V = max(ODF['dV_V']) orientations = np.zeros((nSamples,3),'f') @@ -141,7 +141,7 @@ def MonteCarloEulers (ODF,nSamples): def MonteCarloBins (ODF,nSamples): - """ODF contains 'dV_V' (normalized to 1), 'center', 'intervals', 'limits' 
(in radians)""" + """ODF contains 'dV_V' (normalized to 1), 'center', 'intervals', 'limits' (in radians).""" countMC = 0 maxdV_V = max(ODF['dV_V']) orientations = np.zeros((nSamples,3),'f') @@ -163,7 +163,7 @@ def MonteCarloBins (ODF,nSamples): def TothVanHoutteSTAT (ODF,nSamples): - """ODF contains 'dV_V' (normalized to 1), 'center', 'intervals', 'limits' (in radians)""" + """ODF contains 'dV_V' (normalized to 1), 'center', 'intervals', 'limits' (in radians).""" orientations = np.zeros((nSamples,3),'f') reconstructedODF = np.zeros(ODF['nBins'],'f') unitInc = 1.0/nSamples @@ -211,10 +211,6 @@ parser.add_option('-p','--phase', dest = 'phase', type = 'int', metavar = 'int', help = 'phase index to be used [%default]') -parser.add_option('--crystallite', - dest = 'crystallite', - type = 'int', metavar = 'int', - help = 'crystallite index to be used [%default]') parser.add_option('-r', '--rnd', dest = 'randomSeed', type = 'int', metavar = 'int', \ @@ -223,7 +219,6 @@ parser.set_defaults(randomSeed = None, number = 500, algorithm = 'IA', phase = 1, - crystallite = 1, ang = True, ) @@ -240,7 +235,7 @@ if filenames == []: filenames = [None] for name in filenames: try: table = damask.ASCIItable(name = name, buffered = False, readonly=True) - except: + except IOError: continue damask.util.report(scriptName,name) @@ -351,7 +346,6 @@ for name in filenames: for i,ID in enumerate(range(nSamples)): materialConfig += ['[Grain%s]'%(str(ID+1).zfill(formatwidth)), - 'crystallite %i'%options.crystallite, '(constituent) phase %i texture %s fraction 1.0'%(options.phase,str(ID+1).rjust(formatwidth)), ] diff --git a/processing/pre/mentat_spectralBox.py b/processing/pre/mentat_spectralBox.py index 89f4a7a43..a61bef57a 100755 --- a/processing/pre/mentat_spectralBox.py +++ b/processing/pre/mentat_spectralBox.py @@ -1,9 +1,10 @@ #!/usr/bin/env python3 -# -*- coding: UTF-8 no BOM -*- -import os,sys -import numpy as np +import os +import sys +from io import StringIO from optparse import OptionParser + import damask scriptName = os.path.splitext(os.path.basename(__file__))[0] @@ -191,78 +192,45 @@ parser.add_option('-p', '--port', dest = 'port', type = 'int', metavar = 'int', help = 'Mentat connection port [%default]') -parser.add_option('--homogenization', - dest = 'homogenization', - type = 'int', metavar = 'int', - help = 'homogenization index to be used [auto]') -parser.set_defaults(port = None, - homogenization = None, -) +parser.set_defaults(port = None, + ) (options, filenames) = parser.parse_args() -if options.port: - try: - import py_mentat - except: - parser.error('no valid Mentat release found.') +if options.port is not None: + try: + import py_mentat + except ImportError: + parser.error('no valid Mentat release found.') # --- loop over input files ------------------------------------------------------------------------ if filenames == []: filenames = [None] for name in filenames: - try: - table = damask.ASCIItable(name = name, - outname = os.path.splitext(name)[0]+'.proc' if name else name, - buffered = False, labeled = False) - except: continue - damask.util.report(scriptName,name) - -# --- interpret header ---------------------------------------------------------------------------- - - table.head_read() - info,extra_header = table.head_getGeom() - if options.homogenization: info['homogenization'] = options.homogenization + damask.util.report(scriptName,name) - damask.util.croak(['grid a b c: %s'%(' x '.join(map(str,info['grid']))), - 'size x y z: %s'%(' x '.join(map(str,info['size']))), - 'origin x y z: 
%s'%(' : '.join(map(str,info['origin']))), - 'homogenization: %i'%info['homogenization'], - 'microstructures: %i'%info['microstructures'], - ]) + geom = damask.Geom.from_file(StringIO(''.join(sys.stdin.read())) if name is None else name) + microstructure = geom.get_microstructure().flatten(order='F') - errors = [] - if np.any(info['grid'] < 1): errors.append('invalid grid a b c.') - if np.any(info['size'] <= 0.0): errors.append('invalid size x y z.') - if errors != []: - damask.util.croak(errors) - table.close(dismiss = True) - continue - -# --- read data ------------------------------------------------------------------------------------ - - microstructure = table.microstructure_read(info['grid']).reshape(info['grid'].prod(),order='F') # read microstructure - - cmds = [\ - init(), - mesh(info['grid'],info['size']), - material(), - geometry(), - initial_conditions(info['homogenization'],microstructure), - '*identify_sets', - '*show_model', - '*redraw', - '*draw_automatic', - ] - - outputLocals = {} - if options.port: - py_mentat.py_connect('',options.port) - output(cmds,outputLocals,'Mentat') - py_mentat.py_disconnect() - else: - output(cmds,outputLocals,table.__IO__['out']) # bad hack into internals of table class... - - table.close() + cmds = [\ + init(), + mesh(geom.grid,geom.size), + material(), + geometry(), + initial_conditions(geom.homogenization,microstructure), + '*identify_sets', + '*show_model', + '*redraw', + '*draw_automatic', + ] + + outputLocals = {} + if options.port: + py_mentat.py_connect('',options.port) + output(cmds,outputLocals,'Mentat') + py_mentat.py_disconnect() + else: + with sys.stdout if name is None else open(os.path.splitext(name)[0]+'.proc','w') as f: + output(cmds,outputLocals,f) diff --git a/processing/pre/patchFromReconstructedBoundaries.py b/processing/pre/patchFromReconstructedBoundaries.py index fabec0fdf..b710fb2cb 100755 --- a/processing/pre/patchFromReconstructedBoundaries.py +++ b/processing/pre/patchFromReconstructedBoundaries.py @@ -78,13 +78,11 @@ def rcbOrientationParser(content,idcolumn): damask.util.croak('You might not have chosen the correct column for the grain IDs! '+ 'Please check the "--id" option.') raise - except: - raise return grains def rcbParser(content,M,size,tolerance,idcolumn,segmentcolumn): - """Parser for TSL-OIM reconstructed boundary files""" + """Parser for TSL-OIM reconstructed boundary files.""" # find bounding box boxX = [1.*sys.maxint,-1.*sys.maxint] boxY = [1.*sys.maxint,-1.*sys.maxint] @@ -99,8 +97,6 @@ def rcbParser(content,M,size,tolerance,idcolumn,segmentcolumn): damask.util.croak('You might not have chosen the correct column for the segment end points! 
'+ 'Please check the "--segment" option.') raise - except: - raise (x[0],y[0]) = (M[0]*x[0]+M[1]*y[0],M[2]*x[0]+M[3]*y[0]) # apply transformation to coordinates (x[1],y[1]) = (M[0]*x[1]+M[1]*y[1],M[2]*x[1]+M[3]*y[1]) # to get rcb --> Euler system boxX[0] = min(boxX[0],x[0],x[1]) @@ -728,7 +724,7 @@ def image(name,imgsize,marginX,marginY,rcData): # ------------------------- def inside(x,y,points): - """Tests whether point(x,y) is within polygon described by points""" + """Tests whether point(x,y) is within polygon described by points.""" inside = False npoints=len(points) (x1,y1) = points[npoints-1] # start with last point of points @@ -750,7 +746,7 @@ def inside(x,y,points): # ------------------------- def fftbuild(rcData,height,xframe,yframe,grid,extrusion): - """Build array of grain numbers""" + """Build array of grain numbers.""" maxX = -1.*sys.maxint maxY = -1.*sys.maxint for line in rcData['point']: # find data range @@ -883,7 +879,7 @@ try: boundaryFile = open(args[0]) boundarySegments = boundaryFile.readlines() boundaryFile.close() -except: +except IOError: damask.util.croak('unable to read boundary file "{}".'.format(args[0])) raise @@ -941,19 +937,15 @@ if any(output in options.output for output in ['spectral','mentat']): for i,grain in enumerate(rcData['grainMapping']): config+=['[grain{}]'.format(grain), - 'crystallite\t1', '(constituent)\tphase 1\ttexture {}\tfraction 1.0'.format(i+1)] if (options.xmargin > 0.0): config+=['[x-margin]', - 'crystallite\t1', '(constituent)\tphase 2\ttexture {}\tfraction 1.0\n'.format(len(rcData['grainMapping'])+1)] if (options.ymargin > 0.0): config+=['[y-margin]', - 'crystallite\t1', '(constituent)\tphase 2\ttexture {}\tfraction 1.0\n'.format(len(rcData['grainMapping'])+1)] if (options.xmargin > 0.0 and options.ymargin > 0.0): config+=['[xy-margin]', - 'crystallite\t1', '(constituent)\tphase 2\ttexture {}\tfraction 1.0\n'.format(len(rcData['grainMapping'])+1)] if (options.xmargin > 0.0 or options.ymargin > 0.0): diff --git a/processing/pre/seeds_check.sh b/processing/pre/seeds_check.sh index 025c9eb90..502a19024 100755 --- a/processing/pre/seeds_check.sh +++ b/processing/pre/seeds_check.sh @@ -6,7 +6,6 @@ do vtk_addPointCloudData $seeds \ --data microstructure,weight \ - --inplace \ --vtk ${seeds%.*}.vtp \ done diff --git a/processing/pre/seeds_fromGeom.py b/processing/pre/seeds_fromGeom.py index 889ef6146..2118f049d 100755 --- a/processing/pre/seeds_fromGeom.py +++ b/processing/pre/seeds_fromGeom.py @@ -1,9 +1,12 @@ #!/usr/bin/env python3 -# -*- coding: UTF-8 no BOM -*- -import os,sys -import numpy as np +import os +import sys +from io import StringIO from optparse import OptionParser + +import numpy as np + import damask scriptName = os.path.splitext(os.path.basename(__file__))[0] @@ -29,88 +32,39 @@ parser.add_option('-b', action = 'extend', metavar = '', dest = 'blacklist', help = 'blacklist of grain IDs') -parser.add_option('-p', - '--pos', '--seedposition', - dest = 'pos', - type = 'string', metavar = 'string', - help = 'label of coordinates [%default]') parser.set_defaults(whitelist = [], blacklist = [], - pos = 'pos', ) (options,filenames) = parser.parse_args() - -options.whitelist = list(map(int,options.whitelist)) -options.blacklist = list(map(int,options.blacklist)) - -# --- loop over output files ------------------------------------------------------------------------- - if filenames == []: filenames = [None] +options.whitelist = [int(i) for i in options.whitelist] +options.blacklist = [int(i) for i in options.blacklist] + for name in 
filenames: - try: table = damask.ASCIItable(name = name, - outname = os.path.splitext(name)[0]+'.seeds' if name else name, - buffered = False, - labeled = False) - except: continue - damask.util.report(scriptName,name) + damask.util.report(scriptName,name) + + geom = damask.Geom.from_file(StringIO(''.join(sys.stdin.read())) if name is None else name) + microstructure = geom.get_microstructure().reshape((-1,1),order='F') -# --- interpret header ---------------------------------------------------------------------------- + mask = np.logical_and(np.in1d(microstructure,options.whitelist,invert=False) if options.whitelist else \ + np.full(geom.grid.prod(),True,dtype=bool), + np.in1d(microstructure,options.blacklist,invert=True) if options.blacklist else \ + np.full(geom.grid.prod(),True,dtype=bool)) + + seeds = np.concatenate((damask.grid_filters.cell_coord0(geom.grid,geom.size).reshape((-1,3)), + microstructure), + axis=1)[mask] + + comments = geom.comments \ + + [scriptID + ' ' + ' '.join(sys.argv[1:]), + "grid\ta {}\tb {}\tc {}".format(*geom.grid), + "size\tx {}\ty {}\tz {}".format(*geom.size), + "origin\tx {}\ty {}\tz {}".format(*geom.origin), + "homogenization\t{}".format(geom.homogenization)] - table.head_read() - info,extra_header = table.head_getGeom() - damask.util.report_geom(info) - - errors = [] - if np.any(info['grid'] < 1): errors.append('invalid grid a b c.') - if np.any(info['size'] <= 0.0): errors.append('invalid size x y z.') - if errors != []: - damask.util.croak(errors) - table.close(dismiss = True) - continue - -# --- read data ------------------------------------------------------------------------------------ - - microstructure = table.microstructure_read(info['grid']) # read (linear) microstructure - -# --- generate grid -------------------------------------------------------------------------------- - - x = (0.5 + np.arange(info['grid'][0],dtype=float))/info['grid'][0]*info['size'][0]+info['origin'][0] - y = (0.5 + np.arange(info['grid'][1],dtype=float))/info['grid'][1]*info['size'][1]+info['origin'][1] - z = (0.5 + np.arange(info['grid'][2],dtype=float))/info['grid'][2]*info['size'][2]+info['origin'][2] - - xx = np.tile( x, info['grid'][1]* info['grid'][2]) - yy = np.tile(np.repeat(y,info['grid'][0] ),info['grid'][2]) - zz = np.repeat(z,info['grid'][0]*info['grid'][1]) - - mask = np.logical_and(np.in1d(microstructure,options.whitelist,invert=False) if options.whitelist != [] - else np.full_like(microstructure,True,dtype=bool), - np.in1d(microstructure,options.blacklist,invert=True ) if options.blacklist != [] - else np.full_like(microstructure,True,dtype=bool)) - -# ------------------------------------------ assemble header --------------------------------------- - - table.info_clear() - table.info_append(extra_header+[ - scriptID + ' ' + ' '.join(sys.argv[1:]), - "grid\ta {}\tb {}\tc {}".format(*info['grid']), - "size\tx {}\ty {}\tz {}".format(*info['size']), - "origin\tx {}\ty {}\tz {}".format(*info['origin']), - "homogenization\t{}".format(info['homogenization']), - "microstructures\t{}".format(info['microstructures']), - ]) - table.labels_clear() - table.labels_append(['{dim}_{label}'.format(dim = 1+i,label = options.pos) for i in range(3)]+['microstructure']) - table.head_write() - table.output_flush() - -# --- write seeds information ------------------------------------------------------------ - - table.data = np.squeeze(np.dstack((xx,yy,zz,microstructure)))[mask] - table.data_writeArray() - -# ------------------------------------------ finalize output 
--------------------------------------- - - table.close() + table = damask.Table(seeds,{'pos':(3,),'microstructure':(1,)},comments) + table.to_ASCII(sys.stdout if name is None else \ + os.path.splitext(name)[0]+'.seeds') diff --git a/processing/pre/seeds_fromPokes.py b/processing/pre/seeds_fromPokes.py index 08e600ffe..1436841d0 100755 --- a/processing/pre/seeds_fromPokes.py +++ b/processing/pre/seeds_fromPokes.py @@ -1,11 +1,14 @@ #!/usr/bin/env python3 -# -*- coding: UTF-8 no BOM -*- -import os,math,sys -import numpy as np -import damask +import os +import sys +from io import StringIO from optparse import OptionParser +import numpy as np + +import damask + scriptName = os.path.splitext(os.path.basename(__file__))[0] scriptID = ' '.join([scriptName,damask.version]) @@ -35,117 +38,58 @@ parser.add_option('-y', action = 'store_true', dest = 'y', help = 'poke 45 deg along y') -parser.add_option('-p','--position', - dest = 'position', - type = 'string', metavar = 'string', - help = 'column label for coordinates [%default]') parser.set_defaults(x = False, y = False, box = [0.0,1.0,0.0,1.0,0.0,1.0], N = 16, - position = 'pos', ) (options,filenames) = parser.parse_args() +if filenames == []: filenames = [None] options.box = np.array(options.box).reshape(3,2) -# --- loop over output files ------------------------------------------------------------------------- - -if filenames == []: filenames = [None] - for name in filenames: - try: - table = damask.ASCIItable(name = name, - outname = os.path.splitext(name)[-2]+'_poked_{}.seeds'.format(options.N) if name else name, - buffered = False, labeled = False) - except: continue - damask.util.report(scriptName,name) - -# --- interpret header ---------------------------------------------------------------------------- - - table.head_read() - info,extra_header = table.head_getGeom() - - damask.util.croak(['grid a b c: %s'%(' x '.join(map(str,info['grid']))), - 'size x y z: %s'%(' x '.join(map(str,info['size']))), - 'origin x y z: %s'%(' : '.join(map(str,info['origin']))), - 'homogenization: %i'%info['homogenization'], - 'microstructures: %i'%info['microstructures'], - ]) - - errors = [] - if np.any(info['grid'] < 1): errors.append('invalid grid a b c.') - if np.any(info['size'] <= 0.0): errors.append('invalid size x y z.') - if errors != []: - damask.util.croak(errors) - table.close(dismiss = True) - continue - -# --- read data ------------------------------------------------------------------------------------ - - microstructure = table.microstructure_read(info['grid']).reshape(info['grid'],order='F') # read microstructure - -# --- do work ------------------------------------------------------------------------------------ - - newInfo = { - 'microstructures': 0, - } - offset = (np.amin(options.box, axis=1)*info['grid']/info['size']).astype(int) - box = np.amax(options.box, axis=1) - np.amin(options.box, axis=1) - - Nx = int(options.N/math.sqrt(options.N*info['size'][1]*box[1]/info['size'][0]/box[0])) - Ny = int(options.N/math.sqrt(options.N*info['size'][0]*box[0]/info['size'][1]/box[1])) - Nz = int(box[2]*info['grid'][2]) - - damask.util.croak('poking {} x {} x {} in box {} {} {}...'.format(Nx,Ny,Nz,*box)) - - seeds = np.zeros((Nx*Ny*Nz,4),'d') - grid = np.zeros(3,'i') - - n = 0 - for i in range(Nx): - for j in range(Ny): - grid[0] = round((i+0.5)*box[0]*info['grid'][0]/Nx-0.5)+offset[0] - grid[1] = round((j+0.5)*box[1]*info['grid'][1]/Ny-0.5)+offset[1] - for k in range(Nz): - grid[2] = k + offset[2] - grid %= info['grid'] - seeds[n,0:3] = 
(0.5+grid)/info['grid'] # normalize coordinates to box - seeds[n, 3] = microstructure[grid[0],grid[1],grid[2]] - if options.x: grid[0] += 1 - if options.y: grid[1] += 1 - n += 1 - - newInfo['microstructures'] = len(np.unique(seeds[:,3])) - -# --- report --------------------------------------------------------------------------------------- - if (newInfo['microstructures'] != info['microstructures']): - damask.util.croak('--> microstructures: %i'%newInfo['microstructures']) - - -# ------------------------------------------ assemble header --------------------------------------- - table.info_clear() - table.info_append(extra_header+[ - scriptID + ' ' + ' '.join(sys.argv[1:]), - "poking\ta {}\tb {}\tc {}".format(Nx,Ny,Nz), - "grid\ta {}\tb {}\tc {}".format(*info['grid']), - "size\tx {}\ty {}\tz {}".format(*info['size']), - "origin\tx {}\ty {}\tz {}".format(*info['origin']), - "homogenization\t{}".format(info['homogenization']), - "microstructures\t{}".format(newInfo['microstructures']), - ]) - table.labels_clear() - table.labels_append(['{dim}_{label}'.format(dim = 1+i,label = options.position) for i in range(3)]+['microstructure']) - table.head_write() - table.output_flush() - -# --- write seeds information ------------------------------------------------------------ - - table.data = seeds - table.data_writeArray() + damask.util.report(scriptName,name) + geom = damask.Geom.from_file(StringIO(''.join(sys.stdin.read())) if name is None else name) -# --- output finalization -------------------------------------------------------------------------- - - table.close() # close ASCII table + offset =(np.amin(options.box, axis=1)*geom.grid/geom.size).astype(int) + box = np.amax(options.box, axis=1) \ + - np.amin(options.box, axis=1) + + Nx = int(options.N/np.sqrt(options.N*geom.size[1]*box[1]/geom.size[0]/box[0])) + Ny = int(options.N/np.sqrt(options.N*geom.size[0]*box[0]/geom.size[1]/box[1])) + Nz = int(box[2]*geom.grid[2]) + + damask.util.croak('poking {} x {} x {} in box {} {} {}...'.format(Nx,Ny,Nz,*box)) + + seeds = np.zeros((Nx*Ny*Nz,4),'d') + g = np.zeros(3,'i') + + n = 0 + for i in range(Nx): + for j in range(Ny): + g[0] = round((i+0.5)*box[0]*geom.grid[0]/Nx-0.5)+offset[0] + g[1] = round((j+0.5)*box[1]*geom.grid[1]/Ny-0.5)+offset[1] + for k in range(Nz): + g[2] = k + offset[2] + g %= geom.grid + seeds[n,0:3] = (g+0.5)/geom.grid # normalize coordinates to box + seeds[n, 3] = geom.microstructure[g[0],g[1],g[2]] + if options.x: g[0] += 1 + if options.y: g[1] += 1 + n += 1 + + + comments = geom.comments \ + + [scriptID + ' ' + ' '.join(sys.argv[1:]), + "poking\ta {}\tb {}\tc {}".format(Nx,Ny,Nz), + "grid\ta {}\tb {}\tc {}".format(*geom.grid), + "size\tx {}\ty {}\tz {}".format(*geom.size), + "origin\tx {}\ty {}\tz {}".format(*geom.origin), + "homogenization\t{}".format(geom.homogenization)] + + table = damask.Table(seeds,{'pos':(3,),'microstructure':(1,)},comments) + table.to_ASCII(sys.stdout if name is None else \ + os.path.splitext(name)[0]+'_poked_{}.seeds'.format(options.N)) diff --git a/python/damask/__init__.py b/python/damask/__init__.py index f432ef056..77666561c 100644 --- a/python/damask/__init__.py +++ b/python/damask/__init__.py @@ -9,6 +9,7 @@ name = 'damask' # classes from .environment import Environment # noqa from .asciitable import ASCIItable # noqa +from .table import Table # noqa from .config import Material # noqa from .colormaps import Colormap, Color # noqa @@ -22,4 +23,5 @@ from .util import extendableOption # noqa # functions in modules from . 
import mechanics # noqa +from . import grid_filters # noqa diff --git a/python/damask/colormaps.py b/python/damask/colormaps.py index b38d47070..e4183e830 100644 --- a/python/damask/colormaps.py +++ b/python/damask/colormaps.py @@ -1,5 +1,3 @@ -import math - import numpy as np class Color(): @@ -328,11 +326,11 @@ class Color(): if self.model != 'CIELAB': return Msh = np.zeros(3,'d') - Msh[0] = math.sqrt(np.dot(self.color,self.color)) + Msh[0] = np.sqrt(np.dot(self.color,self.color)) if (Msh[0] > 0.001): - Msh[1] = math.acos(self.color[0]/Msh[0]) + Msh[1] = np.arccos(self.color[0]/Msh[0]) if (self.color[1] != 0.0): - Msh[2] = math.atan2(self.color[2],self.color[1]) + Msh[2] = np.arctan2(self.color[2],self.color[1]) converted = Color('MSH', Msh) self.model = converted.model @@ -349,9 +347,9 @@ class Color(): if self.model != 'MSH': return Lab = np.zeros(3,'d') - Lab[0] = self.color[0] * math.cos(self.color[1]) - Lab[1] = self.color[0] * math.sin(self.color[1]) * math.cos(self.color[2]) - Lab[2] = self.color[0] * math.sin(self.color[1]) * math.sin(self.color[2]) + Lab[0] = self.color[0] * np.cos(self.color[1]) + Lab[1] = self.color[0] * np.sin(self.color[1]) * np.cos(self.color[2]) + Lab[2] = self.color[0] * np.sin(self.color[1]) * np.sin(self.color[2]) converted = Color('CIELAB', Lab) self.model = converted.model @@ -476,14 +474,14 @@ class Colormap(): if Msh_sat[0] >= Msh_unsat[0]: return Msh_sat[2] else: - hSpin = Msh_sat[1]/math.sin(Msh_sat[1])*math.sqrt(Msh_unsat[0]**2.0-Msh_sat[0]**2)/Msh_sat[0] - if Msh_sat[2] < - math.pi/3.0: hSpin *= -1.0 + hSpin = Msh_sat[1]/np.sin(Msh_sat[1])*np.sqrt(Msh_unsat[0]**2.0-Msh_sat[0]**2)/Msh_sat[0] + if Msh_sat[2] < - np.pi/3.0: hSpin *= -1.0 return Msh_sat[2] + hSpin Msh1 = np.array(lo[:]) Msh2 = np.array(hi[:]) - if (Msh1[1] > 0.05 and Msh2[1] > 0.05 and rad_diff(Msh1,Msh2) > math.pi/3.0): + if (Msh1[1] > 0.05 and Msh2[1] > 0.05 and rad_diff(Msh1,Msh2) > np.pi/3.0): M_mid = max(Msh1[0],Msh2[0],88.0) if frac < 0.5: Msh2 = np.array([M_mid,0.0,0.0],'d') diff --git a/python/damask/dadf5.py b/python/damask/dadf5.py index d879946eb..5c9ebe08e 100644 --- a/python/damask/dadf5.py +++ b/python/damask/dadf5.py @@ -1,7 +1,10 @@ from queue import Queue import re import glob +import os +import vtk +from vtk.util import numpy_support import h5py import numpy as np @@ -18,19 +21,26 @@ class DADF5(): """ # ------------------------------------------------------------------ - def __init__(self,filename): + def __init__(self,fname): """ Opens an existing DADF5 file. Parameters ---------- - filename : str + fname : str name of the DADF5 file to be openend. 
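# -----------------------------------------------------------------------------------------------
# Illustrative usage sketch (editor's addition, not part of the patch): opening a results file
# with the renamed 'fname' argument. The file name 'geom_load.hdf5' is an assumed example;
# increments are returned in numerical order (see the sorting introduced below).
from damask.dadf5 import DADF5

d = DADF5(fname='geom_load.hdf5')
print(d.increments, d.times)
# -----------------------------------------------------------------------------------------------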
""" - with h5py.File(filename,'r') as f: + with h5py.File(fname,'r') as f: - if f.attrs['DADF5-major'] != 0 or not 2 <= f.attrs['DADF5-minor'] <= 3: + try: + self.version_major = f.attrs['DADF5_version_major'] + self.version_minor = f.attrs['DADF5_version_minor'] + except KeyError: + self.version_major = f.attrs['DADF5-major'] + self.version_minor = f.attrs['DADF5-minor'] + + if self.version_major != 0 or not 2 <= self.version_minor <= 5: raise TypeError('Unsupported DADF5 version {} '.format(f.attrs['DADF5-version'])) self.structured = 'grid' in f['geometry'].attrs.keys() @@ -38,10 +48,14 @@ class DADF5(): if self.structured: self.grid = f['geometry'].attrs['grid'] self.size = f['geometry'].attrs['size'] + if self.version_major == 0 and self.version_minor >= 5: + self.origin = f['geometry'].attrs['origin'] + r=re.compile('inc[0-9]+') - self.increments = [i for i in f.keys() if r.match(i)] - self.times = [round(f[i].attrs['time/s'],12) for i in self.increments] + increments_unsorted = {int(i[3:]):i for i in f.keys() if r.match(i)} + self.increments = [increments_unsorted[i] for i in sorted(increments_unsorted)] + self.times = [round(f[i].attrs['time/s'],12) for i in self.increments] self.Nmaterialpoints, self.Nconstituents = np.shape(f['mapping/cellResults/constituent']) self.materialpoints = [m.decode() for m in np.unique(f['mapping/cellResults/materialpoint']['Name'])] @@ -64,7 +78,7 @@ class DADF5(): 'con_physics': self.con_physics, 'mat_physics': self.mat_physics} - self.filename = filename + self.fname = fname def __manage_visible(self,datasets,what,action): @@ -88,7 +102,7 @@ class DADF5(): elif datasets is False: datasets = [] choice = [datasets] if isinstance(datasets,str) else datasets - + valid = [e for e_ in [glob.fnmatch.filter(getattr(self,what),s) for s in choice] for e in e_] existing = set(self.visible[what]) @@ -165,7 +179,10 @@ class DADF5(): end increment (included) """ - self.__manage_visible(['inc{:05d}'.format(i) for i in range(start,end+1)],'increments','set') + if self.version_minor >= 4: + self.__manage_visible([ 'inc{}'.format(i) for i in range(start,end+1)],'increments','set') + else: + self.__manage_visible(['inc{:05d}'.format(i) for i in range(start,end+1)],'increments','set') def add_by_increment(self,start,end): @@ -180,7 +197,10 @@ class DADF5(): end increment (included) """ - self.__manage_visible(['inc{:05d}'.format(i) for i in range(start,end+1)],'increments','add') + if self.version_minor >= 4: + self.__manage_visible([ 'inc{}'.format(i) for i in range(start,end+1)],'increments','add') + else: + self.__manage_visible(['inc{:05d}'.format(i) for i in range(start,end+1)],'increments','add') def del_by_increment(self,start,end): @@ -195,7 +215,10 @@ class DADF5(): end increment (included) """ - self.__manage_visible(['inc{:05d}'.format(i) for i in range(start,end+1)],'increments','del') + if self.version_minor >= 4: + self.__manage_visible([ 'inc{}'.format(i) for i in range(start,end+1)],'increments','del') + else: + self.__manage_visible(['inc{:05d}'.format(i) for i in range(start,end+1)],'increments','del') def iter_visible(self,what): @@ -298,7 +321,7 @@ class DADF5(): groups = [] - with h5py.File(self.filename,'r') as f: + with h5py.File(self.fname,'r') as f: for i in self.iter_visible('increments'): for o,p in zip(['constituents','materialpoints'],['con_physics','mat_physics']): for oo in self.iter_visible(o): @@ -315,9 +338,9 @@ class DADF5(): def list_data(self): """Return information on all active datasets in the file.""" message = '' - with 
h5py.File(self.filename,'r') as f: - for s,i in enumerate(self.iter_visible('increments')): - message+='\n{} ({}s)\n'.format(i,self.times[s]) + with h5py.File(self.fname,'r') as f: + for i in self.iter_visible('increments'): + message+='\n{} ({}s)\n'.format(i,self.times[self.increments.index(i)]) for o,p in zip(['constituents','materialpoints'],['con_physics','mat_physics']): for oo in self.iter_visible(o): message+=' {}\n'.format(oo) @@ -336,14 +359,14 @@ class DADF5(): def get_dataset_location(self,label): """Return the location of all active datasets with given label.""" path = [] - with h5py.File(self.filename,'r') as f: + with h5py.File(self.fname,'r') as f: for i in self.iter_visible('increments'): k = '/'.join([i,'geometry',label]) try: f[k] path.append(k) except KeyError as e: - print('unable to locate geometry dataset: {}'.format(str(e))) + pass for o,p in zip(['constituents','materialpoints'],['con_physics','mat_physics']): for oo in self.iter_visible(o): for pp in self.iter_visible(p): @@ -352,20 +375,20 @@ class DADF5(): f[k] path.append(k) except KeyError as e: - print('unable to locate {} dataset: {}'.format(o,str(e))) + pass return path def get_constituent_ID(self,c=0): """Pointwise constituent ID.""" - with h5py.File(self.filename,'r') as f: + with h5py.File(self.fname,'r') as f: names = f['/mapping/cellResults/constituent']['Name'][:,c].astype('str') return np.array([int(n.split('_')[0]) for n in names.tolist()],dtype=np.int32) def get_crystal_structure(self): # ToDo: extension to multi constituents/phase """Info about the crystal structure.""" - with h5py.File(self.filename,'r') as f: + with h5py.File(self.fname,'r') as f: return f[self.get_dataset_location('orientation')[0]].attrs['Lattice'].astype('str') # np.bytes_ to string @@ -375,7 +398,7 @@ class DADF5(): If more than one path is given, the dataset is composed of the individual contributions. """ - with h5py.File(self.filename,'r') as f: + with h5py.File(self.fname,'r') as f: shape = (self.Nmaterialpoints,) + np.shape(f[path[0]])[1:] if len(shape) == 1: shape = shape +(1,) dataset = np.full(shape,np.nan,dtype=np.dtype(f[path[0]])) @@ -416,10 +439,80 @@ class DADF5(): np.linspace(delta[1],self.size[1]-delta[1],self.grid[1]), np.linspace(delta[0],self.size[0]-delta[0],self.grid[0]), ) - return np.concatenate((x[:,:,:,None],y[:,:,:,None],y[:,:,:,None]),axis = 3).reshape([np.product(self.grid),3]) + return np.concatenate((x[:,:,:,None],y[:,:,:,None],z[:,:,:,None]),axis = 3).reshape([np.product(self.grid),3]) else: - with h5py.File(self.filename,'r') as f: + with h5py.File(self.fname,'r') as f: return f['geometry/x_c'][()] + + + def add_absolute(self,x): + """ + Add absolute value. + + Parameters + ---------- + x : str + Label of the dataset containing a scalar, vector, or tensor. + + """ + def __add_absolute(x): + + return { + 'data': np.abs(x['data']), + 'label': '|{}|'.format(x['label']), + 'meta': { + 'Unit': x['meta']['Unit'], + 'Description': 'Absolute value of {} ({})'.format(x['label'],x['meta']['Description']), + 'Creator': 'dadf5.py:add_abs v{}'.format(version) + } + } + + requested = [{'label':x,'arg':'x'}] + + self.__add_generic_pointwise(__add_absolute,requested) + + + def add_calculation(self,formula,label,unit='n/a',description=None,vectorized=True): + """ + Add result of a general formula. + + Parameters + ---------- + formula : str + Formula, refer to datasets by ‘#Label#‘. + label : str + Label of the dataset containing the result of the calculation. 
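# -----------------------------------------------------------------------------------------------
# Illustrative usage sketch (editor's addition, not part of the patch): the '#Label#' placeholder
# syntax of add_calculation. The file name and the dataset label 'sigma' (assumed to be created
# by add_Cauchy) are examples only.
from damask.dadf5 import DADF5

d = DADF5('geom_load.hdf5')
d.add_Cauchy(P='P',F='F')                         # assumed to create a dataset labelled 'sigma'
d.add_calculation(formula='#sigma#*1.0e-6',       # every '#...#' is replaced by that dataset's data
                  label='sigma_MPa',
                  unit='MPa',
                  description='Cauchy stress in MPa')
# -----------------------------------------------------------------------------------------------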
+ unit : str, optional + Physical unit of the result. + description : str, optional + Human readable description of the result. + vectorized : bool, optional + Indicate whether the formula is written in vectorized form. Default is ‘True’. + + """ + if vectorized is not True: + raise NotImplementedError + + def __add_calculation(**kwargs): + + formula = kwargs['formula'] + for d in re.findall(r'#(.*?)#',formula): + formula = formula.replace('#{}#'.format(d),"kwargs['{}']['data']".format(d)) + + return { + 'data': eval(formula), + 'label': kwargs['label'], + 'meta': { + 'Unit': kwargs['unit'], + 'Description': '{} (formula: {})'.format(kwargs['description'],kwargs['formula']), + 'Creator': 'dadf5.py:add_calculation v{}'.format(version) + } + } + + requested = [{'label':d,'arg':d} for d in set(re.findall(r'#(.*?)#',formula))] # datasets used in the formula + pass_through = {'formula':formula,'label':label,'unit':unit,'description':description} + + self.__add_generic_pointwise(__add_calculation,requested,pass_through) def add_Cauchy(self,P='P',F='F'): @@ -453,6 +546,90 @@ class DADF5(): self.__add_generic_pointwise(__add_Cauchy,requested) + def add_determinant(self,x): + """ + Add the determinant of a tensor. + + Parameters + ---------- + x : str + Label of the dataset containing a tensor. + + """ + def __add_determinant(x): + + return { + 'data': np.linalg.det(x['data']), + 'label': 'det({})'.format(x['label']), + 'meta': { + 'Unit': x['meta']['Unit'], + 'Description': 'Determinant of tensor {} ({})'.format(x['label'],x['meta']['Description']), + 'Creator': 'dadf5.py:add_determinant v{}'.format(version) + } + } + + requested = [{'label':x,'arg':'x'}] + + self.__add_generic_pointwise(__add_determinant,requested) + + + def add_deviator(self,x): + """ + Add the deviatoric part of a tensor. + + Parameters + ---------- + x : str + Label of the dataset containing a tensor. + + """ + def __add_deviator(x): + + if not np.all(np.array(x['data'].shape[1:]) == np.array([3,3])): + raise ValueError + + return { + 'data': mechanics.deviatoric_part(x['data']), + 'label': 's_{}'.format(x['label']), + 'meta': { + 'Unit': x['meta']['Unit'], + 'Description': 'Deviator of tensor {} ({})'.format(x['label'],x['meta']['Description']), + 'Creator': 'dadf5.py:add_deviator v{}'.format(version) + } + } + + requested = [{'label':x,'arg':'x'}] + + self.__add_generic_pointwise(__add_deviator,requested) + + + def add_maximum_shear(self,x): + """ + Add maximum shear components of symmetric tensor. + + Parameters + ---------- + x : str + Label of the dataset containing a symmetric tensor. + + """ + def __add_maximum_shear(x): + + return { + 'data': mechanics.maximum_shear(x['data']), + 'label': 'max_shear({})'.format(x['label']), + 'meta': { + 'Unit': x['meta']['Unit'], + 'Description': 'Maximum shear component of of {} ({})'.format(x['label'],x['meta']['Description']), + 'Creator': 'dadf5.py:add_maximum_shear v{}'.format(version) + } + } + + requested = [{'label':x,'arg':'x'}] + + self.__add_generic_pointwise(__add_maximum_shear,requested) + + def add_Mises(self,x): """ Add the equivalent Mises stress or strain of a symmetric tensor. @@ -523,58 +700,33 @@ class DADF5(): self.__add_generic_pointwise(__add_norm,requested,{'ord':ord}) - def add_absolute(self,x): + def add_principal_components(self,x): """ - Add absolute value. - + Add principal components of symmetric tensor. + + The principal components are sorted in descending order, each repeated according to its multiplicity. 
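# -----------------------------------------------------------------------------------------------
# Illustrative usage sketch (editor's addition, not part of the patch): adding principal
# components of an (assumed) symmetric tensor dataset 'sigma' and reading the result back;
# the output label follows the 'lambda_<label>' pattern defined below.
from damask.dadf5 import DADF5

d = DADF5('geom_load.hdf5')                       # assumed file name
d.add_principal_components('sigma')
path = d.get_dataset_location('lambda_sigma')
eigenvalues = d.read_dataset(path,0)              # principal values per material point
# -----------------------------------------------------------------------------------------------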
+ Parameters ---------- x : str - Label of the dataset containing a scalar, vector, or tensor. + Label of the dataset containing a symmetric tensor. """ - def __add_absolute(x): + def __add_principal_components(x): return { - 'data': np.abs(x['data']), - 'label': '|{}|'.format(x['label']), + 'data': mechanics.principal_components(x['data']), + 'label': 'lambda_{}'.format(x['label']), 'meta': { 'Unit': x['meta']['Unit'], - 'Description': 'Absolute value of {} ({})'.format(x['label'],x['meta']['Description']), - 'Creator': 'dadf5.py:add_abs v{}'.format(version) + 'Description': 'Pricipal components of {} ({})'.format(x['label'],x['meta']['Description']), + 'Creator': 'dadf5.py:add_principal_components v{}'.format(version) } } requested = [{'label':x,'arg':'x'}] - - self.__add_generic_pointwise(__add_absolute,requested) - - def add_determinant(self,x): - """ - Add the determinant of a tensor. - - Parameters - ---------- - x : str - Label of the dataset containing a tensor. - - """ - def __add_determinant(x): - - return { - 'data': np.linalg.det(x['data']), - 'label': 'det({})'.format(x['label']), - 'meta': { - 'Unit': x['meta']['Unit'], - 'Description': 'Determinant of tensor {} ({})'.format(x['label'],x['meta']['Description']), - 'Creator': 'dadf5.py:add_determinant v{}'.format(version) - } - } - - requested = [{'label':x,'arg':'x'}] - - self.__add_generic_pointwise(__add_determinant,requested) + self.__add_generic_pointwise(__add_principal_components,requested) def add_spherical(self,x): @@ -607,79 +759,6 @@ class DADF5(): self.__add_generic_pointwise(__add_spherical,requested) - def add_deviator(self,x): - """ - Add the deviatoric part of a tensor. - - Parameters - ---------- - x : str - Label of the dataset containing a tensor. - - """ - def __add_deviator(x): - - if not np.all(np.array(x['data'].shape[1:]) == np.array([3,3])): - raise ValueError - - return { - 'data': mechanics.deviatoric_part(x['data']), - 'label': 's_{}'.format(x['label']), - 'meta': { - 'Unit': x['meta']['Unit'], - 'Description': 'Deviator of tensor {} ({})'.format(x['label'],x['meta']['Description']), - 'Creator': 'dadf5.py:add_deviator v{}'.format(version) - } - } - - requested = [{'label':x,'arg':'x'}] - - self.__add_generic_pointwise(__add_deviator,requested) - - - def add_calculation(self,formula,label,unit='n/a',description=None,vectorized=True): - """ - Add result of a general formula. - - Parameters - ---------- - formula : str - Formula, refer to datasets by ‘#Label#‘. - label : str - Label of the dataset containing the result of the calculation. - unit : str, optional - Physical unit of the result. - description : str, optional - Human readable description of the result. - vectorized : bool, optional - Indicate whether the formula is written in vectorized form. Default is ‘True’. 
- - """ - if vectorized is not True: - raise NotImplementedError - - def __add_calculation(**kwargs): - - formula = kwargs['formula'] - for d in re.findall(r'#(.*?)#',formula): - formula = formula.replace('#{}#'.format(d),"kwargs['{}']['data']".format(d)) - - return { - 'data': eval(formula), - 'label': kwargs['label'], - 'meta': { - 'Unit': kwargs['unit'], - 'Description': '{} (formula: {})'.format(kwargs['description'],kwargs['formula']), - 'Creator': 'dadf5.py:add_calculation v{}'.format(version) - } - } - - requested = [{'label':d,'arg':d} for d in set(re.findall(r'#(.*?)#',formula))] # datasets used in the formula - pass_through = {'formula':formula,'label':label,'unit':unit,'description':description} - - self.__add_generic_pointwise(__add_calculation,requested,pass_through) - - def add_strain_tensor(self,F='F',t='U',m=0): """ Add strain tensor calculated from a deformation gradient. @@ -712,62 +791,6 @@ class DADF5(): requested = [{'label':F,'arg':'F'}] self.__add_generic_pointwise(__add_strain_tensor,requested,{'t':t,'m':m}) - - - def add_principal_components(self,x): - """ - Add principal components of symmetric tensor. - - The principal components are sorted in descending order, each repeated according to its multiplicity. - - Parameters - ---------- - x : str - Label of the dataset containing a symmetric tensor. - - """ - def __add_principal_components(x): - - return { - 'data': mechanics.principal_components(x['data']), - 'label': 'lambda_{}'.format(x['label']), - 'meta': { - 'Unit': x['meta']['Unit'], - 'Description': 'Pricipal components of {} ({})'.format(x['label'],x['meta']['Description']), - 'Creator': 'dadf5.py:add_principal_components v{}'.format(version) - } - } - - requested = [{'label':x,'arg':'x'}] - - self.__add_generic_pointwise(__add_principal_components,requested) - - - def add_maximum_shear(self,x): - """ - Add maximum shear components of symmetric tensor. - - Parameters - ---------- - x : str - Label of the dataset containing a symmetric tensor. - - """ - def __add_maximum_shear(x): - - return { - 'data': mechanics.maximum_shear(x['data']), - 'label': 'max_shear({})'.format(x['label']), - 'meta': { - 'Unit': x['meta']['Unit'], - 'Description': 'Maximum shear component of of {} ({})'.format(x['label'],x['meta']['Description']), - 'Creator': 'dadf5.py:add_maximum_shear v{}'.format(version) - } - } - - requested = [{'label':x,'arg':'x'}] - - self.__add_generic_pointwise(__add_maximum_shear,requested) def __add_generic_pointwise(self,func,datasets_requested,extra_args={}): @@ -798,7 +821,7 @@ class DADF5(): todo = [] # ToDo: It would be more memory efficient to read only from file when required, i.e. do to it in pool.add_task for group in self.groups_with_datasets([d['label'] for d in datasets_requested]): - with h5py.File(self.filename,'r') as f: + with h5py.File(self.fname,'r') as f: datasets_in = {} for d in datasets_requested: loc = f[group+'/'+d['label']] @@ -813,7 +836,7 @@ class DADF5(): N_not_calculated = len(todo) while N_not_calculated > 0: result = results.get() - with h5py.File(self.filename,'a') as f: # write to file + with h5py.File(self.fname,'a') as f: # write to file dataset_out = f[result['group']].create_dataset(result['label'],data=result['data']) for k in result['meta'].keys(): dataset_out.attrs[k] = result['meta'][k].encode() @@ -824,3 +847,142 @@ class DADF5(): N_added +=1 pool.wait_completion() + + + def to_vtk(self,labels,mode='Cell'): + """ + Export to vtk cell/point data. 
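# -----------------------------------------------------------------------------------------------
# Illustrative usage sketch (editor's addition, not part of the patch): exporting datasets of all
# visible increments to VTK, one file per increment. File name and dataset labels are assumed
# examples.
from damask.dadf5 import DADF5

d = DADF5('geom_load.hdf5')
d.add_Cauchy()                                    # defaults to P='P', F='F'
d.to_vtk(labels=['F','P'],mode='Cell')
# -----------------------------------------------------------------------------------------------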
+ + Parameters + ---------- + labels : str or list of + Labels of the datasets to be exported. + mode : str, either 'Cell' or 'Point' + Export in cell format or point format. + Default value is 'Cell'. + + """ + if mode=='Cell': + + if self.structured: + + coordArray = [vtk.vtkDoubleArray(),vtk.vtkDoubleArray(),vtk.vtkDoubleArray()] + for dim in [0,1,2]: + for c in np.linspace(0,self.size[dim],1+self.grid[dim]): + coordArray[dim].InsertNextValue(c) + + vtk_geom = vtk.vtkRectilinearGrid() + vtk_geom.SetDimensions(*(self.grid+1)) + vtk_geom.SetXCoordinates(coordArray[0]) + vtk_geom.SetYCoordinates(coordArray[1]) + vtk_geom.SetZCoordinates(coordArray[2]) + + else: + + nodes = vtk.vtkPoints() + with h5py.File(self.fname,'r') as f: + nodes.SetData(numpy_support.numpy_to_vtk(f['/geometry/x_n'][()],deep=True)) + + vtk_geom = vtk.vtkUnstructuredGrid() + vtk_geom.SetPoints(nodes) + vtk_geom.Allocate(f['/geometry/T_c'].shape[0]) + for i in f['/geometry/T_c']: + vtk_geom.InsertNextCell(vtk.VTK_HEXAHEDRON,8,i-1) # not for all elements! + elif mode == 'Point': + Points = vtk.vtkPoints() + Vertices = vtk.vtkCellArray() + for c in self.cell_coordinates(): + pointID = Points.InsertNextPoint(c) + Vertices.InsertNextCell(1) + Vertices.InsertCellPoint(pointID) + + vtk_geom = vtk.vtkPolyData() + vtk_geom.SetPoints(Points) + vtk_geom.SetVerts(Vertices) + vtk_geom.Modified() + + N_digits = int(np.floor(np.log10(int(self.increments[-1][3:]))))+1 + + for i,inc in enumerate(self.iter_visible('increments')): + vtk_data = [] + + materialpoints_backup = self.visible['materialpoints'].copy() + self.set_visible('materialpoints',False) + for label in (labels if isinstance(labels,list) else [labels]): + for p in self.iter_visible('con_physics'): + if p != 'generic': + for c in self.iter_visible('constituents'): + x = self.get_dataset_location(label) + if len(x) == 0: + continue + array = self.read_dataset(x,0) + shape = [array.shape[0],np.product(array.shape[1:])] + vtk_data.append(numpy_support.numpy_to_vtk(num_array=array.reshape(shape), + deep=True,array_type= vtk.VTK_DOUBLE)) + vtk_data[-1].SetName('1_'+x[0].split('/',1)[1]) #ToDo: hard coded 1! + vtk_geom.GetCellData().AddArray(vtk_data[-1]) + + else: + x = self.get_dataset_location(label) + if len(x) == 0: + continue + array = self.read_dataset(x,0) + shape = [array.shape[0],np.product(array.shape[1:])] + vtk_data.append(numpy_support.numpy_to_vtk(num_array=array.reshape(shape), + deep=True,array_type= vtk.VTK_DOUBLE)) + ph_name = re.compile(r'(?<=(constituent\/))(.*?)(?=(generic))') # identify phase name + dset_name = '1_' + re.sub(ph_name,r'',x[0].split('/',1)[1]) # removing phase name + vtk_data[-1].SetName(dset_name) + vtk_geom.GetCellData().AddArray(vtk_data[-1]) + + self.set_visible('materialpoints',materialpoints_backup) + + constituents_backup = self.visible['constituents'].copy() + self.set_visible('constituents',False) + for label in (labels if isinstance(labels,list) else [labels]): + for p in self.iter_visible('mat_physics'): + if p != 'generic': + for m in self.iter_visible('materialpoints'): + x = self.get_dataset_location(label) + if len(x) == 0: + continue + array = self.read_dataset(x,0) + shape = [array.shape[0],np.product(array.shape[1:])] + vtk_data.append(numpy_support.numpy_to_vtk(num_array=array.reshape(shape), + deep=True,array_type= vtk.VTK_DOUBLE)) + vtk_data[-1].SetName('1_'+x[0].split('/',1)[1]) #ToDo: why 1_? 
+ vtk_geom.GetCellData().AddArray(vtk_data[-1]) + else: + x = self.get_dataset_location(label) + if len(x) == 0: + continue + array = self.read_dataset(x,0) + shape = [array.shape[0],np.product(array.shape[1:])] + vtk_data.append(numpy_support.numpy_to_vtk(num_array=array.reshape(shape), + deep=True,array_type= vtk.VTK_DOUBLE)) + vtk_data[-1].SetName('1_'+x[0].split('/',1)[1]) + vtk_geom.GetCellData().AddArray(vtk_data[-1]) + self.set_visible('constituents',constituents_backup) + + if mode=='Cell': + writer = vtk.vtkXMLRectilinearGridWriter() if self.structured else \ + vtk.vtkXMLUnstructuredGridWriter() + x = self.get_dataset_location('u_n') + vtk_data.append(numpy_support.numpy_to_vtk(num_array=self.read_dataset(x,0), + deep=True,array_type=vtk.VTK_DOUBLE)) + vtk_data[-1].SetName('u') + vtk_geom.GetPointData().AddArray(vtk_data[-1]) + elif mode == 'Point': + writer = vtk.vtkXMLPolyDataWriter() + + + file_out = '{}_inc{}.{}'.format(os.path.splitext(os.path.basename(self.fname))[0], + inc[3:].zfill(N_digits), + writer.GetDefaultFileExtension()) + + writer.SetCompressorTypeToZLib() + writer.SetDataModeToBinary() + writer.SetFileName(file_out) + writer.SetInputData(vtk_geom) + + writer.Write() diff --git a/python/damask/geom.py b/python/damask/geom.py index 1c9e10cd1..bfe475730 100644 --- a/python/damask/geom.py +++ b/python/damask/geom.py @@ -205,6 +205,9 @@ class Geom(): else: self.homogenization = homogenization + @property + def grid(self): + return self.get_grid() def get_microstructure(self): """Return the microstructure representation.""" @@ -239,8 +242,8 @@ class Geom(): header.append('homogenization {}'.format(self.get_homogenization())) return header - @classmethod - def from_file(cls,fname): + @staticmethod + def from_file(fname): """ Reads a geom file. @@ -300,7 +303,7 @@ class Geom(): if not np.any(np.mod(microstructure.flatten(),1) != 0.0): # no float present microstructure = microstructure.astype('int') - return cls(microstructure.reshape(grid),size,origin,homogenization,comments) + return Geom(microstructure.reshape(grid),size,origin,homogenization,comments) def to_file(self,fname,pack=None): @@ -419,7 +422,7 @@ class Geom(): ext = os.path.splitext(fname)[1] if ext == '': name = fname + '.' + writer.GetDefaultFileExtension() - elif ext == writer.GetDefaultFileExtension(): + elif ext[1:] == writer.GetDefaultFileExtension(): name = fname else: raise ValueError("unknown extension {}".format(ext)) diff --git a/python/damask/grid_filters.py b/python/damask/grid_filters.py new file mode 100644 index 000000000..36ce3b8e2 --- /dev/null +++ b/python/damask/grid_filters.py @@ -0,0 +1,379 @@ +from scipy import spatial +import numpy as np + +def __ks(size,grid,first_order=False): + """ + Get wave numbers operator. + + Parameters + ---------- + size : numpy.ndarray + physical size of the periodic field. 
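# -----------------------------------------------------------------------------------------------
# Illustrative usage sketch (editor's addition, not part of the patch): reading a geometry file
# and using the new 'grid' property of Geom; 'example.geom' is an assumed file name.
import damask

geom = damask.Geom.from_file('example.geom')
print(geom.grid, geom.size, geom.homogenization)
# -----------------------------------------------------------------------------------------------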
+ + """ + k_sk = np.where(np.arange(grid[0])>grid[0]//2,np.arange(grid[0])-grid[0],np.arange(grid[0]))/size[0] + if grid[0]%2 == 0 and first_order: k_sk[grid[0]//2] = 0 # Nyquist freq=0 for even grid (Johnson, MIT, 2011) + + k_sj = np.where(np.arange(grid[1])>grid[1]//2,np.arange(grid[1])-grid[1],np.arange(grid[1]))/size[1] + if grid[1]%2 == 0 and first_order: k_sj[grid[1]//2] = 0 # Nyquist freq=0 for even grid (Johnson, MIT, 2011) + + k_si = np.arange(grid[2]//2+1)/size[2] + + kk, kj, ki = np.meshgrid(k_sk,k_sj,k_si,indexing = 'ij') + return np.concatenate((ki[:,:,:,None],kj[:,:,:,None],kk[:,:,:,None]),axis = 3) + + +def curl(size,field): + """ + Calculate curl of a vector or tensor field in Fourier space. + + Parameters + ---------- + size : numpy.ndarray + physical size of the periodic field. + + """ + n = np.prod(field.shape[3:]) + k_s = __ks(size,field.shape[:3],True) + + e = np.zeros((3, 3, 3)) + e[0, 1, 2] = e[1, 2, 0] = e[2, 0, 1] = +1.0 # Levi-Civita symbol + e[0, 2, 1] = e[2, 1, 0] = e[1, 0, 2] = -1.0 + + field_fourier = np.fft.rfftn(field,axes=(0,1,2)) + curl = (np.einsum('slm,ijkl,ijkm ->ijks', e,k_s,field_fourier)*2.0j*np.pi if n == 3 else # vector, 3 -> 3 + np.einsum('slm,ijkl,ijknm->ijksn',e,k_s,field_fourier)*2.0j*np.pi) # tensor, 3x3 -> 3x3 + + return np.fft.irfftn(curl,axes=(0,1,2),s=field.shape[:3]) + + +def divergence(size,field): + """ + Calculate divergence of a vector or tensor field in Fourier space. + + Parameters + ---------- + size : numpy.ndarray + physical size of the periodic field. + + """ + n = np.prod(field.shape[3:]) + k_s = __ks(size,field.shape[:3],True) + + field_fourier = np.fft.rfftn(field,axes=(0,1,2)) + divergence = (np.einsum('ijkl,ijkl ->ijk', k_s,field_fourier)*2.0j*np.pi if n == 3 else # vector, 3 -> 1 + np.einsum('ijkm,ijklm->ijkl',k_s,field_fourier)*2.0j*np.pi) # tensor, 3x3 -> 3 + + return np.fft.irfftn(divergence,axes=(0,1,2),s=field.shape[:3]) + + +def gradient(size,field): + """ + Calculate gradient of a vector or scalar field in Fourier space. + + Parameters + ---------- + size : numpy.ndarray + physical size of the periodic field. + + """ + n = np.prod(field.shape[3:]) + k_s = __ks(size,field.shape[:3],True) + + field_fourier = np.fft.rfftn(field,axes=(0,1,2)) + gradient = (np.einsum('ijkl,ijkm->ijkm', field_fourier,k_s)*2.0j*np.pi if n == 1 else # scalar, 1 -> 3 + np.einsum('ijkl,ijkm->ijklm',field_fourier,k_s)*2.0j*np.pi) # vector, 3 -> 3x3 + + return np.fft.irfftn(gradient,axes=(0,1,2),s=field.shape[:3]) + + +def cell_coord0(grid,size,origin=np.zeros(3)): + """ + Cell center positions (undeformed). + + Parameters + ---------- + grid : numpy.ndarray + number of grid points. + size : numpy.ndarray + physical size of the periodic field. + origin : numpy.ndarray, optional + physical origin of the periodic field. Default is [0.0,0.0,0.0]. + + """ + start = origin + size/grid*.5 + end = origin - size/grid*.5 + size + x, y, z = np.meshgrid(np.linspace(start[2],end[2],grid[2]), + np.linspace(start[1],end[1],grid[1]), + np.linspace(start[0],end[0],grid[0]), + indexing = 'ij') + + return np.concatenate((z[:,:,:,None],y[:,:,:,None],x[:,:,:,None]),axis = 3) + +def cell_displacement_fluct(size,F): + """ + Cell center displacement field from fluctuation part of the deformation gradient field. + + Parameters + ---------- + size : numpy.ndarray + physical size of the periodic field. + F : numpy.ndarray + deformation gradient field. 
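# -----------------------------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of the patch): the spectral operators act on
# periodic fields whose first three axes are the grid. A spatially constant vector field has zero
# curl and zero divergence; grid and size values are arbitrary examples.
import numpy as np
from damask import grid_filters

size  = np.array([1.0,1.0,1.0])
field = np.broadcast_to(np.array([1.0,2.0,3.0]),(8,8,8,3)).copy()   # uniform vector field
assert np.allclose(grid_filters.curl(size,field),      0.0)
assert np.allclose(grid_filters.divergence(size,field),0.0)
# -----------------------------------------------------------------------------------------------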
+ + """ + integrator = 0.5j*size/np.pi + + k_s = __ks(size,F.shape[:3],False) + k_s_squared = np.einsum('...l,...l',k_s,k_s) + k_s_squared[0,0,0] = 1.0 + + displacement = -np.einsum('ijkml,ijkl,l->ijkm', + np.fft.rfftn(F,axes=(0,1,2)), + k_s, + integrator, + ) / k_s_squared[...,np.newaxis] + + return np.fft.irfftn(displacement,axes=(0,1,2),s=F.shape[:3]) + +def cell_displacement_avg(size,F): + """ + Cell center displacement field from average part of the deformation gradient field. + + Parameters + ---------- + size : numpy.ndarray + physical size of the periodic field. + F : numpy.ndarray + deformation gradient field. + + """ + F_avg = np.average(F,axis=(0,1,2)) + return np.einsum('ml,ijkl->ijkm',F_avg-np.eye(3),cell_coord0(F.shape[:3][::-1],size)) + +def cell_displacement(size,F): + """ + Cell center displacement field from deformation gradient field. + + Parameters + ---------- + size : numpy.ndarray + physical size of the periodic field. + F : numpy.ndarray + deformation gradient field. + + """ + return cell_displacement_avg(size,F) + cell_displacement_fluct(size,F) + +def cell_coord(size,F,origin=np.zeros(3)): + """ + Cell center positions. + + Parameters + ---------- + size : numpy.ndarray + physical size of the periodic field. + F : numpy.ndarray + deformation gradient field. + origin : numpy.ndarray, optional + physical origin of the periodic field. Default is [0.0,0.0,0.0]. + + """ + return cell_coord0(F.shape[:3][::-1],size,origin) + cell_displacement(size,F) + +def cell_coord0_gridSizeOrigin(coord0,ordered=True): + """ + Return grid 'DNA', i.e. grid, size, and origin from array of cell positions. + + Parameters + ---------- + coord0 : numpy.ndarray + array of undeformed cell coordinates. + ordered : bool, optional + expect coord0 data to be ordered (x fast, z slow). + + """ + coords = [np.unique(coord0[:,i]) for i in range(3)] + mincorner = np.array(list(map(min,coords))) + maxcorner = np.array(list(map(max,coords))) + grid = np.array(list(map(len,coords)),'i') + size = grid/np.maximum(grid-1,1) * (maxcorner-mincorner) + delta = size/grid + origin = mincorner - delta*.5 + + # 1D/2D: size/origin combination undefined, set origin to 0.0 + size [np.where(grid==1)] = origin[np.where(grid==1)]*2. + origin[np.where(grid==1)] = 0.0 + + if grid.prod() != len(coord0): + raise ValueError('Data count {} does not match grid {}.'.format(len(coord0),grid)) + + start = origin + delta*.5 + end = origin - delta*.5 + size + + if not np.allclose(coords[0],np.linspace(start[0],end[0],grid[0])) and \ + np.allclose(coords[1],np.linspace(start[1],end[1],grid[1])) and \ + np.allclose(coords[2],np.linspace(start[2],end[2],grid[2])): + raise ValueError('Regular grid spacing violated.') + + if ordered and not np.allclose(coord0.reshape(tuple(grid[::-1])+(3,)),cell_coord0(grid,size,origin)): + raise ValueError('Input data is not a regular grid.') + + return (grid,size,origin) + +def coord0_check(coord0): + """ + Check whether coordinates lie on a regular grid. + + Parameters + ---------- + coord0 : numpy.ndarray + array of undeformed cell coordinates. + + """ + cell_coord0_gridSizeOrigin(coord0,ordered=True) + + + +def node_coord0(grid,size,origin=np.zeros(3)): + """ + Nodal positions (undeformed). + + Parameters + ---------- + grid : numpy.ndarray + number of grid points. + size : numpy.ndarray + physical size of the periodic field. + origin : numpy.ndarray, optional + physical origin of the periodic field. Default is [0.0,0.0,0.0]. 
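# -----------------------------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of the patch): for a homogeneous deformation
# gradient the fluctuating displacement vanishes, so the total cell-center displacement equals
# the average (affine) part. Numbers are arbitrary examples.
import numpy as np
from damask import grid_filters

size = np.array([1.0,1.0,1.0])
F    = np.broadcast_to(np.array([[1.0,1.0e-3,0.0],
                                 [0.0,1.0,   0.0],
                                 [0.0,0.0,   1.0]]),(8,8,8,3,3)).copy()
assert np.allclose(grid_filters.cell_displacement_fluct(size,F),0.0)
u = grid_filters.cell_displacement(size,F)        # here identical to cell_displacement_avg
# -----------------------------------------------------------------------------------------------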
+ + """ + x, y, z = np.meshgrid(np.linspace(origin[2],size[2]+origin[2],1+grid[2]), + np.linspace(origin[1],size[1]+origin[1],1+grid[1]), + np.linspace(origin[0],size[0]+origin[0],1+grid[0]), + indexing = 'ij') + + return np.concatenate((z[:,:,:,None],y[:,:,:,None],x[:,:,:,None]),axis = 3) + +def node_displacement_fluct(size,F): + """ + Nodal displacement field from fluctuation part of the deformation gradient field. + + Parameters + ---------- + size : numpy.ndarray + physical size of the periodic field. + F : numpy.ndarray + deformation gradient field. + + """ + return cell_2_node(cell_displacement_fluct(size,F)) + +def node_displacement_avg(size,F): + """ + Nodal displacement field from average part of the deformation gradient field. + + Parameters + ---------- + size : numpy.ndarray + physical size of the periodic field. + F : numpy.ndarray + deformation gradient field. + + """ + F_avg = np.average(F,axis=(0,1,2)) + return np.einsum('ml,ijkl->ijkm',F_avg-np.eye(3),node_coord0(F.shape[:3][::-1],size)) + +def node_displacement(size,F): + """ + Nodal displacement field from deformation gradient field. + + Parameters + ---------- + size : numpy.ndarray + physical size of the periodic field. + F : numpy.ndarray + deformation gradient field. + + """ + return node_displacement_avg(size,F) + node_displacement_fluct(size,F) + +def node_coord(size,F,origin=np.zeros(3)): + """ + Nodal positions. + + Parameters + ---------- + size : numpy.ndarray + physical size of the periodic field. + F : numpy.ndarray + deformation gradient field. + origin : numpy.ndarray, optional + physical origin of the periodic field. Default is [0.0,0.0,0.0]. + + """ + return node_coord0(F.shape[:3][::-1],size,origin) + node_displacement(size,F) + +def cell_2_node(cell_data): + """Interpolate periodic cell data to nodal data.""" + n = ( cell_data + np.roll(cell_data,1,(0,1,2)) + + np.roll(cell_data,1,(0,)) + np.roll(cell_data,1,(1,)) + np.roll(cell_data,1,(2,)) + + np.roll(cell_data,1,(0,1)) + np.roll(cell_data,1,(1,2)) + np.roll(cell_data,1,(2,0)))*0.125 + + return np.pad(n,((0,1),(0,1),(0,1))+((0,0),)*len(cell_data.shape[3:]),mode='wrap') + +def node_2_cell(node_data): + """Interpolate periodic nodal data to cell data.""" + c = ( node_data + np.roll(node_data,1,(0,1,2)) + + np.roll(node_data,1,(0,)) + np.roll(node_data,1,(1,)) + np.roll(node_data,1,(2,)) + + np.roll(node_data,1,(0,1)) + np.roll(node_data,1,(1,2)) + np.roll(node_data,1,(2,0)))*0.125 + + return c[:-1,:-1,:-1] + +def node_coord0_gridSizeOrigin(coord0,ordered=False): + """ + Return grid 'DNA', i.e. grid, size, and origin from array of nodal positions. + + Parameters + ---------- + coord0 : numpy.ndarray + array of undeformed nodal coordinates + ordered : bool, optional + expect coord0 data to be ordered (x fast, z slow). 
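# -----------------------------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of the patch): recovering the grid 'DNA'
# (grid, size, origin) from a flattened list of nodal positions. Values are arbitrary examples.
import numpy as np
from damask import grid_filters

grid   = np.array([4,3,2])
size   = np.array([1.0,2.0,3.0])
coord0 = grid_filters.node_coord0(grid,size).reshape(-1,3)
g,s,o  = grid_filters.node_coord0_gridSizeOrigin(coord0)
assert np.all(g == grid) and np.allclose(s,size) and np.allclose(o,0.0)
# -----------------------------------------------------------------------------------------------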
+ + """ + coords = [np.unique(coord0[:,i]) for i in range(3)] + mincorner = np.array(list(map(min,coords))) + maxcorner = np.array(list(map(max,coords))) + grid = np.array(list(map(len,coords)),'i') - 1 + size = maxcorner-mincorner + origin = mincorner + + if (grid+1).prod() != len(coord0): + raise ValueError('Data count {} does not match grid {}.'.format(len(coord0),grid)) + + if not np.allclose(coords[0],np.linspace(mincorner[0],maxcorner[0],grid[0]+1)) and \ + np.allclose(coords[1],np.linspace(mincorner[1],maxcorner[1],grid[1]+1)) and \ + np.allclose(coords[2],np.linspace(mincorner[2],maxcorner[2],grid[2]+1)): + raise ValueError('Regular grid spacing violated.') + + if ordered and not np.allclose(coord0.reshape(tuple((grid+1)[::-1])+(3,)),node_coord0(grid,size,origin)): + raise ValueError('Input data is not a regular grid.') + + return (grid,size,origin) + + +def regrid(size,F,new_grid): + """tbd.""" + c = cell_coord0(F.shape[:3][::-1],size) \ + + cell_displacement_avg(size,F) \ + + cell_displacement_fluct(size,F) + + outer = np.dot(np.average(F,axis=(0,1,2)),size) + for d in range(3): + c[np.where(c[:,:,:,d]<0)] += outer[d] + c[np.where(c[:,:,:,d]>outer[d])] -= outer[d] + + tree = spatial.cKDTree(c.reshape((-1,3)),boxsize=outer) + return tree.query(cell_coord0(new_grid,outer))[1] diff --git a/python/damask/mechanics.py b/python/damask/mechanics.py index 476682380..307f1d83d 100644 --- a/python/damask/mechanics.py +++ b/python/damask/mechanics.py @@ -19,7 +19,26 @@ def Cauchy(F,P): else: sigma = np.einsum('i,ijk,ilk->ijl',1.0/np.linalg.det(F),P,F) return symmetric(sigma) - + + +def PK2(F,P): + """ + Return 2. Piola-Kirchhoff stress calculated from 1. Piola-Kirchhoff stress and deformation gradient. + + Parameters + ---------- + F : numpy.array of shape (:,3,3) or (3,3) + Deformation gradient. + P : numpy.array of shape (:,3,3) or (3,3) + 1. Piola-Kirchhoff stress. + + """ + if np.shape(F) == np.shape(P) == (3,3): + S = np.dot(np.linalg.inv(F),P) + else: + S = np.einsum('ijk,ikl->ijl',np.linalg.inv(F),P) + return S + def strain_tensor(F,t,m): """ @@ -39,10 +58,10 @@ def strain_tensor(F,t,m): """ F_ = F.reshape((1,3,3)) if F.shape == (3,3) else F - if t == 'U': + if t == 'V': B = np.matmul(F_,transpose(F_)) w,n = np.linalg.eigh(B) - elif t == 'V': + elif t == 'U': C = np.matmul(transpose(F_),F_) w,n = np.linalg.eigh(C) @@ -73,21 +92,27 @@ def deviatoric_part(x): x - np.einsum('ijk,i->ijk',np.broadcast_to(np.eye(3),[x.shape[0],3,3]),spherical_part(x)) -def spherical_part(x): +def spherical_part(x,tensor=False): """ Return spherical (hydrostatic) part of a tensor. - A single scalar is returned, i.e. the hydrostatic part is not mapped on the 3rd order identity - matrix. - Parameters ---------- x : numpy.array of shape (:,3,3) or (3,3) Tensor of which the hydrostatic part is computed. + tensor : bool, optional + Map spherical part onto identity tensor. 
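# -----------------------------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of the patch): the new 'tensor' switch of
# spherical_part, the spherical/deviatoric split, and the new PK2 helper; the sample tensors are
# arbitrary examples.
import numpy as np
from damask import mechanics

P = np.broadcast_to(np.diag([3.0,0.0,0.0]),(2,3,3)).copy()   # two identical sample tensors
sph   = mechanics.spherical_part(P)                          # shape (2,): trace/3 = 1.0
sph_t = mechanics.spherical_part(P,tensor=True)              # shape (2,3,3): 1.0*eye(3)
assert np.allclose(P, sph_t + mechanics.deviatoric_part(P))
F = np.broadcast_to(np.eye(3),(2,3,3)).copy()
assert np.allclose(mechanics.PK2(F,P), P)                    # 2. PK stress equals P for F = I
# -----------------------------------------------------------------------------------------------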
Default is false """ - return np.trace(x)/3.0 if np.shape(x) == (3,3) else \ - np.trace(x,axis1=1,axis2=2)/3.0 + if x.shape == (3,3): + sph = np.trace(x)/3.0 + return sph if not tensor else np.eye(3)*sph + else: + sph = np.trace(x,axis1=1,axis2=2)/3.0 + if not tensor: + return sph + else: + return np.einsum('ijk,i->ijk',np.broadcast_to(np.eye(3),(x.shape[0],3,3)),sph) def Mises_stress(sigma): diff --git a/python/damask/orientation.py b/python/damask/orientation.py index 85a5c1866..a86ba9331 100644 --- a/python/damask/orientation.py +++ b/python/damask/orientation.py @@ -170,9 +170,18 @@ class Rotation: ################################################################################################ # convert to different orientation representations (numpy arrays) - def asQuaternion(self): - """Unit quaternion: (q, p_1, p_2, p_3).""" - return self.quaternion.asArray() + def asQuaternion(self, + quaternion = False): + """ + Unit quaternion [q, p_1, p_2, p_3] unless quaternion == True: damask.quaternion object. + + Parameters + ---------- + quaternion : bool, optional + return quaternion as DAMASK object. + + """ + return self.quaternion if quaternion else self.quaternion.asArray() def asEulers(self, degrees = False): @@ -190,33 +199,36 @@ class Rotation: return eu def asAxisAngle(self, - degrees = False): + degrees = False, + pair = False): """ - Axis angle pair: ([n_1, n_2, n_3], ω). + Axis angle representation [n_1, n_2, n_3, ω] unless pair == True: ([n_1, n_2, n_3], ω). Parameters ---------- degrees : bool, optional return rotation angle in degrees. + pair : bool, optional + return tuple of axis and angle. """ ax = qu2ax(self.quaternion.asArray()) if degrees: ax[3] = np.degrees(ax[3]) - return ax + return (ax[:3],np.degrees(ax[3])) if pair else ax def asMatrix(self): """Rotation matrix.""" return qu2om(self.quaternion.asArray()) def asRodrigues(self, - vector=False): + vector = False): """ - Rodrigues-Frank vector: ([n_1, n_2, n_3], tan(ω/2)). + Rodrigues-Frank vector representation [n_1, n_2, n_3, tan(ω/2)] unless vector == True: [n_1, n_2, n_3] * tan(ω/2). Parameters ---------- vector : bool, optional - return as array of length 3, i.e. scale the unit vector giving the rotation axis. + return as actual Rodrigues--Frank vector, i.e. rotation axis scaled by tan(ω/2). """ ro = qu2ro(self.quaternion.asArray()) @@ -252,8 +264,8 @@ class Rotation: acceptHomomorph = False, P = -1): - qu = quaternion if isinstance(quaternion, np.ndarray) and quaternion.dtype == np.dtype(float) \ - else np.array(quaternion,dtype=float) + qu = quaternion if isinstance(quaternion, np.ndarray) and quaternion.dtype == np.dtype(float) \ + else np.array(quaternion,dtype=float) if P > 0: qu[1:4] *= -1 # convert from P=1 to P=-1 if qu[0] < 0.0: if acceptHomomorph: @@ -701,14 +713,14 @@ class Symmetry: v = np.array(vector,dtype=float) if proper: # check both improper ... - theComponents = np.dot(basis['improper'],v) + theComponents = np.around(np.dot(basis['improper'],v),12) inSST = np.all(theComponents >= 0.0) if not inSST: # ... 
and proper SST - theComponents = np.dot(basis['proper'],v) + theComponents = np.around(np.dot(basis['proper'],v),12) inSST = np.all(theComponents >= 0.0) else: v[2] = abs(v[2]) # z component projects identical - theComponents = np.dot(basis['improper'],v) # for positive and negative values + theComponents = np.around(np.dot(basis['improper'],v),12) # for positive and negative values inSST = np.all(theComponents >= 0.0) if color: # have to return color array @@ -875,7 +887,7 @@ class Lattice: [[ 17, 12, 5],[ 17, 7, 17]], [[ 5, 17, 12],[ 17, 17, 7]], [[ 12, -5,-17],[ 7,-17,-17]], - [[-17,-12, 5],[-17, 7, 17]]],dtype='float')} + [[-17,-12, 5],[-17,-7, 17]]],dtype='float')} # Greninger--Troiano' orientation relationship for fcc <-> bcc transformation # from Y. He et al., Journal of Applied Crystallography 39:72-81, 2006 @@ -901,7 +913,7 @@ class Lattice: [[-17,-17, 7],[-17, -5, 12]], [[ 7,-17,-17],[ 12,-17, -5]], [[ 17, -7,-17],[ 5, -12,-17]], - [[ 17,-17, 7],[ 17, -5,-12]], + [[ 17,-17, -7],[ 17, -5,-12]], [[ -7, 17,-17],[-12, 17, -5]], [[-17, 7,-17],[ -5, 12,-17]], [[-17, 17, -7],[-17, 5,-12]]],dtype='float'), @@ -957,7 +969,7 @@ class Lattice: [[ 2, 1, -1],[ 0, -1, 1]], [[ -1, -2, -1],[ 0, -1, 1]], [[ -1, 1, 2],[ 0, -1, 1]], - [[ -1, 2, 1],[ 0, -1, 1]], + [[ 2, -1, 1],[ 0, -1, 1]], #It is wrong in the paper, but matrix is correct [[ -1, 2, 1],[ 0, -1, 1]], [[ -1, -1, -2],[ 0, -1, 1]]],dtype='float')} @@ -1025,7 +1037,7 @@ class Lattice: https://doi.org/10.1016/j.actamat.2004.11.021 """ - models={'KS':self.KS, 'GT':self.GT, "GT'":self.GTprime, + models={'KS':self.KS, 'GT':self.GT, 'GT_prime':self.GTprime, 'NW':self.NW, 'Pitsch': self.Pitsch, 'Bain':self.Bain} try: relationship = models[model] @@ -1046,13 +1058,13 @@ class Lattice: for miller in np.hstack((relationship['planes'],relationship['directions'])): myPlane = miller[myPlane_id]/ np.linalg.norm(miller[myPlane_id]) myDir = miller[myDir_id]/ np.linalg.norm(miller[myDir_id]) - myMatrix = np.array([myDir,np.cross(myPlane,myDir),myPlane]).T + myMatrix = np.array([myDir,np.cross(myPlane,myDir),myPlane]) otherPlane = miller[otherPlane_id]/ np.linalg.norm(miller[otherPlane_id]) otherDir = miller[otherDir_id]/ np.linalg.norm(miller[otherDir_id]) - otherMatrix = np.array([otherDir,np.cross(otherPlane,otherDir),otherPlane]).T + otherMatrix = np.array([otherDir,np.cross(otherPlane,otherDir),otherPlane]) - r['rotations'].append(Rotation.fromMatrix(np.dot(otherMatrix,myMatrix.T))) + r['rotations'].append(Rotation.fromMatrix(np.dot(otherMatrix.T,myMatrix))) return r @@ -1126,10 +1138,9 @@ class Orientation: return (Orientation(r,self.lattice), i,j, k == 1) if symmetries else r # disorientation ... # ... 
own sym, other sym, # self-->other: True, self<--other: False - - def inFZ(self): return self.lattice.symmetry.inFZ(self.rotation.asRodrigues(vector=True)) + def equivalentOrientations(self,members=[]): """List of orientations which are symmetrically equivalent.""" @@ -1144,7 +1155,8 @@ class Orientation: def relatedOrientations(self,model): """List of orientations related by the given orientation relationship.""" r = self.lattice.relationOperations(model) - return [self.__class__(self.rotation*o,r['lattice']) for o in r['rotations']] + return [self.__class__(o*self.rotation,r['lattice']) for o in r['rotations']] + def reduced(self): """Transform orientation to fall into fundamental zone according to symmetry.""" @@ -1152,7 +1164,8 @@ class Orientation: if self.lattice.symmetry.inFZ(me.rotation.asRodrigues(vector=True)): break return self.__class__(me.rotation,self.lattice) - + + def inversePole(self, axis, proper = False, @@ -1192,9 +1205,9 @@ class Orientation: ref = orientations[0] for o in orientations: closest.append(o.equivalentOrientations( - ref.disorientation(o, - SST = False, # select (o[ther]'s) sym orientation - symmetries = True)[2]).rotation) # with lowest misorientation + ref.disorientation(o, + SST = False, # select (o[ther]'s) sym orientation + symmetries = True)[2]).rotation) # with lowest misorientation return Orientation(Rotation.fromAverage(closest,weights),ref.lattice) diff --git a/python/damask/table.py b/python/damask/table.py new file mode 100644 index 000000000..b3dfc2433 --- /dev/null +++ b/python/damask/table.py @@ -0,0 +1,343 @@ +import re + +import pandas as pd +import numpy as np + +from . import version + +class Table(): + """Store spreadsheet-like data.""" + + def __init__(self,data,shapes,comments=None): + """ + New spreadsheet. + + Parameters + ---------- + data : numpy.ndarray + Data. + shapes : dict with str:tuple pairs + Shapes of the columns. Example 'F':(3,3) for a deformation gradient. + comments : iterable of str, optional + Additional, human-readable information. + + """ + self.comments = [] if comments is None else [c for c in comments] + self.data = pd.DataFrame(data=data) + self.shapes = shapes + self.__label_condensed() + + + def __label_flat(self): + """Label data individually, e.g. v v v ==> 1_v 2_v 3_v.""" + labels = [] + for label,shape in self.shapes.items(): + size = np.prod(shape) + labels += ['{}{}'.format('' if size == 1 else '{}_'.format(i+1),label) for i in range(size)] + self.data.columns = labels + + + def __label_condensed(self): + """Label data condensed, e.g. 1_v 2_v 3_v ==> v v v.""" + labels = [] + for label,shape in self.shapes.items(): + labels += [label] * np.prod(shape) + self.data.columns = labels + + + def __add_comment(self,label,shape,info): + if info is not None: + self.comments.append('{}{}: {}'.format(label, + ' '+str(shape) if np.prod(shape,dtype=int) > 1 else '', + info)) + + + @staticmethod + def from_ASCII(fname): + """ + Create table from ASCII file. + + The first line needs to indicate the number of subsequent header lines as 'n header'. + Vector data column labels are indicated by '1_v, 2_v, ..., n_v'. + Tensor data column labels are indicated by '3x3:1_T, 3x3:2_T, ..., 3x3:9_T'. + + Parameters + ---------- + fname : file, str, or pathlib.Path + Filename or file for reading. 
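# -----------------------------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of the patch): creating a Table, writing it in
# the ASCII format described above, and reading it back from a buffer.
from io import StringIO
import numpy as np
import damask

t = damask.Table(np.arange(6).reshape(2,3),{'pos':(3,)},comments=['an example table'])
buf = StringIO()
t.to_ASCII(buf)
buf.seek(0)
t2 = damask.Table.from_ASCII(buf)
assert np.allclose(t.get('pos'),t2.get('pos'))
# -----------------------------------------------------------------------------------------------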
+ + """ + try: + f = open(fname) + except TypeError: + f = fname + f.seek(0) + + header,keyword = f.readline().split() + if keyword == 'header': + header = int(header) + else: + raise TypeError + + comments = [f.readline()[:-1] for i in range(1,header)] + labels = f.readline().split() + + shapes = {} + for label in labels: + tensor_column = re.search(r'[0-9,x]*?:[0-9]*?_',label) + if tensor_column: + my_shape = tensor_column.group().split(':',1)[0].split('x') + shapes[label.split('_',1)[1]] = tuple([int(d) for d in my_shape]) + else: + vector_column = re.match(r'[0-9]*?_',label) + if vector_column: + shapes[label.split('_',1)[1]] = (int(label.split('_',1)[0]),) + else: + shapes[label] = (1,) + + data = pd.read_csv(f,names=list(range(len(labels))),sep=r'\s+').to_numpy() + + return Table(data,shapes,comments) + + @staticmethod + def from_ang(fname): + """ + Create table from TSL ang file. + + A valid TSL ang file needs to contains the following columns: + * Euler angles (Bunge notation) in radians, 3 floats, label 'eu'. + * Spatial position in meters, 2 floats, label 'pos'. + * Image quality, 1 float, label 'IQ'. + * Confidence index, 1 float, label 'CI'. + * Phase ID, 1 int, label 'ID'. + * SEM signal, 1 float, label 'intensity'. + * Fit, 1 float, label 'fit'. + + Parameters + ---------- + fname : file, str, or pathlib.Path + Filename or file for reading. + + """ + shapes = {'eu':(3,), 'pos':(2,), + 'IQ':(1,), 'CI':(1,), 'ID':(1,), 'intensity':(1,), 'fit':(1,)} + try: + f = open(fname) + except TypeError: + f = fname + f.seek(0) + + content = f.readlines() + + comments = ['table.py:from_ang v {}'.format(version)] + for line in content: + if line.startswith('#'): + comments.append(line.strip()) + else: + break + + data = np.loadtxt(content) + for c in range(data.shape[1]-10): + shapes['n/a_{}'.format(c+1)] = (1,) + + return Table(data,shapes,comments) + + + @property + def labels(self): + return list(self.shapes.keys()) + + + def get(self,label): + """ + Get column data. + + Parameters + ---------- + label : str + Column label. + + """ + if re.match(r'[0-9]*?_',label): + idx,key = label.split('_',1) + data = self.data[key].to_numpy()[:,int(idx)-1].reshape((-1,1)) + else: + data = self.data[label].to_numpy().reshape((-1,)+self.shapes[label]) + + return data.astype(type(data.flatten()[0])) + + + def set(self,label,data,info=None): + """ + Set column data. + + Parameters + ---------- + label : str + Column label. + data : np.ndarray + New data. + info : str, optional + Human-readable information about the new data. + + """ + self.__add_comment(label,data.shape[1:],info) + + if re.match(r'[0-9]*?_',label): + idx,key = label.split('_',1) + iloc = self.data.columns.get_loc(key).tolist().index(True) + int(idx) -1 + self.data.iloc[:,iloc] = data + else: + self.data[label] = data.reshape(self.data[label].shape) + + + def add(self,label,data,info=None): + """ + Add column data. + + Parameters + ---------- + label : str + Column label. + data : np.ndarray + Modified data. + info : str, optional + Human-readable information about the modified data. + + """ + self.__add_comment(label,data.shape[1:],info) + + self.shapes[label] = data.shape[1:] if len(data.shape) > 1 else (1,) + size = np.prod(data.shape[1:],dtype=int) + new = pd.DataFrame(data=data.reshape(-1,size), + columns=[label]*size, + ) + new.index = self.data.index + self.data = pd.concat([self.data,new],axis=1) + + + def delete(self,label): + """ + Delete column data. + + Parameters + ---------- + label : str + Column label. 
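# -----------------------------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of the patch): adding, sorting by, renaming,
# and deleting columns; data values are arbitrary examples.
import numpy as np
import damask

t = damask.Table(np.random.random((5,3)),{'v':(3,)})
t.add('norm',np.linalg.norm(t.get('v'),axis=1,keepdims=True),info='Euclidean norm of v')
t.sort_by(['norm'],ascending=False)
t.rename('norm','|v|')
t.delete('v')
# -----------------------------------------------------------------------------------------------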
+ + """ + self.data.drop(columns=label,inplace=True) + + del self.shapes[label] + + + def rename(self,label_old,label_new,info=None): + """ + Rename column data. + + Parameters + ---------- + label_old : str + Old column label. + label_new : str + New column label. + + """ + self.data.rename(columns={label_old:label_new},inplace=True) + + self.comments.append('{} => {}{}'.format(label_old, + label_new, + '' if info is None else ': {}'.format(info), + )) + + self.shapes = {(label if label != label_old else label_new):self.shapes[label] for label in self.shapes} + + + def sort_by(self,labels,ascending=True): + """ + Sort table by values of given labels. + + Parameters + ---------- + label : str or list + Column labels for sorting. + ascending : bool or list, optional + Set sort order. + + """ + self.__label_flat() + self.data.sort_values(labels,axis=0,inplace=True,ascending=ascending) + self.__label_condensed() + self.comments.append('sorted by [{}]'.format(', '.join(labels))) + + + def append(self,other): + """ + Append other table vertically (similar to numpy.vstack). + + Requires matching labels/shapes and order. + + Parameters + ---------- + other : Table + Table to append + + """ + if self.shapes != other.shapes or not self.data.columns.equals(other.data.columns): + raise KeyError('Labels or shapes or order do not match') + else: + self.data = self.data.append(other.data,ignore_index=True) + + + def join(self,other): + """ + Append other table horizontally (similar to numpy.hstack). + + Requires matching number of rows and no common labels. + + Parameters + ---------- + other : Table + Table to join + + """ + if set(self.shapes) & set(other.shapes) or self.data.shape[0] != other.data.shape[0]: + raise KeyError('Dublicated keys or row count mismatch') + else: + self.data = self.data.join(other.data) + for key in other.shapes: + self.shapes[key] = other.shapes[key] + + + def to_ASCII(self,fname): + """ + Store as plain text file. + + Parameters + ---------- + fname : file, str, or pathlib.Path + Filename or file for reading. + + """ + seen = set() + labels = [] + for l in [x for x in self.data.columns if not (x in seen or seen.add(x))]: + if(self.shapes[l] == (1,)): + labels.append('{}'.format(l)) + elif(len(self.shapes[l]) == 1): + labels += ['{}_{}'.format(i+1,l) \ + for i in range(self.shapes[l][0])] + else: + labels += ['{}:{}_{}'.format('x'.join([str(d) for d in self.shapes[l]]),i+1,l) \ + for i in range(np.prod(self.shapes[l],dtype=int))] + + header = ['{} header'.format(len(self.comments)+1)] \ + + self.comments \ + + [' '.join(labels)] + + try: + f = open(fname,'w') + except TypeError: + f = fname + for line in header: f.write(line+'\n') + self.data.to_csv(f,sep=' ',index=False,header=False) diff --git a/python/damask/util.py b/python/damask/util.py index 63b9aed65..0065daba5 100644 --- a/python/damask/util.py +++ b/python/damask/util.py @@ -7,9 +7,6 @@ from optparse import Option from queue import Queue from threading import Thread - -import numpy as np - class bcolors: """ ASCII Colors (Blender code). 
@@ -64,19 +61,6 @@ def report(who = None, croak( (emph(who)+': ' if who is not None else '') + (what if what is not None else '') + '\n' ) -# ----------------------------- -def report_geom(info, - what = ['grid','size','origin','homogenization','microstructures']): - """Reports (selected) geometry information.""" - output = { - 'grid' : 'grid a b c: {}'.format(' x '.join(list(map(str,info['grid' ])))), - 'size' : 'size x y z: {}'.format(' x '.join(list(map(str,info['size' ])))), - 'origin' : 'origin x y z: {}'.format(' : '.join(list(map(str,info['origin'])))), - 'homogenization' : 'homogenization: {}'.format(info['homogenization']), - 'microstructures' : 'microstructures: {}'.format(info['microstructures']), - } - for item in what: croak(output[item.lower()]) - # ----------------------------- def emph(what): """Formats string with emphasis.""" @@ -119,30 +103,6 @@ def execute(cmd, if process.returncode != 0: raise RuntimeError('{} failed with returncode {}'.format(cmd,process.returncode)) return out,error -def coordGridAndSize(coordinates): - """Determines grid count and overall physical size along each dimension of an ordered array of coordinates.""" - dim = coordinates.shape[1] - coords = [np.unique(coordinates[:,i]) for i in range(dim)] - mincorner = np.array(list(map(min,coords))) - maxcorner = np.array(list(map(max,coords))) - grid = np.array(list(map(len,coords)),'i') - size = grid/np.maximum(np.ones(dim,'d'), grid-1.0) * (maxcorner-mincorner) # size from edge to edge = dim * n/(n-1) - size = np.where(grid > 1, size, min(size[grid > 1]/grid[grid > 1])) # spacing for grid==1 equal to smallest among other ones - delta = size/grid - - N = grid.prod() - - if N != len(coordinates): - raise ValueError('Data count {} does not match grid {}.'.format(len(coordinates),' x '.join(map(repr,grid)))) - - if np.any(np.abs(np.log10((coords[0][1:]-coords[0][:-1])/delta[0])) > 0.01) \ - or np.any(np.abs(np.log10((coords[1][1:]-coords[1][:-1])/delta[1])) > 0.01): - raise ValueError('regular grid spacing {} violated.'.format(' x '.join(map(repr,delta)))) - if dim==3 and np.any(np.abs(np.log10((coords[2][1:]-coords[2][:-1])/delta[2])) > 0.01): - raise ValueError('regular grid spacing {} violated.'.format(' x '.join(map(repr,delta)))) - - return grid,size - # ----------------------------- class extendableOption(Option): """ @@ -221,263 +181,6 @@ class return_message(): return srepr(self.message) -def leastsqBound(func, x0, args=(), bounds=None, Dfun=None, full_output=0, - col_deriv=0, ftol=1.49012e-8, xtol=1.49012e-8, - gtol=0.0, maxfev=0, epsfcn=None, factor=100, diag=None): - from scipy.optimize import _minpack - """ - Non-linear least square fitting (Levenberg-Marquardt method) with - bounded parameters. - the codes of transformation between int <-> ext refers to the work of - Jonathan J. Helmus: https://github.com/jjhelmus/leastsqbound-scipy - other codes refer to the source code of minpack.py: - - An internal parameter list is used to enforce contraints on the fitting - parameters. The transfomation is based on that of MINUIT package. - please see: F. James and M. Winkler. MINUIT User's Guide, 2004. - - bounds : list - (min, max) pairs for each parameter, use None for 'min' or 'max' - when there is no bound in that direction. - For example: if there are two parameters needed to be fitting, then - bounds is [(min1,max1), (min2,max2)] - - This function is based on 'leastsq' of minpack.py, the annotation of - other parameters can be found in 'least_squares.py'. 
- """ - - def _check_func(checker, argname, thefunc, x0, args, numinputs, - output_shape=None): - from numpy import shape - """The same as that of minpack.py""" - res = np.atleast_1d(thefunc(*((x0[:numinputs],) + args))) - if (output_shape is not None) and (shape(res) != output_shape): - if (output_shape[0] != 1): - if len(output_shape) > 1: - if output_shape[1] == 1: - return shape(res) - msg = "%s: there is a mismatch between the input and output " \ - "shape of the '%s' argument" % (checker, argname) - func_name = getattr(thefunc, '__name__', None) - if func_name: - msg += " '%s'." % func_name - else: - msg += "." - raise TypeError(msg) - if np.issubdtype(res.dtype, np.inexact): - dt = res.dtype - else: - dt = dtype(float) - return shape(res), dt - - def _int2extGrad(p_int, bounds): - """Calculate the gradients of transforming the internal (unconstrained) to external (constrained) parameter.""" - grad = np.empty_like(p_int) - for i, (x, bound) in enumerate(zip(p_int, bounds)): - lower, upper = bound - if lower is None and upper is None: # No constraints - grad[i] = 1.0 - elif upper is None: # only lower bound - grad[i] = x/np.sqrt(x*x + 1.0) - elif lower is None: # only upper bound - grad[i] = -x/np.sqrt(x*x + 1.0) - else: # lower and upper bounds - grad[i] = (upper - lower)*np.cos(x)/2.0 - return grad - - def _int2extFunc(bounds): - """Transform internal parameters into external parameters.""" - local = [_int2extLocal(b) for b in bounds] - - def _transform_i2e(p_int): - p_ext = np.empty_like(p_int) - p_ext[:] = [i(j) for i, j in zip(local, p_int)] - return p_ext - return _transform_i2e - - def _ext2intFunc(bounds): - """Transform external parameters into internal parameters.""" - local = [_ext2intLocal(b) for b in bounds] - - def _transform_e2i(p_ext): - p_int = np.empty_like(p_ext) - p_int[:] = [i(j) for i, j in zip(local, p_ext)] - return p_int - return _transform_e2i - - def _int2extLocal(bound): - """Transform a single internal parameter to an external parameter.""" - lower, upper = bound - if lower is None and upper is None: # no constraints - return lambda x: x - elif upper is None: # only lower bound - return lambda x: lower - 1.0 + np.sqrt(x*x + 1.0) - elif lower is None: # only upper bound - return lambda x: upper + 1.0 - np.sqrt(x*x + 1.0) - else: - return lambda x: lower + ((upper - lower)/2.0)*(np.sin(x) + 1.0) - - def _ext2intLocal(bound): - """Transform a single external parameter to an internal parameter.""" - lower, upper = bound - if lower is None and upper is None: # no constraints - return lambda x: x - elif upper is None: # only lower bound - return lambda x: np.sqrt((x - lower + 1.0)**2 - 1.0) - elif lower is None: # only upper bound - return lambda x: np.sqrt((x - upper - 1.0)**2 - 1.0) - else: - return lambda x: np.arcsin((2.0*(x - lower)/(upper - lower)) - 1.0) - - i2e = _int2extFunc(bounds) - e2i = _ext2intFunc(bounds) - - x0 = np.asarray(x0).flatten() - n = len(x0) - - if len(bounds) != n: - raise ValueError('the length of bounds is inconsistent with the number of parameters ') - - if not isinstance(args, tuple): - args = (args,) - - shape, dtype = _check_func('leastsq', 'func', func, x0, args, n) - m = shape[0] - - if n > m: - raise TypeError('Improper input: N=%s must not exceed M=%s' % (n, m)) - if epsfcn is None: - epsfcn = np.finfo(dtype).eps - - def funcWarp(x, *args): - return func(i2e(x), *args) - - xi0 = e2i(x0) - - if Dfun is None: - if maxfev == 0: - maxfev = 200*(n + 1) - retval = _minpack._lmdif(funcWarp, xi0, args, full_output, ftol, xtol, - 
gtol, maxfev, epsfcn, factor, diag) - else: - if col_deriv: - _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (n, m)) - else: - _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (m, n)) - if maxfev == 0: - maxfev = 100*(n + 1) - - def DfunWarp(x, *args): - return Dfun(i2e(x), *args) - - retval = _minpack._lmder(funcWarp, DfunWarp, xi0, args, full_output, col_deriv, - ftol, xtol, gtol, maxfev, factor, diag) - - errors = {0: ["Improper input parameters.", TypeError], - 1: ["Both actual and predicted relative reductions " - "in the sum of squares\n are at most %f" % ftol, None], - 2: ["The relative error between two consecutive " - "iterates is at most %f" % xtol, None], - 3: ["Both actual and predicted relative reductions in " - "the sum of squares\n are at most %f and the " - "relative error between two consecutive " - "iterates is at \n most %f" % (ftol, xtol), None], - 4: ["The cosine of the angle between func(x) and any " - "column of the\n Jacobian is at most %f in " - "absolute value" % gtol, None], - 5: ["Number of calls to function has reached " - "maxfev = %d." % maxfev, ValueError], - 6: ["ftol=%f is too small, no further reduction " - "in the sum of squares\n is possible.""" % ftol, - ValueError], - 7: ["xtol=%f is too small, no further improvement in " - "the approximate\n solution is possible." % xtol, - ValueError], - 8: ["gtol=%f is too small, func(x) is orthogonal to the " - "columns of\n the Jacobian to machine " - "precision." % gtol, ValueError], - 'unknown': ["Unknown error.", TypeError]} - - info = retval[-1] # The FORTRAN return value - - if info not in [1, 2, 3, 4] and not full_output: - if info in [5, 6, 7, 8]: - np.warnings.warn(errors[info][0], RuntimeWarning) - else: - try: - raise errors[info][1](errors[info][0]) - except KeyError: - raise errors['unknown'][1](errors['unknown'][0]) - - mesg = errors[info][0] - x = i2e(retval[0]) - - if full_output: - grad = _int2extGrad(retval[0], bounds) - retval[1]['fjac'] = (retval[1]['fjac'].T / np.take(grad, - retval[1]['ipvt'] - 1)).T - cov_x = None - if info in [1, 2, 3, 4]: - from numpy.dual import inv - from numpy.linalg import LinAlgError - perm = np.take(np.eye(n), retval[1]['ipvt'] - 1, 0) - r = np.triu(np.transpose(retval[1]['fjac'])[:n, :]) - R = np.dot(r, perm) - try: - cov_x = inv(np.dot(np.transpose(R), R)) - except LinAlgError as inverror: - print(inverror) - pass - return (x, cov_x) + retval[1:-1] + (mesg, info) - else: - return (x, info) - -def _general_function(params, ydata, xdata, function): - return function(xdata, *params) - ydata -def _weighted_general_function(params, ydata, xdata, function, weights): - return (function(xdata, *params) - ydata)*weights - -def curve_fit_bound(f, xdata, ydata, p0=None, sigma=None, bounds=None, **kw): - """Similar as 'curve_fit' in minpack.py.""" - if p0 is None: - # determine number of parameters by inspecting the function - import inspect - args, varargs, varkw, defaults = inspect.getargspec(f) - if len(args) < 2: - msg = "Unable to determine number of fit parameters." 
- raise ValueError(msg) - if 'self' in args: - p0 = [1.0] * (len(args)-2) - else: - p0 = [1.0] * (len(args)-1) - - if np.isscalar(p0): - p0 = np.array([p0]) - - args = (ydata, xdata, f) - if sigma is None: - func = _general_function - else: - func = _weighted_general_function - args += (1.0/np.asarray(sigma),) - - return_full = kw.pop('full_output', False) - res = leastsqBound(func, p0, args=args, bounds = bounds, full_output=True, **kw) - (popt, pcov, infodict, errmsg, ier) = res - - if ier not in [1, 2, 3, 4]: - msg = "Optimal parameters not found: " + errmsg - raise RuntimeError(msg) - - if (len(ydata) > len(p0)) and pcov is not None: - s_sq = (func(popt, *args)**2).sum()/(len(ydata)-len(p0)) - pcov = pcov * s_sq - else: - pcov = np.inf - - return (popt, pcov, infodict, errmsg, ier) if return_full else (popt, pcov) - - class ThreadPool: """Pool of threads consuming tasks from a queue.""" diff --git a/python/setup.py b/python/setup.py index 515401c59..def343ec1 100644 --- a/python/setup.py +++ b/python/setup.py @@ -15,6 +15,7 @@ setuptools.setup( packages=setuptools.find_packages(), include_package_data=True, install_requires = [ + "pandas", "scipy", "h5py", "vtk" diff --git a/python/tests/conftest.py b/python/tests/conftest.py new file mode 100644 index 000000000..a0504c499 --- /dev/null +++ b/python/tests/conftest.py @@ -0,0 +1,21 @@ +import os + +import pytest + +import damask + +def pytest_addoption(parser): + parser.addoption("--update", + action="store_true", + default=False) + +@pytest.fixture +def update(request): + """Store current results as new reference results.""" + return request.config.getoption("--update") + +@pytest.fixture +def reference_dir_base(): + """Directory containing reference results.""" + env = damask.Environment() + return os.path.join(env.rootDir(),'python','tests','reference') diff --git a/python/tests/reference/DADF5/12grains6x7x8.geom b/python/tests/reference/DADF5/12grains6x7x8.geom new file mode 100644 index 000000000..7bb4a3e4d --- /dev/null +++ b/python/tests/reference/DADF5/12grains6x7x8.geom @@ -0,0 +1,125 @@ +68 header +geom_fromVoronoiTessellation 2.0.3-1073-g6f3cb071 + +[Grain1] +(gauss) phi1 358.98 Phi 65.62 phi2 24.48 +[Grain2] +(gauss) phi1 121.05 Phi 176.11 phi2 295.73 +[Grain3] +(gauss) phi1 43.79 Phi 113.76 phi2 345.90 +[Grain4] +(gauss) phi1 265.15 Phi 62.52 phi2 299.71 +[Grain5] +(gauss) phi1 221.23 Phi 26.54 phi2 207.05 +[Grain6] +(gauss) phi1 249.81 Phi 61.47 phi2 152.14 +[Grain7] +(gauss) phi1 332.45 Phi 99.16 phi2 345.34 +[Grain8] +(gauss) phi1 312.27 Phi 118.27 phi2 181.59 +[Grain9] +(gauss) phi1 303.10 Phi 48.21 phi2 358.03 +[Grain10] +(gauss) phi1 338.26 Phi 48.11 phi2 176.78 +[Grain11] +(gauss) phi1 115.17 Phi 56.54 phi2 223.84 +[Grain12] +(gauss) phi1 281.04 Phi 97.48 phi2 27.94 + +[Grain1] +crystallite 1 +(constituent) phase 1 texture 1 fraction 1.0 +[Grain2] +crystallite 1 +(constituent) phase 1 texture 2 fraction 1.0 +[Grain3] +crystallite 1 +(constituent) phase 1 texture 3 fraction 1.0 +[Grain4] +crystallite 1 +(constituent) phase 1 texture 4 fraction 1.0 +[Grain5] +crystallite 1 +(constituent) phase 1 texture 5 fraction 1.0 +[Grain6] +crystallite 1 +(constituent) phase 1 texture 6 fraction 1.0 +[Grain7] +crystallite 1 +(constituent) phase 1 texture 7 fraction 1.0 +[Grain8] +crystallite 1 +(constituent) phase 1 texture 8 fraction 1.0 +[Grain9] +crystallite 1 +(constituent) phase 1 texture 9 fraction 1.0 +[Grain10] +crystallite 1 +(constituent) phase 1 texture 10 fraction 1.0 +[Grain11] +crystallite 1 +(constituent) phase 1 
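(Aside, not part of the patch: the --update option and the update/reference_dir_base fixtures added in conftest.py above follow the usual pytest pattern of regenerating reference results on demand. A hypothetical test consuming them might look like the sketch below; the subdirectory and file names are made up for illustration only.)

    import os

    def test_against_reference(update, reference_dir_base):
        """Sketch: compare a computed result against a stored reference, or refresh it with --update."""
        reference_dir  = os.path.join(reference_dir_base,'Example')   # hypothetical subfolder
        reference_file = os.path.join(reference_dir,'result.txt')     # hypothetical file name
        result = 'computed output\n'                                  # stand-in for a real computation

        if update:                                                    # pytest --update rewrites the reference
            with open(reference_file,'w') as f:
                f.write(result)
        with open(reference_file) as f:                               # default: assert against stored reference
            assert f.read() == result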
texture 11 fraction 1.0 +[Grain12] +crystallite 1 +(constituent) phase 1 texture 12 fraction 1.0 + +grid a 6 b 7 c 8 +size x 0.75 y 0.875 z 1.0 +origin x 0.0 y 0.0 z 0.0 +homogenization 1 + 9 3 3 10 9 9 + 9 1 1 1 9 9 + 9 11 1 1 7 9 + 7 11 11 7 7 7 + 7 11 11 7 7 7 +12 3 3 10 7 12 +12 3 3 10 10 12 +12 3 3 1 9 9 + 9 1 1 1 9 9 + 9 1 1 1 7 7 + 7 1 1 7 7 7 +12 12 3 7 7 7 +12 3 3 3 12 12 +12 3 3 3 12 12 +12 3 3 1 1 12 + 9 1 1 1 1 9 + 6 1 1 1 8 8 + 7 6 8 8 8 8 +12 12 8 8 8 12 +12 3 3 3 12 12 +12 3 3 3 12 12 + 5 6 6 6 1 12 + 6 6 6 6 8 8 + 6 6 6 8 8 8 + 8 6 8 8 8 8 +12 5 8 8 8 8 +12 5 5 8 8 12 + 5 5 5 3 12 12 + 5 5 6 6 6 5 + 6 6 6 6 6 6 + 6 6 6 6 8 8 + 4 4 6 8 8 8 + 4 4 2 2 2 8 + 5 5 5 2 2 2 + 5 5 5 5 2 5 + 5 5 5 10 10 5 + 6 6 6 6 10 4 + 4 4 11 11 2 4 + 4 4 11 2 2 4 + 4 4 2 2 2 2 + 5 5 5 2 2 2 + 5 5 5 10 10 5 + 5 5 10 10 10 9 + 4 11 11 11 10 9 + 4 4 11 11 11 4 + 4 4 11 11 2 4 + 4 4 2 2 2 2 + 5 5 2 2 2 2 + 5 5 10 10 10 10 + 9 10 10 10 10 9 + 9 11 11 10 9 9 + 4 11 11 11 9 9 + 4 11 11 11 7 7 + 4 4 11 2 7 7 +12 10 10 10 10 7 + 9 10 10 10 10 9 diff --git a/python/tests/reference/DADF5/12grains6x7x8_tensionY.hdf5 b/python/tests/reference/DADF5/12grains6x7x8_tensionY.hdf5 new file mode 100644 index 000000000..39c17fadb Binary files /dev/null and b/python/tests/reference/DADF5/12grains6x7x8_tensionY.hdf5 differ diff --git a/python/tests/reference/DADF5/material.config b/python/tests/reference/DADF5/material.config new file mode 100644 index 000000000..0b0808a12 --- /dev/null +++ b/python/tests/reference/DADF5/material.config @@ -0,0 +1,129 @@ + +[none] +mech none +ngrains 1 + + +[Grain1] +(gauss) phi1 358.98 Phi 65.62 phi2 24.48 +[Grain2] +(gauss) phi1 121.05 Phi 176.11 phi2 295.73 +[Grain3] +(gauss) phi1 43.79 Phi 113.76 phi2 345.90 +[Grain4] +(gauss) phi1 265.15 Phi 62.52 phi2 299.71 +[Grain5] +(gauss) phi1 221.23 Phi 26.54 phi2 207.05 +[Grain6] +(gauss) phi1 249.81 Phi 61.47 phi2 152.14 +[Grain7] +(gauss) phi1 332.45 Phi 99.16 phi2 345.34 +[Grain8] +(gauss) phi1 312.27 Phi 118.27 phi2 181.59 +[Grain9] +(gauss) phi1 303.10 Phi 48.21 phi2 358.03 +[Grain10] +(gauss) phi1 338.26 Phi 48.11 phi2 176.78 +[Grain11] +(gauss) phi1 115.17 Phi 56.54 phi2 223.84 +[Grain12] +(gauss) phi1 281.04 Phi 97.48 phi2 27.94 + + +[Grain1] +crystallite 1 +(constituent) phase 1 texture 1 fraction 1.0 +[Grain2] +crystallite 1 +(constituent) phase 1 texture 2 fraction 1.0 +[Grain3] +crystallite 1 +(constituent) phase 1 texture 3 fraction 1.0 +[Grain4] +crystallite 1 +(constituent) phase 1 texture 4 fraction 1.0 +[Grain5] +crystallite 1 +(constituent) phase 1 texture 5 fraction 1.0 +[Grain6] +crystallite 1 +(constituent) phase 1 texture 6 fraction 1.0 +[Grain7] +crystallite 1 +(constituent) phase 2 texture 7 fraction 1.0 +[Grain8] +crystallite 1 +(constituent) phase 2 texture 8 fraction 1.0 +[Grain9] +crystallite 1 +(constituent) phase 2 texture 9 fraction 1.0 +[Grain10] +crystallite 1 +(constituent) phase 2 texture 10 fraction 1.0 +[Grain11] +crystallite 1 +(constituent) phase 2 texture 11 fraction 1.0 +[Grain12] +crystallite 1 +(constituent) phase 2 texture 12 fraction 1.0 + + +[pheno_fcc] +elasticity hooke +plasticity phenopowerlaw + +(output) orientation # quaternion +(output) F # deformation gradient tensor +(output) Fe # elastic deformation gradient tensor +(output) Fp # plastic deformation gradient tensor +(output) P # first Piola-Kichhoff stress tensor +(output) Lp # plastic velocity gradient tensor + + +lattice_structure fcc +Nslip 12 # per family +Ntwin 0 # per family + +c11 106.75e9 +c12 60.41e9 +c44 28.34e9 + +gdot0_slip 
0.001 +n_slip 20 +tau0_slip 31e6 # per family +tausat_slip 63e6 # per family +a_slip 2.25 +h0_slipslip 75e6 +interaction_slipslip 1 1 1.4 1.4 1.4 1.4 +atol_resistance 1 + +[pheno_bcc] +elasticity hooke +plasticity phenopowerlaw + +(output) orientation # quaternion +(output) F # deformation gradient tensor +(output) Fe # elastic deformation gradient tensor +(output) Fp # plastic deformation gradient tensor +(output) P # first Piola-Kichhoff stress tensor +(output) Lp # plastic velocity gradient tensor + + +lattice_structure bcc +Nslip 12 # per family + +c11 106.75e9 +c12 60.41e9 +c44 28.34e9 + +gdot0_slip 0.001 +n_slip 20 +tau0_slip 31e6 # per family +tausat_slip 63e6 # per family +a_slip 2.25 +h0_slipslip 75e6 +interaction_slipslip 1 1 1.4 1.4 1.4 1.4 +atol_resistance 1 + + +[dummy] diff --git a/python/tests/reference/DADF5/tensionY.load b/python/tests/reference/DADF5/tensionY.load new file mode 100644 index 000000000..9332144d8 --- /dev/null +++ b/python/tests/reference/DADF5/tensionY.load @@ -0,0 +1 @@ +fdot * 0 0 0 1.0e-3 0 0 0 * stress 0 * * * * * * * 0 time 20 incs 40 freq 4 diff --git a/python/tests/reference/Geom/clean_stencil=1.geom b/python/tests/reference/Geom/clean_stencil=1.geom new file mode 100644 index 000000000..2fe5314fe --- /dev/null +++ b/python/tests/reference/Geom/clean_stencil=1.geom @@ -0,0 +1,25 @@ +4 header +grid a 8 b 5 c 4 +size x 8e-06 y 5e-06 z 4e-06 +origin x 0.0 y 0.0 z 0.0 +homogenization 1 + 1 1 2 22 2 2 1 21 + 1 1 6 26 2 2 5 25 + 1 1 10 30 2 2 9 29 + 1 1 14 34 2 2 13 33 + 1 1 18 38 2 2 17 37 + 1 1 3 23 2 2 2 22 + 1 1 7 27 2 2 6 26 + 1 1 11 31 2 2 10 30 + 1 1 15 35 2 2 14 34 + 1 1 19 39 2 2 18 38 + 1 1 4 24 2 2 3 23 + 1 1 8 28 2 2 7 27 + 1 1 12 32 2 2 11 31 + 1 1 16 36 2 2 15 35 + 1 1 20 40 2 2 19 39 + 1 1 5 25 2 2 4 24 + 1 1 9 29 2 2 8 28 + 1 1 13 33 2 2 12 32 + 1 1 17 37 2 2 16 36 + 1 1 21 41 2 2 20 40 diff --git a/python/tests/reference/Geom/clean_stencil=2.geom b/python/tests/reference/Geom/clean_stencil=2.geom new file mode 100644 index 000000000..f074fea56 --- /dev/null +++ b/python/tests/reference/Geom/clean_stencil=2.geom @@ -0,0 +1,25 @@ +4 header +grid a 8 b 5 c 4 +size x 8e-06 y 5e-06 z 4e-06 +origin x 0.0 y 0.0 z 0.0 +homogenization 1 + 1 1 1 2 2 2 1 1 + 1 1 1 2 2 2 2 1 + 1 1 1 6 2 2 2 5 + 1 1 1 10 2 2 2 9 + 1 1 1 14 2 2 2 13 + 1 1 1 2 2 2 2 1 + 1 1 1 2 2 2 2 1 + 1 1 1 6 2 2 2 5 + 1 1 1 10 2 2 2 9 + 1 1 1 14 2 2 2 13 + 1 1 1 3 2 2 2 2 + 1 1 1 3 2 2 2 2 + 1 1 1 7 2 2 2 6 + 1 1 1 11 2 2 2 10 + 1 1 1 15 2 2 2 14 + 1 1 1 4 2 2 2 3 + 1 1 1 4 2 2 2 3 + 1 1 1 8 2 2 2 7 + 1 1 1 12 2 2 2 11 + 1 1 1 16 2 2 2 15 diff --git a/python/tests/reference/Geom/clean_stencil=3.geom b/python/tests/reference/Geom/clean_stencil=3.geom new file mode 100644 index 000000000..63e1dce5a --- /dev/null +++ b/python/tests/reference/Geom/clean_stencil=3.geom @@ -0,0 +1,25 @@ +4 header +grid a 8 b 5 c 4 +size x 8e-06 y 5e-06 z 4e-06 +origin x 0.0 y 0.0 z 0.0 +homogenization 1 + 1 1 1 2 2 2 2 21 + 1 1 1 2 2 2 2 21 + 1 1 1 2 2 2 2 25 + 1 1 1 2 2 2 2 29 + 1 1 1 2 2 2 2 37 + 1 1 1 2 2 2 2 21 + 1 1 1 2 2 2 2 21 + 1 1 1 2 2 2 2 25 + 1 1 1 2 2 2 2 29 + 1 1 1 2 2 2 2 37 + 1 1 1 2 2 2 2 22 + 1 1 1 2 2 2 2 22 + 1 1 1 2 2 2 2 26 + 1 1 1 2 2 2 2 30 + 1 1 1 2 2 2 2 38 + 1 1 1 2 2 2 2 24 + 1 1 1 2 2 2 2 24 + 1 1 1 2 2 2 2 28 + 1 1 1 2 2 2 2 32 + 1 1 1 2 2 2 2 40 diff --git a/python/tests/reference/Geom/clean_stencil=4.geom b/python/tests/reference/Geom/clean_stencil=4.geom new file mode 100644 index 000000000..eef322d3f --- /dev/null +++ b/python/tests/reference/Geom/clean_stencil=4.geom @@ 
-0,0 +1,25 @@ +4 header +grid a 8 b 5 c 4 +size x 8e-06 y 5e-06 z 4e-06 +origin x 0.0 y 0.0 z 0.0 +homogenization 1 +1 1 1 2 2 2 2 2 +1 1 1 2 2 2 2 2 +1 1 1 2 2 2 2 2 +1 1 1 1 2 2 2 2 +1 1 1 1 2 2 2 2 +1 1 1 2 2 2 2 2 +1 1 1 2 2 2 2 2 +1 1 1 2 2 2 2 2 +1 1 1 1 2 2 2 2 +1 1 1 1 2 2 2 2 +1 1 1 2 2 2 2 2 +1 1 1 2 2 2 2 2 +1 1 1 2 2 2 2 2 +1 1 1 1 2 2 2 2 +1 1 1 1 2 2 2 2 +1 1 1 1 2 2 2 2 +1 1 1 1 2 2 2 2 +1 1 1 1 2 2 2 2 +1 1 1 1 2 2 2 2 +1 1 1 1 2 2 2 2 diff --git a/python/tests/reference/Geom/mirror_directions=x-y-z_reflect=True.geom b/python/tests/reference/Geom/mirror_directions=x-y-z_reflect=True.geom new file mode 100644 index 000000000..77ff709d7 --- /dev/null +++ b/python/tests/reference/Geom/mirror_directions=x-y-z_reflect=True.geom @@ -0,0 +1,85 @@ +4 header +grid a 16 b 10 c 8 +size x 1.6e-05 y 1e-05 z 8e-06 +origin x 0.0 y 0.0 z 0.0 +homogenization 1 + 1 1 2 22 2 2 1 21 21 1 2 2 22 2 1 1 + 1 1 6 26 2 2 5 25 25 5 2 2 26 6 1 1 + 1 1 10 30 2 2 9 29 29 9 2 2 30 10 1 1 + 1 1 14 34 2 2 13 33 33 13 2 2 34 14 1 1 + 1 1 18 38 2 2 17 37 37 17 2 2 38 18 1 1 + 1 1 18 38 2 2 17 37 37 17 2 2 38 18 1 1 + 1 1 14 34 2 2 13 33 33 13 2 2 34 14 1 1 + 1 1 10 30 2 2 9 29 29 9 2 2 30 10 1 1 + 1 1 6 26 2 2 5 25 25 5 2 2 26 6 1 1 + 1 1 2 22 2 2 1 21 21 1 2 2 22 2 1 1 + 1 1 3 23 2 2 2 22 22 2 2 2 23 3 1 1 + 1 1 7 27 2 2 6 26 26 6 2 2 27 7 1 1 + 1 1 11 31 2 2 10 30 30 10 2 2 31 11 1 1 + 1 1 15 35 2 2 14 34 34 14 2 2 35 15 1 1 + 1 1 19 39 2 2 18 38 38 18 2 2 39 19 1 1 + 1 1 19 39 2 2 18 38 38 18 2 2 39 19 1 1 + 1 1 15 35 2 2 14 34 34 14 2 2 35 15 1 1 + 1 1 11 31 2 2 10 30 30 10 2 2 31 11 1 1 + 1 1 7 27 2 2 6 26 26 6 2 2 27 7 1 1 + 1 1 3 23 2 2 2 22 22 2 2 2 23 3 1 1 + 1 1 4 24 2 2 3 23 23 3 2 2 24 4 1 1 + 1 1 8 28 2 2 7 27 27 7 2 2 28 8 1 1 + 1 1 12 32 2 2 11 31 31 11 2 2 32 12 1 1 + 1 1 16 36 2 2 15 35 35 15 2 2 36 16 1 1 + 1 1 20 40 2 2 19 39 39 19 2 2 40 20 1 1 + 1 1 20 40 2 2 19 39 39 19 2 2 40 20 1 1 + 1 1 16 36 2 2 15 35 35 15 2 2 36 16 1 1 + 1 1 12 32 2 2 11 31 31 11 2 2 32 12 1 1 + 1 1 8 28 2 2 7 27 27 7 2 2 28 8 1 1 + 1 1 4 24 2 2 3 23 23 3 2 2 24 4 1 1 + 1 1 5 25 2 2 4 24 24 4 2 2 25 5 1 1 + 1 1 9 29 2 2 8 28 28 8 2 2 29 9 1 1 + 1 1 13 33 2 2 12 32 32 12 2 2 33 13 1 1 + 1 1 17 37 2 2 16 36 36 16 2 2 37 17 1 1 + 1 1 21 41 2 2 20 40 40 20 2 2 41 21 1 1 + 1 1 21 41 2 2 20 40 40 20 2 2 41 21 1 1 + 1 1 17 37 2 2 16 36 36 16 2 2 37 17 1 1 + 1 1 13 33 2 2 12 32 32 12 2 2 33 13 1 1 + 1 1 9 29 2 2 8 28 28 8 2 2 29 9 1 1 + 1 1 5 25 2 2 4 24 24 4 2 2 25 5 1 1 + 1 1 5 25 2 2 4 24 24 4 2 2 25 5 1 1 + 1 1 9 29 2 2 8 28 28 8 2 2 29 9 1 1 + 1 1 13 33 2 2 12 32 32 12 2 2 33 13 1 1 + 1 1 17 37 2 2 16 36 36 16 2 2 37 17 1 1 + 1 1 21 41 2 2 20 40 40 20 2 2 41 21 1 1 + 1 1 21 41 2 2 20 40 40 20 2 2 41 21 1 1 + 1 1 17 37 2 2 16 36 36 16 2 2 37 17 1 1 + 1 1 13 33 2 2 12 32 32 12 2 2 33 13 1 1 + 1 1 9 29 2 2 8 28 28 8 2 2 29 9 1 1 + 1 1 5 25 2 2 4 24 24 4 2 2 25 5 1 1 + 1 1 4 24 2 2 3 23 23 3 2 2 24 4 1 1 + 1 1 8 28 2 2 7 27 27 7 2 2 28 8 1 1 + 1 1 12 32 2 2 11 31 31 11 2 2 32 12 1 1 + 1 1 16 36 2 2 15 35 35 15 2 2 36 16 1 1 + 1 1 20 40 2 2 19 39 39 19 2 2 40 20 1 1 + 1 1 20 40 2 2 19 39 39 19 2 2 40 20 1 1 + 1 1 16 36 2 2 15 35 35 15 2 2 36 16 1 1 + 1 1 12 32 2 2 11 31 31 11 2 2 32 12 1 1 + 1 1 8 28 2 2 7 27 27 7 2 2 28 8 1 1 + 1 1 4 24 2 2 3 23 23 3 2 2 24 4 1 1 + 1 1 3 23 2 2 2 22 22 2 2 2 23 3 1 1 + 1 1 7 27 2 2 6 26 26 6 2 2 27 7 1 1 + 1 1 11 31 2 2 10 30 30 10 2 2 31 11 1 1 + 1 1 15 35 2 2 14 34 34 14 2 2 35 15 1 1 + 1 1 19 39 2 2 18 38 38 18 2 2 39 19 1 1 + 1 1 19 39 2 2 18 38 38 18 2 2 39 19 1 1 + 1 1 15 35 2 2 14 
34 34 14 2 2 35 15 1 1 + 1 1 11 31 2 2 10 30 30 10 2 2 31 11 1 1 + 1 1 7 27 2 2 6 26 26 6 2 2 27 7 1 1 + 1 1 3 23 2 2 2 22 22 2 2 2 23 3 1 1 + 1 1 2 22 2 2 1 21 21 1 2 2 22 2 1 1 + 1 1 6 26 2 2 5 25 25 5 2 2 26 6 1 1 + 1 1 10 30 2 2 9 29 29 9 2 2 30 10 1 1 + 1 1 14 34 2 2 13 33 33 13 2 2 34 14 1 1 + 1 1 18 38 2 2 17 37 37 17 2 2 38 18 1 1 + 1 1 18 38 2 2 17 37 37 17 2 2 38 18 1 1 + 1 1 14 34 2 2 13 33 33 13 2 2 34 14 1 1 + 1 1 10 30 2 2 9 29 29 9 2 2 30 10 1 1 + 1 1 6 26 2 2 5 25 25 5 2 2 26 6 1 1 + 1 1 2 22 2 2 1 21 21 1 2 2 22 2 1 1 diff --git a/python/tests/reference/Geom/mirror_directions=x_reflect=False.geom b/python/tests/reference/Geom/mirror_directions=x_reflect=False.geom new file mode 100644 index 000000000..afb3bb5f8 --- /dev/null +++ b/python/tests/reference/Geom/mirror_directions=x_reflect=False.geom @@ -0,0 +1,25 @@ +4 header +grid a 14 b 5 c 4 +size x 1.4e-05 y 5e-06 z 4e-06 +origin x 0.0 y 0.0 z 0.0 +homogenization 1 + 1 1 2 22 2 2 1 21 1 2 2 22 2 1 + 1 1 6 26 2 2 5 25 5 2 2 26 6 1 + 1 1 10 30 2 2 9 29 9 2 2 30 10 1 + 1 1 14 34 2 2 13 33 13 2 2 34 14 1 + 1 1 18 38 2 2 17 37 17 2 2 38 18 1 + 1 1 3 23 2 2 2 22 2 2 2 23 3 1 + 1 1 7 27 2 2 6 26 6 2 2 27 7 1 + 1 1 11 31 2 2 10 30 10 2 2 31 11 1 + 1 1 15 35 2 2 14 34 14 2 2 35 15 1 + 1 1 19 39 2 2 18 38 18 2 2 39 19 1 + 1 1 4 24 2 2 3 23 3 2 2 24 4 1 + 1 1 8 28 2 2 7 27 7 2 2 28 8 1 + 1 1 12 32 2 2 11 31 11 2 2 32 12 1 + 1 1 16 36 2 2 15 35 15 2 2 36 16 1 + 1 1 20 40 2 2 19 39 19 2 2 40 20 1 + 1 1 5 25 2 2 4 24 4 2 2 25 5 1 + 1 1 9 29 2 2 8 28 8 2 2 29 9 1 + 1 1 13 33 2 2 12 32 12 2 2 33 13 1 + 1 1 17 37 2 2 16 36 16 2 2 37 17 1 + 1 1 21 41 2 2 20 40 20 2 2 41 21 1 diff --git a/python/tests/reference/Geom/mirror_directions=y-z_reflect=False.geom b/python/tests/reference/Geom/mirror_directions=y-z_reflect=False.geom new file mode 100644 index 000000000..37d8ae18e --- /dev/null +++ b/python/tests/reference/Geom/mirror_directions=y-z_reflect=False.geom @@ -0,0 +1,53 @@ +4 header +grid a 8 b 8 c 6 +size x 8e-06 y 8.000000000000001e-06 z 6e-06 +origin x 0.0 y 0.0 z 0.0 +homogenization 1 + 1 1 2 22 2 2 1 21 + 1 1 6 26 2 2 5 25 + 1 1 10 30 2 2 9 29 + 1 1 14 34 2 2 13 33 + 1 1 18 38 2 2 17 37 + 1 1 14 34 2 2 13 33 + 1 1 10 30 2 2 9 29 + 1 1 6 26 2 2 5 25 + 1 1 3 23 2 2 2 22 + 1 1 7 27 2 2 6 26 + 1 1 11 31 2 2 10 30 + 1 1 15 35 2 2 14 34 + 1 1 19 39 2 2 18 38 + 1 1 15 35 2 2 14 34 + 1 1 11 31 2 2 10 30 + 1 1 7 27 2 2 6 26 + 1 1 4 24 2 2 3 23 + 1 1 8 28 2 2 7 27 + 1 1 12 32 2 2 11 31 + 1 1 16 36 2 2 15 35 + 1 1 20 40 2 2 19 39 + 1 1 16 36 2 2 15 35 + 1 1 12 32 2 2 11 31 + 1 1 8 28 2 2 7 27 + 1 1 5 25 2 2 4 24 + 1 1 9 29 2 2 8 28 + 1 1 13 33 2 2 12 32 + 1 1 17 37 2 2 16 36 + 1 1 21 41 2 2 20 40 + 1 1 17 37 2 2 16 36 + 1 1 13 33 2 2 12 32 + 1 1 9 29 2 2 8 28 + 1 1 4 24 2 2 3 23 + 1 1 8 28 2 2 7 27 + 1 1 12 32 2 2 11 31 + 1 1 16 36 2 2 15 35 + 1 1 20 40 2 2 19 39 + 1 1 16 36 2 2 15 35 + 1 1 12 32 2 2 11 31 + 1 1 8 28 2 2 7 27 + 1 1 3 23 2 2 2 22 + 1 1 7 27 2 2 6 26 + 1 1 11 31 2 2 10 30 + 1 1 15 35 2 2 14 34 + 1 1 19 39 2 2 18 38 + 1 1 15 35 2 2 14 34 + 1 1 11 31 2 2 10 30 + 1 1 7 27 2 2 6 26 diff --git a/python/tests/reference/Geom/mirror_directions=z-x-y_reflect=False.geom b/python/tests/reference/Geom/mirror_directions=z-x-y_reflect=False.geom new file mode 100644 index 000000000..5d7c23eb0 --- /dev/null +++ b/python/tests/reference/Geom/mirror_directions=z-x-y_reflect=False.geom @@ -0,0 +1,53 @@ +4 header +grid a 14 b 8 c 6 +size x 1.4e-05 y 8.000000000000001e-06 z 6e-06 +origin x 0.0 y 0.0 z 0.0 +homogenization 1 + 1 1 2 22 2 2 1 21 1 2 
2 22 2 1 + 1 1 6 26 2 2 5 25 5 2 2 26 6 1 + 1 1 10 30 2 2 9 29 9 2 2 30 10 1 + 1 1 14 34 2 2 13 33 13 2 2 34 14 1 + 1 1 18 38 2 2 17 37 17 2 2 38 18 1 + 1 1 14 34 2 2 13 33 13 2 2 34 14 1 + 1 1 10 30 2 2 9 29 9 2 2 30 10 1 + 1 1 6 26 2 2 5 25 5 2 2 26 6 1 + 1 1 3 23 2 2 2 22 2 2 2 23 3 1 + 1 1 7 27 2 2 6 26 6 2 2 27 7 1 + 1 1 11 31 2 2 10 30 10 2 2 31 11 1 + 1 1 15 35 2 2 14 34 14 2 2 35 15 1 + 1 1 19 39 2 2 18 38 18 2 2 39 19 1 + 1 1 15 35 2 2 14 34 14 2 2 35 15 1 + 1 1 11 31 2 2 10 30 10 2 2 31 11 1 + 1 1 7 27 2 2 6 26 6 2 2 27 7 1 + 1 1 4 24 2 2 3 23 3 2 2 24 4 1 + 1 1 8 28 2 2 7 27 7 2 2 28 8 1 + 1 1 12 32 2 2 11 31 11 2 2 32 12 1 + 1 1 16 36 2 2 15 35 15 2 2 36 16 1 + 1 1 20 40 2 2 19 39 19 2 2 40 20 1 + 1 1 16 36 2 2 15 35 15 2 2 36 16 1 + 1 1 12 32 2 2 11 31 11 2 2 32 12 1 + 1 1 8 28 2 2 7 27 7 2 2 28 8 1 + 1 1 5 25 2 2 4 24 4 2 2 25 5 1 + 1 1 9 29 2 2 8 28 8 2 2 29 9 1 + 1 1 13 33 2 2 12 32 12 2 2 33 13 1 + 1 1 17 37 2 2 16 36 16 2 2 37 17 1 + 1 1 21 41 2 2 20 40 20 2 2 41 21 1 + 1 1 17 37 2 2 16 36 16 2 2 37 17 1 + 1 1 13 33 2 2 12 32 12 2 2 33 13 1 + 1 1 9 29 2 2 8 28 8 2 2 29 9 1 + 1 1 4 24 2 2 3 23 3 2 2 24 4 1 + 1 1 8 28 2 2 7 27 7 2 2 28 8 1 + 1 1 12 32 2 2 11 31 11 2 2 32 12 1 + 1 1 16 36 2 2 15 35 15 2 2 36 16 1 + 1 1 20 40 2 2 19 39 19 2 2 40 20 1 + 1 1 16 36 2 2 15 35 15 2 2 36 16 1 + 1 1 12 32 2 2 11 31 11 2 2 32 12 1 + 1 1 8 28 2 2 7 27 7 2 2 28 8 1 + 1 1 3 23 2 2 2 22 2 2 2 23 3 1 + 1 1 7 27 2 2 6 26 6 2 2 27 7 1 + 1 1 11 31 2 2 10 30 10 2 2 31 11 1 + 1 1 15 35 2 2 14 34 14 2 2 35 15 1 + 1 1 19 39 2 2 18 38 18 2 2 39 19 1 + 1 1 15 35 2 2 14 34 14 2 2 35 15 1 + 1 1 11 31 2 2 10 30 10 2 2 31 11 1 + 1 1 7 27 2 2 6 26 6 2 2 27 7 1 diff --git a/python/tests/reference/Geom/scale_grid=10-10-10.geom b/python/tests/reference/Geom/scale_grid=10-10-10.geom new file mode 100644 index 000000000..43587a615 --- /dev/null +++ b/python/tests/reference/Geom/scale_grid=10-10-10.geom @@ -0,0 +1,105 @@ +4 header +grid a 10 b 10 c 10 +size x 8e-06 y 5e-06 z 4e-06 +origin x 0.0 y 0.0 z 0.0 +homogenization 1 + 1 1 2 2 22 2 2 2 1 21 + 1 1 2 2 22 2 2 2 1 21 + 1 1 6 6 26 2 2 2 5 25 + 1 1 6 6 26 2 2 2 5 25 + 1 1 10 10 30 2 2 2 9 29 + 1 1 10 10 30 2 2 2 9 29 + 1 1 14 14 34 2 2 2 13 33 + 1 1 14 14 34 2 2 2 13 33 + 1 1 18 18 38 2 2 2 17 37 + 1 1 18 18 38 2 2 2 17 37 + 1 1 2 2 22 2 2 2 1 21 + 1 1 2 2 22 2 2 2 1 21 + 1 1 6 6 26 2 2 2 5 25 + 1 1 6 6 26 2 2 2 5 25 + 1 1 10 10 30 2 2 2 9 29 + 1 1 10 10 30 2 2 2 9 29 + 1 1 14 14 34 2 2 2 13 33 + 1 1 14 14 34 2 2 2 13 33 + 1 1 18 18 38 2 2 2 17 37 + 1 1 18 18 38 2 2 2 17 37 + 1 1 3 3 23 2 2 2 2 22 + 1 1 3 3 23 2 2 2 2 22 + 1 1 7 7 27 2 2 2 6 26 + 1 1 7 7 27 2 2 2 6 26 + 1 1 11 11 31 2 2 2 10 30 + 1 1 11 11 31 2 2 2 10 30 + 1 1 15 15 35 2 2 2 14 34 + 1 1 15 15 35 2 2 2 14 34 + 1 1 19 19 39 2 2 2 18 38 + 1 1 19 19 39 2 2 2 18 38 + 1 1 3 3 23 2 2 2 2 22 + 1 1 3 3 23 2 2 2 2 22 + 1 1 7 7 27 2 2 2 6 26 + 1 1 7 7 27 2 2 2 6 26 + 1 1 11 11 31 2 2 2 10 30 + 1 1 11 11 31 2 2 2 10 30 + 1 1 15 15 35 2 2 2 14 34 + 1 1 15 15 35 2 2 2 14 34 + 1 1 19 19 39 2 2 2 18 38 + 1 1 19 19 39 2 2 2 18 38 + 1 1 3 3 23 2 2 2 2 22 + 1 1 3 3 23 2 2 2 2 22 + 1 1 7 7 27 2 2 2 6 26 + 1 1 7 7 27 2 2 2 6 26 + 1 1 11 11 31 2 2 2 10 30 + 1 1 11 11 31 2 2 2 10 30 + 1 1 15 15 35 2 2 2 14 34 + 1 1 15 15 35 2 2 2 14 34 + 1 1 19 19 39 2 2 2 18 38 + 1 1 19 19 39 2 2 2 18 38 + 1 1 4 4 24 2 2 2 3 23 + 1 1 4 4 24 2 2 2 3 23 + 1 1 8 8 28 2 2 2 7 27 + 1 1 8 8 28 2 2 2 7 27 + 1 1 12 12 32 2 2 2 11 31 + 1 1 12 12 32 2 2 2 11 31 + 1 1 16 16 36 2 2 2 15 35 + 1 1 16 16 36 2 2 2 15 35 + 1 1 20 20 40 2 2 2 19 
39 + 1 1 20 20 40 2 2 2 19 39 + 1 1 4 4 24 2 2 2 3 23 + 1 1 4 4 24 2 2 2 3 23 + 1 1 8 8 28 2 2 2 7 27 + 1 1 8 8 28 2 2 2 7 27 + 1 1 12 12 32 2 2 2 11 31 + 1 1 12 12 32 2 2 2 11 31 + 1 1 16 16 36 2 2 2 15 35 + 1 1 16 16 36 2 2 2 15 35 + 1 1 20 20 40 2 2 2 19 39 + 1 1 20 20 40 2 2 2 19 39 + 1 1 4 4 24 2 2 2 3 23 + 1 1 4 4 24 2 2 2 3 23 + 1 1 8 8 28 2 2 2 7 27 + 1 1 8 8 28 2 2 2 7 27 + 1 1 12 12 32 2 2 2 11 31 + 1 1 12 12 32 2 2 2 11 31 + 1 1 16 16 36 2 2 2 15 35 + 1 1 16 16 36 2 2 2 15 35 + 1 1 20 20 40 2 2 2 19 39 + 1 1 20 20 40 2 2 2 19 39 + 1 1 5 5 25 2 2 2 4 24 + 1 1 5 5 25 2 2 2 4 24 + 1 1 9 9 29 2 2 2 8 28 + 1 1 9 9 29 2 2 2 8 28 + 1 1 13 13 33 2 2 2 12 32 + 1 1 13 13 33 2 2 2 12 32 + 1 1 17 17 37 2 2 2 16 36 + 1 1 17 17 37 2 2 2 16 36 + 1 1 21 21 41 2 2 2 20 40 + 1 1 21 21 41 2 2 2 20 40 + 1 1 5 5 25 2 2 2 4 24 + 1 1 5 5 25 2 2 2 4 24 + 1 1 9 9 29 2 2 2 8 28 + 1 1 9 9 29 2 2 2 8 28 + 1 1 13 13 33 2 2 2 12 32 + 1 1 13 13 33 2 2 2 12 32 + 1 1 17 17 37 2 2 2 16 36 + 1 1 17 17 37 2 2 2 16 36 + 1 1 21 21 41 2 2 2 20 40 + 1 1 21 21 41 2 2 2 20 40 diff --git a/python/tests/reference/Geom/scale_grid=10-11-10.geom b/python/tests/reference/Geom/scale_grid=10-11-10.geom new file mode 100644 index 000000000..e12fc64fc --- /dev/null +++ b/python/tests/reference/Geom/scale_grid=10-11-10.geom @@ -0,0 +1,115 @@ +4 header +grid a 10 b 11 c 10 +size x 8e-06 y 5e-06 z 4e-06 +origin x 0.0 y 0.0 z 0.0 +homogenization 1 + 1 1 2 2 22 2 2 2 1 21 + 1 1 2 2 22 2 2 2 1 21 + 1 1 6 6 26 2 2 2 5 25 + 1 1 6 6 26 2 2 2 5 25 + 1 1 10 10 30 2 2 2 9 29 + 1 1 10 10 30 2 2 2 9 29 + 1 1 10 10 30 2 2 2 9 29 + 1 1 14 14 34 2 2 2 13 33 + 1 1 14 14 34 2 2 2 13 33 + 1 1 18 18 38 2 2 2 17 37 + 1 1 18 18 38 2 2 2 17 37 + 1 1 2 2 22 2 2 2 1 21 + 1 1 2 2 22 2 2 2 1 21 + 1 1 6 6 26 2 2 2 5 25 + 1 1 6 6 26 2 2 2 5 25 + 1 1 10 10 30 2 2 2 9 29 + 1 1 10 10 30 2 2 2 9 29 + 1 1 10 10 30 2 2 2 9 29 + 1 1 14 14 34 2 2 2 13 33 + 1 1 14 14 34 2 2 2 13 33 + 1 1 18 18 38 2 2 2 17 37 + 1 1 18 18 38 2 2 2 17 37 + 1 1 3 3 23 2 2 2 2 22 + 1 1 3 3 23 2 2 2 2 22 + 1 1 7 7 27 2 2 2 6 26 + 1 1 7 7 27 2 2 2 6 26 + 1 1 11 11 31 2 2 2 10 30 + 1 1 11 11 31 2 2 2 10 30 + 1 1 11 11 31 2 2 2 10 30 + 1 1 15 15 35 2 2 2 14 34 + 1 1 15 15 35 2 2 2 14 34 + 1 1 19 19 39 2 2 2 18 38 + 1 1 19 19 39 2 2 2 18 38 + 1 1 3 3 23 2 2 2 2 22 + 1 1 3 3 23 2 2 2 2 22 + 1 1 7 7 27 2 2 2 6 26 + 1 1 7 7 27 2 2 2 6 26 + 1 1 11 11 31 2 2 2 10 30 + 1 1 11 11 31 2 2 2 10 30 + 1 1 11 11 31 2 2 2 10 30 + 1 1 15 15 35 2 2 2 14 34 + 1 1 15 15 35 2 2 2 14 34 + 1 1 19 19 39 2 2 2 18 38 + 1 1 19 19 39 2 2 2 18 38 + 1 1 3 3 23 2 2 2 2 22 + 1 1 3 3 23 2 2 2 2 22 + 1 1 7 7 27 2 2 2 6 26 + 1 1 7 7 27 2 2 2 6 26 + 1 1 11 11 31 2 2 2 10 30 + 1 1 11 11 31 2 2 2 10 30 + 1 1 11 11 31 2 2 2 10 30 + 1 1 15 15 35 2 2 2 14 34 + 1 1 15 15 35 2 2 2 14 34 + 1 1 19 19 39 2 2 2 18 38 + 1 1 19 19 39 2 2 2 18 38 + 1 1 4 4 24 2 2 2 3 23 + 1 1 4 4 24 2 2 2 3 23 + 1 1 8 8 28 2 2 2 7 27 + 1 1 8 8 28 2 2 2 7 27 + 1 1 12 12 32 2 2 2 11 31 + 1 1 12 12 32 2 2 2 11 31 + 1 1 12 12 32 2 2 2 11 31 + 1 1 16 16 36 2 2 2 15 35 + 1 1 16 16 36 2 2 2 15 35 + 1 1 20 20 40 2 2 2 19 39 + 1 1 20 20 40 2 2 2 19 39 + 1 1 4 4 24 2 2 2 3 23 + 1 1 4 4 24 2 2 2 3 23 + 1 1 8 8 28 2 2 2 7 27 + 1 1 8 8 28 2 2 2 7 27 + 1 1 12 12 32 2 2 2 11 31 + 1 1 12 12 32 2 2 2 11 31 + 1 1 12 12 32 2 2 2 11 31 + 1 1 16 16 36 2 2 2 15 35 + 1 1 16 16 36 2 2 2 15 35 + 1 1 20 20 40 2 2 2 19 39 + 1 1 20 20 40 2 2 2 19 39 + 1 1 4 4 24 2 2 2 3 23 + 1 1 4 4 24 2 2 2 3 23 + 1 1 8 8 28 2 2 2 7 27 + 1 1 8 8 28 2 2 2 7 27 + 1 1 12 12 32 2 2 2 11 31 + 1 1 12 12 32 
2 2 2 11 31 + 1 1 12 12 32 2 2 2 11 31 + 1 1 16 16 36 2 2 2 15 35 + 1 1 16 16 36 2 2 2 15 35 + 1 1 20 20 40 2 2 2 19 39 + 1 1 20 20 40 2 2 2 19 39 + 1 1 5 5 25 2 2 2 4 24 + 1 1 5 5 25 2 2 2 4 24 + 1 1 9 9 29 2 2 2 8 28 + 1 1 9 9 29 2 2 2 8 28 + 1 1 13 13 33 2 2 2 12 32 + 1 1 13 13 33 2 2 2 12 32 + 1 1 13 13 33 2 2 2 12 32 + 1 1 17 17 37 2 2 2 16 36 + 1 1 17 17 37 2 2 2 16 36 + 1 1 21 21 41 2 2 2 20 40 + 1 1 21 21 41 2 2 2 20 40 + 1 1 5 5 25 2 2 2 4 24 + 1 1 5 5 25 2 2 2 4 24 + 1 1 9 9 29 2 2 2 8 28 + 1 1 9 9 29 2 2 2 8 28 + 1 1 13 13 33 2 2 2 12 32 + 1 1 13 13 33 2 2 2 12 32 + 1 1 13 13 33 2 2 2 12 32 + 1 1 17 17 37 2 2 2 16 36 + 1 1 17 17 37 2 2 2 16 36 + 1 1 21 21 41 2 2 2 20 40 + 1 1 21 21 41 2 2 2 20 40 diff --git a/python/tests/reference/Geom/scale_grid=10-13-10.geom b/python/tests/reference/Geom/scale_grid=10-13-10.geom new file mode 100644 index 000000000..cbe4afc00 --- /dev/null +++ b/python/tests/reference/Geom/scale_grid=10-13-10.geom @@ -0,0 +1,135 @@ +4 header +grid a 10 b 13 c 10 +size x 8e-06 y 5e-06 z 4e-06 +origin x 0.0 y 0.0 z 0.0 +homogenization 1 + 1 1 2 2 22 2 2 2 1 21 + 1 1 2 2 22 2 2 2 1 21 + 1 1 6 6 26 2 2 2 5 25 + 1 1 6 6 26 2 2 2 5 25 + 1 1 6 6 26 2 2 2 5 25 + 1 1 10 10 30 2 2 2 9 29 + 1 1 10 10 30 2 2 2 9 29 + 1 1 10 10 30 2 2 2 9 29 + 1 1 14 14 34 2 2 2 13 33 + 1 1 14 14 34 2 2 2 13 33 + 1 1 14 14 34 2 2 2 13 33 + 1 1 18 18 38 2 2 2 17 37 + 1 1 18 18 38 2 2 2 17 37 + 1 1 2 2 22 2 2 2 1 21 + 1 1 2 2 22 2 2 2 1 21 + 1 1 6 6 26 2 2 2 5 25 + 1 1 6 6 26 2 2 2 5 25 + 1 1 6 6 26 2 2 2 5 25 + 1 1 10 10 30 2 2 2 9 29 + 1 1 10 10 30 2 2 2 9 29 + 1 1 10 10 30 2 2 2 9 29 + 1 1 14 14 34 2 2 2 13 33 + 1 1 14 14 34 2 2 2 13 33 + 1 1 14 14 34 2 2 2 13 33 + 1 1 18 18 38 2 2 2 17 37 + 1 1 18 18 38 2 2 2 17 37 + 1 1 3 3 23 2 2 2 2 22 + 1 1 3 3 23 2 2 2 2 22 + 1 1 7 7 27 2 2 2 6 26 + 1 1 7 7 27 2 2 2 6 26 + 1 1 7 7 27 2 2 2 6 26 + 1 1 11 11 31 2 2 2 10 30 + 1 1 11 11 31 2 2 2 10 30 + 1 1 11 11 31 2 2 2 10 30 + 1 1 15 15 35 2 2 2 14 34 + 1 1 15 15 35 2 2 2 14 34 + 1 1 15 15 35 2 2 2 14 34 + 1 1 19 19 39 2 2 2 18 38 + 1 1 19 19 39 2 2 2 18 38 + 1 1 3 3 23 2 2 2 2 22 + 1 1 3 3 23 2 2 2 2 22 + 1 1 7 7 27 2 2 2 6 26 + 1 1 7 7 27 2 2 2 6 26 + 1 1 7 7 27 2 2 2 6 26 + 1 1 11 11 31 2 2 2 10 30 + 1 1 11 11 31 2 2 2 10 30 + 1 1 11 11 31 2 2 2 10 30 + 1 1 15 15 35 2 2 2 14 34 + 1 1 15 15 35 2 2 2 14 34 + 1 1 15 15 35 2 2 2 14 34 + 1 1 19 19 39 2 2 2 18 38 + 1 1 19 19 39 2 2 2 18 38 + 1 1 3 3 23 2 2 2 2 22 + 1 1 3 3 23 2 2 2 2 22 + 1 1 7 7 27 2 2 2 6 26 + 1 1 7 7 27 2 2 2 6 26 + 1 1 7 7 27 2 2 2 6 26 + 1 1 11 11 31 2 2 2 10 30 + 1 1 11 11 31 2 2 2 10 30 + 1 1 11 11 31 2 2 2 10 30 + 1 1 15 15 35 2 2 2 14 34 + 1 1 15 15 35 2 2 2 14 34 + 1 1 15 15 35 2 2 2 14 34 + 1 1 19 19 39 2 2 2 18 38 + 1 1 19 19 39 2 2 2 18 38 + 1 1 4 4 24 2 2 2 3 23 + 1 1 4 4 24 2 2 2 3 23 + 1 1 8 8 28 2 2 2 7 27 + 1 1 8 8 28 2 2 2 7 27 + 1 1 8 8 28 2 2 2 7 27 + 1 1 12 12 32 2 2 2 11 31 + 1 1 12 12 32 2 2 2 11 31 + 1 1 12 12 32 2 2 2 11 31 + 1 1 16 16 36 2 2 2 15 35 + 1 1 16 16 36 2 2 2 15 35 + 1 1 16 16 36 2 2 2 15 35 + 1 1 20 20 40 2 2 2 19 39 + 1 1 20 20 40 2 2 2 19 39 + 1 1 4 4 24 2 2 2 3 23 + 1 1 4 4 24 2 2 2 3 23 + 1 1 8 8 28 2 2 2 7 27 + 1 1 8 8 28 2 2 2 7 27 + 1 1 8 8 28 2 2 2 7 27 + 1 1 12 12 32 2 2 2 11 31 + 1 1 12 12 32 2 2 2 11 31 + 1 1 12 12 32 2 2 2 11 31 + 1 1 16 16 36 2 2 2 15 35 + 1 1 16 16 36 2 2 2 15 35 + 1 1 16 16 36 2 2 2 15 35 + 1 1 20 20 40 2 2 2 19 39 + 1 1 20 20 40 2 2 2 19 39 + 1 1 4 4 24 2 2 2 3 23 + 1 1 4 4 24 2 2 2 3 23 + 1 1 8 8 28 2 2 2 7 27 + 1 1 8 8 28 2 2 2 7 27 + 1 1 8 8 28 2 2 2 7 27 + 1 1 
12 12 32 2 2 2 11 31 + 1 1 12 12 32 2 2 2 11 31 + 1 1 12 12 32 2 2 2 11 31 + 1 1 16 16 36 2 2 2 15 35 + 1 1 16 16 36 2 2 2 15 35 + 1 1 16 16 36 2 2 2 15 35 + 1 1 20 20 40 2 2 2 19 39 + 1 1 20 20 40 2 2 2 19 39 + 1 1 5 5 25 2 2 2 4 24 + 1 1 5 5 25 2 2 2 4 24 + 1 1 9 9 29 2 2 2 8 28 + 1 1 9 9 29 2 2 2 8 28 + 1 1 9 9 29 2 2 2 8 28 + 1 1 13 13 33 2 2 2 12 32 + 1 1 13 13 33 2 2 2 12 32 + 1 1 13 13 33 2 2 2 12 32 + 1 1 17 17 37 2 2 2 16 36 + 1 1 17 17 37 2 2 2 16 36 + 1 1 17 17 37 2 2 2 16 36 + 1 1 21 21 41 2 2 2 20 40 + 1 1 21 21 41 2 2 2 20 40 + 1 1 5 5 25 2 2 2 4 24 + 1 1 5 5 25 2 2 2 4 24 + 1 1 9 9 29 2 2 2 8 28 + 1 1 9 9 29 2 2 2 8 28 + 1 1 9 9 29 2 2 2 8 28 + 1 1 13 13 33 2 2 2 12 32 + 1 1 13 13 33 2 2 2 12 32 + 1 1 13 13 33 2 2 2 12 32 + 1 1 17 17 37 2 2 2 16 36 + 1 1 17 17 37 2 2 2 16 36 + 1 1 17 17 37 2 2 2 16 36 + 1 1 21 21 41 2 2 2 20 40 + 1 1 21 21 41 2 2 2 20 40 diff --git a/python/tests/reference/Geom/scale_grid=10-20-2.geom b/python/tests/reference/Geom/scale_grid=10-20-2.geom new file mode 100644 index 000000000..6b1888f17 --- /dev/null +++ b/python/tests/reference/Geom/scale_grid=10-20-2.geom @@ -0,0 +1,45 @@ +4 header +grid a 10 b 20 c 2 +size x 8e-06 y 5e-06 z 4e-06 +origin x 0.0 y 0.0 z 0.0 +homogenization 1 + 1 1 2 2 22 2 2 2 1 21 + 1 1 2 2 22 2 2 2 1 21 + 1 1 2 2 22 2 2 2 1 21 + 1 1 6 6 26 2 2 2 5 25 + 1 1 6 6 26 2 2 2 5 25 + 1 1 6 6 26 2 2 2 5 25 + 1 1 6 6 26 2 2 2 5 25 + 1 1 6 6 26 2 2 2 5 25 + 1 1 10 10 30 2 2 2 9 29 + 1 1 10 10 30 2 2 2 9 29 + 1 1 10 10 30 2 2 2 9 29 + 1 1 10 10 30 2 2 2 9 29 + 1 1 14 14 34 2 2 2 13 33 + 1 1 14 14 34 2 2 2 13 33 + 1 1 14 14 34 2 2 2 13 33 + 1 1 14 14 34 2 2 2 13 33 + 1 1 14 14 34 2 2 2 13 33 + 1 1 18 18 38 2 2 2 17 37 + 1 1 18 18 38 2 2 2 17 37 + 1 1 18 18 38 2 2 2 17 37 + 1 1 5 5 25 2 2 2 4 24 + 1 1 5 5 25 2 2 2 4 24 + 1 1 5 5 25 2 2 2 4 24 + 1 1 9 9 29 2 2 2 8 28 + 1 1 9 9 29 2 2 2 8 28 + 1 1 9 9 29 2 2 2 8 28 + 1 1 9 9 29 2 2 2 8 28 + 1 1 9 9 29 2 2 2 8 28 + 1 1 13 13 33 2 2 2 12 32 + 1 1 13 13 33 2 2 2 12 32 + 1 1 13 13 33 2 2 2 12 32 + 1 1 13 13 33 2 2 2 12 32 + 1 1 17 17 37 2 2 2 16 36 + 1 1 17 17 37 2 2 2 16 36 + 1 1 17 17 37 2 2 2 16 36 + 1 1 17 17 37 2 2 2 16 36 + 1 1 17 17 37 2 2 2 16 36 + 1 1 21 21 41 2 2 2 20 40 + 1 1 21 21 41 2 2 2 20 40 + 1 1 21 21 41 2 2 2 20 40 diff --git a/python/tests/reference/Geom/scale_grid=5-4-20.geom b/python/tests/reference/Geom/scale_grid=5-4-20.geom new file mode 100644 index 000000000..043683f6a --- /dev/null +++ b/python/tests/reference/Geom/scale_grid=5-4-20.geom @@ -0,0 +1,85 @@ +4 header +grid a 5 b 4 c 20 +size x 8e-06 y 5e-06 z 4e-06 +origin x 0.0 y 0.0 z 0.0 +homogenization 1 + 1 2 2 2 21 + 1 6 2 2 25 + 1 14 2 2 33 + 1 18 2 2 37 + 1 2 2 2 21 + 1 6 2 2 25 + 1 14 2 2 33 + 1 18 2 2 37 + 1 2 2 2 21 + 1 6 2 2 25 + 1 14 2 2 33 + 1 18 2 2 37 + 1 2 2 2 21 + 1 6 2 2 25 + 1 14 2 2 33 + 1 18 2 2 37 + 1 3 2 2 22 + 1 7 2 2 26 + 1 15 2 2 34 + 1 19 2 2 38 + 1 3 2 2 22 + 1 7 2 2 26 + 1 15 2 2 34 + 1 19 2 2 38 + 1 3 2 2 22 + 1 7 2 2 26 + 1 15 2 2 34 + 1 19 2 2 38 + 1 3 2 2 22 + 1 7 2 2 26 + 1 15 2 2 34 + 1 19 2 2 38 + 1 3 2 2 22 + 1 7 2 2 26 + 1 15 2 2 34 + 1 19 2 2 38 + 1 3 2 2 22 + 1 7 2 2 26 + 1 15 2 2 34 + 1 19 2 2 38 + 1 4 2 2 23 + 1 8 2 2 27 + 1 16 2 2 35 + 1 20 2 2 39 + 1 4 2 2 23 + 1 8 2 2 27 + 1 16 2 2 35 + 1 20 2 2 39 + 1 4 2 2 23 + 1 8 2 2 27 + 1 16 2 2 35 + 1 20 2 2 39 + 1 4 2 2 23 + 1 8 2 2 27 + 1 16 2 2 35 + 1 20 2 2 39 + 1 4 2 2 23 + 1 8 2 2 27 + 1 16 2 2 35 + 1 20 2 2 39 + 1 4 2 2 23 + 1 8 2 2 27 + 1 16 2 2 35 + 1 20 2 2 39 + 1 5 2 2 24 + 1 9 2 2 28 + 1 17 2 2 36 + 1 21 2 2 40 + 1 5 2 2 
24 + 1 9 2 2 28 + 1 17 2 2 36 + 1 21 2 2 40 + 1 5 2 2 24 + 1 9 2 2 28 + 1 17 2 2 36 + 1 21 2 2 40 + 1 5 2 2 24 + 1 9 2 2 28 + 1 17 2 2 36 + 1 21 2 2 40 diff --git a/python/tests/reference/Geom/scale_grid=8-10-12.geom b/python/tests/reference/Geom/scale_grid=8-10-12.geom new file mode 100644 index 000000000..5cfe66aba --- /dev/null +++ b/python/tests/reference/Geom/scale_grid=8-10-12.geom @@ -0,0 +1,125 @@ +4 header +grid a 8 b 10 c 12 +size x 8e-06 y 5e-06 z 4e-06 +origin x 0.0 y 0.0 z 0.0 +homogenization 1 + 1 1 2 22 2 2 1 21 + 1 1 2 22 2 2 1 21 + 1 1 6 26 2 2 5 25 + 1 1 6 26 2 2 5 25 + 1 1 10 30 2 2 9 29 + 1 1 10 30 2 2 9 29 + 1 1 14 34 2 2 13 33 + 1 1 14 34 2 2 13 33 + 1 1 18 38 2 2 17 37 + 1 1 18 38 2 2 17 37 + 1 1 2 22 2 2 1 21 + 1 1 2 22 2 2 1 21 + 1 1 6 26 2 2 5 25 + 1 1 6 26 2 2 5 25 + 1 1 10 30 2 2 9 29 + 1 1 10 30 2 2 9 29 + 1 1 14 34 2 2 13 33 + 1 1 14 34 2 2 13 33 + 1 1 18 38 2 2 17 37 + 1 1 18 38 2 2 17 37 + 1 1 3 23 2 2 2 22 + 1 1 3 23 2 2 2 22 + 1 1 7 27 2 2 6 26 + 1 1 7 27 2 2 6 26 + 1 1 11 31 2 2 10 30 + 1 1 11 31 2 2 10 30 + 1 1 15 35 2 2 14 34 + 1 1 15 35 2 2 14 34 + 1 1 19 39 2 2 18 38 + 1 1 19 39 2 2 18 38 + 1 1 3 23 2 2 2 22 + 1 1 3 23 2 2 2 22 + 1 1 7 27 2 2 6 26 + 1 1 7 27 2 2 6 26 + 1 1 11 31 2 2 10 30 + 1 1 11 31 2 2 10 30 + 1 1 15 35 2 2 14 34 + 1 1 15 35 2 2 14 34 + 1 1 19 39 2 2 18 38 + 1 1 19 39 2 2 18 38 + 1 1 3 23 2 2 2 22 + 1 1 3 23 2 2 2 22 + 1 1 7 27 2 2 6 26 + 1 1 7 27 2 2 6 26 + 1 1 11 31 2 2 10 30 + 1 1 11 31 2 2 10 30 + 1 1 15 35 2 2 14 34 + 1 1 15 35 2 2 14 34 + 1 1 19 39 2 2 18 38 + 1 1 19 39 2 2 18 38 + 1 1 3 23 2 2 2 22 + 1 1 3 23 2 2 2 22 + 1 1 7 27 2 2 6 26 + 1 1 7 27 2 2 6 26 + 1 1 11 31 2 2 10 30 + 1 1 11 31 2 2 10 30 + 1 1 15 35 2 2 14 34 + 1 1 15 35 2 2 14 34 + 1 1 19 39 2 2 18 38 + 1 1 19 39 2 2 18 38 + 1 1 4 24 2 2 3 23 + 1 1 4 24 2 2 3 23 + 1 1 8 28 2 2 7 27 + 1 1 8 28 2 2 7 27 + 1 1 12 32 2 2 11 31 + 1 1 12 32 2 2 11 31 + 1 1 16 36 2 2 15 35 + 1 1 16 36 2 2 15 35 + 1 1 20 40 2 2 19 39 + 1 1 20 40 2 2 19 39 + 1 1 4 24 2 2 3 23 + 1 1 4 24 2 2 3 23 + 1 1 8 28 2 2 7 27 + 1 1 8 28 2 2 7 27 + 1 1 12 32 2 2 11 31 + 1 1 12 32 2 2 11 31 + 1 1 16 36 2 2 15 35 + 1 1 16 36 2 2 15 35 + 1 1 20 40 2 2 19 39 + 1 1 20 40 2 2 19 39 + 1 1 4 24 2 2 3 23 + 1 1 4 24 2 2 3 23 + 1 1 8 28 2 2 7 27 + 1 1 8 28 2 2 7 27 + 1 1 12 32 2 2 11 31 + 1 1 12 32 2 2 11 31 + 1 1 16 36 2 2 15 35 + 1 1 16 36 2 2 15 35 + 1 1 20 40 2 2 19 39 + 1 1 20 40 2 2 19 39 + 1 1 4 24 2 2 3 23 + 1 1 4 24 2 2 3 23 + 1 1 8 28 2 2 7 27 + 1 1 8 28 2 2 7 27 + 1 1 12 32 2 2 11 31 + 1 1 12 32 2 2 11 31 + 1 1 16 36 2 2 15 35 + 1 1 16 36 2 2 15 35 + 1 1 20 40 2 2 19 39 + 1 1 20 40 2 2 19 39 + 1 1 5 25 2 2 4 24 + 1 1 5 25 2 2 4 24 + 1 1 9 29 2 2 8 28 + 1 1 9 29 2 2 8 28 + 1 1 13 33 2 2 12 32 + 1 1 13 33 2 2 12 32 + 1 1 17 37 2 2 16 36 + 1 1 17 37 2 2 16 36 + 1 1 21 41 2 2 20 40 + 1 1 21 41 2 2 20 40 + 1 1 5 25 2 2 4 24 + 1 1 5 25 2 2 4 24 + 1 1 9 29 2 2 8 28 + 1 1 9 29 2 2 8 28 + 1 1 13 33 2 2 12 32 + 1 1 13 33 2 2 12 32 + 1 1 17 37 2 2 16 36 + 1 1 17 37 2 2 16 36 + 1 1 21 41 2 2 20 40 + 1 1 21 41 2 2 20 40 diff --git a/python/tests/reference/Rotation/1_BCC.pdf b/python/tests/reference/Rotation/1_BCC.pdf new file mode 100644 index 000000000..445f1d250 Binary files /dev/null and b/python/tests/reference/Rotation/1_BCC.pdf differ diff --git a/python/tests/reference/Rotation/1_FCC.pdf b/python/tests/reference/Rotation/1_FCC.pdf new file mode 100644 index 000000000..1dade6b60 Binary files /dev/null and b/python/tests/reference/Rotation/1_FCC.pdf differ diff --git 
a/python/tests/reference/Rotation/2_BCC.pdf b/python/tests/reference/Rotation/2_BCC.pdf new file mode 100644 index 000000000..423bc645d Binary files /dev/null and b/python/tests/reference/Rotation/2_BCC.pdf differ diff --git a/python/tests/reference/Rotation/2_FCC.pdf b/python/tests/reference/Rotation/2_FCC.pdf new file mode 100644 index 000000000..813076c61 Binary files /dev/null and b/python/tests/reference/Rotation/2_FCC.pdf differ diff --git a/python/tests/reference/Rotation/PoleFigures_OR.m b/python/tests/reference/Rotation/PoleFigures_OR.m new file mode 100644 index 000000000..1e57ca01d --- /dev/null +++ b/python/tests/reference/Rotation/PoleFigures_OR.m @@ -0,0 +1,38 @@ +% Start MTEX first in Matlab + +tmp = matlab.desktop.editor.getActive; +cd(fileparts(tmp.Filename)); + +symmetry = {crystalSymmetry('m-3m', [1 1 1], 'mineral', 'Iron', 'color', 'light blue')} + +% plotting convention +setMTEXpref('xAxisDirection','north'); +setMTEXpref('zAxisDirection','outOfPlane'); + + +lattice_types = {'BCC','FCC'}; +models = {'Bain','GT','GT_prime','KS','NW','Pitsch'}; + +rotation = containers.Map; +rotation('BCC') = 'Passive Rotation'; +rotation('FCC') = 'Active Rotation'; + +for lattice = lattice_types + for p = 0:length(models)/3-1 + EBSD_data = {loadEBSD(strcat(lattice,'_',models{p*3+1},'.txt'),symmetry,'interface','generic',... + 'ColumnNames', { 'phi1' 'Phi' 'phi2' 'x' 'y'}, 'Bunge', rotation(char(lattice))), + loadEBSD(strcat(lattice,'_',models{p*3+2},'.txt'),symmetry,'interface','generic',... + 'ColumnNames', { 'phi1' 'Phi' 'phi2' 'x' 'y'}, 'Bunge', rotation(char(lattice))), + loadEBSD(strcat(lattice,'_',models{p*3+3},'.txt'),symmetry,'interface','generic',... + 'ColumnNames', { 'phi1' 'Phi' 'phi2' 'x' 'y'}, 'Bunge', rotation(char(lattice)))} + h = [Miller(1,0,0,symmetry{1}),Miller(1,1,0,symmetry{1}),Miller(1,1,1,symmetry{1})]; % 3 pole figures + plotPDF(EBSD_data{1}.orientations,h,'MarkerSize',5,'MarkerColor','r','DisplayName',models{p*3+1}) + hold on + plotPDF(EBSD_data{2}.orientations,h,'MarkerSize',4,'MarkerColor','b','DisplayName',models{p*3+2}) + plotPDF(EBSD_data{3}.orientations,h,'MarkerSize',3,'MarkerColor','g','DisplayName',models{p*3+3}) + legend('show','location','southoutside','Interpreter', 'none') + orient('landscape') + print('-bestfit',strcat(int2str(p+1),'_',char(lattice),'.pdf'),'-dpdf') + close + end +end \ No newline at end of file diff --git a/python/tests/reference/Rotation/bcc_Bain.txt b/python/tests/reference/Rotation/bcc_Bain.txt new file mode 100644 index 000000000..e0bc4f6c7 --- /dev/null +++ b/python/tests/reference/Rotation/bcc_Bain.txt @@ -0,0 +1,5 @@ +1 header +1_Eulers 2_Eulers 3_Eulers 1_pos 2_pos +0.0 45.00000000000001 0.0 1 1 +90.0 45.00000000000001 270.0 1 2 +45.00000000000001 0.0 0.0 1 3 diff --git a/python/tests/reference/Rotation/bcc_GT.txt b/python/tests/reference/Rotation/bcc_GT.txt new file mode 100644 index 000000000..5d5102698 --- /dev/null +++ b/python/tests/reference/Rotation/bcc_GT.txt @@ -0,0 +1,26 @@ +1 header +1_Eulers 2_Eulers 3_Eulers 1_pos 2_pos +283.60440567265294 9.976439066337804 33.24637065555936 1 1 +167.8261034151001 43.397849654402556 183.40022280897963 1 2 +262.1156357053931 43.82007387041961 104.07478363123654 1 3 +103.604405672653 9.976439066337804 213.24637065555936 1 4 +347.8261034151001 43.39784965440255 3.400222808979685 1 5 +82.11563570539313 43.82007387041961 284.0747836312365 1 6 +76.39559432734703 9.976439066337806 326.75362934444064 1 7 +192.17389658489986 43.397849654402556 176.59977719102034 1 8 +97.88436429460687 
43.82007387041961 255.92521636876344 1 9 +256.395594327347 9.976439066337804 146.75362934444064 1 10 +12.173896584899929 43.39784965440254 356.59977719102034 1 11 +277.8843642946069 43.82007387041961 75.92521636876346 1 12 +102.17389658489992 43.39784965440254 266.59977719102034 1 13 +346.395594327347 9.976439066337804 56.75362934444064 1 14 +7.884364294606862 43.82007387041961 345.9252163687635 1 15 +282.17389658489986 43.39784965440254 86.59977719102032 1 16 +166.39559432734703 9.976439066337804 236.75362934444058 1 17 +187.88436429460683 43.82007387041961 165.92521636876344 1 18 +257.8261034151001 43.39784965440255 93.40022280897969 1 19 +13.604405672652977 9.976439066337804 303.24637065555936 1 20 +352.1156357053931 43.82007387041961 14.074783631236542 1 21 +77.82610341510008 43.397849654402556 273.4002228089796 1 22 +193.60440567265297 9.976439066337806 123.24637065555939 1 23 +172.11563570539317 43.82007387041961 194.07478363123653 1 24 diff --git a/python/tests/reference/Rotation/bcc_GT_prime.txt b/python/tests/reference/Rotation/bcc_GT_prime.txt new file mode 100644 index 000000000..e398d3139 --- /dev/null +++ b/python/tests/reference/Rotation/bcc_GT_prime.txt @@ -0,0 +1,26 @@ +1 header +1_Eulers 2_Eulers 3_Eulers 1_pos 2_pos +303.24637065555936 9.976439066337804 13.604405672652977 1 1 +165.92521636876344 43.82007387041961 187.88436429460683 1 2 +266.59977719102034 43.39784965440254 102.17389658489992 1 3 +123.24637065555939 9.976439066337804 193.604405672653 1 4 +345.9252163687635 43.82007387041961 7.884364294606862 1 5 +86.59977719102032 43.39784965440254 282.17389658489986 1 6 +56.75362934444064 9.976439066337804 346.395594327347 1 7 +194.07478363123653 43.82007387041961 172.11563570539317 1 8 +93.40022280897969 43.39784965440255 257.8261034151001 1 9 +236.75362934444058 9.976439066337804 166.39559432734697 1 10 +14.074783631236542 43.82007387041961 352.1156357053931 1 11 +273.4002228089796 43.397849654402556 77.82610341510008 1 12 +104.07478363123654 43.82007387041961 262.1156357053931 1 13 +326.75362934444064 9.976439066337806 76.39559432734703 1 14 +3.400222808979685 43.39784965440255 347.8261034151001 1 15 +284.0747836312365 43.82007387041961 82.11563570539313 1 16 +146.75362934444064 9.976439066337804 256.395594327347 1 17 +183.40022280897963 43.397849654402556 167.8261034151001 1 18 +255.92521636876344 43.82007387041961 97.88436429460687 1 19 +33.24637065555936 9.976439066337804 283.60440567265294 1 20 +356.59977719102034 43.39784965440254 12.173896584899929 1 21 +75.92521636876346 43.82007387041961 277.8843642946069 1 22 +213.24637065555936 9.976439066337804 103.604405672653 1 23 +176.59977719102034 43.397849654402556 192.17389658489986 1 24 diff --git a/python/tests/reference/Rotation/bcc_KS.txt b/python/tests/reference/Rotation/bcc_KS.txt new file mode 100644 index 000000000..34b393358 --- /dev/null +++ b/python/tests/reference/Rotation/bcc_KS.txt @@ -0,0 +1,26 @@ +1 header +1_Eulers 2_Eulers 3_Eulers 1_pos 2_pos +335.7965716606702 10.528779365509317 65.79657166067024 1 1 +228.77270547567446 80.40593177313953 85.64260312151849 1 2 +131.22729452432552 80.40593177313954 4.357396878481506 1 3 +24.20342833932977 10.52877936550932 24.20342833932976 1 4 +221.95489158457983 85.70366403943002 80.37863910890589 1 5 +138.04510841542015 85.70366403943004 9.621360891094124 1 6 +131.22729452432552 80.40593177313953 94.35739687848151 1 7 +24.203428339329765 10.52877936550932 114.20342833932976 1 8 +221.95489158457983 85.70366403943004 170.37863910890587 1 9 +138.04510841542015 
85.70366403943004 99.62136089109411 1 10 +335.7965716606702 10.52877936550932 155.79657166067025 1 11 +228.77270547567448 80.40593177313954 175.6426031215185 1 12 +335.7965716606702 10.52877936550932 335.7965716606702 1 13 +228.77270547567448 80.40593177313954 355.6426031215185 1 14 +131.2272945243255 80.40593177313954 274.35739687848144 1 15 +24.203428339329747 10.52877936550932 294.2034283393298 1 16 +221.95489158457985 85.70366403943004 350.3786391089059 1 17 +138.04510841542015 85.70366403943004 279.6213608910941 1 18 +41.95489158457986 94.29633596056998 9.621360891094133 1 19 +318.04510841542015 94.29633596056996 80.37863910890589 1 20 +155.79657166067025 169.4712206344907 24.203428339329754 1 21 +48.77270547567448 99.59406822686046 4.357396878481504 1 22 +311.2272945243255 99.59406822686046 85.64260312151852 1 23 +204.20342833932975 169.4712206344907 65.79657166067024 1 24 diff --git a/python/tests/reference/Rotation/bcc_NW.txt b/python/tests/reference/Rotation/bcc_NW.txt new file mode 100644 index 000000000..754c69bba --- /dev/null +++ b/python/tests/reference/Rotation/bcc_NW.txt @@ -0,0 +1,14 @@ +1 header +1_Eulers 2_Eulers 3_Eulers 1_pos 2_pos +225.41555594321144 83.13253115922213 83.08266205989301 1 1 +134.58444405678856 83.13253115922211 6.917337940107012 1 2 +4.702125169424418e-15 9.735610317245317 45.0 1 3 +134.58444405678856 83.13253115922213 276.91733794010696 1 4 +225.4155559432114 83.13253115922213 353.082662059893 1 5 +0.0 9.735610317245317 315.0 1 6 +134.58444405678858 83.13253115922213 96.91733794010702 1 7 +225.41555594321142 83.13253115922213 173.082662059893 1 8 +0.0 9.735610317245317 135.0 1 9 +99.59803029876785 45.81931182053557 166.36129272052355 1 10 +260.40196970123213 45.81931182053556 283.6387072794765 1 11 +180.0 99.73561031724535 225.0 1 12 diff --git a/python/tests/reference/Rotation/bcc_Pitsch.txt b/python/tests/reference/Rotation/bcc_Pitsch.txt new file mode 100644 index 000000000..ef28bbb4d --- /dev/null +++ b/python/tests/reference/Rotation/bcc_Pitsch.txt @@ -0,0 +1,14 @@ +1 header +1_Eulers 2_Eulers 3_Eulers 1_pos 2_pos +6.9173379401070045 83.13253115922213 44.58444405678856 1 1 +45.0 89.99999999999999 279.7356103172453 1 2 +166.36129272052352 45.819311820535574 279.59803029876787 1 3 +83.08266205989301 83.13253115922213 225.41555594321144 1 4 +256.3612927205235 45.819311820535574 189.59803029876787 1 5 +315.0 90.0 9.735610317245369 1 6 +186.917337940107 83.13253115922213 224.58444405678856 1 7 +315.0 90.0 80.26438968275463 1 8 +13.638707279476478 45.81931182053557 260.40196970123213 1 9 +263.082662059893 83.13253115922213 45.415555943211444 1 10 +103.63870727947646 45.819311820535574 170.40196970123213 1 11 +224.99999999999997 90.0 170.26438968275465 1 12 diff --git a/python/tests/reference/Rotation/fcc_Bain.txt b/python/tests/reference/Rotation/fcc_Bain.txt new file mode 100644 index 000000000..876cf3888 --- /dev/null +++ b/python/tests/reference/Rotation/fcc_Bain.txt @@ -0,0 +1,5 @@ +1 header +1_Eulers 2_Eulers 3_Eulers 1_pos 2_pos +180.0 45.00000000000001 180.0 1 1 +270.0 45.00000000000001 90.0 1 2 +315.0 0.0 0.0 1 3 diff --git a/python/tests/reference/Rotation/fcc_GT.txt b/python/tests/reference/Rotation/fcc_GT.txt new file mode 100644 index 000000000..cefae431a --- /dev/null +++ b/python/tests/reference/Rotation/fcc_GT.txt @@ -0,0 +1,26 @@ +1 header +1_Eulers 2_Eulers 3_Eulers 1_pos 2_pos +146.75362934444064 9.976439066337804 256.395594327347 1 1 +356.59977719102034 43.39784965440254 12.173896584899929 1 2 +75.92521636876346 43.82007387041961 
277.8843642946069 1 3 +326.75362934444064 9.976439066337806 76.39559432734703 1 4 +176.59977719102034 43.397849654402556 192.17389658489986 1 5 +255.92521636876344 43.82007387041961 97.88436429460687 1 6 +213.24637065555936 9.976439066337804 103.604405672653 1 7 +3.400222808979685 43.39784965440255 347.8261034151001 1 8 +284.0747836312365 43.82007387041961 82.11563570539313 1 9 +33.24637065555936 9.976439066337804 283.60440567265294 1 10 +183.40022280897963 43.397849654402556 167.8261034151001 1 11 +104.07478363123654 43.82007387041961 262.1156357053931 1 12 +273.4002228089796 43.397849654402556 77.82610341510008 1 13 +123.24637065555939 9.976439066337806 193.60440567265297 1 14 +194.07478363123653 43.82007387041961 172.11563570539317 1 15 +93.40022280897969 43.39784965440255 257.8261034151001 1 16 +303.24637065555936 9.976439066337804 13.604405672652977 1 17 +14.074783631236542 43.82007387041961 352.1156357053931 1 18 +86.59977719102032 43.39784965440254 282.17389658489986 1 19 +236.75362934444058 9.976439066337804 166.39559432734703 1 20 +165.92521636876344 43.82007387041961 187.88436429460683 1 21 +266.59977719102034 43.39784965440254 102.17389658489992 1 22 +56.75362934444064 9.976439066337804 346.395594327347 1 23 +345.9252163687635 43.82007387041961 7.884364294606862 1 24 diff --git a/python/tests/reference/Rotation/fcc_GT_prime.txt b/python/tests/reference/Rotation/fcc_GT_prime.txt new file mode 100644 index 000000000..44a9b25ec --- /dev/null +++ b/python/tests/reference/Rotation/fcc_GT_prime.txt @@ -0,0 +1,26 @@ +1 header +1_Eulers 2_Eulers 3_Eulers 1_pos 2_pos +166.39559432734697 9.976439066337804 236.75362934444058 1 1 +352.1156357053931 43.82007387041961 14.074783631236542 1 2 +77.82610341510008 43.397849654402556 273.4002228089796 1 3 +346.395594327347 9.976439066337804 56.75362934444064 1 4 +172.11563570539317 43.82007387041961 194.07478363123653 1 5 +257.8261034151001 43.39784965440255 93.40022280897969 1 6 +193.604405672653 9.976439066337804 123.24637065555939 1 7 +7.884364294606862 43.82007387041961 345.9252163687635 1 8 +282.17389658489986 43.39784965440254 86.59977719102032 1 9 +13.604405672652977 9.976439066337804 303.24637065555936 1 10 +187.88436429460683 43.82007387041961 165.92521636876344 1 11 +102.17389658489992 43.39784965440254 266.59977719102034 1 12 +277.8843642946069 43.82007387041961 75.92521636876346 1 13 +103.604405672653 9.976439066337804 213.24637065555936 1 14 +192.17389658489986 43.397849654402556 176.59977719102034 1 15 +97.88436429460687 43.82007387041961 255.92521636876344 1 16 +283.60440567265294 9.976439066337804 33.24637065555936 1 17 +12.173896584899929 43.39784965440254 356.59977719102034 1 18 +82.11563570539313 43.82007387041961 284.0747836312365 1 19 +256.395594327347 9.976439066337804 146.75362934444064 1 20 +167.8261034151001 43.397849654402556 183.40022280897963 1 21 +262.1156357053931 43.82007387041961 104.07478363123654 1 22 +76.39559432734703 9.976439066337806 326.75362934444064 1 23 +347.8261034151001 43.39784965440255 3.400222808979685 1 24 diff --git a/python/tests/reference/Rotation/fcc_KS.txt b/python/tests/reference/Rotation/fcc_KS.txt new file mode 100644 index 000000000..93fdcf07e --- /dev/null +++ b/python/tests/reference/Rotation/fcc_KS.txt @@ -0,0 +1,26 @@ +1 header +1_Eulers 2_Eulers 3_Eulers 1_pos 2_pos +114.20342833932975 10.52877936550932 204.20342833932972 1 1 +94.3573968784815 80.40593177313954 311.22729452432543 1 2 +175.6426031215185 80.40593177313954 48.77270547567447 1 3 +155.79657166067025 10.52877936550932 
155.79657166067025 1 4 +99.62136089109411 85.70366403943004 318.04510841542015 1 5 +170.37863910890587 85.70366403943002 41.954891584579855 1 6 +85.64260312151852 80.40593177313954 48.77270547567448 1 7 +65.79657166067024 10.52877936550932 155.79657166067025 1 8 +9.621360891094124 85.70366403943004 318.04510841542015 1 9 +80.37863910890587 85.70366403943004 41.95489158457987 1 10 +24.203428339329758 10.52877936550932 204.20342833932975 1 11 +4.357396878481486 80.40593177313954 311.2272945243255 1 12 +204.20342833932972 10.52877936550932 204.20342833932972 1 13 +184.35739687848147 80.40593177313954 311.2272945243255 1 14 +265.64260312151845 80.40593177313953 48.77270547567449 1 15 +245.79657166067025 10.528779365509317 155.79657166067025 1 16 +189.62136089109413 85.70366403943004 318.04510841542015 1 17 +260.3786391089059 85.70366403943002 41.954891584579855 1 18 +170.37863910890587 94.29633596056996 138.04510841542015 1 19 +99.62136089109411 94.29633596056998 221.95489158457983 1 20 +155.79657166067025 169.4712206344907 24.203428339329754 1 21 +175.64260312151848 99.59406822686046 131.22729452432552 1 22 +94.35739687848151 99.59406822686046 228.77270547567446 1 23 +114.20342833932975 169.4712206344907 335.7965716606702 1 24 diff --git a/python/tests/reference/Rotation/fcc_NW.txt b/python/tests/reference/Rotation/fcc_NW.txt new file mode 100644 index 000000000..cc9c95a05 --- /dev/null +++ b/python/tests/reference/Rotation/fcc_NW.txt @@ -0,0 +1,14 @@ +1 header +1_Eulers 2_Eulers 3_Eulers 1_pos 2_pos +96.91733794010702 83.13253115922213 314.5844440567886 1 1 +173.082662059893 83.13253115922211 45.41555594321143 1 2 +135.0 9.735610317245317 180.0 1 3 +263.082662059893 83.13253115922213 45.415555943211444 1 4 +186.91733794010702 83.13253115922211 314.5844440567886 1 5 +224.99999999999997 9.735610317245317 180.0 1 6 +83.082662059893 83.13253115922213 45.415555943211444 1 7 +6.917337940106983 83.13253115922211 314.5844440567886 1 8 +45.0 9.73561031724532 180.0 1 9 +13.638707279476469 45.81931182053557 80.40196970123216 1 10 +256.36129272052347 45.81931182053556 279.59803029876775 1 11 +315.0 99.73561031724536 0.0 1 12 diff --git a/python/tests/reference/Rotation/fcc_Pitsch.txt b/python/tests/reference/Rotation/fcc_Pitsch.txt new file mode 100644 index 000000000..aa0c32365 --- /dev/null +++ b/python/tests/reference/Rotation/fcc_Pitsch.txt @@ -0,0 +1,14 @@ +1 header +1_Eulers 2_Eulers 3_Eulers 1_pos 2_pos +135.41555594321144 83.13253115922213 173.082662059893 1 1 +260.26438968275465 90.0 135.0 1 2 +260.40196970123213 45.81931182053557 13.638707279476478 1 3 +314.5844440567886 83.13253115922213 96.91733794010702 1 4 +350.40196970123213 45.81931182053557 283.6387072794765 1 5 +170.26438968275465 90.0 224.99999999999997 1 6 +315.4155559432114 83.13253115922213 353.08266205989304 1 7 +99.73561031724536 90.0 225.0 1 8 +279.59803029876787 45.819311820535574 166.36129272052352 1 9 +134.58444405678856 83.13253115922213 276.91733794010696 1 10 +9.598030298767851 45.819311820535574 76.36129272052355 1 11 +9.735610317245369 90.0 315.0 1 12 diff --git a/python/tests/reference/Table/datatype-mix.txt b/python/tests/reference/Table/datatype-mix.txt new file mode 100644 index 000000000..2f6baa852 --- /dev/null +++ b/python/tests/reference/Table/datatype-mix.txt @@ -0,0 +1,4 @@ +1 header +a b +1.0 hallo +0.1 "hallo test" diff --git a/python/tests/reference/Table/simple.ang b/python/tests/reference/Table/simple.ang new file mode 100644 index 000000000..8e009e2dc --- /dev/null +++ 
b/python/tests/reference/Table/simple.ang @@ -0,0 +1,138 @@ +# TEM_PIXperUM 1.000000 +# x-star 240.000000 +# y-star 240.000000 +# z-star 240.000000 +# WorkingDistance 20.000000 +# +# Phase 1 +# MaterialName Iron(Alpha) +# Formula +# Info +# Symmetry 43 +# LatticeConstants 2.870 2.870 2.870 90.000 90.000 90.000 +# NumberFamilies 100 +# hklFamilies 9223440 0 2 32763 0.000000 32763 +# hklFamilies 0 0 0 9218712 0.000000 9218712 +# hklFamilies 0 0 3801155 0 0.000000 0 +# hklFamilies 5570652 6619251 7536754 -1203738484 0.000000 -1203738484 +# hklFamilies 7143516 5111900 7864421 32763 0.000000 32763 +# hklFamilies 6488180 7274604 6553717 9220480 0.000000 9220480 +# hklFamilies 3145820 2949169 3145777 0 0.000000 0 +# hklFamilies 3014704 7209057 103 9220488 0.000000 9220488 +# hklFamilies 0 0 0 0 0.000000 0 +# hklFamilies 0 0 0 9220032 0.000000 9220032 +# hklFamilies 0 0 0 0 0.000000 0 +# hklFamilies 0 0 0 -1203728363 0.000000 -1203728363 +# hklFamilies 0 0 0 32763 0.000000 32763 +# hklFamilies 0 0 0 9218628 0.000000 9218628 +# hklFamilies 0 0 0 0 0.000000 0 +# hklFamilies 0 0 0 9218504 0.000000 9218504 +# hklFamilies 0 0 0 0 0.000000 0 +# hklFamilies 0 0 0 9219904 0.000000 9219904 +# hklFamilies 0 0 0 0 0.000000 0 +# hklFamilies 0 0 0 0 -0.000046 0 +# hklFamilies 0 0 0 0 0.000000 0 +# hklFamilies 0 0 0 256 0.000000 256 +# hklFamilies 0 0 0 0 0.000000 0 +# hklFamilies 0 0 0 -1203753636 0.000000 -1203753636 +# hklFamilies 0 0 0 32763 0.000000 32763 +# hklFamilies 0 0 0 9220576 0.000000 9220576 +# hklFamilies 0 0 0 0 0.000000 0 +# hklFamilies 0 0 0 9218736 0.000000 9218736 +# hklFamilies 0 0 0 0 0.000000 0 +# hklFamilies 0 0 0 103219574 0.000000 103219574 +# hklFamilies 0 0 0 0 0.000000 0 +# hklFamilies 0 0 0 9220576 0.000000 9220576 +# hklFamilies 0 0 0 0 0.000000 0 +# hklFamilies 0 0 0 9220692 0.000000 9220692 +# hklFamilies 1434293657 0 0 0 0.000000 0 +# hklFamilies 0 0 0 9218584 0.000000 9218584 +# hklFamilies 0 0 0 0 0.000000 0 +# hklFamilies 0 0 0 9219976 0.000000 9219976 +# hklFamilies 0 0 0 0 0.000000 0 +# hklFamilies 0 0 0 0 0.000000 0 +# hklFamilies 0 0 0 0 0.000000 0 +# hklFamilies 0 0 0 256 0.000000 256 +# hklFamilies 0 0 69473872 0 0.000000 0 +# hklFamilies 0 1889785611 -1546188227 -1203753636 -0.000046 -1203753636 +# hklFamilies 9224144 0 1434294456 32763 0.000000 32763 +# hklFamilies 0 9224160 0 9220672 0.000000 9220672 +# hklFamilies -1168390977 32763 851982 0 0.000000 0 +# hklFamilies 0 304 0 9218816 0.000000 9218816 +# hklFamilies 27030208 0 1434297593 0 0.000000 0 +# hklFamilies 0 9224160 0 101654020 0.000000 101654020 +# hklFamilies 9224064 0 0 0 0.000000 0 +# hklFamilies 0 25563456 0 9220672 0.000000 9220672 +# hklFamilies 9224544 0 25559040 0 0.000000 0 +# hklFamilies 0 25559788 0 9220788 0.000000 9220788 +# hklFamilies 176 0 304 24 0.000000 24 +# hklFamilies 0 25562304 0 4 0.000000 4 +# hklFamilies 9224208 0 0 0 0.000000 0 +# hklFamilies 0 281 0 9220032 0.000000 9220032 +# hklFamilies 0 0 0 0 0.000000 0 +# hklFamilies 0 -1168390977 32763 9220660 0.000000 9220660 +# hklFamilies 21 0 -1168390977 8 0.000000 8 +# hklFamilies 32763 2490388 0 24 0.000000 24 +# hklFamilies 48 0 69650048 25 0.000000 25 +# hklFamilies 0 -1216995621 32763 65535 -0.000046 65535 +# hklFamilies 0 0 25562688 1 0.000000 1 +# hklFamilies 0 0 21776 0 -0.000058 0 +# hklFamilies 25562688 0 25559724 0 0.000000 0 +# hklFamilies 0 25559040 0 1179652 0.000000 1179652 +# hklFamilies 25559724 0 25562304 32763 0.000000 32763 +# hklFamilies 0 48 0 9219904 0.000000 9219904 +# hklFamilies 25562304 0 28 0 0.000000 0 +# 
hklFamilies 0 0 0 8781958 0.000000 8781958 +# hklFamilies 31 0 0 0 0.000000 0 +# hklFamilies 0 0 0 103304392 0.000000 103304392 +# hklFamilies 3 0 48 0 0.000000 0 +# hklFamilies 0 9224505 0 103219694 -0.000046 103219694 +# hklFamilies 27000832 0 -1168393705 0 0.000000 0 +# hklFamilies 32763 25559040 0 9220192 0.000000 9220192 +# hklFamilies 0 32763 31 0 0.000000 0 +# hklFamilies 0 0 0 9219872 0.000000 9219872 +# hklFamilies 69729712 0 9224640 0 0.000000 0 +# hklFamilies 0 69729904 0 1397706823 0.000000 1397706823 +# hklFamilies 69911504 0 0 59 0.000000 59 +# hklFamilies 0 27007968 0 103219200 0.000000 103219200 +# hklFamilies 0 0 -1216843775 0 0.000000 0 +# hklFamilies 32763 69911504 0 0 0.000000 0 +# hklFamilies -1168296496 32763 9225328 0 0.000000 0 +# hklFamilies 0 1434343267 0 9632160 0.000000 9632160 +# hklFamilies 69908840 0 -1216995621 0 0.000000 0 +# hklFamilies 32763 256 0 9632112 0.000000 9632112 +# hklFamilies 0 0 399376220 0 0.000000 0 +# hklFamilies 21776 1966087 4456474 262148 0.000000 262148 +# hklFamilies 9224704 0 1434198234 0 0.000000 0 +# hklFamilies 0 0 0 9704044 0.000000 9704044 +# hklFamilies -1168373699 32763 1 0 0.000000 0 +# hklFamilies 0 69911504 0 94961568 -0.000046 94961568 +# hklFamilies 1 0 69911504 0 0.000000 0 +# hklFamilies 0 10 0 9220016 0.000000 9220016 +# hklFamilies -1 0 27030208 0 0.000000 0 +# hklFamilies 0 1434488087 18 9219992 -0.000046 9219992 +# ElasticConstants 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 +# ElasticConstants 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 +# ElasticConstants 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 +# ElasticConstants 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 +# ElasticConstants 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 +# ElasticConstants 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 +# Categories1 1 1 1 1 +# +# GRID: SqrGrid +# XSTEP: 0.050000 +# YSTEP: 0.050000 +# NCOLS_ODD: 2 +# NCOLS_EVEN: 2 +# NROWS: 2 +# +# OPERATOR: +# +# SAMPLEID: +# +# SCANID: +# +0.0 0.0 0.0 0.00 0.00 60.0 20.0 1 2.0 1.5 +0.0 2.0 0.0 0.05 0.00 60.0 20.0 1 2.0 1.5 +0.0 2.0 0.0 0.00 0.05 60.0 20.0 1 2.0 1.5 +0.0 0.0 1.0 0.05 0.05 60.0 20.0 1 2.0 1.5 diff --git a/python/tests/reference/Table/whitespace-mix.txt b/python/tests/reference/Table/whitespace-mix.txt new file mode 100644 index 000000000..933a16e77 --- /dev/null +++ b/python/tests/reference/Table/whitespace-mix.txt @@ -0,0 +1,6 @@ +1 header +a b 1_c 2_c +1 2 3 4 +5 6 7 8 +9 10. 12. 
12 + diff --git a/python/tests/test_DADF5.py b/python/tests/test_DADF5.py new file mode 100644 index 000000000..8aa8ec174 --- /dev/null +++ b/python/tests/test_DADF5.py @@ -0,0 +1,82 @@ +import shutil +import os + +import pytest +import numpy as np + +from damask import DADF5 +from damask import mechanics + +@pytest.fixture +def default(tmp_path,reference_dir): + """Small DADF5 file in temp location for modification.""" + fname = '12grains6x7x8_tensionY.hdf5' + shutil.copy(os.path.join(reference_dir,fname),tmp_path) + f = DADF5(os.path.join(tmp_path,fname)) + f.set_by_time(20.0,20.0) + return f + +@pytest.fixture +def reference_dir(reference_dir_base): + """Directory containing reference results.""" + return os.path.join(reference_dir_base,'DADF5') + + +class TestDADF5: + + def test_time_increments(self,default): + shape = default.read_dataset(default.get_dataset_location('F'),0).shape + default.set_by_time(0.0,20.0) + for i in default.iter_visible('increments'): + assert shape == default.read_dataset(default.get_dataset_location('F'),0).shape + + + def test_add_absolute(self,default): + default.add_absolute('Fe') + loc = {'Fe': default.get_dataset_location('Fe'), + '|Fe|': default.get_dataset_location('|Fe|')} + in_memory = np.abs(default.read_dataset(loc['Fe'],0)) + in_file = default.read_dataset(loc['|Fe|'],0) + assert np.allclose(in_memory,in_file) + + def test_add_Cauchy(self,default): + default.add_Cauchy('P','F') + loc = {'F': default.get_dataset_location('F'), + 'P': default.get_dataset_location('P'), + 'sigma':default.get_dataset_location('sigma')} + in_memory = mechanics.Cauchy(default.read_dataset(loc['F'],0), + default.read_dataset(loc['P'],0)) + in_file = default.read_dataset(loc['sigma'],0) + assert np.allclose(in_memory,in_file) + + def test_add_determinant(self,default): + default.add_determinant('P') + loc = {'P': default.get_dataset_location('P'), + 'det(P)':default.get_dataset_location('det(P)')} + in_memory = np.linalg.det(default.read_dataset(loc['P'],0)).reshape((-1,1)) + in_file = default.read_dataset(loc['det(P)'],0) + assert np.allclose(in_memory,in_file) + + def test_add_deviator(self,default): + default.add_deviator('P') + loc = {'P' :default.get_dataset_location('P'), + 's_P':default.get_dataset_location('s_P')} + in_memory = mechanics.deviatoric_part(default.read_dataset(loc['P'],0)) + in_file = default.read_dataset(loc['s_P'],0) + assert np.allclose(in_memory,in_file) + + def test_add_norm(self,default): + default.add_norm('F',1) + loc = {'F': default.get_dataset_location('F'), + '|F|_1':default.get_dataset_location('|F|_1')} + in_memory = np.linalg.norm(default.read_dataset(loc['F'],0),ord=1,axis=(1,2),keepdims=True) + in_file = default.read_dataset(loc['|F|_1'],0) + assert np.allclose(in_memory,in_file) + + def test_add_spherical(self,default): + default.add_spherical('P') + loc = {'P': default.get_dataset_location('P'), + 'p_P': default.get_dataset_location('p_P')} + in_memory = mechanics.spherical_part(default.read_dataset(loc['P'],0)).reshape(-1,1) + in_file = default.read_dataset(loc['p_P'],0) + assert np.allclose(in_memory,in_file) diff --git a/python/tests/test_Geom.py b/python/tests/test_Geom.py new file mode 100644 index 000000000..bc3a3e751 --- /dev/null +++ b/python/tests/test_Geom.py @@ -0,0 +1,99 @@ +import copy +import os + +import pytest +import numpy as np + +from damask import Geom + + +def geom_equal(a,b): + return np.all(a.get_microstructure() == b.get_microstructure()) and \ + np.all(a.get_size() == b.get_size()) and \ + 
np.all(a.get_grid() == b.get_grid()) + +@pytest.fixture +def default(): + """Simple geometry.""" + x=np.concatenate((np.ones(40,dtype=int), + np.arange(2,42), + np.ones(40,dtype=int)*2, + np.arange(1,41))).reshape((8,5,4)) + return Geom(x,[8e-6,5e-6,4e-6]) + +@pytest.fixture +def reference_dir(reference_dir_base): + """Directory containing reference results.""" + return os.path.join(reference_dir_base,'Geom') + + +class TestGeom: + + def test_update(self,default): + modified = copy.deepcopy(default) + modified.update( + default.get_microstructure(), + default.get_size(), + default.get_origin() + ) + assert geom_equal(modified,default) + + + def test_write_read_str(self,default,tmpdir): + default.to_file(str(tmpdir.join('default.geom'))) + new = Geom.from_file(str(tmpdir.join('default.geom'))) + assert geom_equal(new,default) + + def test_write_read_file(self,default,tmpdir): + with open(tmpdir.join('default.geom'),'w') as f: + default.to_file(f) + with open(tmpdir.join('default.geom')) as f: + new = Geom.from_file(f) + assert geom_equal(new,default) + + @pytest.mark.parametrize('pack',[True,False]) + def test_pack(self,default,tmpdir,pack): + default.to_file(tmpdir.join('default.geom'),pack=pack) + new = Geom.from_file(tmpdir.join('default.geom')) + assert geom_equal(new,default) + + @pytest.mark.parametrize('directions,reflect',[ + (['x'], False), + (['x','y','z'],True), + (['z','x','y'],False), + (['y','z'], False) + ] + ) + def test_mirror(self,default,update,reference_dir,directions,reflect): + modified = copy.deepcopy(default) + modified.mirror(directions,reflect) + tag = 'directions={}_reflect={}'.format('-'.join(directions),reflect) + reference = os.path.join(reference_dir,'mirror_{}.geom'.format(tag)) + if update: modified.to_file(reference) + assert geom_equal(modified,Geom.from_file(reference)) + + @pytest.mark.parametrize('stencil',[(1),(2),(3),(4)]) + def test_clean(self,default,update,reference_dir,stencil): + modified = copy.deepcopy(default) + modified.clean(stencil) + tag = 'stencil={}'.format(stencil) + reference = os.path.join(reference_dir,'clean_{}.geom'.format(tag)) + if update: modified.to_file(reference) + assert geom_equal(modified,Geom.from_file(reference)) + + @pytest.mark.parametrize('grid',[ + ((10,11,10)), + ([10,13,10]), + (np.array((10,10,10))), + (np.array((8, 10,12))), + (np.array((5, 4, 20))), + (np.array((10,20,2)) ) + ] + ) + def test_scale(self,default,update,reference_dir,grid): + modified = copy.deepcopy(default) + modified.scale(grid) + tag = 'grid={}'.format('-'.join([str(x) for x in grid])) + reference = os.path.join(reference_dir,'scale_{}.geom'.format(tag)) + if update: modified.to_file(reference) + assert geom_equal(modified,Geom.from_file(reference)) diff --git a/python/tests/test_Rotation.py b/python/tests/test_Rotation.py new file mode 100644 index 000000000..e352e1c26 --- /dev/null +++ b/python/tests/test_Rotation.py @@ -0,0 +1,81 @@ +import os + +import pytest +import numpy as np + +import damask +from damask import Rotation +from damask import Orientation + +n = 1000 + +@pytest.fixture +def default(): + """A set of n random rotations.""" + return [Rotation.fromRandom() for r in range(n)] + +@pytest.fixture +def reference_dir(reference_dir_base): + """Directory containing reference results.""" + return os.path.join(reference_dir_base,'Rotation') + + +class TestRotation: + + def test_Eulers(self,default): + for rot in default: + assert np.allclose(rot.asQuaternion(), + Rotation.fromEulers(rot.asEulers()).asQuaternion()) + + def 
test_AxisAngle(self,default): + for rot in default: + assert np.allclose(rot.asEulers(), + Rotation.fromAxisAngle(rot.asAxisAngle()).asEulers()) + + def test_Matrix(self,default): + for rot in default: + assert np.allclose(rot.asAxisAngle(), + Rotation.fromMatrix(rot.asMatrix()).asAxisAngle()) + + def test_Rodriques(self,default): + for rot in default: + assert np.allclose(rot.asMatrix(), + Rotation.fromRodrigues(rot.asRodrigues()).asMatrix()) + + def test_Homochoric(self,default): + for rot in default: + assert np.allclose(rot.asRodrigues(), + Rotation.fromHomochoric(rot.asHomochoric()).asRodrigues(),rtol=5.e-5) + + def test_Cubochoric(self,default): + for rot in default: + assert np.allclose(rot.asHomochoric(), + Rotation.fromCubochoric(rot.asCubochoric()).asHomochoric(),rtol=5.e-5) + + def test_Quaternion(self,default): + for rot in default: + assert np.allclose(rot.asCubochoric(), + Rotation.fromQuaternion(rot.asQuaternion()).asCubochoric(),rtol=5.e-5) + + + @pytest.mark.parametrize('model',['Bain','KS','GT','GT_prime','NW','Pitsch']) + @pytest.mark.parametrize('lattice',['fcc','bcc']) + def test_relationship_forward_backward(self,model,lattice): + ori = Orientation(Rotation.fromRandom(),lattice) + for i,r in enumerate(ori.relatedOrientations(model)): + ori2 = r.relatedOrientations(model)[i] + misorientation = ori.rotation.misorientation(ori2.rotation) + assert misorientation.asAxisAngle(degrees=True)[3]<1.0e-5 + + @pytest.mark.parametrize('model',['Bain','KS','GT','GT_prime','NW','Pitsch']) + @pytest.mark.parametrize('lattice',['fcc','bcc']) + def test_relationship_reference(self,update,reference_dir,model,lattice): + reference = os.path.join(reference_dir,'{}_{}.txt'.format(lattice,model)) + ori = Orientation(Rotation(),lattice) + eu = np.array([o.rotation.asEulers(degrees=True) for o in ori.relatedOrientations(model)]) + if update: + coords = np.array([(1,i+1) for i,x in enumerate(eu)]) + table = damask.Table(eu,{'Eulers':(3,)}) + table.add('pos',coords) + table.to_ASCII(reference) + assert np.allclose(eu,damask.Table.from_ASCII(reference).get('Eulers')) diff --git a/python/tests/test_Table.py b/python/tests/test_Table.py new file mode 100644 index 000000000..d5f505e76 --- /dev/null +++ b/python/tests/test_Table.py @@ -0,0 +1,169 @@ +import os + +import pytest +import numpy as np + +from damask import Table + + +@pytest.fixture +def default(): + """Simple Table.""" + x = np.ones((5,13),dtype=float) + return Table(x,{'F':(3,3),'v':(3,),'s':(1,)},['test data','contains only ones']) + +@pytest.fixture +def reference_dir(reference_dir_base): + """Directory containing reference results.""" + return os.path.join(reference_dir_base,'Table') + +class TestTable: + + def test_get_scalar(self,default): + d = default.get('s') + assert np.allclose(d,1.0) and d.shape[1:] == (1,) + + def test_get_vector(self,default): + d = default.get('v') + assert np.allclose(d,1.0) and d.shape[1:] == (3,) + + def test_get_tensor(self,default): + d = default.get('F') + assert np.allclose(d,1.0) and d.shape[1:] == (3,3) + + def test_get_component(self,default): + d = default.get('5_F') + assert np.allclose(d,1.0) and d.shape[1:] == (1,) + + def test_write_read_str(self,default,tmpdir): + default.to_ASCII(str(tmpdir.join('default.txt'))) + new = Table.from_ASCII(str(tmpdir.join('default.txt'))) + assert all(default.data==new.data) + + def test_write_read_file(self,default,tmpdir): + with open(tmpdir.join('default.txt'),'w') as f: + default.to_ASCII(f) + with open(tmpdir.join('default.txt')) as f: + new = 
Table.from_ASCII(f) + assert all(default.data==new.data) + + def test_read_ang_str(self,reference_dir): + new = Table.from_ang(os.path.join(reference_dir,'simple.ang')) + assert new.data.shape == (4,10) and \ + new.labels == ['eu', 'pos', 'IQ', 'CI', 'ID', 'intensity', 'fit'] + + def test_read_ang_file(self,reference_dir): + f = open(os.path.join(reference_dir,'simple.ang')) + new = Table.from_ang(f) + assert new.data.shape == (4,10) and \ + new.labels == ['eu', 'pos', 'IQ', 'CI', 'ID', 'intensity', 'fit'] + + @pytest.mark.parametrize('fname',['datatype-mix.txt','whitespace-mix.txt']) + def test_read_strange(self,reference_dir,fname): + with open(os.path.join(reference_dir,fname)) as f: + Table.from_ASCII(f) + + def test_set(self,default): + default.set('F',np.zeros((5,3,3)),'set to zero') + d=default.get('F') + assert np.allclose(d,0.0) and d.shape[1:] == (3,3) + + def test_labels(self,default): + assert default.labels == ['F','v','s'] + + def test_add(self,default): + d = np.random.random((5,9)) + default.add('nine',d,'random data') + assert np.allclose(d,default.get('nine')) + + def test_rename_equivalent(self): + x = np.random.random((5,13)) + t = Table(x,{'F':(3,3),'v':(3,),'s':(1,)},['random test data']) + s = t.get('s') + t.rename('s','u') + u = t.get('u') + assert np.all(s == u) + + def test_rename_gone(self,default): + default.rename('v','V') + assert 'v' not in default.shapes and 'v' not in default.data.columns + with pytest.raises(KeyError): + default.get('v') + + def test_delete(self,default): + default.delete('v') + assert 'v' not in default.shapes and 'v' not in default.data.columns + with pytest.raises(KeyError): + default.get('v') + + def test_join(self): + x = np.random.random((5,13)) + a = Table(x,{'F':(3,3),'v':(3,),'s':(1,)},['random test data']) + y = np.random.random((5,3)) + b = Table(y,{'u':(3,)},['random test data']) + a.join(b) + assert np.array_equal(a.get('u'), b.get('u')) + + def test_join_invalid(self): + x = np.random.random((5,13)) + a = Table(x,{'F':(3,3),'v':(3,),'s':(1,)},['random test data']) + with pytest.raises(KeyError): + a.join(a) + + def test_append(self): + x = np.random.random((5,13)) + a = Table(x,{'F':(3,3),'v':(3,),'s':(1,)},['random test data']) + a.append(a) + assert np.array_equal(a.data[:5].to_numpy(),a.data[5:].to_numpy()) + + def test_append_invalid(self): + x = np.random.random((5,13)) + a = Table(x,{'F':(3,3),'v':(3,),'s':(1,)},['random test data']) + b = Table(x,{'F':(3,3),'u':(3,),'s':(1,)},['random test data']) + with pytest.raises(KeyError): + a.append(b) + + def test_invalid_initialization(self): + x = np.random.random((5,10)) + with pytest.raises(ValueError): + Table(x,{'F':(3,3)}) + + def test_invalid_set(self,default): + x = default.get('v') + with pytest.raises(ValueError): + default.set('F',x,'does not work') + + def test_invalid_get(self,default): + with pytest.raises(KeyError): + default.get('n') + + def test_sort_scalar(self): + x = np.random.random((5,13)) + t = Table(x,{'F':(3,3),'v':(3,),'s':(1,)},['random test data']) + unsort = t.get('s') + t.sort_by('s') + sort = t.get('s') + assert np.all(np.sort(unsort,0)==sort) + + def test_sort_component(self): + x = np.random.random((5,12)) + t = Table(x,{'F':(3,3),'v':(3,)},['random test data']) + unsort = t.get('4_F') + t.sort_by('4_F') + sort = t.get('4_F') + assert np.all(np.sort(unsort,0)==sort) + + def test_sort_revert(self): + x = np.random.random((5,12)) + t = Table(x,{'F':(3,3),'v':(3,)},['random test data']) + t.sort_by('4_F',ascending=False) + sort = t.get('4_F') + 
assert np.all(np.sort(sort,0)==sort[::-1,:]) + + def test_sort(self): + t = Table(np.array([[0,1,],[2,1,]]), + {'v':(2,)}, + ['test data']) + t.add('s',np.array(['b','a'])) + t.sort_by('s') + assert np.all(t.get('1_v') == np.array([2,0]).reshape((2,1))) diff --git a/python/tests/test_grid_filters.py b/python/tests/test_grid_filters.py new file mode 100644 index 000000000..a5455e1ae --- /dev/null +++ b/python/tests/test_grid_filters.py @@ -0,0 +1,79 @@ +import pytest +import numpy as np + +from damask import grid_filters + +class TestGridFilters: + + def test_cell_coord0(self): + size = np.random.random(3) + grid = np.random.randint(8,32,(3)) + coord = grid_filters.cell_coord0(grid,size) + assert np.allclose(coord[0,0,0],size/grid*.5) and coord.shape == tuple(grid[::-1]) + (3,) + + def test_node_coord0(self): + size = np.random.random(3) + grid = np.random.randint(8,32,(3)) + coord = grid_filters.node_coord0(grid,size) + assert np.allclose(coord[-1,-1,-1],size) and coord.shape == tuple(grid[::-1]+1) + (3,) + + def test_coord0(self): + size = np.random.random(3) + grid = np.random.randint(8,32,(3)) + c = grid_filters.cell_coord0(grid+1,size+size/grid) + n = grid_filters.node_coord0(grid,size) + size/grid*.5 + assert np.allclose(c,n) + + @pytest.mark.parametrize('mode',[('cell'),('node')]) + def test_grid_DNA(self,mode): + """Ensure that xx_coord0_gridSizeOrigin is the inverse of xx_coord0.""" + grid = np.random.randint(8,32,(3)) + size = np.random.random(3) + origin = np.random.random(3) + coord0 = eval('grid_filters.{}_coord0(grid,size,origin)'.format(mode)) # noqa + _grid,_size,_origin = eval('grid_filters.{}_coord0_gridSizeOrigin(coord0.reshape((-1,3)))'.format(mode)) + assert np.allclose(grid,_grid) and np.allclose(size,_size) and np.allclose(origin,_origin) + + def test_displacement_fluct_equivalence(self): + """Ensure that fluctuations are periodic.""" + size = np.random.random(3) + grid = np.random.randint(8,32,(3)) + F = np.random.random(tuple(grid)+(3,3)) + assert np.allclose(grid_filters.node_displacement_fluct(size,F), + grid_filters.cell_2_node(grid_filters.cell_displacement_fluct(size,F))) + + def test_interpolation_nonperiodic(self): + size = np.random.random(3) + grid = np.random.randint(8,32,(3)) + F = np.random.random(tuple(grid)+(3,3)) + assert np.allclose(grid_filters.node_coord(size,F) [1:-1,1:-1,1:-1],grid_filters.cell_2_node( + grid_filters.cell_coord(size,F))[1:-1,1:-1,1:-1]) + + @pytest.mark.parametrize('mode',[('cell'),('node')]) + def test_coord0_origin(self,mode): + origin= np.random.random(3) + size = np.random.random(3) # noqa + grid = np.random.randint(8,32,(3)) + shifted = eval('grid_filters.{}_coord0(grid,size,origin)'.format(mode)) + unshifted = eval('grid_filters.{}_coord0(grid,size)'.format(mode)) + if mode == 'cell': + assert np.allclose(shifted,unshifted+np.broadcast_to(origin,tuple(grid[::-1]) +(3,))) + elif mode == 'node': + assert np.allclose(shifted,unshifted+np.broadcast_to(origin,tuple(grid[::-1]+1)+(3,))) + + @pytest.mark.parametrize('mode',[('cell'),('node')]) + def test_displacement_avg_vanishes(self,mode): + """Ensure that random fluctuations in F do not result in average displacement.""" + size = np.random.random(3) # noqa + grid = np.random.randint(8,32,(3)) + F = np.random.random(tuple(grid)+(3,3)) + F += np.eye(3) - np.average(F,axis=(0,1,2)) + assert np.allclose(eval('grid_filters.{}_displacement_avg(size,F)'.format(mode)),0.0) + + @pytest.mark.parametrize('mode',[('cell'),('node')]) + def test_displacement_fluct_vanishes(self,mode): + 
"""Ensure that constant F does not result in fluctuating displacement.""" + size = np.random.random(3) # noqa + grid = np.random.randint(8,32,(3)) + F = np.broadcast_to(np.random.random((3,3)), tuple(grid)+(3,3)) # noqa + assert np.allclose(eval('grid_filters.{}_displacement_fluct(size,F)'.format(mode)),0.0) diff --git a/python/tests/test_mechanics.py b/python/tests/test_mechanics.py new file mode 100644 index 000000000..9e1d9bc0c --- /dev/null +++ b/python/tests/test_mechanics.py @@ -0,0 +1,188 @@ +import numpy as np +from damask import mechanics + +class TestMechanics: + + n = 1000 + c = np.random.randint(n) + + + def test_vectorize_Cauchy(self): + P = np.random.random((self.n,3,3)) + F = np.random.random((self.n,3,3)) + assert np.allclose(mechanics.Cauchy(F,P)[self.c], + mechanics.Cauchy(F[self.c],P[self.c])) + + + def test_vectorize_strain_tensor(self): + F = np.random.random((self.n,3,3)) + t = ['V','U'][np.random.randint(0,2)] + m = np.random.random()*10. -5.0 + assert np.allclose(mechanics.strain_tensor(F,t,m)[self.c], + mechanics.strain_tensor(F[self.c],t,m)) + + + def test_vectorize_deviatoric_part(self): + x = np.random.random((self.n,3,3)) + assert np.allclose(mechanics.deviatoric_part(x)[self.c], + mechanics.deviatoric_part(x[self.c])) + + + def test_vectorize_spherical_part(self): + x = np.random.random((self.n,3,3)) + assert np.allclose(mechanics.spherical_part(x,True)[self.c], + mechanics.spherical_part(x[self.c],True)) + + + def test_vectorize_Mises_stress(self): + sigma = np.random.random((self.n,3,3)) + assert np.allclose(mechanics.Mises_stress(sigma)[self.c], + mechanics.Mises_stress(sigma[self.c])) + + + def test_vectorize_Mises_strain(self): + epsilon = np.random.random((self.n,3,3)) + assert np.allclose(mechanics.Mises_strain(epsilon)[self.c], + mechanics.Mises_strain(epsilon[self.c])) + + + def test_vectorize_symmetric(self): + x = np.random.random((self.n,3,3)) + assert np.allclose(mechanics.symmetric(x)[self.c], + mechanics.symmetric(x[self.c])) + + + def test_vectorize_maximum_shear(self): + x = np.random.random((self.n,3,3)) + assert np.allclose(mechanics.maximum_shear(x)[self.c], + mechanics.maximum_shear(x[self.c])) + + + def test_vectorize_principal_components(self): + x = np.random.random((self.n,3,3)) + assert np.allclose(mechanics.principal_components(x)[self.c], + mechanics.principal_components(x[self.c])) + + + def test_vectorize_transpose(self): + x = np.random.random((self.n,3,3)) + assert np.allclose(mechanics.transpose(x)[self.c], + mechanics.transpose(x[self.c])) + + + def test_vectorize_rotational_part(self): + x = np.random.random((self.n,3,3)) + assert np.allclose(mechanics.rotational_part(x)[self.c], + mechanics.rotational_part(x[self.c])) + + + def test_vectorize_left_stretch(self): + x = np.random.random((self.n,3,3)) + assert np.allclose(mechanics.left_stretch(x)[self.c], + mechanics.left_stretch(x[self.c])) + + + def test_vectorize_right_stretch(self): + x = np.random.random((self.n,3,3)) + assert np.allclose(mechanics.right_stretch(x)[self.c], + mechanics.right_stretch(x[self.c])) + + + def test_Cauchy(self): + """Ensure Cauchy stress is symmetrized 1. 
Piola-Kirchhoff stress for no deformation.""" + P = np.random.random((self.n,3,3)) + assert np.allclose(mechanics.Cauchy(np.broadcast_to(np.eye(3),(self.n,3,3)),P), + mechanics.symmetric(P)) + + def test_polar_decomposition(self): + """F = RU = VR.""" + F = np.broadcast_to(np.eye(3),[self.n,3,3])*np.random.random((self.n,3,3)) + R = mechanics.rotational_part(F) + V = mechanics.left_stretch(F) + U = mechanics.right_stretch(F) + assert np.allclose(np.matmul(R,U), + np.matmul(V,R)) + + + def test_strain_tensor_no_rotation(self): + """Ensure that left and right stretch give same results for no rotation.""" + F = np.broadcast_to(np.eye(3),[self.n,3,3])*np.random.random((self.n,3,3)) + m = np.random.random()*20.0-10.0 + assert np.allclose(mechanics.strain_tensor(F,'U',m), + mechanics.strain_tensor(F,'V',m)) + + def test_strain_tensor_rotation_equivalence(self): + """Ensure that left and right strain differ only by a rotation.""" + F = np.broadcast_to(np.eye(3),[self.n,3,3]) + (np.random.random((self.n,3,3))*0.5 - 0.25) + m = np.random.random()*5.0-2.5 + assert np.allclose(np.linalg.det(mechanics.strain_tensor(F,'U',m)), + np.linalg.det(mechanics.strain_tensor(F,'V',m))) + + def test_strain_tensor_rotation(self): + """Ensure that pure rotation results in no strain.""" + F = mechanics.rotational_part(np.random.random((self.n,3,3))) + t = ['V','U'][np.random.randint(0,2)] + m = np.random.random()*2.0 - 1.0 + assert np.allclose(mechanics.strain_tensor(F,t,m), + 0.0) + + def test_rotation_determinant(self): + """ + Ensure that the determinant of the rotational part is +- 1. + + Should be +1, but random F might contain a reflection. + """ + x = np.random.random((self.n,3,3)) + assert np.allclose(np.abs(np.linalg.det(mechanics.rotational_part(x))), + 1.0) + + + def test_spherical_deviatoric_part(self): + """Ensure that full tensor is sum of spherical and deviatoric part.""" + x = np.random.random((self.n,3,3)) + sph = mechanics.spherical_part(x,True) + assert np.allclose(sph + mechanics.deviatoric_part(x), + x) + + def test_deviatoric_Mises(self): + """Ensure that Mises equivalent stress depends only on deviatoric part.""" + x = np.random.random((self.n,3,3)) + full = mechanics.Mises_stress(x) + dev = mechanics.Mises_stress(mechanics.deviatoric_part(x)) + assert np.allclose(full, + dev) + + def test_spherical_mapping(self): + """Ensure that mapping to tensor is correct.""" + x = np.random.random((self.n,3,3)) + tensor = mechanics.spherical_part(x,True) + scalar = mechanics.spherical_part(x) + assert np.allclose(np.linalg.det(tensor), + scalar**3.0) + + def test_spherical_Mises(self): + """Ensure that Mises equivalent strain of spherical strain is 0.""" + x = np.random.random((self.n,3,3)) + sph = mechanics.spherical_part(x,True) + assert np.allclose(mechanics.Mises_strain(sph), + 0.0) + + def test_symmetric(self): + """Ensure that a symmetric tensor is half of the sum of a tensor and its transpose.""" + x = np.random.random((self.n,3,3)) + assert np.allclose(mechanics.symmetric(x)*2.0, + mechanics.transpose(x)+x) + + + def test_transpose(self): + """Ensure that a symmetric tensor equals its transpose.""" + x = mechanics.symmetric(np.random.random((self.n,3,3))) + assert np.allclose(mechanics.transpose(x), + x) + + + def test_Mises(self): + """Ensure that equivalent stress is 3/2 of equivalent strain.""" + x = np.random.random((self.n,3,3)) + assert np.allclose(mechanics.Mises_stress(x)/mechanics.Mises_strain(x), + 1.5) diff --git a/src/CPFEM.f90 b/src/CPFEM.f90 index 9bf8c547c..d74155703 100644 ---
a/src/CPFEM.f90 +++ b/src/CPFEM.f90 @@ -87,10 +87,8 @@ subroutine CPFEM_initAll(el,ip) call math_init call rotations_init call FE_init -#ifdef DAMASK_HDF5 call HDF5_utilities_init call results_init -#endif call mesh_init(ip, el) call lattice_init call material_init @@ -374,15 +372,13 @@ subroutine CPFEM_results(inc,time) integer(pInt), intent(in) :: inc real(pReal), intent(in) :: time -#ifdef DAMASK_HDF5 call results_openJobFile call results_addIncrement(inc,time) call constitutive_results call crystallite_results call homogenization_results - call results_removeLink('current') ! ToDo: put this into closeJobFile + call results_finalizeIncrement call results_closeJobFile -#endif end subroutine CPFEM_results diff --git a/src/CPFEM2.f90 b/src/CPFEM2.f90 index 7123602f8..bc3a424e3 100644 --- a/src/CPFEM2.f90 +++ b/src/CPFEM2.f90 @@ -14,7 +14,6 @@ module CPFEM2 use material use lattice use IO - use HDF5 use DAMASK_interface use results use discretization @@ -42,7 +41,7 @@ contains !-------------------------------------------------------------------------------------------------- -!> @brief call (thread safe) all module initializations +!> @brief call all module initializations !-------------------------------------------------------------------------------------------------- subroutine CPFEM_initAll @@ -65,34 +64,28 @@ subroutine CPFEM_initAll call constitutive_init call crystallite_init call homogenization_init - call materialpoint_postResults call CPFEM_init end subroutine CPFEM_initAll + !-------------------------------------------------------------------------------------------------- !> @brief allocate the arrays defined in module CPFEM and initialize them !-------------------------------------------------------------------------------------------------- subroutine CPFEM_init - integer :: ph,homog - character(len=1024) :: rankStr, PlasticItem, HomogItem - integer(HID_T) :: fileHandle, groupPlasticID, groupHomogID + integer :: i + integer(HID_T) :: fileHandle, groupHandle + character(len=pStringLen) :: fileName, datasetName - write(6,'(/,a)') ' <<<+- CPFEM init -+>>>' - flush(6) + write(6,'(/,a)') ' <<<+- CPFEM init -+>>>'; flush(6) - ! 
*** restore the last converged values of each essential variable if (interface_restartInc > 0) then - if (iand(debug_level(debug_CPFEM), debug_levelExtensive) /= 0) then - write(6,'(a)') '<< CPFEM >> restored state variables of last converged step from hdf5 file' - flush(6) - endif - - write(rankStr,'(a1,i0)')'_',worldrank + write(6,'(/,a,i0,a)') ' reading restart information of increment ', interface_restartInc, ' from file' - fileHandle = HDF5_openFile(trim(getSolverJobName())//trim(rankStr)//'.hdf5') - + write(fileName,'(a,i0,a)') trim(getSolverJobName())//'_',worldrank,'.hdf5' + fileHandle = HDF5_openFile(fileName) + call HDF5_read(fileHandle,crystallite_F0, 'F') call HDF5_read(fileHandle,crystallite_Fp0,'Fp') call HDF5_read(fileHandle,crystallite_Fi0,'Fi') @@ -100,19 +93,19 @@ subroutine CPFEM_init call HDF5_read(fileHandle,crystallite_Li0,'Li') call HDF5_read(fileHandle,crystallite_S0, 'S') - groupPlasticID = HDF5_openGroup(fileHandle,'constituent') - do ph = 1,size(phase_plasticity) - write(PlasticItem,*) ph,'_' - call HDF5_read(groupPlasticID,plasticState(ph)%state0,trim(PlasticItem)//'omega_plastic') + groupHandle = HDF5_openGroup(fileHandle,'constituent') + do i = 1,size(phase_plasticity) + write(datasetName,'(i0,a)') i,'_omega_plastic' + call HDF5_read(groupHandle,plasticState(i)%state0,datasetName) enddo - call HDF5_closeGroup(groupPlasticID) - - groupHomogID = HDF5_openGroup(fileHandle,'materialpoint') - do homog = 1, material_Nhomogenization - write(HomogItem,*) homog,'_' - call HDF5_read(groupHomogID,homogState(homog)%state0, trim(HomogItem)//'omega_homogenization') + call HDF5_closeGroup(groupHandle) + + groupHandle = HDF5_openGroup(fileHandle,'materialpoint') + do i = 1, material_Nhomogenization + write(datasetName,'(i0,a)') i,'_omega_homogenization' + call HDF5_read(groupHandle,homogState(i)%state0,datasetName) enddo - call HDF5_closeGroup(groupHomogID) + call HDF5_closeGroup(groupHandle) call HDF5_closeFile(fileHandle) endif @@ -126,7 +119,7 @@ end subroutine CPFEM_init !-------------------------------------------------------------------------------------------------- subroutine CPFEM_forward - integer :: i, homog, mySource + integer :: i, j if (iand(debug_level(debug_CPFEM), debug_levelBasic) /= 0) & write(6,'(a)') '<< CPFEM >> aging states' @@ -142,32 +135,31 @@ subroutine CPFEM_forward plasticState(i)%state0 = plasticState(i)%state enddo do i = 1, size(sourceState) - do mySource = 1,phase_Nsources(i) - sourceState(i)%p(mySource)%state0 = sourceState(i)%p(mySource)%state + do j = 1,phase_Nsources(i) + sourceState(i)%p(j)%state0 = sourceState(i)%p(j)%state enddo; enddo - do homog = 1, material_Nhomogenization - homogState (homog)%state0 = homogState (homog)%state - thermalState (homog)%state0 = thermalState (homog)%state - damageState (homog)%state0 = damageState (homog)%state + do i = 1, material_Nhomogenization + homogState (i)%state0 = homogState (i)%state + thermalState(i)%state0 = thermalState(i)%state + damageState (i)%state0 = damageState (i)%state enddo end subroutine CPFEM_forward !-------------------------------------------------------------------------------------------------- -!> @brief Write current constitutive variables for restart to file. +!> @brief Write current restart information (Field and constitutive data) to file. 
!-------------------------------------------------------------------------------------------------- subroutine CPFEM_restartWrite - integer :: ph, homog - character(len=32) :: rankStr, PlasticItem, HomogItem - integer(HID_T) :: fileHandle, groupPlastic, groupHomog + integer :: i + integer(HID_T) :: fileHandle, groupHandle + character(len=pStringLen) :: fileName, datasetName - - write(6,'(a)') ' writing constitutive data required for restart to file';flush(6) + write(6,'(a)') ' writing field and constitutive data required for restart to file';flush(6) - write(rankStr,'(a1,i0)')'_',worldrank - fileHandle = HDF5_openFile(trim(getSolverJobName())//trim(rankStr)//'.hdf5','a') + write(fileName,'(a,i0,a)') trim(getSolverJobName())//'_',worldrank,'.hdf5' + fileHandle = HDF5_openFile(fileName,'a') call HDF5_write(fileHandle,crystallite_partionedF,'F') call HDF5_write(fileHandle,crystallite_Fp, 'Fp') @@ -176,19 +168,19 @@ subroutine CPFEM_restartWrite call HDF5_write(fileHandle,crystallite_Li, 'Li') call HDF5_write(fileHandle,crystallite_S, 'S') - groupPlastic = HDF5_addGroup(fileHandle,'constituent') - do ph = 1,size(phase_plasticity) - write(PlasticItem,*) ph,'_' - call HDF5_write(groupPlastic,plasticState(ph)%state,trim(PlasticItem)//'omega_plastic') + groupHandle = HDF5_addGroup(fileHandle,'constituent') + do i = 1,size(phase_plasticity) + write(datasetName,'(i0,a)') i,'_omega_plastic' + call HDF5_write(groupHandle,plasticState(i)%state,datasetName) enddo - call HDF5_closeGroup(groupPlastic) + call HDF5_closeGroup(groupHandle) - groupHomog = HDF5_addGroup(fileHandle,'materialpoint') - do homog = 1, material_Nhomogenization - write(HomogItem,*) homog,'_' - call HDF5_write(groupHomog,homogState(homog)%state,trim(HomogItem)//'omega_homogenization') + groupHandle = HDF5_addGroup(fileHandle,'materialpoint') + do i = 1, material_Nhomogenization + write(datasetName,'(i0,a)') i,'_omega_homogenization' + call HDF5_write(groupHandle,homogState(i)%state,datasetName) enddo - call HDF5_closeGroup(groupHomog) + call HDF5_closeGroup(groupHandle) call HDF5_closeFile(fileHandle) @@ -209,7 +201,7 @@ subroutine CPFEM_results(inc,time) call crystallite_results call homogenization_results call discretization_results - call results_removeLink('current') ! ToDo: put this into closeJobFile + call results_finalizeIncrement call results_closeJobFile end subroutine CPFEM_results diff --git a/src/DAMASK_abaqus.f b/src/DAMASK_abaqus.f index e2c56a06e..0f663dde3 100644 --- a/src/DAMASK_abaqus.f +++ b/src/DAMASK_abaqus.f @@ -143,9 +143,6 @@ subroutine UMAT(STRESS,STATEV,DDSDDE,SSE,SPD,SCD,& outdatedByNewInc, & outdatedFFN1, & lastStep - use homogenization, only: & - materialpoint_sizeResults, & - materialpoint_results implicit none integer(pInt), intent(in) :: & @@ -332,7 +329,7 @@ subroutine UMAT(STRESS,STATEV,DDSDDE,SSE,SPD,SCD,& ddsdde(6,:) = ddsdde_h(5,:) end if - statev = materialpoint_results(1:min(nstatv,materialpoint_sizeResults),npt,mesh_FEasCP('elem', noel)) + statev = 0 if (terminallyIll) pnewdt = 0.5_pReal ! force cutback directly ? !$ call omp_set_num_threads(defaultNumThreadsInt) ! reset number of threads to stored default value diff --git a/src/DAMASK_interface.f90 b/src/DAMASK_interface.f90 index ee8f220ae..27a0084f5 100644 --- a/src/DAMASK_interface.f90 +++ b/src/DAMASK_interface.f90 @@ -10,7 +10,7 @@ !> and working directory. 
!-------------------------------------------------------------------------------------------------- #define GCC_MIN 6 -#define INTEL_MIN 1600 +#define INTEL_MIN 1700 #define PETSC_MAJOR 3 #define PETSC_MINOR_MIN 10 #define PETSC_MINOR_MAX 12 @@ -269,10 +269,10 @@ subroutine DAMASK_interface_init write(6,'(a,a)') ' Working dir argument: ', trim(workingDirArg) write(6,'(a,a)') ' Geometry argument: ', trim(geometryArg) write(6,'(a,a)') ' Load case argument: ', trim(loadcaseArg) - write(6,'(a,a)') ' Working directory: ', trim(getCWD()) + write(6,'(a,a)') ' Working directory: ', getCWD() write(6,'(a,a)') ' Geometry file: ', trim(geometryFile) write(6,'(a,a)') ' Loadcase file: ', trim(loadCaseFile) - write(6,'(a,a)') ' Solver job name: ', trim(getSolverJobName()) + write(6,'(a,a)') ' Solver job name: ', getSolverJobName() if (interface_restartInc > 0) & write(6,'(a,i6.6)') ' Restart from increment: ', interface_restartInc @@ -308,7 +308,7 @@ subroutine setWorkingDirectory(workingDirectoryArg) workingDirectory = trim(rectifyPath(workingDirectory)) error = setCWD(trim(workingDirectory)) if(error) then - write(6,'(/,a)') ' ERROR: Working directory "'//trim(workingDirectory)//'" does not exist' + write(6,'(/,a)') ' ERROR: Invalid Working directory: '//trim(workingDirectory) call quit(1) endif @@ -318,8 +318,9 @@ end subroutine setWorkingDirectory !-------------------------------------------------------------------------------------------------- !> @brief solver job name (no extension) as combination of geometry and load case name !-------------------------------------------------------------------------------------------------- -character(len=1024) function getSolverJobName() +function getSolverJobName() + character(len=:), allocatable :: getSolverJobName integer :: posExt,posSep posExt = scan(geometryFile,'.',back=.true.) @@ -330,7 +331,7 @@ character(len=1024) function getSolverJobName() posExt = scan(loadCaseFile,'.',back=.true.) posSep = scan(loadCaseFile,'/',back=.true.) - getSolverJobName = trim(getSolverJobName)//'_'//loadCaseFile(posSep+1:posExt-1) + getSolverJobName = getSolverJobName//'_'//loadCaseFile(posSep+1:posExt-1) end function getSolverJobName @@ -338,15 +339,16 @@ end function getSolverJobName !-------------------------------------------------------------------------------------------------- !> @brief basename of geometry file with extension from command line arguments !-------------------------------------------------------------------------------------------------- -character(len=1024) function getGeometryFile(geometryParameter) +function getGeometryFile(geometryParameter) - character(len=1024), intent(in) :: geometryParameter - logical :: file_exists - external :: quit + character(len=:), allocatable :: getGeometryFile + character(len=*), intent(in) :: geometryParameter + logical :: file_exists + external :: quit getGeometryFile = trim(geometryParameter) - if (scan(getGeometryFile,'/') /= 1) getGeometryFile = trim(getCWD())//'/'//trim(getGeometryFile) - getGeometryFile = makeRelativePath(trim(getCWD()), getGeometryFile) + if (scan(getGeometryFile,'/') /= 1) getGeometryFile = getCWD()//'/'//trim(getGeometryFile) + getGeometryFile = makeRelativePath(getCWD(), getGeometryFile) inquire(file=trim(getGeometryFile), exist=file_exists) if (.not. 
file_exists) then @@ -360,15 +362,16 @@ end function getGeometryFile !-------------------------------------------------------------------------------------------------- !> @brief relative path of loadcase from command line arguments !-------------------------------------------------------------------------------------------------- -character(len=1024) function getLoadCaseFile(loadCaseParameter) +function getLoadCaseFile(loadCaseParameter) - character(len=1024), intent(in) :: loadCaseParameter - logical :: file_exists - external :: quit + character(len=:), allocatable :: getLoadCaseFile + character(len=*), intent(in) :: loadCaseParameter + logical :: file_exists + external :: quit getLoadCaseFile = trim(loadCaseParameter) - if (scan(getLoadCaseFile,'/') /= 1) getLoadCaseFile = trim(getCWD())//'/'//trim(getLoadCaseFile) - getLoadCaseFile = makeRelativePath(trim(getCWD()), getLoadCaseFile) + if (scan(getLoadCaseFile,'/') /= 1) getLoadCaseFile = getCWD()//'/'//trim(getLoadCaseFile) + getLoadCaseFile = makeRelativePath(getCWD(), getLoadCaseFile) inquire(file=trim(getLoadCaseFile), exist=file_exists) if (.not. file_exists) then diff --git a/src/FEsolving.f90 b/src/FEsolving.f90 index 38788a065..8c9683266 100644 --- a/src/FEsolving.f90 +++ b/src/FEsolving.f90 @@ -46,10 +46,10 @@ subroutine FE_init call IO_open_inputFile(FILEUNIT) rewind(FILEUNIT) do - read (FILEUNIT,'(a256)',END=100) line + read (FILEUNIT,'(A)',END=100) line chunkPos = IO_stringPos(line) if(IO_lc(IO_stringValue(line,chunkPos,1)) == 'solver') then - read (FILEUNIT,'(a256)',END=100) line ! next line + read (FILEUNIT,'(A)',END=100) line ! next line chunkPos = IO_stringPos(line) symmetricSolver = (IO_intValue(line,chunkPos,2) /= 1) endif diff --git a/src/HDF5_utilities.f90 b/src/HDF5_utilities.f90 index e4819431e..02e575d98 100644 --- a/src/HDF5_utilities.f90 +++ b/src/HDF5_utilities.f90 @@ -5,9 +5,7 @@ !> @author Martin Diehl, Max-Planck-Institut für Eisenforschung GmbH !-------------------------------------------------------------------------------------------------- module HDF5_utilities -#if defined(PETSc) || defined(DAMASK_HDF5) use HDF5 -#endif #ifdef PETSc use PETSC #endif @@ -20,7 +18,6 @@ module HDF5_utilities implicit none public -#if defined(PETSc) || defined(DAMASK_HDF5) !-------------------------------------------------------------------------------------------------- !> @brief reads integer or float data of defined shape from file ! 
ToDo: order of arguments wrong !> @details for parallel IO, all dimension except for the last need to match @@ -279,8 +276,8 @@ logical function HDF5_objectExists(loc_id,path) integer(HID_T), intent(in) :: loc_id character(len=*), intent(in), optional :: path - integer :: hdferr - character(len=256) :: p + integer :: hdferr + character(len=pStringLen) :: p if (present(path)) then p = trim(path) @@ -308,10 +305,10 @@ subroutine HDF5_addAttribute_str(loc_id,attrLabel,attrValue,path) character(len=*), intent(in) :: attrLabel, attrValue character(len=*), intent(in), optional :: path - integer :: hdferr - integer(HID_T) :: attr_id, space_id, type_id - logical :: attrExists - character(len=256) :: p + integer :: hdferr + integer(HID_T) :: attr_id, space_id, type_id + logical :: attrExists + character(len=pStringLen) :: p if (present(path)) then p = trim(path) @@ -355,10 +352,10 @@ subroutine HDF5_addAttribute_int(loc_id,attrLabel,attrValue,path) integer, intent(in) :: attrValue character(len=*), intent(in), optional :: path - integer :: hdferr - integer(HID_T) :: attr_id, space_id - logical :: attrExists - character(len=256) :: p + integer :: hdferr + integer(HID_T) :: attr_id, space_id + logical :: attrExists + character(len=pStringLen) :: p if (present(path)) then p = trim(path) @@ -396,10 +393,10 @@ subroutine HDF5_addAttribute_real(loc_id,attrLabel,attrValue,path) real(pReal), intent(in) :: attrValue character(len=*), intent(in), optional :: path - integer :: hdferr - integer(HID_T) :: attr_id, space_id - logical :: attrExists - character(len=256) :: p + integer :: hdferr + integer(HID_T) :: attr_id, space_id + logical :: attrExists + character(len=pStringLen) :: p if (present(path)) then p = trim(path) @@ -441,7 +438,7 @@ subroutine HDF5_addAttribute_int_array(loc_id,attrLabel,attrValue,path) integer(HID_T) :: attr_id, space_id integer(HSIZE_T),dimension(1) :: array_size logical :: attrExists - character(len=256) :: p + character(len=pStringLen) :: p if (present(path)) then p = trim(path) @@ -485,7 +482,7 @@ subroutine HDF5_addAttribute_real_array(loc_id,attrLabel,attrValue,path) integer(HID_T) :: attr_id, space_id integer(HSIZE_T),dimension(1) :: array_size logical :: attrExists - character(len=256) :: p + character(len=pStringLen) :: p if (present(path)) then p = trim(path) @@ -1928,6 +1925,5 @@ subroutine finalize_write(plist_id, dset_id, filespace_id, memspace_id) if (hdferr < 0) call IO_error(1,ext_msg='finalize_write: h5sclose_f/memspace_id') end subroutine finalize_write -#endif end module HDF5_Utilities diff --git a/src/IO.f90 b/src/IO.f90 index a585fc7c4..0436e9b19 100644 --- a/src/IO.f90 +++ b/src/IO.f90 @@ -11,9 +11,11 @@ module IO implicit none private - character(len=5), parameter, public :: & + character(len=*), parameter, public :: & IO_EOF = '#EOF#' !< end of file string - character(len=207), parameter, private :: & + character, parameter, public :: & + IO_EOL = new_line(' ') !< end of line str + character(len=*), parameter, private :: & IO_DIVIDER = '───────────────────'//& '───────────────────'//& '───────────────────'//& @@ -21,9 +23,8 @@ module IO public :: & IO_init, & IO_read_ASCII, & - IO_open_file, & + IO_open_file, & ! 
deprecated, use IO_read_ASCII IO_open_jobFile_binary, & - IO_write_jobFile, & IO_isBlank, & IO_getTag, & IO_stringPos, & @@ -32,8 +33,7 @@ module IO IO_intValue, & IO_lc, & IO_error, & - IO_warning, & - IO_intOut + IO_warning #if defined(Marc4DAMASK) || defined(Abaqus) public :: & IO_open_inputFile, & @@ -44,15 +44,10 @@ module IO IO_countDataLines #elif defined(Marc4DAMASK) IO_fixedNoEFloatValue, & - IO_fixedIntValue, & - IO_countNumericalDataLines + IO_fixedIntValue #endif #endif - private :: & - IO_verifyFloatValue, & - IO_verifyIntValue - contains @@ -104,7 +99,7 @@ function IO_read_ASCII(fileName) result(fileContent) ! count lines to allocate string array myTotalLines = 1 do l=1, len(rawData) - if (rawData(l:l) == new_line('')) myTotalLines = myTotalLines+1 + if (rawData(l:l) == IO_EOL) myTotalLines = myTotalLines+1 enddo allocate(fileContent(myTotalLines)) @@ -114,7 +109,7 @@ function IO_read_ASCII(fileName) result(fileContent) startPos = 1 l = 1 do while (l <= myTotalLines) - endPos = merge(startPos + scan(rawData(startPos:),new_line('')) - 2,len(rawData),l /= myTotalLines) + endPos = merge(startPos + scan(rawData(startPos:),IO_EOL) - 2,len(rawData),l /= myTotalLines) if (endPos - startPos > pStringLen-1) then line = rawData(startPos:startPos+pStringLen-1) if (.not. warned) then @@ -244,12 +239,12 @@ subroutine IO_open_inputFile(fileUnit) integer, allocatable, dimension(:) :: chunkPos - character(len=65536) :: line,fname + character(len=pStringLen) :: line,fname logical :: createSuccess,fexist do - read(unit2,'(A65536)',END=220) line + read(unit2,'(A)',END=220) line chunkPos = IO_stringPos(line) if (IO_lc(IO_StringValue(line,chunkPos,1))=='*include') then @@ -290,25 +285,6 @@ end subroutine IO_open_inputFile #endif -!-------------------------------------------------------------------------------------------------- -!> @brief opens ASCII file to given unit for writing. File is named after solver job name plus -!! given extension and located in current working directory -!-------------------------------------------------------------------------------------------------- -subroutine IO_write_jobFile(fileUnit,ext) - - integer, intent(in) :: fileUnit !< file unit - character(len=*), intent(in) :: ext !< extension of file - - integer :: myStat - character(len=1024) :: path - - path = trim(getSolverJobName())//'.'//ext - open(fileUnit,status='replace',iostat=myStat,file=path) - if (myStat /= 0) call IO_error(100,el=myStat,ext_msg=path) - -end subroutine IO_write_jobFile - - !-------------------------------------------------------------------------------------------------- !> @brief identifies strings without content !-------------------------------------------------------------------------------------------------- @@ -402,12 +378,12 @@ function IO_stringValue(string,chunkPos,myChunk,silent) character(len=:), allocatable :: IO_stringValue logical, optional,intent(in) :: silent !< switch to trigger verbosity - character(len=16), parameter :: MYNAME = 'IO_stringValue: ' + character(len=*), parameter :: MYNAME = 'IO_stringValue: ' logical :: warn if (present(silent)) then - warn = silent + warn = .not. silent else warn = .false.
endif @@ -430,18 +406,17 @@ real(pReal) function IO_floatValue (string,chunkPos,myChunk) integer, dimension(:), intent(in) :: chunkPos !< positions of start and end of each tag/chunk in given string integer, intent(in) :: myChunk !< position number of desired chunk character(len=*), intent(in) :: string !< raw input with known start and end of each chunk - character(len=15), parameter :: MYNAME = 'IO_floatValue: ' - character(len=17), parameter :: VALIDCHARACTERS = '0123456789eEdD.+-' + character(len=*), parameter :: MYNAME = 'IO_floatValue: ' + character(len=*), parameter :: VALIDCHARACTERS = '0123456789eEdD.+-' IO_floatValue = 0.0_pReal valuePresent: if (myChunk > chunkPos(1) .or. myChunk < 1) then call IO_warning(201,el=myChunk,ext_msg=MYNAME//trim(string)) - else valuePresent - IO_floatValue = & - IO_verifyFloatValue(trim(adjustl(string(chunkPos(myChunk*2):chunkPos(myChunk*2+1)))),& - VALIDCHARACTERS,MYNAME) - endif valuePresent + else valuePresent + IO_floatValue = verifyFloatValue(trim(adjustl(string(chunkPos(myChunk*2):chunkPos(myChunk*2+1)))),& + VALIDCHARACTERS,MYNAME) + endif valuePresent end function IO_floatValue @@ -454,15 +429,15 @@ integer function IO_intValue(string,chunkPos,myChunk) character(len=*), intent(in) :: string !< raw input with known start and end of each chunk integer, intent(in) :: myChunk !< position number of desired chunk integer, dimension(:), intent(in) :: chunkPos !< positions of start and end of each tag/chunk in given string - character(len=13), parameter :: MYNAME = 'IO_intValue: ' - character(len=12), parameter :: VALIDCHARACTERS = '0123456789+-' + character(len=*), parameter :: MYNAME = 'IO_intValue: ' + character(len=*), parameter :: VALIDCHARACTERS = '0123456789+-' IO_intValue = 0 valuePresent: if (myChunk > chunkPos(1) .or. myChunk < 1) then call IO_warning(201,el=myChunk,ext_msg=MYNAME//trim(string)) else valuePresent - IO_intValue = IO_verifyIntValue(trim(adjustl(string(chunkPos(myChunk*2):chunkPos(myChunk*2+1)))),& + IO_intValue = verifyIntValue(trim(adjustl(string(chunkPos(myChunk*2):chunkPos(myChunk*2+1)))),& VALIDCHARACTERS,MYNAME) endif valuePresent @@ -478,9 +453,9 @@ real(pReal) function IO_fixedNoEFloatValue (string,ends,myChunk) character(len=*), intent(in) :: string !< raw input with known ends of each chunk integer, intent(in) :: myChunk !< position number of desired chunk integer, dimension(:), intent(in) :: ends !< positions of end of each tag/chunk in given string - character(len=22), parameter :: MYNAME = 'IO_fixedNoEFloatValue ' - character(len=13), parameter :: VALIDBASE = '0123456789.+-' - character(len=12), parameter :: VALIDEXP = '0123456789+-' + character(len=*), parameter :: MYNAME = 'IO_fixedNoEFloatValue ' + character(len=*), parameter :: VALIDBASE = '0123456789.+-' + character(len=*), parameter :: VALIDEXP = '0123456789+-' real(pReal) :: base integer :: expon @@ -488,13 +463,13 @@ real(pReal) function IO_fixedNoEFloatValue (string,ends,myChunk) pos_exp = scan(string(ends(myChunk)+1:ends(myChunk+1)),'+-',back=.true.) 
hasExponent: if (pos_exp > 1) then - base = IO_verifyFloatValue(trim(adjustl(string(ends(myChunk)+1:ends(myChunk)+pos_exp-1))),& - VALIDBASE,MYNAME//'(base): ') - expon = IO_verifyIntValue(trim(adjustl(string(ends(myChunk)+pos_exp:ends(myChunk+1)))),& - VALIDEXP,MYNAME//'(exp): ') + base = verifyFloatValue(trim(adjustl(string(ends(myChunk)+1:ends(myChunk)+pos_exp-1))),& + VALIDBASE,MYNAME//'(base): ') + expon = verifyIntValue(trim(adjustl(string(ends(myChunk)+pos_exp:ends(myChunk+1)))),& + VALIDEXP,MYNAME//'(exp): ') else hasExponent - base = IO_verifyFloatValue(trim(adjustl(string(ends(myChunk)+1:ends(myChunk+1)))),& - VALIDBASE,MYNAME//'(base): ') + base = verifyFloatValue(trim(adjustl(string(ends(myChunk)+1:ends(myChunk+1)))),& + VALIDBASE,MYNAME//'(base): ') expon = 0 endif hasExponent IO_fixedNoEFloatValue = base*10.0_pReal**real(expon,pReal) @@ -510,10 +485,10 @@ integer function IO_fixedIntValue(string,ends,myChunk) character(len=*), intent(in) :: string !< raw input with known ends of each chunk integer, intent(in) :: myChunk !< position number of desired chunk integer, dimension(:), intent(in) :: ends !< positions of end of each tag/chunk in given string - character(len=20), parameter :: MYNAME = 'IO_fixedIntValue: ' - character(len=12), parameter :: VALIDCHARACTERS = '0123456789+-' + character(len=*), parameter :: MYNAME = 'IO_fixedIntValue: ' + character(len=*), parameter :: VALIDCHARACTERS = '0123456789+-' - IO_fixedIntValue = IO_verifyIntValue(trim(adjustl(string(ends(myChunk)+1:ends(myChunk+1)))),& + IO_fixedIntValue = verifyIntValue(trim(adjustl(string(ends(myChunk)+1:ends(myChunk+1)))),& VALIDCHARACTERS,MYNAME) end function IO_fixedIntValue @@ -542,25 +517,6 @@ pure function IO_lc(string) end function IO_lc -!-------------------------------------------------------------------------------------------------- -!> @brief returns format string for integer values without leading zeros -!-------------------------------------------------------------------------------------------------- -pure function IO_intOut(intToPrint) - - integer, intent(in) :: intToPrint - character(len=41) :: IO_intOut - integer :: N_digits - character(len=19) :: width ! maximum digits for 64 bit integer - character(len=20) :: min_width ! 
longer for negative values - - N_digits = 1 + int(log10(real(max(abs(intToPrint),1)))) - write(width, '(I19.19)') N_digits - write(min_width, '(I20.20)') N_digits + merge(1,0,intToPrint < 0) - IO_intOut = 'I'//trim(min_width)//'.'//trim(width) - -end function IO_intOut - - !-------------------------------------------------------------------------------------------------- !> @brief write error statements to standard out and terminate the Marc/spectral run with exit #9xxx !> in ABAQUS either time step is reduced or execution terminated @@ -572,8 +528,8 @@ subroutine IO_error(error_ID,el,ip,g,instance,ext_msg) character(len=*), optional, intent(in) :: ext_msg external :: quit - character(len=1024) :: msg - character(len=1024) :: formatString + character(len=pStringLen) :: msg + character(len=pStringLen) :: formatString select case (error_ID) @@ -592,14 +548,8 @@ subroutine IO_error(error_ID,el,ip,g,instance,ext_msg) msg = 'could not read file:' case (103) msg = 'could not assemble input files' - case (104) - msg = '{input} recursion limit reached' - case (105) - msg = 'unknown output:' case (106) msg = 'working directory does not exist:' - case (107) - msg = 'line length exceeds limit of 256' !-------------------------------------------------------------------------------------------------- ! lattice error messages @@ -819,8 +769,8 @@ subroutine IO_warning(warning_ID,el,ip,g,ext_msg) integer, optional, intent(in) :: el,ip,g character(len=*), optional, intent(in) :: ext_msg - character(len=1024) :: msg - character(len=1024) :: formatString + character(len=pStringLen) :: msg + character(len=pStringLen) :: formatString select case (warning_ID) case (1) @@ -904,7 +854,7 @@ end subroutine IO_warning !-------------------------------------------------------------------------------------------------- function IO_read(fileUnit) result(line) - integer, intent(in) :: fileUnit !< file unit + integer, intent(in) :: fileUnit !< file unit character(len=pStringLen) :: line @@ -944,7 +894,7 @@ integer function IO_countDataLines(fileUnit) integer, allocatable, dimension(:) :: chunkPos - character(len=65536) :: line, & + character(len=pStringLen) :: line, & tmp IO_countDataLines = 0 @@ -966,38 +916,6 @@ end function IO_countDataLines #endif -#ifdef Marc4DAMASK -!-------------------------------------------------------------------------------------------------- -!> @brief count lines containig data up to next *keyword -!-------------------------------------------------------------------------------------------------- -integer function IO_countNumericalDataLines(fileUnit) - - integer, intent(in) :: fileUnit !< file handle - - - integer, allocatable, dimension(:) :: chunkPos - character(len=65536) :: line, & - tmp - - IO_countNumericalDataLines = 0 - line = '' - - do while (trim(line) /= IO_EOF) - line = IO_read(fileUnit) - chunkPos = IO_stringPos(line) - tmp = IO_lc(IO_stringValue(line,chunkPos,1)) - if (verify(trim(tmp),'0123456789') == 0) then ! 
numerical values - IO_countNumericalDataLines = IO_countNumericalDataLines + 1 - else - exit - endif - enddo - backspace(fileUnit) - -end function IO_countNumericalDataLines -#endif - - !-------------------------------------------------------------------------------------------------- !> @brief count items in consecutive lines depending on lines !> @details Marc: ints concatenated by "c" as last char or range of values a "to" b @@ -1011,7 +929,7 @@ integer function IO_countContinuousIntValues(fileUnit) integer :: l,c #endif integer, allocatable, dimension(:) :: chunkPos - character(len=65536) :: line + character(len=pStringLen) :: line IO_countContinuousIntValues = 0 line = '' @@ -1068,21 +986,21 @@ function IO_continuousIntValues(fileUnit,maxN,lookupName,lookupMap,lookupMaxN) integer, intent(in) :: fileUnit, & lookupMaxN integer, dimension(:,:), intent(in) :: lookupMap - character(len=64), dimension(:), intent(in) :: lookupName + character(len=*), dimension(:), intent(in) :: lookupName integer :: i,first,last #ifdef Abaqus integer :: j,l,c #endif integer, allocatable, dimension(:) :: chunkPos - character(len=65536) line - logical rangeGeneration + character(len=pStringLen) :: line + logical :: rangeGeneration IO_continuousIntValues = 0 rangeGeneration = .false. #if defined(Marc4DAMASK) do - read(fileUnit,'(A65536)',end=100) line + read(fileUnit,'(A)',end=100) line chunkPos = IO_stringPos(line) if (chunkPos(1) < 1) then ! empty line exit @@ -1123,14 +1041,14 @@ function IO_continuousIntValues(fileUnit,maxN,lookupName,lookupMap,lookupMaxN) !-------------------------------------------------------------------------------------------------- ! check if the element values in the elset are auto generated backspace(fileUnit) - read(fileUnit,'(A65536)',end=100) line + read(fileUnit,'(A)',end=100) line chunkPos = IO_stringPos(line) do i = 1,chunkPos(1) if (IO_lc(IO_stringValue(line,chunkPos,i)) == 'generate') rangeGeneration = .true. enddo do l = 1,c - read(fileUnit,'(A65536)',end=100) line + read(fileUnit,'(A)',end=100) line chunkPos = IO_stringPos(line) if (verify(IO_stringValue(line,chunkPos,1),'0123456789') > 0) then ! a non-int, i.e. set names follow on this line do i = 1,chunkPos(1) ! loop over set names in line @@ -1168,34 +1086,34 @@ function IO_continuousIntValues(fileUnit,maxN,lookupName,lookupMap,lookupMaxN) !-------------------------------------------------------------------------------------------------- !> @brief returns verified integer value in given string !-------------------------------------------------------------------------------------------------- -integer function IO_verifyIntValue (string,validChars,myName) +integer function verifyIntValue (string,validChars,myName) character(len=*), intent(in) :: string, & !< string for conversion to int value. Must not contain spaces! validChars, & !< valid characters in string myName !< name of caller function (for debugging) integer :: readStatus, invalidWhere - IO_verifyIntValue = 0 + verifyIntValue = 0 invalidWhere = verify(string,validChars) if (invalidWhere == 0) then - read(UNIT=string,iostat=readStatus,FMT=*) IO_verifyIntValue ! no offending chars found + read(UNIT=string,iostat=readStatus,FMT=*) verifyIntValue ! no offending chars found if (readStatus /= 0) & ! error during string to integer conversion call IO_warning(203,ext_msg=myName//'"'//string//'"') else call IO_warning(202,ext_msg=myName//'"'//string//'"') ! 
complain about offending characters - read(UNIT=string(1:invalidWhere-1),iostat=readStatus,FMT=*) IO_verifyIntValue ! interpret remaining string + read(UNIT=string(1:invalidWhere-1),iostat=readStatus,FMT=*) verifyIntValue ! interpret remaining string if (readStatus /= 0) & ! error during string to integer conversion call IO_warning(203,ext_msg=myName//'"'//string(1:invalidWhere-1)//'"') endif -end function IO_verifyIntValue +end function verifyIntValue !-------------------------------------------------------------------------------------------------- !> @brief returns verified float value in given string !-------------------------------------------------------------------------------------------------- -real(pReal) function IO_verifyFloatValue (string,validChars,myName) +real(pReal) function verifyFloatValue (string,validChars,myName) character(len=*), intent(in) :: string, & !< string for conversion to int value. Must not contain spaces! validChars, & !< valid characters in string @@ -1203,20 +1121,20 @@ real(pReal) function IO_verifyFloatValue (string,validChars,myName) integer :: readStatus, invalidWhere - IO_verifyFloatValue = 0.0_pReal + verifyFloatValue = 0.0_pReal invalidWhere = verify(string,validChars) if (invalidWhere == 0) then - read(UNIT=string,iostat=readStatus,FMT=*) IO_verifyFloatValue ! no offending chars found + read(UNIT=string,iostat=readStatus,FMT=*) verifyFloatValue ! no offending chars found if (readStatus /= 0) & ! error during string to float conversion call IO_warning(203,ext_msg=myName//'"'//string//'"') else call IO_warning(202,ext_msg=myName//'"'//string//'"') ! complain about offending characters - read(UNIT=string(1:invalidWhere-1),iostat=readStatus,FMT=*) IO_verifyFloatValue ! interpret remaining string + read(UNIT=string(1:invalidWhere-1),iostat=readStatus,FMT=*) verifyFloatValue ! interpret remaining string if (readStatus /= 0) & ! 
error during string to float conversion call IO_warning(203,ext_msg=myName//'"'//string(1:invalidWhere-1)//'"') endif -end function IO_verifyFloatValue +end function verifyFloatValue end module IO diff --git a/src/commercialFEM_fileList.f90 b/src/commercialFEM_fileList.f90 index 79a385818..31bddd141 100644 --- a/src/commercialFEM_fileList.f90 +++ b/src/commercialFEM_fileList.f90 @@ -1,7 +1,7 @@ !-------------------------------------------------------------------------------------------------- !> @author Martin Diehl, Max-Planck-Institut für Eisenforschung GmbH !> @brief all DAMASK files without solver -!> @details List of files needed by MSC.Marc, Abaqus/Explicit, and Abaqus/Standard +!> @details List of files needed by MSC.Marc and Abaqus/Standard !-------------------------------------------------------------------------------------------------- #include "IO.f90" #include "numerics.f90" @@ -15,7 +15,6 @@ #include "rotations.f90" #include "FEsolving.f90" #include "element.f90" -#include "mesh_base.f90" #include "HDF5_utilities.f90" #include "results.f90" #include "geometry_plastic_nonlocal.f90" diff --git a/src/config.f90 b/src/config.f90 index 00b473767..cffa9f644 100644 --- a/src/config.f90 +++ b/src/config.f90 @@ -27,7 +27,7 @@ module config config_numerics, & config_debug - character(len=64), dimension(:), allocatable, public, protected :: & + character(len=pStringLen), dimension(:), allocatable, public, protected :: & config_name_phase, & !< name of each phase config_name_homogenization, & !< name of each homogenization config_name_crystallite, & !< name of each crystallite setting @@ -54,7 +54,7 @@ subroutine config_init character(len=pStringLen), dimension(:), allocatable :: fileContent logical :: fileExists - write(6,'(/,a)') ' <<<+- config init -+>>>' + write(6,'(/,a)') ' <<<+- config init -+>>>'; flush(6) verbose = iand(debug_level(debug_material),debug_levelBasic) /= 0 @@ -214,7 +214,7 @@ end function read_materialConfig subroutine parse_materialConfig(sectionNames,part,line, & fileContent) - character(len=64), allocatable, dimension(:), intent(out) :: sectionNames + character(len=pStringLen), allocatable, dimension(:), intent(out) :: sectionNames type(tPartitionedStringList), allocatable, dimension(:), intent(inout) :: part character(len=pStringLen), intent(inout) :: line character(len=pStringLen), dimension(:), intent(in) :: fileContent @@ -222,7 +222,7 @@ subroutine parse_materialConfig(sectionNames,part,line, & integer, allocatable, dimension(:) :: partPosition !< position of [] tags + last line in section integer :: i, j logical :: echo - character(len=pStringLen) :: section_ID + character(len=pStringLen) :: sectionName echo = .false. @@ -246,8 +246,8 @@ subroutine parse_materialConfig(sectionNames,part,line, & partPosition = [partPosition, i] ! 
needed when actually storing content do i = 1, size(partPosition) -1 - write(section_ID,'('//IO_intOut(size(partPosition))//')') i - sectionNames(i) = trim(section_ID)//'_'//trim(adjustl(IO_getTag(fileContent(partPosition(i)),'[',']'))) + write(sectionName,'(i0,a,a)') i,'_',trim(IO_getTag(fileContent(partPosition(i)),'[',']')) + sectionNames(i) = sectionName do j = partPosition(i) + 1, partPosition(i+1) -1 call part(i)%add(trim(adjustl(fileContent(j)))) enddo diff --git a/src/constitutive.f90 b/src/constitutive.f90 index 6ef5ee73d..9c644a01a 100644 --- a/src/constitutive.f90 +++ b/src/constitutive.f90 @@ -31,7 +31,6 @@ module constitutive integer, public, protected :: & constitutive_plasticity_maxSizeDotState, & - constitutive_source_maxSizePostResults, & constitutive_source_maxSizeDotState interface @@ -321,7 +320,6 @@ module constitutive constitutive_SandItsTangents, & constitutive_collectDotState, & constitutive_collectDeltaState, & - constitutive_postResults, & constitutive_results contains @@ -332,17 +330,9 @@ contains !-------------------------------------------------------------------------------------------------- subroutine constitutive_init - integer, parameter :: FILEUNIT = 204 integer :: & - o, & !< counter in output loop ph, & !< counter in phase loop - s, & !< counter in source loop - ins !< instance of plasticity/source - - integer, dimension(:,:), pointer :: thisSize - character(len=64), dimension(:,:), pointer :: thisOutput - character(len=32) :: outputName !< name of output, intermediate fix until HDF5 output is ready - logical :: knownSource + s !< counter in source loop !-------------------------------------------------------------------------------------------------- ! initialized plasticity @@ -372,58 +362,10 @@ subroutine constitutive_init if (any(phase_kinematics == KINEMATICS_slipplane_opening_ID)) call kinematics_slipplane_opening_init if (any(phase_kinematics == KINEMATICS_thermal_expansion_ID)) call kinematics_thermal_expansion_init - write(6,'(/,a)') ' <<<+- constitutive init -+>>>' - - mainProcess: if (worldrank == 0) then -!-------------------------------------------------------------------------------------------------- -! write description file for constitutive output - call IO_write_jobFile(FILEUNIT,'outputConstitutive') - PhaseLoop: do ph = 1,material_Nphase - activePhase: if (any(material_phaseAt == ph)) then - write(FILEUNIT,'(/,a,/)') '['//trim(config_name_phase(ph))//']' - - SourceLoop: do s = 1, phase_Nsources(ph) - knownSource = .true. ! 
assume valid - sourceType: select case (phase_source(s,ph)) - case (SOURCE_damage_isoBrittle_ID) sourceType - ins = source_damage_isoBrittle_instance(ph) - outputName = SOURCE_damage_isoBrittle_label - thisOutput => source_damage_isoBrittle_output - thisSize => source_damage_isoBrittle_sizePostResult - case (SOURCE_damage_isoDuctile_ID) sourceType - ins = source_damage_isoDuctile_instance(ph) - outputName = SOURCE_damage_isoDuctile_label - thisOutput => source_damage_isoDuctile_output - thisSize => source_damage_isoDuctile_sizePostResult - case (SOURCE_damage_anisoBrittle_ID) sourceType - ins = source_damage_anisoBrittle_instance(ph) - outputName = SOURCE_damage_anisoBrittle_label - thisOutput => source_damage_anisoBrittle_output - thisSize => source_damage_anisoBrittle_sizePostResult - case (SOURCE_damage_anisoDuctile_ID) sourceType - ins = source_damage_anisoDuctile_instance(ph) - outputName = SOURCE_damage_anisoDuctile_label - thisOutput => source_damage_anisoDuctile_output - thisSize => source_damage_anisoDuctile_sizePostResult - case default sourceType - knownSource = .false. - end select sourceType - if (knownSource) then - write(FILEUNIT,'(a)') '(source)'//char(9)//trim(outputName) - OutputSourceLoop: do o = 1,size(thisOutput(:,ins)) - if(len_trim(thisOutput(o,ins)) > 0) & - write(FILEUNIT,'(a,i4)') trim(thisOutput(o,ins))//char(9),thisSize(o,ins) - enddo OutputSourceLoop - endif - enddo SourceLoop - endif activePhase - enddo PhaseLoop - close(FILEUNIT) - endif mainProcess + write(6,'(/,a)') ' <<<+- constitutive init -+>>>'; flush(6) constitutive_plasticity_maxSizeDotState = 0 constitutive_source_maxSizeDotState = 0 - constitutive_source_maxSizePostResults = 0 PhaseLoop2:do ph = 1,material_Nphase !-------------------------------------------------------------------------------------------------- @@ -440,11 +382,8 @@ subroutine constitutive_init plasticState(ph)%sizeDotState) constitutive_source_maxSizeDotState = max(constitutive_source_maxSizeDotState, & maxval(sourceState(ph)%p(:)%sizeDotState)) - constitutive_source_maxSizePostResults = max(constitutive_source_maxSizePostResults, & - maxval(sourceState(ph)%p(:)%sizePostResults)) enddo PhaseLoop2 - end subroutine constitutive_init @@ -909,58 +848,13 @@ subroutine constitutive_collectDeltaState(S, Fe, Fi, ipc, ip, el) end subroutine constitutive_collectDeltaState -!-------------------------------------------------------------------------------------------------- -!> @brief returns array of constitutive results -!-------------------------------------------------------------------------------------------------- -function constitutive_postResults(S, Fi, ipc, ip, el) - - integer, intent(in) :: & - ipc, & !< component-ID of integration point - ip, & !< integration point - el !< element - real(pReal), dimension(sum(sourceState(material_phaseAt(ipc,el))%p(:)%sizePostResults)) :: & - constitutive_postResults - real(pReal), intent(in), dimension(3,3) :: & - Fi !< intermediate deformation gradient - real(pReal), intent(in), dimension(3,3) :: & - S !< 2nd Piola Kirchhoff stress - integer :: & - startPos, endPos - integer :: & - i, of, instance !< counter in source loop - - constitutive_postResults = 0.0_pReal - - - endPos = 0 - - SourceLoop: do i = 1, phase_Nsources(material_phaseAt(ipc,el)) - startPos = endPos + 1 - endPos = endPos + sourceState(material_phaseAt(ipc,el))%p(i)%sizePostResults - of = material_phasememberAt(ipc,ip,el) - sourceType: select case (phase_source(i,material_phaseAt(ipc,el))) - case (SOURCE_damage_isoBrittle_ID) 
sourceType - constitutive_postResults(startPos:endPos) = source_damage_isoBrittle_postResults(material_phaseAt(ipc,el),of) - case (SOURCE_damage_isoDuctile_ID) sourceType - constitutive_postResults(startPos:endPos) = source_damage_isoDuctile_postResults(material_phaseAt(ipc,el),of) - case (SOURCE_damage_anisoBrittle_ID) sourceType - constitutive_postResults(startPos:endPos) = source_damage_anisoBrittle_postResults(material_phaseAt(ipc,el),of) - case (SOURCE_damage_anisoDuctile_ID) sourceType - constitutive_postResults(startPos:endPos) = source_damage_anisoDuctile_postResults(material_phaseAt(ipc,el),of) - end select sourceType - - enddo SourceLoop - -end function constitutive_postResults - - !-------------------------------------------------------------------------------------------------- !> @brief writes constitutive results to HDF5 output file !-------------------------------------------------------------------------------------------------- subroutine constitutive_results -#if defined(PETSc) || defined(DAMASK_HDF5) + integer :: p - character(len=256) :: group + character(len=pStringLen) :: group do p=1,size(config_name_phase) group = trim('current/constituent')//'/'//trim(config_name_phase(p)) call results_closeGroup(results_addGroup(group)) @@ -989,8 +883,8 @@ subroutine constitutive_results call plastic_nonlocal_results(phase_plasticityInstance(p),group) end select - enddo -#endif + enddo + end subroutine constitutive_results end module constitutive diff --git a/src/constitutive_plastic_disloUCLA.f90 b/src/constitutive_plastic_disloUCLA.f90 index a2b8f604d..8247d20f5 100644 --- a/src/constitutive_plastic_disloUCLA.f90 +++ b/src/constitutive_plastic_disloUCLA.f90 @@ -28,7 +28,7 @@ submodule(constitutive) plastic_disloUCLA mu, & D_0, & !< prefactor for self-diffusion coefficient Q_cl !< activation energy for dislocation climb - real(pReal), dimension(:), allocatable :: & + real(pReal), dimension(:), allocatable :: & rho_mob_0, & !< initial dislocation density rho_dip_0, & !< initial dipole density b_sl, & !< magnitude of burgers vector [m] @@ -46,18 +46,18 @@ submodule(constitutive) plastic_disloUCLA kink_height, & !< height of the kink pair w, & !< width of the kink pair omega !< attempt frequency for kink pair nucleation - real(pReal), dimension(:,:), allocatable :: & + real(pReal), dimension(:,:), allocatable :: & h_sl_sl, & !< slip resistance from slip activity forestProjectionEdge - real(pReal), dimension(:,:,:), allocatable :: & + real(pReal), dimension(:,:,:), allocatable :: & Schmid, & nonSchmid_pos, & nonSchmid_neg integer :: & sum_N_sl !< total number of active slip system - integer, dimension(:), allocatable :: & + integer, dimension(:), allocatable :: & N_sl !< number of active slip systems for each family - integer(kind(undefined_ID)), dimension(:), allocatable :: & + integer(kind(undefined_ID)), dimension(:),allocatable :: & outputID !< ID of each post result output logical :: & dipoleFormation !< flag indicating consideration of dipole formation @@ -100,16 +100,12 @@ module subroutine plastic_disloUCLA_init sizeState, sizeDotState, & startIndex, endIndex - integer, dimension(0), parameter :: emptyIntArray = [integer::] - real(pReal), dimension(0), parameter :: emptyRealArray = [real(pReal)::] - character(len=65536), dimension(0), parameter :: emptyStringArray = [character(len=65536)::] - integer(kind(undefined_ID)) :: & outputID character(len=pStringLen) :: & extmsg = '' - character(len=65536), dimension(:), allocatable :: & + character(len=pStringLen), dimension(:), 
allocatable :: & outputs write(6,'(/,a)') ' <<<+- plastic_'//PLASTICITY_DISLOUCLA_label//' init -+>>>' @@ -269,8 +265,7 @@ module subroutine plastic_disloUCLA_init sizeDotState = size(['rho_mob ','rho_dip ','gamma_sl']) * prm%sum_N_sl sizeState = sizeDotState - call material_allocatePlasticState(p,NipcMyPhase,sizeState,sizeDotState,0, & - prm%sum_N_sl,0,0) + call material_allocatePlasticState(p,NipcMyPhase,sizeState,sizeDotState,0) !-------------------------------------------------------------------------------------------------- ! locally defined state aliases and initialization of state0 and aTolState @@ -442,7 +437,6 @@ end subroutine plastic_disloUCLA_dependentState !> @brief writes results to HDF5 output file !-------------------------------------------------------------------------------------------------- module subroutine plastic_disloUCLA_results(instance,group) -#if defined(PETSc) || defined(DAMASK_HDF5) integer, intent(in) :: instance character(len=*), intent(in) :: group @@ -470,11 +464,6 @@ module subroutine plastic_disloUCLA_results(instance,group) end select enddo outputsLoop end associate - -#else - integer, intent(in) :: instance - character(len=*), intent(in) :: group -#endif end subroutine plastic_disloUCLA_results diff --git a/src/constitutive_plastic_dislotwin.f90 b/src/constitutive_plastic_dislotwin.f90 index 9f5dbb928..0972d9a2b 100644 --- a/src/constitutive_plastic_dislotwin.f90 +++ b/src/constitutive_plastic_dislotwin.f90 @@ -9,7 +9,7 @@ !-------------------------------------------------------------------------------------------------- submodule(constitutive) plastic_dislotwin - real(pReal), parameter :: & + real(pReal), parameter :: & kB = 1.38e-23_pReal !< Boltzmann constant in J/Kelvin enum, bind(c) @@ -160,16 +160,12 @@ module subroutine plastic_dislotwin_init sizeState, sizeDotState, & startIndex, endIndex - integer, dimension(0), parameter :: emptyIntArray = [integer::] - real(pReal), dimension(0), parameter :: emptyRealArray = [real(pReal)::] - character(len=65536), dimension(0), parameter :: emptyStringArray = [character(len=65536)::] - integer(kind(undefined_ID)) :: & outputID character(len=pStringLen) :: & extmsg = '' - character(len=65536), dimension(:), allocatable :: & + character(len=pStringLen), dimension(:), allocatable :: & outputs write(6,'(/,a)') ' <<<+- constitutive_'//PLASTICITY_DISLOTWIN_label//' init -+>>>' @@ -490,8 +486,7 @@ module subroutine plastic_dislotwin_init + size(['f_tr']) * prm%sum_N_tr sizeState = sizeDotState - call material_allocatePlasticState(p,NipcMyPhase,sizeState,sizeDotState,0, & - prm%sum_N_sl,prm%sum_N_tw,prm%sum_N_tr) + call material_allocatePlasticState(p,NipcMyPhase,sizeState,sizeDotState,0) !-------------------------------------------------------------------------------------------------- ! 
locally defined state aliases and initialization of state0 and aTolState @@ -561,9 +556,9 @@ module function plastic_dislotwin_homogenizedC(ipc,ip,el) result(homogenizedC) real(pReal), dimension(6,6) :: & homogenizedC integer, intent(in) :: & - ipc, & !< component-ID of integration point - ip, & !< integration point - el !< element + ipc, & !< component-ID of integration point + ip, & !< integration point + el !< element integer :: i, & of @@ -906,7 +901,6 @@ end subroutine plastic_dislotwin_dependentState !> @brief writes results to HDF5 output file !-------------------------------------------------------------------------------------------------- module subroutine plastic_dislotwin_results(instance,group) -#if defined(PETSc) || defined(DAMASK_HDF5) integer, intent(in) :: instance character(len=*), intent(in) :: group @@ -949,11 +943,6 @@ module subroutine plastic_dislotwin_results(instance,group) end select enddo outputsLoop end associate - -#else - integer, intent(in) :: instance - character(len=*) :: group -#endif end subroutine plastic_dislotwin_results diff --git a/src/constitutive_plastic_isotropic.f90 b/src/constitutive_plastic_isotropic.f90 index 86b764022..a7a9acfee 100644 --- a/src/constitutive_plastic_isotropic.f90 +++ b/src/constitutive_plastic_isotropic.f90 @@ -67,14 +67,12 @@ module subroutine plastic_isotropic_init NipcMyPhase, & sizeState, sizeDotState - character(len=65536), dimension(0), parameter :: emptyStringArray = [character(len=65536)::] - integer(kind(undefined_ID)) :: & outputID character(len=pStringLen) :: & extmsg = '' - character(len=65536), dimension(:), allocatable :: & + character(len=pStringLen), dimension(:), allocatable :: & outputs write(6,'(/,a)') ' <<<+- plastic_'//PLASTICITY_ISOTROPIC_label//' init -+>>>' @@ -163,8 +161,7 @@ module subroutine plastic_isotropic_init sizeDotState = size(['xi ','accumulated_shear']) sizeState = sizeDotState - call material_allocatePlasticState(p,NipcMyPhase,sizeState,sizeDotState,0, & - 1,0,0) + call material_allocatePlasticState(p,NipcMyPhase,sizeState,sizeDotState,0) !-------------------------------------------------------------------------------------------------- ! 
locally defined state aliases and initialization of state0 and aTolState @@ -355,7 +352,6 @@ end subroutine plastic_isotropic_dotState !> @brief writes results to HDF5 output file !-------------------------------------------------------------------------------------------------- module subroutine plastic_isotropic_results(instance,group) -#if defined(PETSc) || defined(DAMASK_HDF5) integer, intent(in) :: instance character(len=*), intent(in) :: group @@ -370,10 +366,6 @@ module subroutine plastic_isotropic_results(instance,group) end select enddo outputsLoop end associate -#else - integer, intent(in) :: instance - character(len=*) :: group -#endif end subroutine plastic_isotropic_results diff --git a/src/constitutive_plastic_kinehardening.f90 b/src/constitutive_plastic_kinehardening.f90 index 39c3fc01a..2957ff80c 100644 --- a/src/constitutive_plastic_kinehardening.f90 +++ b/src/constitutive_plastic_kinehardening.f90 @@ -84,16 +84,12 @@ module subroutine plastic_kinehardening_init sizeState, sizeDeltaState, sizeDotState, & startIndex, endIndex - integer, dimension(0), parameter :: emptyIntArray = [integer::] - real(pReal), dimension(0), parameter :: emptyRealArray = [real(pReal)::] - character(len=65536), dimension(0), parameter :: emptyStringArray = [character(len=65536)::] - integer(kind(undefined_ID)) :: & outputID character(len=pStringLen) :: & extmsg = '' - character(len=65536), dimension(:), allocatable :: & + character(len=pStringLen), dimension(:), allocatable :: & outputs write(6,'(/,a)') ' <<<+- plastic_'//PLASTICITY_KINEHARDENING_label//' init -+>>>' @@ -230,8 +226,7 @@ module subroutine plastic_kinehardening_init sizeDeltaState = size(['sense ', 'chi0 ', 'gamma0' ]) * prm%totalNslip sizeState = sizeDotState + sizeDeltaState - call material_allocatePlasticState(p,NipcMyPhase,sizeState,sizeDotState,sizeDeltaState, & - prm%totalNslip,0,0) + call material_allocatePlasticState(p,NipcMyPhase,sizeState,sizeDotState,sizeDeltaState) !-------------------------------------------------------------------------------------------------- ! 
locally defined state aliases and initialization of state0 and aTolState @@ -418,7 +413,6 @@ end subroutine plastic_kinehardening_deltaState !> @brief writes results to HDF5 output file !-------------------------------------------------------------------------------------------------- module subroutine plastic_kinehardening_results(instance,group) -#if defined(PETSc) || defined(DAMASK_HDF5) integer, intent(in) :: instance character(len=*), intent(in) :: group @@ -451,10 +445,6 @@ module subroutine plastic_kinehardening_results(instance,group) end select enddo outputsLoop end associate -#else - integer, intent(in) :: instance - character(len=*) :: group -#endif end subroutine plastic_kinehardening_results diff --git a/src/constitutive_plastic_none.f90 b/src/constitutive_plastic_none.f90 index 14f728cc1..5c336e2d7 100644 --- a/src/constitutive_plastic_none.f90 +++ b/src/constitutive_plastic_none.f90 @@ -29,8 +29,8 @@ module subroutine plastic_none_init if (phase_plasticity(p) /= PLASTICITY_NONE_ID) cycle NipcMyPhase = count(material_phaseAt == p) * discretization_nIP - call material_allocatePlasticState(p,NipcMyPhase,0,0,0, & - 0,0,0) + call material_allocatePlasticState(p,NipcMyPhase,0,0,0) + enddo end subroutine plastic_none_init diff --git a/src/constitutive_plastic_nonlocal.f90 b/src/constitutive_plastic_nonlocal.f90 index 099a1304e..ce052b050 100644 --- a/src/constitutive_plastic_nonlocal.f90 +++ b/src/constitutive_plastic_nonlocal.f90 @@ -13,13 +13,8 @@ submodule(constitutive) plastic_nonlocal IParea => geometry_plastic_nonlocal_IParea0, & IPareaNormal => geometry_plastic_nonlocal_IPareaNormal0 - real(pReal), parameter :: & KB = 1.38e-23_pReal !< Physical parameter, Boltzmann constant in J/Kelvin - - character(len=64), dimension(:,:), allocatable :: & - plastic_nonlocal_output !< name of each post result output - ! storage order of dislocation types integer, dimension(8), parameter :: & @@ -52,7 +47,7 @@ submodule(constitutive) plastic_nonlocal !END DEPRECATED real(pReal), dimension(:,:,:,:,:,:), allocatable :: & - compatibility !< slip system compatibility between me and my neighbors + compatibility !< slip system compatibility between me and my neighbors enum, bind(c) enumerator :: & @@ -138,7 +133,7 @@ submodule(constitutive) plastic_nonlocal nonSchmid_neg !< combined projection of Schmid and non-Schmid contributions to the resolved shear stress (only for screws) integer :: & totalNslip - integer, dimension(:) ,allocatable:: & + integer, dimension(:) ,allocatable :: & Nslip,& colinearSystem !< colinear system to the active slip system (only valid for fcc!) 
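The pattern repeated across the plasticity submodules in this changeset -- dropping the "#if defined(PETSc) || defined(DAMASK_HDF5)" guard together with the sizePostResult bookkeeping so that every *_results routine writes straight to the HDF5 results file -- reduces to the short sketch below. The procedure name plastic_demo_results, the output enumerator rho_ID and the state alias stt%rho are hypothetical placeholders; only results_writeDataset and the loop over prm%outputID mirror the hunks above.

module subroutine plastic_demo_results(instance,group)              ! hypothetical writer, sketch of the unguarded pattern

  integer,          intent(in) :: instance
  character(len=*), intent(in) :: group

  integer :: o

  associate(prm => param(instance), stt => state(instance))
  outputsLoop: do o = 1,size(prm%outputID)
    select case(prm%outputID(o))
      case (rho_ID)                                                  ! hypothetical output enumerator
        call results_writeDataset(group,stt%rho,'rho', &
                                  'dislocation density','1/m²')
    end select
  enddo outputsLoop
  end associate

end module subroutine plastic_demo_results

If a phase requests no output, the loop simply falls through, so no empty datasets are created and no separate stub body behind a preprocessor guard is needed.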
@@ -192,10 +187,6 @@ submodule(constitutive) plastic_nonlocal type(tNonlocalMicrostructure), dimension(:), allocatable :: microstructure - integer(kind(undefined_ID)), dimension(:,:), allocatable :: & - plastic_nonlocal_outputID !< ID of each post result output - - contains !-------------------------------------------------------------------------------------------------- @@ -204,10 +195,6 @@ contains !-------------------------------------------------------------------------------------------------- module subroutine plastic_nonlocal_init - character(len=65536), dimension(0), parameter :: emptyStringArray = [character(len=65536)::] - integer, dimension(0), parameter :: emptyIntArray = [integer::] - real(pReal), dimension(0), parameter :: emptyRealArray = [real(pReal)::] - integer :: & sizeState, sizeDotState,sizeDependentState, sizeDeltaState, & maxNinstances, & @@ -220,10 +207,10 @@ module subroutine plastic_nonlocal_init integer(kind(undefined_ID)) :: & outputID - character(len=512) :: & + character(len=pStringLen) :: & extmsg = '', & structure - character(len=65536), dimension(:), allocatable :: outputs + character(len=pStringLen), dimension(:), allocatable :: outputs integer :: NofMyPhase write(6,'(/,a)') ' <<<+- constitutive_'//PLASTICITY_NONLOCAL_label//' init -+>>>' @@ -243,10 +230,6 @@ module subroutine plastic_nonlocal_init allocate(dotState(maxNinstances)) allocate(deltaState(maxNinstances)) allocate(microstructure(maxNinstances)) - - allocate(plastic_nonlocal_output(maxval(phase_Noutput), maxNinstances)) - plastic_nonlocal_output = '' - allocate(plastic_nonlocal_outputID(maxval(phase_Noutput), maxNinstances), source=undefined_ID) allocate(totalNslip(maxNinstances), source=0) @@ -472,7 +455,6 @@ module subroutine plastic_nonlocal_init end select if (outputID /= undefined_ID) then - plastic_nonlocal_output(i,phase_plasticityInstance(p)) = outputs(i) prm%outputID = [prm%outputID , outputID] endif @@ -494,8 +476,8 @@ module subroutine plastic_nonlocal_init 'maxDipoleHeightEdge ','maxDipoleHeightScrew' ]) * prm%totalNslip !< other dependent state variables that are not updated by microstructure sizeDeltaState = sizeDotState - call material_allocatePlasticState(p,NofMyPhase,sizeState,sizeDotState,sizeDeltaState, & - prm%totalNslip,0,0) + call material_allocatePlasticState(p,NofMyPhase,sizeState,sizeDotState,sizeDeltaState) + plasticState(p)%nonlocal = .true. plasticState(p)%offsetDeltaState = 0 ! 
ToDo: state structure does not follow convention @@ -1094,7 +1076,7 @@ end subroutine plastic_nonlocal_kinetics !> @brief calculates plastic velocity gradient and its tangent !-------------------------------------------------------------------------------------------------- module subroutine plastic_nonlocal_LpAndItsTangent(Lp, dLp_dMp, & - Mp, Temperature, volume, ip, el) + Mp, Temperature, volume, ip, el) integer, intent(in) :: & ip, & !< current integration point @@ -1954,11 +1936,8 @@ end function getRho !> @brief writes results to HDF5 output file !-------------------------------------------------------------------------------------------------- module subroutine plastic_nonlocal_results(instance,group) -#if defined(PETSc) || defined(DAMASK_HDF5) - use results, only: & - results_writeDataset - integer, intent(in) :: instance + integer, intent(in) :: instance character(len=*),intent(in) :: group integer :: o @@ -2019,10 +1998,6 @@ module subroutine plastic_nonlocal_results(instance,group) end select enddo outputsLoop end associate -#else - integer, intent(in) :: instance - character(len=*) :: group -#endif end subroutine plastic_nonlocal_results diff --git a/src/constitutive_plastic_phenopowerlaw.f90 b/src/constitutive_plastic_phenopowerlaw.f90 index aa2f47435..3a330ef2a 100644 --- a/src/constitutive_plastic_phenopowerlaw.f90 +++ b/src/constitutive_plastic_phenopowerlaw.f90 @@ -95,16 +95,12 @@ module subroutine plastic_phenopowerlaw_init sizeState, sizeDotState, & startIndex, endIndex - integer, dimension(0), parameter :: emptyIntArray = [integer::] - real(pReal), dimension(0), parameter :: emptyRealArray = [real(pReal)::] - character(len=65536), dimension(0), parameter :: emptyStringArray = [character(len=65536)::] - integer(kind(undefined_ID)) :: & outputID character(len=pStringLen) :: & extmsg = '' - character(len=65536), dimension(:), allocatable :: & + character(len=pStringLen), dimension(:), allocatable :: & outputs write(6,'(/,a)') ' <<<+- plastic_'//PLASTICITY_PHENOPOWERLAW_label//' init -+>>>' @@ -290,8 +286,7 @@ module subroutine plastic_phenopowerlaw_init + size(['tau_twin ','gamma_twin']) * prm%totalNtwin sizeState = sizeDotState - call material_allocatePlasticState(p,NipcMyPhase,sizeState,sizeDotState,0, & - prm%totalNslip,prm%totalNtwin,0) + call material_allocatePlasticState(p,NipcMyPhase,sizeState,sizeDotState,0) !-------------------------------------------------------------------------------------------------- ! 
locally defined state aliases and initialization of state0 and aTolState @@ -446,7 +441,6 @@ end subroutine plastic_phenopowerlaw_dotState !> @brief writes results to HDF5 output file !-------------------------------------------------------------------------------------------------- module subroutine plastic_phenopowerlaw_results(instance,group) -#if defined(PETSc) || defined(DAMASK_HDF5) integer, intent(in) :: instance character(len=*), intent(in) :: group @@ -474,11 +468,6 @@ module subroutine plastic_phenopowerlaw_results(instance,group) end select enddo outputsLoop end associate - -#else - integer, intent(in) :: instance - character(len=*), intent(in) :: group -#endif end subroutine plastic_phenopowerlaw_results diff --git a/src/crystallite.f90 b/src/crystallite.f90 index c18588218..73fa17314 100644 --- a/src/crystallite.f90 +++ b/src/crystallite.f90 @@ -76,7 +76,7 @@ module crystallite crystallite_localPlasticity !< indicates this grain to have purely local constitutive law type :: tOutput !< new requested output (per phase) - character(len=65536), allocatable, dimension(:) :: & + character(len=pStringLen), allocatable, dimension(:) :: & label end type tOutput type(tOutput), allocatable, dimension(:) :: output_constituent @@ -107,7 +107,6 @@ module crystallite crystallite_stressTangent, & crystallite_orientations, & crystallite_push33ToRef, & - crystallite_postResults, & crystallite_results contains @@ -118,8 +117,7 @@ contains !-------------------------------------------------------------------------------------------------- subroutine crystallite_init - integer, parameter :: FILEUNIT=434 - logical, dimension(:,:), allocatable :: devNull + logical, dimension(discretization_nIP,discretization_nElem) :: devNull integer :: & c, & !< counter in integration point component loop i, & !< counter in integration point loop @@ -232,13 +230,6 @@ subroutine crystallite_init #endif enddo -!-------------------------------------------------------------------------------------------------- -! write description file for crystallite output - if (worldrank == 0) then - call IO_write_jobFile(FILEUNIT,'outputCrystallite') - write(FILEUNIT,'(/,a,/)') '[not supported anymore]' - close(FILEUNIT) - endif call config_deallocate('material.config/phase') !-------------------------------------------------------------------------------------------------- @@ -731,43 +722,11 @@ function crystallite_push33ToRef(ipc,ip,el, tensor33) end function crystallite_push33ToRef -!-------------------------------------------------------------------------------------------------- -!> @brief return results of particular grain -!-------------------------------------------------------------------------------------------------- -function crystallite_postResults(ipc, ip, el) - - integer, intent(in):: & - el, & !< element index - ip, & !< integration point index - ipc !< grain index - - real(pReal), dimension(1+ & - 1+plasticState(material_phaseAt(ipc,el))%sizePostResults + & - sum(sourceState(material_phaseAt(ipc,el))%p(:)%sizePostResults)) :: & - crystallite_postResults - integer :: & - c - - - crystallite_postResults = 0.0_pReal - crystallite_postResults(1) = 0.0_pReal ! header-like information (length) - c = 1 - - crystallite_postResults(c+1) = real(plasticState(material_phaseAt(ipc,el))%sizePostResults,pReal) ! 
size of constitutive results - c = c + 1 - if (size(crystallite_postResults)-c > 0) & - crystallite_postResults(c+1:size(crystallite_postResults)) = & - constitutive_postResults(crystallite_S(1:3,1:3,ipc,ip,el), crystallite_Fi(1:3,1:3,ipc,ip,el), & - ipc, ip, el) - -end function crystallite_postResults - - !-------------------------------------------------------------------------------------------------- !> @brief writes crystallite results to HDF5 output file !-------------------------------------------------------------------------------------------------- subroutine crystallite_results -#if defined(PETSc) || defined(DAMASK_HDF5) + integer :: p,o real(pReal), allocatable, dimension(:,:,:) :: selected_tensors type(rotation), allocatable, dimension(:) :: selected_rotations @@ -888,7 +847,7 @@ subroutine crystallite_results enddo end function select_rotations -#endif + end subroutine crystallite_results diff --git a/src/damage_local.f90 b/src/damage_local.f90 index 74ad47c9b..6cb45a391 100644 --- a/src/damage_local.f90 +++ b/src/damage_local.f90 @@ -5,22 +5,16 @@ module damage_local use prec use material - use numerics use config + use numerics use source_damage_isoBrittle use source_damage_isoDuctile use source_damage_anisoBrittle use source_damage_anisoDuctile + use results implicit none private - - integer, dimension(:,:), allocatable, target, public :: & - damage_local_sizePostResult - character(len=64), dimension(:,:), allocatable, target, public :: & - damage_local_output - integer, dimension(:), allocatable, target, public :: & - damage_local_Noutput enum, bind(c) enumerator :: & @@ -28,9 +22,6 @@ module damage_local damage_ID end enum - integer(kind(undefined_ID)), dimension(:,:), allocatable :: & - damage_local_outputID !< ID of each post result output - type :: tParameters integer(kind(undefined_ID)), dimension(:), allocatable :: & outputID @@ -42,7 +33,7 @@ module damage_local public :: & damage_local_init, & damage_local_updateState, & - damage_local_postResults + damage_local_Results contains @@ -52,70 +43,41 @@ contains !-------------------------------------------------------------------------------------------------- subroutine damage_local_init - integer :: maxNinstance,homog,instance,i - integer :: sizeState - integer :: NofMyHomog, h - integer(kind(undefined_ID)) :: & - outputID - character(len=65536), dimension(0), parameter :: emptyStringArray = [character(len=65536)::] - character(len=65536), dimension(:), allocatable :: & - outputs + integer :: maxNinstance,o,NofMyHomog,h + character(len=pStringLen), dimension(:), allocatable :: outputs - write(6,'(/,a)') ' <<<+- damage_'//DAMAGE_local_label//' init -+>>>' + write(6,'(/,a)') ' <<<+- damage_'//DAMAGE_local_label//' init -+>>>'; flush(6) maxNinstance = count(damage_type == DAMAGE_local_ID) if (maxNinstance == 0) return - allocate(damage_local_sizePostResult (maxval(homogenization_Noutput),maxNinstance),source=0) - allocate(damage_local_output (maxval(homogenization_Noutput),maxNinstance)) - damage_local_output = '' - allocate(damage_local_outputID (maxval(homogenization_Noutput),maxNinstance),source=undefined_ID) - allocate(damage_local_Noutput (maxNinstance), source=0) - allocate(param(maxNinstance)) do h = 1, size(damage_type) if (damage_type(h) /= DAMAGE_LOCAL_ID) cycle - associate(prm => param(damage_typeInstance(h)), & - config => config_homogenization(h)) + associate(prm => param(damage_typeInstance(h)),config => config_homogenization(h)) - outputs = config%getStrings('(output)',defaultVal=emptyStringArray) 
allocate(prm%outputID(0)) - do i=1, size(outputs) - outputID = undefined_ID - select case(outputs(i)) - - case ('damage') - damage_local_output(i,damage_typeInstance(h)) = outputs(i) - damage_local_Noutput(instance) = damage_local_Noutput(instance) + 1 - damage_local_sizePostResult(i,damage_typeInstance(h)) = 1 - prm%outputID = [prm%outputID , damage_ID] - end select - + do o=1, size(outputs) + select case(outputs(o)) + case ('damage') + prm%outputID = [prm%outputID , damage_ID] + end select enddo - - homog = h - - NofMyHomog = count(material_homogenizationAt == homog) - instance = damage_typeInstance(homog) - - -! allocate state arrays - sizeState = 1 - damageState(homog)%sizeState = sizeState - damageState(homog)%sizePostResults = sum(damage_local_sizePostResult(:,instance)) - allocate(damageState(homog)%state0 (sizeState,NofMyHomog), source=damage_initialPhi(homog)) - allocate(damageState(homog)%subState0(sizeState,NofMyHomog), source=damage_initialPhi(homog)) - allocate(damageState(homog)%state (sizeState,NofMyHomog), source=damage_initialPhi(homog)) + NofMyHomog = count(material_homogenizationAt == h) + damageState(h)%sizeState = 1 + allocate(damageState(h)%state0 (1,NofMyHomog), source=damage_initialPhi(h)) + allocate(damageState(h)%subState0(1,NofMyHomog), source=damage_initialPhi(h)) + allocate(damageState(h)%state (1,NofMyHomog), source=damage_initialPhi(h)) - nullify(damageMapping(homog)%p) - damageMapping(homog)%p => mappingHomogenization(1,:,:) - deallocate(damage(homog)%p) - damage(homog)%p => damageState(homog)%state(1,:) - + nullify(damageMapping(h)%p) + damageMapping(h)%p => mappingHomogenization(1,:,:) + deallocate(damage(h)%p) + damage(h)%p => damageState(h)%state(1,:) + end associate enddo @@ -211,35 +173,27 @@ end subroutine damage_local_getSourceAndItsTangent !-------------------------------------------------------------------------------------------------- -!> @brief return array of damage results +!> @brief writes results to HDF5 output file !-------------------------------------------------------------------------------------------------- -function damage_local_postResults(ip,el) +subroutine damage_local_results(homog,group) - integer, intent(in) :: & - ip, & !< integration point - el !< element - real(pReal), dimension(sum(damage_local_sizePostResult(:,damage_typeInstance(material_homogenizationAt(el))))) :: & - damage_local_postResults - - integer :: instance, homog, offset, o, c - - homog = material_homogenizationAt(el) - offset = damageMapping(homog)%p(ip,el) - instance = damage_typeInstance(homog) - associate(prm => param(instance)) - c = 0 + integer, intent(in) :: homog + character(len=*), intent(in) :: group + integer :: o + + associate(prm => param(damage_typeInstance(homog))) outputsLoop: do o = 1,size(prm%outputID) select case(prm%outputID(o)) - - case (damage_ID) - damage_local_postResults(c+1) = damage(homog)%p(offset) - c = c + 1 - end select + + case (damage_ID) + call results_writeDataset(group,damage(homog)%p,'phi',& + 'damage indicator','-') + end select enddo outputsLoop - end associate -end function damage_local_postResults +end subroutine damage_local_results + end module damage_local diff --git a/src/damage_none.f90 b/src/damage_none.f90 index 62d2cc0eb..d3b1b73c5 100644 --- a/src/damage_none.f90 +++ b/src/damage_none.f90 @@ -19,26 +19,23 @@ contains !-------------------------------------------------------------------------------------------------- subroutine damage_none_init - integer :: & - homog, & - NofMyHomog + integer :: h,NofMyHomog - 
write(6,'(/,a)') ' <<<+- damage_'//DAMAGE_NONE_LABEL//' init -+>>>' + write(6,'(/,a)') ' <<<+- damage_'//DAMAGE_NONE_LABEL//' init -+>>>'; flush(6) - initializeInstances: do homog = 1, size(config_homogenization) + do h = 1, size(config_homogenization) + if (damage_type(h) /= DAMAGE_NONE_ID) cycle + + NofMyHomog = count(material_homogenizationAt == h) + damageState(h)%sizeState = 0 + allocate(damageState(h)%state0 (0,NofMyHomog)) + allocate(damageState(h)%subState0(0,NofMyHomog)) + allocate(damageState(h)%state (0,NofMyHomog)) - myhomog: if (damage_type(homog) == DAMAGE_NONE_ID) then - NofMyHomog = count(material_homogenizationAt == homog) - damageState(homog)%sizeState = 0 - allocate(damageState(homog)%state0 (0,NofMyHomog)) - allocate(damageState(homog)%subState0(0,NofMyHomog)) - allocate(damageState(homog)%state (0,NofMyHomog)) + deallocate(damage(h)%p) + allocate (damage(h)%p(1), source=damage_initialPhi(h)) - deallocate(damage(homog)%p) - allocate (damage(homog)%p(1), source=damage_initialPhi(homog)) - - endif myhomog - enddo initializeInstances + enddo end subroutine damage_none_init diff --git a/src/damage_nonlocal.f90 b/src/damage_nonlocal.f90 index 0a8a3c867..17bdecaca 100644 --- a/src/damage_nonlocal.f90 +++ b/src/damage_nonlocal.f90 @@ -1,29 +1,22 @@ !-------------------------------------------------------------------------------------------------- !> @author Pratheek Shanthraj, Max-Planck-Institut für Eisenforschung GmbH !> @brief material subroutine for non-locally evolving damage field -!> @details to be done !-------------------------------------------------------------------------------------------------- module damage_nonlocal use prec use material - use numerics use config + use numerics use crystallite use lattice use source_damage_isoBrittle use source_damage_isoDuctile use source_damage_anisoBrittle use source_damage_anisoDuctile + use results implicit none private - - integer, dimension(:,:), allocatable, target, public :: & - damage_nonlocal_sizePostResult - character(len=64), dimension(:,:), allocatable, target, public :: & - damage_nonlocal_output - integer, dimension(:), allocatable, target, public :: & - damage_nonlocal_Noutput enum, bind(c) enumerator :: & @@ -45,7 +38,7 @@ module damage_nonlocal damage_nonlocal_getDiffusion33, & damage_nonlocal_getMobility, & damage_nonlocal_putNonLocalDamage, & - damage_nonlocal_postResults + damage_nonlocal_Results contains @@ -55,70 +48,44 @@ contains !-------------------------------------------------------------------------------------------------- subroutine damage_nonlocal_init - integer :: maxNinstance,homog,instance,o,i - integer :: sizeState - integer :: NofMyHomog, h - integer(kind(undefined_ID)) :: & - outputID - character(len=65536), dimension(0), parameter :: emptyStringArray = [character(len=65536)::] - character(len=65536), dimension(:), allocatable :: & - outputs + integer :: maxNinstance,o,NofMyHomog,h + character(len=pStringLen), dimension(:), allocatable :: outputs - write(6,'(/,a)') ' <<<+- damage_'//DAMAGE_nonlocal_label//' init -+>>>' + write(6,'(/,a)') ' <<<+- damage_'//DAMAGE_nonlocal_label//' init -+>>>'; flush(6) maxNinstance = count(damage_type == DAMAGE_nonlocal_ID) if (maxNinstance == 0) return - - allocate(damage_nonlocal_sizePostResult (maxval(homogenization_Noutput),maxNinstance),source=0) - allocate(damage_nonlocal_output (maxval(homogenization_Noutput),maxNinstance)) - damage_nonlocal_output = '' - allocate(damage_nonlocal_Noutput (maxNinstance), source=0) allocate(param(maxNinstance)) do h = 
1, size(damage_type) if (damage_type(h) /= DAMAGE_NONLOCAL_ID) cycle - associate(prm => param(damage_typeInstance(h)), & - config => config_homogenization(h)) + associate(prm => param(damage_typeInstance(h)),config => config_homogenization(h)) - instance = damage_typeInstance(h) outputs = config%getStrings('(output)',defaultVal=emptyStringArray) allocate(prm%outputID(0)) - do i=1, size(outputs) - outputID = undefined_ID - select case(outputs(i)) - - case ('damage') - damage_nonlocal_output(i,damage_typeInstance(h)) = outputs(i) - damage_nonlocal_Noutput(instance) = damage_nonlocal_Noutput(instance) + 1 - damage_nonlocal_sizePostResult(i,damage_typeInstance(h)) = 1 - prm%outputID = [prm%outputID , damage_ID] - end select - + do o=1, size(outputs) + select case(outputs(o)) + case ('damage') + prm%outputID = [prm%outputID, damage_ID] + end select enddo - homog = h + NofMyHomog = count(material_homogenizationAt == h) + damageState(h)%sizeState = 1 + allocate(damageState(h)%state0 (1,NofMyHomog), source=damage_initialPhi(h)) + allocate(damageState(h)%subState0(1,NofMyHomog), source=damage_initialPhi(h)) + allocate(damageState(h)%state (1,NofMyHomog), source=damage_initialPhi(h)) - NofMyHomog = count(material_homogenizationAt == homog) - instance = damage_typeInstance(homog) - - -! allocate state arrays - sizeState = 1 - damageState(homog)%sizeState = sizeState - damageState(homog)%sizePostResults = sum(damage_nonlocal_sizePostResult(:,instance)) - allocate(damageState(homog)%state0 (sizeState,NofMyHomog), source=damage_initialPhi(homog)) - allocate(damageState(homog)%subState0(sizeState,NofMyHomog), source=damage_initialPhi(homog)) - allocate(damageState(homog)%state (sizeState,NofMyHomog), source=damage_initialPhi(homog)) - - nullify(damageMapping(homog)%p) - damageMapping(homog)%p => mappingHomogenization(1,:,:) - deallocate(damage(homog)%p) - damage(homog)%p => damageState(homog)%state(1,:) - + nullify(damageMapping(h)%p) + damageMapping(h)%p => mappingHomogenization(1,:,:) + deallocate(damage(h)%p) + damage(h)%p => damageState(h)%state(1,:) + end associate enddo + end subroutine damage_nonlocal_init @@ -247,35 +214,26 @@ end subroutine damage_nonlocal_putNonLocalDamage !-------------------------------------------------------------------------------------------------- -!> @brief return array of damage results +!> @brief writes results to HDF5 output file !-------------------------------------------------------------------------------------------------- -function damage_nonlocal_postResults(ip,el) +subroutine damage_nonlocal_results(homog,group) - integer, intent(in) :: & - ip, & !< integration point - el !< element - real(pReal), dimension(sum(damage_nonlocal_sizePostResult(:,damage_typeInstance(material_homogenizationAt(el))))) :: & - damage_nonlocal_postResults - - integer :: & - instance, homog, offset, o, c - - homog = material_homogenizationAt(el) - offset = damageMapping(homog)%p(ip,el) - instance = damage_typeInstance(homog) - associate(prm => param(instance)) - c = 0 + integer, intent(in) :: homog + character(len=*), intent(in) :: group + integer :: o + + associate(prm => param(damage_typeInstance(homog))) outputsLoop: do o = 1,size(prm%outputID) select case(prm%outputID(o)) - - case (damage_ID) - damage_nonlocal_postResults(c+1) = damage(homog)%p(offset) - c = c + 1 - end select + + case (damage_ID) + call results_writeDataset(group,damage(homog)%p,'phi',& + 'damage indicator','-') + end select enddo outputsLoop - end associate -end function damage_nonlocal_postResults + +end 
subroutine damage_nonlocal_results end module damage_nonlocal diff --git a/src/discretization.f90 b/src/discretization.f90 index 873148666..5f9d3f521 100644 --- a/src/discretization.f90 +++ b/src/discretization.f90 @@ -78,7 +78,7 @@ end subroutine discretization_init !> @brief write the displacements !-------------------------------------------------------------------------------------------------- subroutine discretization_results -#if defined(PETSc) || defined(DAMASK_HDF5) + real(pReal), dimension(:,:), allocatable :: u call results_closeGroup(results_addGroup(trim('current/geometry'))) @@ -90,7 +90,7 @@ subroutine discretization_results u = discretization_IPcoords & - discretization_IPcoords0 call results_writeDataset('current/geometry',u,'u_c','cell center displacements','m') -#endif + end subroutine discretization_results diff --git a/src/element.f90 b/src/element.f90 index 02f5fb762..3a1e3f5a3 100644 --- a/src/element.f90 +++ b/src/element.f90 @@ -39,7 +39,7 @@ module element integer, parameter, private :: & NELEMTYPE = 13 - integer, dimension(NelemType), parameter, private :: NNODE = & + integer, dimension(NELEMTYPE), parameter, private :: NNODE = & [ & 3, & ! 2D 3node 1ip 6, & ! 2D 6node 3ip @@ -57,7 +57,7 @@ module element 20 & ! 3D 20node 27ip ] !< number of nodes that constitute a specific type of element - integer, dimension(NelemType), parameter, public :: GEOMTYPE = & + integer, dimension(NELEMTYPE), parameter, public :: GEOMTYPE = & [ & 1, & 2, & @@ -74,8 +74,7 @@ module element 10 & ] !< geometry type of particular element type - !integer, dimension(maxval(geomType)), parameter, private :: NCELLNODE = & ! Intel 16.0 complains - integer, dimension(10), parameter, private :: NCELLNODE = & + integer, dimension(maxval(GEOMTYPE)), parameter, private :: NCELLNODE = & [ & 3, & 7, & @@ -89,8 +88,7 @@ module element 64 & ] !< number of cell nodes in a specific geometry type - !integer, dimension(maxval(geomType)), parameter, private :: NIP = & ! Intel 16.0 complains - integer, dimension(10), parameter, private :: NIP = & + integer, dimension(maxval(GEOMTYPE)), parameter, private :: NIP = & [ & 1, & 3, & @@ -104,8 +102,7 @@ module element 27 & ] !< number of IPs in a specific geometry type - !integer, dimension(maxval(geomType)), parameter, private :: CELLTYPE = & ! Intel 16.0 complains - integer, dimension(10), parameter, private :: CELLTYPE = & + integer, dimension(maxval(GEOMTYPE)), parameter, private :: CELLTYPE = & [ & 1, & ! 2D 3node 2, & ! 2D 4node @@ -119,8 +116,7 @@ module element 4 & ! 3D 8node ] !< cell type that is used by each geometry type - !integer, dimension(maxval(cellType)), parameter, private :: nIPNeighbor = & ! Intel 16.0 complains - integer, dimension(4), parameter, private :: NIPNEIGHBOR = & + integer, dimension(maxval(CELLTYPE)), parameter, private :: NIPNEIGHBOR = & [ & 3, & ! 2D 3node 4, & ! 2D 4node @@ -128,8 +124,7 @@ module element 6 & ! 3D 8node ] !< number of ip neighbors / cell faces in a specific cell type - !integer, dimension(maxval(cellType)), parameter, private :: NCELLNODESPERCELLFACE = & ! Intel 16.0 complains - integer, dimension(4), parameter, private :: NCELLNODEPERCELLFACE = & + integer, dimension(maxval(CELLTYPE)), parameter, private :: NCELLNODEPERCELLFACE = & [ & 2, & ! 2D 3node 2, & ! 2D 4node @@ -137,8 +132,7 @@ module element 4 & ! 3D 8node ] !< number of cell nodes in a specific cell type - !integer, dimension(maxval(CELLTYPE)), parameter, private :: NCELLNODEPERCELL = & ! 
Intel 16.0 complains
-  integer, dimension(4), parameter, private :: NCELLNODEPERCELL = &
+  integer, dimension(maxval(CELLTYPE)), parameter, private :: NCELLNODEPERCELL = &
     [ &
       3, & ! 2D 3node
       4, & ! 2D 4node
diff --git a/src/future.f90 b/src/future.f90
index 354a522e4..b7eb3fec9 100644
--- a/src/future.f90
+++ b/src/future.f90
@@ -10,7 +10,7 @@ module future
 contains
-#if defined(__GFORTRAN__) && __GNUC__<9 || __INTEL_COMPILER<1800
+#if defined(__GFORTRAN__) && __GNUC__<9 || defined(__INTEL_COMPILER) && __INTEL_COMPILER<1800
 !--------------------------------------------------------------------------------------------------
 !> @brief substitute for the findloc intrinsic (only for integer, dimension(:) at the moment)
 !--------------------------------------------------------------------------------------------------
diff --git a/src/geometry_plastic_nonlocal.f90 b/src/geometry_plastic_nonlocal.f90
index 408306b2b..7890af5aa 100644
--- a/src/geometry_plastic_nonlocal.f90
+++ b/src/geometry_plastic_nonlocal.f90
@@ -10,32 +10,24 @@ module geometry_plastic_nonlocal
   use results
   implicit none
-  private
+  public
-  integer, public, protected :: &
+  integer, protected :: &
    geometry_plastic_nonlocal_nIPneighbors
-  integer, dimension(:,:,:,:), allocatable, public, protected :: &
+  integer, dimension(:,:,:,:), allocatable, protected :: &
    geometry_plastic_nonlocal_IPneighborhood  !< 6 or less neighboring IPs as [element ID, IP ID, face ID that point to me]
-  real(pReal), dimension(:,:), allocatable, public, protected :: &
+  real(pReal), dimension(:,:), allocatable, protected :: &
    geometry_plastic_nonlocal_IPvolume0  !< volume associated with IP (initially!)
-  real(pReal), dimension(:,:,:), allocatable, public, protected :: &
+  real(pReal), dimension(:,:,:), allocatable, protected :: &
    geometry_plastic_nonlocal_IParea0  !< area of interface to neighboring IP (initially!)
-  real(pReal), dimension(:,:,:,:), allocatable, public, protected :: &
+  real(pReal), dimension(:,:,:,:), allocatable, protected :: &
    geometry_plastic_nonlocal_IPareaNormal0  !< area normal of interface to neighboring IP (initially!)
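The switch of geometry_plastic_nonlocal from a default-private module with an explicit export list to a default-public one relies entirely on the protected attribute: the arrays remain readable everywhere, but only procedures of the defining module may modify them. A minimal sketch of that access pattern, with hypothetical names (demo_geometry, nIP, demo_geometry_set):

module demo_geometry                                                 ! hypothetical module illustrating public + protected
  implicit none
  public                                                             ! default accessibility: everything is visible ...

  integer, protected :: nIP = 0                                      ! ... but protected data is read-only outside this module

contains

  subroutine demo_geometry_set(n)                                    ! the only legal place to assign nIP
    integer, intent(in) :: n
    nIP = n
  end subroutine demo_geometry_set

end module demo_geometry

Code that merely uses demo_geometry can read nIP, while an assignment to it there is rejected at compile time; the write protection is therefore unchanged by this hunk, only the export-list bookkeeping goes away.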
- public :: & - geometry_plastic_nonlocal_setIPneighborhood, & - geometry_plastic_nonlocal_setIPvolume, & - geometry_plastic_nonlocal_setIParea, & - geometry_plastic_nonlocal_setIPareaNormal, & - geometry_plastic_nonlocal_results, & - geometry_plastic_nonlocal_disable - contains !--------------------------------------------------------------------------------------------------- @@ -96,7 +88,7 @@ end subroutine geometry_plastic_nonlocal_setIPareaNormal !--------------------------------------------------------------------------------------------------- -!> @brief Frees memory used by variables only needed by plastic_nonlocal +!> @brief Free memory used by variables only needed by plastic_nonlocal !--------------------------------------------------------------------------------------------------- subroutine geometry_plastic_nonlocal_disable @@ -116,13 +108,12 @@ end subroutine geometry_plastic_nonlocal_disable !--------------------------------------------------------------------------------------------------- -!> @brief Writes geometry data to results file +!> @brief Write geometry data to results file !--------------------------------------------------------------------------------------------------- subroutine geometry_plastic_nonlocal_results integer, dimension(:), allocatable :: shp -#if defined(PETSc) || defined(DAMASK_HDF5) call results_openJobFile writeVolume: block @@ -151,7 +142,6 @@ subroutine geometry_plastic_nonlocal_results call results_closeJobFile -#endif end subroutine geometry_plastic_nonlocal_results diff --git a/src/grid/DAMASK_grid.f90 b/src/grid/DAMASK_grid.f90 index 51c97456b..b324a5afc 100644 --- a/src/grid/DAMASK_grid.f90 +++ b/src/grid/DAMASK_grid.f90 @@ -15,11 +15,7 @@ program DAMASK_spectral use config use debug use math - use mesh_grid use CPFEM2 - use FEsolving - use numerics - use homogenization use material use spectral_utilities use grid_mech_spectral_basic @@ -40,7 +36,7 @@ program DAMASK_spectral N_t = 0, & !< # of time indicators found in load case file N_n = 0, & !< # of increment specifiers found in load case file N_def = 0 !< # of rate of deformation specifiers found in load case file - character(len=65536) :: & + character(len=pStringLen) :: & line !-------------------------------------------------------------------------------------------------- @@ -80,12 +76,6 @@ program DAMASK_spectral type(tLoadCase), allocatable, dimension(:) :: loadCases !< array of all load cases type(tLoadCase) :: newLoadCase type(tSolutionState), allocatable, dimension(:) :: solres - integer(MPI_OFFSET_KIND) :: fileOffset - integer(MPI_OFFSET_KIND), dimension(:), allocatable :: outputSize - integer, parameter :: maxByteOut = 2147483647-4096 !< limit of one file output write https://trac.mpich.org/projects/mpich/ticket/1742 - integer, parameter :: maxRealOut = maxByteOut/pReal - integer(pLongInt), dimension(2) :: outputIndex - PetscErrorCode :: ierr procedure(grid_mech_spectral_basic_init), pointer :: & mech_init procedure(grid_mech_spectral_basic_forward), pointer :: & @@ -103,7 +93,7 @@ program DAMASK_spectral !-------------------------------------------------------------------------------------------------- ! 
init DAMASK (all modules) call CPFEM_initAll - write(6,'(/,a)') ' <<<+- DAMASK_spectral init -+>>>' + write(6,'(/,a)') ' <<<+- DAMASK_spectral init -+>>>'; flush(6) write(6,'(/,a)') ' Shanthraj et al., Handbook of Mechanics of Materials, 2019' write(6,'(a)') ' https://doi.org/10.1007/978-981-10-6855-3_80' @@ -257,7 +247,7 @@ program DAMASK_spectral reportAndCheck: if (worldrank == 0) then write (loadcase_string, '(i6)' ) currentLoadCase - write(6,'(/,1x,a,i6)') 'load case: ', currentLoadCase + write(6,'(/,1x,a,i0)') 'load case: ', currentLoadCase if (.not. newLoadCase%followFormerTrajectory) write(6,'(2x,a)') 'drop guessing along trajectory' if (newLoadCase%deformation%myType == 'l') then do j = 1, 3 @@ -280,10 +270,8 @@ program DAMASK_spectral enddo if (any(newLoadCase%stress%maskLogical .eqv. & newLoadCase%deformation%maskLogical)) errorID = 831 ! exclusive or masking only - if (any(newLoadCase%stress%maskLogical .and. & - transpose(newLoadCase%stress%maskLogical) .and. & - reshape([ .false.,.true.,.true.,.true.,.false.,.true.,.true.,.true.,.false.],[ 3,3]))) & - errorID = 838 ! no rotation is allowed by stress BC + if (any(newLoadCase%stress%maskLogical .and. transpose(newLoadCase%stress%maskLogical) & + .and. (math_I3<1))) errorID = 838 ! no rotation is allowed by stress BC write(6,'(2x,a)') 'stress / GPa:' do i = 1, 3; do j = 1, 3 if(newLoadCase%stress%maskLogical(i,j)) then @@ -300,14 +288,14 @@ program DAMASK_spectral write(6,'(2x,a,/,3(3(3x,f12.7,1x)/))',advance='no') 'rotation of loadframe:',& transpose(newLoadCase%rot%asMatrix()) if (newLoadCase%time < 0.0_pReal) errorID = 834 ! negative time increment - write(6,'(2x,a,f12.6)') 'time: ', newLoadCase%time + write(6,'(2x,a,f0.3)') 'time: ', newLoadCase%time if (newLoadCase%incs < 1) errorID = 835 ! non-positive incs count - write(6,'(2x,a,i5)') 'increments: ', newLoadCase%incs + write(6,'(2x,a,i0)') 'increments: ', newLoadCase%incs if (newLoadCase%outputfrequency < 1) errorID = 836 ! non-positive result frequency - write(6,'(2x,a,i5)') 'output frequency: ', newLoadCase%outputfrequency + write(6,'(2x,a,i0)') 'output frequency: ', newLoadCase%outputfrequency if (newLoadCase%restartfrequency < 1) errorID = 839 ! non-positive restart frequency if (newLoadCase%restartfrequency < huge(0)) & - write(6,'(2x,a,i5)') 'restart frequency: ', newLoadCase%restartfrequency + write(6,'(2x,a,i0)') 'restart frequency: ', newLoadCase%restartfrequency if (errorID > 0) call IO_error(error_ID = errorID, ext_msg = loadcase_string) ! exit with error message endif reportAndCheck loadCases = [loadCases,newLoadCase] ! load case is ok, append it @@ -335,26 +323,10 @@ program DAMASK_spectral ! write header of output file if (worldrank == 0) then writeHeader: if (interface_restartInc < 1) then - open(newunit=fileUnit,file=trim(getSolverJobName())//& - '.spectralOut',form='UNFORMATTED',status='REPLACE') - write(fileUnit) 'load:', trim(loadCaseFile) ! ... and write header - write(fileUnit) 'workingdir:', 'n/a' - write(fileUnit) 'geometry:', trim(geometryFile) - write(fileUnit) 'grid:', grid - write(fileUnit) 'size:', geomSize - write(fileUnit) 'materialpoint_sizeResults:', materialpoint_sizeResults - write(fileUnit) 'loadcases:', size(loadCases) - write(fileUnit) 'frequencies:', loadCases%outputfrequency ! one entry per LoadCase - write(fileUnit) 'times:', loadCases%time ! one entry per LoadCase - write(fileUnit) 'logscales:', loadCases%logscale - write(fileUnit) 'increments:', loadCases%incs ! 
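The rewritten error-838 check uses math_I3<1 as an elementwise off-diagonal mask instead of the hard-coded reshape of nine logicals. A standalone illustration with a hypothetical stress mask (only the identity matrix and the mask are defined here):

program offdiag_mask_demo
  implicit none
  integer, parameter :: pReal = selected_real_kind(15)
  real(pReal) :: math_I3(3,3)
  logical     :: maskLogical(3,3)
  integer     :: i

  math_I3 = 0.0_pReal
  do i = 1, 3
    math_I3(i,i) = 1.0_pReal
  end do

  ! hypothetical case: sigma_12 and sigma_21 both prescribed
  maskLogical = .false.
  maskLogical(1,2) = .true.
  maskLogical(2,1) = .true.

  ! math_I3 < 1 is .true. exactly on the off-diagonal entries, so the test flags
  ! symmetric off-diagonal stress BCs, which would leave a rotation unconstrained
  if (any(maskLogical .and. transpose(maskLogical) .and. (math_I3 < 1.0_pReal))) &
    write(6,'(a)') ' error 838: no rotation is allowed by stress BC'
end program offdiag_mask_demo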
one entry per LoadCase - write(fileUnit) 'startingIncrement:', interface_restartInc ! start with writing out the previous inc - write(fileUnit) 'eoh' - close(fileUnit) ! end of header open(newunit=statUnit,file=trim(getSolverJobName())//'.sta',form='FORMATTED',status='REPLACE') write(statUnit,'(a)') 'Increment Time CutbackLevel Converged IterationsNeeded' ! statistics file if (iand(debug_level(debug_spectral),debug_levelBasic) /= 0) & - write(6,'(/,a)') ' header of result and statistics file written out' + write(6,'(/,a)') ' header of statistics file written out' flush(6) else writeHeader open(newunit=statUnit,file=trim(getSolverJobName())//& @@ -362,40 +334,11 @@ program DAMASK_spectral endif writeHeader endif -!-------------------------------------------------------------------------------------------------- -! prepare MPI parallel out (including opening of file) - allocate(outputSize(worldsize), source = 0_MPI_OFFSET_KIND) - outputSize(worldrank+1) = size(materialpoint_results,kind=MPI_OFFSET_KIND)*int(pReal,MPI_OFFSET_KIND) - call MPI_allreduce(MPI_IN_PLACE,outputSize,worldsize,MPI_LONG,MPI_SUM,PETSC_COMM_WORLD,ierr) ! get total output size over each process - if (ierr /= 0) call IO_error(error_ID=894, ext_msg='MPI_allreduce') - call MPI_file_open(PETSC_COMM_WORLD, trim(getSolverJobName())//'.spectralOut', & - MPI_MODE_WRONLY + MPI_MODE_APPEND, & - MPI_INFO_NULL, & - fileUnit, & - ierr) - if (ierr /= 0) call IO_error(error_ID=894, ext_msg='MPI_file_open') - call MPI_file_get_position(fileUnit,fileOffset,ierr) ! get offset from header - if (ierr /= 0) call IO_error(error_ID=894, ext_msg='MPI_file_get_position') - fileOffset = fileOffset + sum(outputSize(1:worldrank)) ! offset of my process in file (header + processes before me) - call MPI_file_seek (fileUnit,fileOffset,MPI_SEEK_SET,ierr) - if (ierr /= 0) call IO_error(error_ID=894, ext_msg='MPI_file_seek') - writeUndeformed: if (interface_restartInc < 1) then write(6,'(1/,a)') ' ... writing initial configuration to file ........................' call CPFEM_results(0,0.0_pReal) - do i = 1, size(materialpoint_results,3)/(maxByteOut/(materialpoint_sizeResults*pReal))+1 ! slice the output of my process in chunks not exceeding the limit for one output - outputIndex = int([(i-1)*((maxRealOut)/materialpoint_sizeResults)+1, & - min(i*((maxRealOut)/materialpoint_sizeResults),size(materialpoint_results,3))],pLongInt) - call MPI_file_write(fileUnit,reshape(materialpoint_results(:,:,outputIndex(1):outputIndex(2)), & - [(outputIndex(2)-outputIndex(1)+1)*int(materialpoint_sizeResults,pLongInt)]), & - int((outputIndex(2)-outputIndex(1)+1)*int(materialpoint_sizeResults,pLongInt)), & - MPI_DOUBLE, MPI_STATUS_IGNORE, ierr) - if (ierr /= 0) call IO_error(error_ID=894, ext_msg='MPI_file_write') - enddo - fileOffset = fileOffset + sum(outputSize) ! forward to current file position endif writeUndeformed - loadCaseLooping: do currentLoadCase = 1, size(loadCases) time0 = time ! load case start time guess = loadCases(currentLoadCase)%followFormerTrajectory ! change of load case? homogeneous guess for the first inc @@ -439,19 +382,12 @@ program DAMASK_spectral !-------------------------------------------------------------------------------------------------- ! 
report begin of new step write(6,'(/,a)') ' ###########################################################################' - write(6,'(1x,a,es12.5'//& - ',a,'//IO_intOut(inc) //',a,'//IO_intOut(loadCases(currentLoadCase)%incs)//& - ',a,'//IO_intOut(stepFraction) //',a,'//IO_intOut(subStepFactor**cutBackLevel)//& - ',a,'//IO_intOut(currentLoadCase)//',a,'//IO_intOut(size(loadCases))//')') & + write(6,'(1x,a,es12.5,6(a,i0))') & 'Time', time, & 's: Increment ', inc,'/',loadCases(currentLoadCase)%incs,& '-', stepFraction,'/',subStepFactor**cutBackLevel,& ' of load case ', currentLoadCase,'/',size(loadCases) - write(incInfo,& - '(a,'//IO_intOut(totalIncsCounter)//& - ',a,'//IO_intOut(sum(loadCases%incs))//& - ',a,'//IO_intOut(stepFraction)//& - ',a,'//IO_intOut(subStepFactor**cutBackLevel)//')') & + write(incInfo,'(4(a,i0))') & 'Increment ',totalIncsCounter,'/',sum(loadCases%incs),& '-', stepFraction,'/',subStepFactor**cutBackLevel flush(6) @@ -526,7 +462,6 @@ program DAMASK_spectral write(6,'(/,a)') ' cutting back ' else ! no more options to continue call IO_warning(850) - call MPI_File_close(fileUnit,ierr) close(statUnit) call quit(0) ! quit endif @@ -536,29 +471,14 @@ program DAMASK_spectral cutBackLevel = max(0, cutBackLevel - 1) ! try half number of subincs next inc if (all(solres(:)%converged)) then - write(6,'(/,a,'//IO_intOut(totalIncsCounter)//',a)') & ! report converged inc - ' increment ', totalIncsCounter, ' converged' + write(6,'(/,a,i0,a)') ' increment ', totalIncsCounter, ' converged' else - write(6,'(/,a,'//IO_intOut(totalIncsCounter)//',a)') & ! report non-converged inc - ' increment ', totalIncsCounter, ' NOT converged' + write(6,'(/,a,i0,a)') ' increment ', totalIncsCounter, ' NOT converged' endif; flush(6) if (mod(inc,loadCases(currentLoadCase)%outputFrequency) == 0) then ! at output frequency write(6,'(1/,a)') ' ... writing results to file ......................................' flush(6) - call materialpoint_postResults() - call MPI_File_seek (fileUnit,fileOffset,MPI_SEEK_SET,ierr) - if (ierr /= 0) call IO_error(894, ext_msg='MPI_file_seek') - do i=1, size(materialpoint_results,3)/(maxByteOut/(materialpoint_sizeResults*pReal))+1 ! slice the output of my process in chunks not exceeding the limit for one output - outputIndex=int([(i-1)*((maxRealOut)/materialpoint_sizeResults)+1, & - min(i*((maxRealOut)/materialpoint_sizeResults),size(materialpoint_results,3))],pLongInt) - call MPI_file_write(fileUnit,reshape(materialpoint_results(:,:,outputIndex(1):outputIndex(2)),& - [(outputIndex(2)-outputIndex(1)+1)*int(materialpoint_sizeResults,pLongInt)]), & - int((outputIndex(2)-outputIndex(1)+1)*int(materialpoint_sizeResults,pLongInt)),& - MPI_DOUBLE, MPI_STATUS_IGNORE, ierr) - if(ierr /=0) call IO_error(894, ext_msg='MPI_file_write') - enddo - fileOffset = fileOffset + sum(outputSize) ! forward to current file position call CPFEM_results(totalIncsCounter,time) endif if (mod(inc,loadCases(currentLoadCase)%restartFrequency) == 0) then @@ -575,7 +495,6 @@ program DAMASK_spectral !-------------------------------------------------------------------------------------------------- ! report summary of whole calculation write(6,'(/,a)') ' ###########################################################################' - call MPI_file_close(fileUnit,ierr) close(statUnit) call quit(0) ! 
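The report lines above now rely on the i0 and f0.3 edit descriptors, which size the field to the value, instead of format strings assembled at run time via IO_intOut. A small illustration with made-up values:

program format_demo
  implicit none
  integer :: inc = 3, incs = 40, loadCase = 1, nCases = 2
  real    :: t = 0.125

  ! i0 prints the integer at its natural width, f0.3 does the same for reals
  write(6,'(1x,a,es12.5,4(a,i0))') 'Time', t, 's: Increment ', inc, '/', incs, &
                                   ' of load case ', loadCase, '/', nCases
  write(6,'(2x,a,f0.3)') 'time: ', t
  write(6,'(/,a,i0,a)')  ' increment ', inc, ' converged'
end program format_demo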
no complains ;) diff --git a/src/grid/grid_damage_spectral.f90 b/src/grid/grid_damage_spectral.f90 index 1fb91b49b..97b20a6db 100644 --- a/src/grid/grid_damage_spectral.f90 +++ b/src/grid/grid_damage_spectral.f90 @@ -15,7 +15,6 @@ module grid_damage_spectral use mesh_grid use damage_nonlocal use numerics - use damage_nonlocal implicit none private @@ -55,7 +54,7 @@ contains !-------------------------------------------------------------------------------------------------- subroutine grid_damage_spectral_init - PetscInt, dimension(worldsize) :: localK + PetscInt, dimension(0:worldsize-1) :: localK integer :: i, j, k, cell DM :: damage_grid Vec :: uBound, lBound @@ -79,8 +78,8 @@ subroutine grid_damage_spectral_init ! initialize solver specific parts of PETSc call SNESCreate(PETSC_COMM_WORLD,damage_snes,ierr); CHKERRQ(ierr) call SNESSetOptionsPrefix(damage_snes,'damage_',ierr);CHKERRQ(ierr) - localK = 0 - localK(worldrank+1) = grid3 + localK = 0 + localK(worldrank) = grid3 call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,PETSC_COMM_WORLD,ierr) call DMDACreate3D(PETSC_COMM_WORLD, & DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, & ! cut off stencil at boundary diff --git a/src/grid/grid_mech_FEM.f90 b/src/grid/grid_mech_FEM.f90 index e87e02d09..b51ebb56a 100644 --- a/src/grid/grid_mech_FEM.f90 +++ b/src/grid/grid_mech_FEM.f90 @@ -7,22 +7,20 @@ module grid_mech_FEM #include #include - use DAMASK_interface - use HDF5_utilities use PETScdmda use PETScsnes + use prec - use CPFEM2 - use IO - use debug + use DAMASK_interface + use HDF5_utilities + use math + use spectral_utilities use FEsolving use numerics use homogenization - use DAMASK_interface - use spectral_utilities use discretization use mesh_grid - use math + use debug implicit none private @@ -52,10 +50,10 @@ module grid_mech_FEM F_aimDot = 0.0_pReal, & !< assumed rate of average deformation gradient F_aim = math_I3, & !< current prescribed deformation gradient F_aim_lastIter = math_I3, & - F_aim_lastInc = math_I3, & !< previous average deformation gradient + F_aim_lastInc = math_I3, & !< previous average deformation gradient P_av = 0.0_pReal !< average 1st Piola--Kirchhoff stress - character(len=1024), private :: incInfo !< time and increment information + character(len=pStringLen), private :: incInfo !< time and increment information real(pReal), private, dimension(3,3,3,3) :: & C_volAvg = 0.0_pReal, & !< current volume average stiffness @@ -82,8 +80,8 @@ contains !-------------------------------------------------------------------------------------------------- subroutine grid_mech_FEM_init - real(pReal) :: HGCoeff = 0e-2_pReal - PetscInt, dimension(:), allocatable :: localK + real(pReal) :: HGCoeff = 0.0e-2_pReal + PetscInt, dimension(0:worldsize-1) :: localK real(pReal), dimension(3,3) :: & temp33_Real = 0.0_pReal real(pReal), dimension(4,8) :: & @@ -96,15 +94,14 @@ subroutine grid_mech_FEM_init 1.0_pReal,-1.0_pReal,-1.0_pReal,-1.0_pReal, & 1.0_pReal, 1.0_pReal, 1.0_pReal, 1.0_pReal], [4,8]) PetscErrorCode :: ierr - integer :: rank - integer(HID_T) :: fileHandle, groupHandle - character(len=1024) :: rankStr + integer(HID_T) :: fileHandle, groupHandle + character(len=pStringLen) :: fileName real(pReal), dimension(3,3,3,3) :: devNull PetscScalar, pointer, dimension(:,:,:,:) :: & u_current,u_lastInc - write(6,'(/,a)') ' <<<+- grid_mech_FEM init -+>>>' - + write(6,'(/,a)') ' <<<+- grid_mech_FEM init -+>>>'; flush(6) + 
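Declaring localK with bounds 0:worldsize-1 lets it be indexed directly by the MPI rank before the Allreduce that shares every rank's slab thickness (grid3). A standalone sketch of that gather with plain MPI, where the slab thickness is a made-up value:

program localk_demo
  use mpi
  implicit none
  integer :: ierr, worldrank, worldsize, grid3
  integer, allocatable :: localK(:)

  call MPI_Init(ierr)
  call MPI_Comm_rank(MPI_COMM_WORLD, worldrank, ierr)
  call MPI_Comm_size(MPI_COMM_WORLD, worldsize, ierr)

  grid3 = 4 + worldrank                     ! stand-in for this rank's local slab thickness
  allocate(localK(0:worldsize-1))
  localK = 0
  localK(worldrank) = grid3                 ! each rank fills only its own slot
  call MPI_Allreduce(MPI_IN_PLACE, localK, worldsize, MPI_INTEGER, MPI_SUM, MPI_COMM_WORLD, ierr)
  ! every rank now knows all slab thicknesses, e.g. for the lz argument of DMDACreate3d
  if (worldrank == 0) write(6,'(a,*(i0,1x))') 'localK: ', localK

  call MPI_Finalize(ierr)
end program localk_demo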
!-------------------------------------------------------------------------------------------------- ! set default and user defined options for PETSc call PETScOptionsInsertString(PETSC_NULL_OPTIONS,'-mech_snes_type newtonls -mech_ksp_type fgmres & @@ -123,10 +120,9 @@ subroutine grid_mech_FEM_init ! initialize solver specific parts of PETSc call SNESCreate(PETSC_COMM_WORLD,mech_snes,ierr); CHKERRQ(ierr) call SNESSetOptionsPrefix(mech_snes,'mech_',ierr);CHKERRQ(ierr) - allocate(localK(worldsize), source = 0); localK(worldrank+1) = grid3 - do rank = 1, worldsize - call MPI_Bcast(localK(rank),1,MPI_INTEGER,rank-1,PETSC_COMM_WORLD,ierr) - enddo + localK = 0 + localK(worldrank) = grid3 + call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,PETSC_COMM_WORLD,ierr) call DMDACreate3d(PETSC_COMM_WORLD, & DM_BOUNDARY_PERIODIC, DM_BOUNDARY_PERIODIC, DM_BOUNDARY_PERIODIC, & DMDA_STENCIL_BOX, & @@ -184,11 +180,10 @@ subroutine grid_mech_FEM_init !-------------------------------------------------------------------------------------------------- ! init fields restartRead: if (interface_restartInc > 0) then - write(6,'(/,a,'//IO_intOut(interface_restartInc)//',a)') & - 'reading values of increment ', interface_restartInc, ' from file' + write(6,'(/,a,i0,a)') ' reading restart data of increment ', interface_restartInc, ' from file' - write(rankStr,'(a1,i0)')'_',worldrank - fileHandle = HDF5_openFile(trim(getSolverJobName())//trim(rankStr)//'.hdf5') + write(fileName,'(a,a,i0,a)') trim(getSolverJobName()),'_',worldrank,'.hdf5' + fileHandle = HDF5_openFile(fileName) groupHandle = HDF5_openGroup(fileHandle,'solver') call HDF5_read(groupHandle,F_aim, 'F_aim') @@ -214,8 +209,7 @@ subroutine grid_mech_FEM_init CHKERRQ(ierr) restartRead2: if (interface_restartInc > 0) then - write(6,'(/,a,'//IO_intOut(interface_restartInc)//',a)') & - 'reading more values of increment ', interface_restartInc, ' from file' + write(6,'(/,a,i0,a)') ' reading more restart data of increment ', interface_restartInc, ' from file' call HDF5_read(groupHandle,C_volAvg, 'C_volAvg') call HDF5_read(groupHandle,C_volAvgLastInc,'C_volAvgLastInc') @@ -355,7 +349,7 @@ end subroutine grid_mech_FEM_forward !-------------------------------------------------------------------------------------------------- !> @brief Age !-------------------------------------------------------------------------------------------------- -subroutine grid_mech_FEM_updateCoords() +subroutine grid_mech_FEM_updateCoords call utilities_updateCoords(F) @@ -365,20 +359,20 @@ end subroutine grid_mech_FEM_updateCoords !-------------------------------------------------------------------------------------------------- !> @brief Write current solver and constitutive data for restart to file !-------------------------------------------------------------------------------------------------- -subroutine grid_mech_FEM_restartWrite() +subroutine grid_mech_FEM_restartWrite PetscErrorCode :: ierr + integer(HID_T) :: fileHandle, groupHandle PetscScalar, dimension(:,:,:,:), pointer :: u_current,u_lastInc - integer(HID_T) :: fileHandle, groupHandle - character(len=32) :: rankStr + character(len=pStringLen) :: fileName call DMDAVecGetArrayF90(mech_grid,solution_current,u_current,ierr); CHKERRQ(ierr) call DMDAVecGetArrayF90(mech_grid,solution_lastInc,u_lastInc,ierr); CHKERRQ(ierr) - write(6,'(a)') ' writing solver data required for restart to file';flush(6) + write(6,'(a)') ' writing solver data required for restart to file'; flush(6) - write(rankStr,'(a1,i0)')'_',worldrank - 
fileHandle = HDF5_openFile(trim(getSolverJobName())//trim(rankStr)//'.hdf5','w') + write(fileName,'(a,a,i0,a)') trim(getSolverJobName()),'_',worldrank,'.hdf5' + fileHandle = HDF5_openFile(fileName,'w') groupHandle = HDF5_addGroup(fileHandle,'solver') call HDF5_write(groupHandle,F_aim, 'F_aim') @@ -478,8 +472,7 @@ subroutine formResidual(da_local,x_local, & ! begin of new iteration newIteration: if (totalIter <= PETScIter) then totalIter = totalIter + 1 - write(6,'(1x,a,3(a,'//IO_intOut(itmax)//'))') & - trim(incInfo), ' @ Iteration ', itmin, '≤',totalIter+1, '≤', itmax + write(6,'(1x,a,3(a,i0))') trim(incInfo), ' @ Iteration ', itmin, '≤',totalIter+1, '≤', itmax if (iand(debug_level(debug_spectral),debug_spectralRotation) /= 0) & write(6,'(/,a,/,3(3(f12.7,1x)/))',advance='no') & ' deformation gradient aim (lab) =', transpose(params%rotation_BC%rotTensor2(F_aim,active=.true.)) diff --git a/src/grid/grid_mech_spectral_basic.f90 b/src/grid/grid_mech_spectral_basic.f90 index 36caabf90..af8cbc377 100644 --- a/src/grid/grid_mech_spectral_basic.f90 +++ b/src/grid/grid_mech_spectral_basic.f90 @@ -15,13 +15,11 @@ module grid_mech_spectral_basic use HDF5_utilities use math use spectral_utilities - use IO use FEsolving use config use numerics use homogenization use mesh_grid - use CPFEM2 use debug implicit none @@ -57,7 +55,7 @@ module grid_mech_spectral_basic F_aim_lastInc = math_I3, & !< previous average deformation gradient P_av = 0.0_pReal !< average 1st Piola--Kirchhoff stress - character(len=1024), private :: incInfo !< time and increment information + character(len=pStringLen), private :: incInfo !< time and increment information real(pReal), private, dimension(3,3,3,3) :: & C_volAvg = 0.0_pReal, & !< current volume average stiffness C_volAvgLastInc = 0.0_pReal, & !< previous volume average stiffness @@ -94,11 +92,11 @@ subroutine grid_mech_spectral_basic_init PetscScalar, pointer, dimension(:,:,:,:) :: & F ! pointer to solution data PetscInt, dimension(worldsize) :: localK - integer(HID_T) :: fileHandle, groupHandle - integer :: fileUnit - character(len=1024) :: rankStr + integer(HID_T) :: fileHandle, groupHandle + integer :: fileUnit + character(len=pStringLen) :: fileName - write(6,'(/,a)') ' <<<+- grid_mech_spectral_basic init -+>>>' + write(6,'(/,a)') ' <<<+- grid_mech_spectral_basic init -+>>>'; flush(6) write(6,'(/,a)') ' Eisenlohr et al., International Journal of Plasticity 46:37–53, 2013' write(6,'(a)') ' https://doi.org/10.1016/j.ijplas.2012.09.012' @@ -151,11 +149,10 @@ subroutine grid_mech_spectral_basic_init call DMDAVecGetArrayF90(da,solution_vec,F,ierr); CHKERRQ(ierr) ! places pointer on PETSc data restartRead: if (interface_restartInc > 0) then - write(6,'(/,a,'//IO_intOut(interface_restartInc)//',a)') & - ' reading values of increment ', interface_restartInc, ' from file' + write(6,'(/,a,i0,a)') ' reading restart data of increment ', interface_restartInc, ' from file' - write(rankStr,'(a1,i0)')'_',worldrank - fileHandle = HDF5_openFile(trim(getSolverJobName())//trim(rankStr)//'.hdf5') + write(fileName,'(a,a,i0,a)') trim(getSolverJobName()),'_',worldrank,'.hdf5' + fileHandle = HDF5_openFile(fileName) groupHandle = HDF5_openGroup(fileHandle,'solver') call HDF5_read(groupHandle,F_aim, 'F_aim') @@ -177,8 +174,7 @@ subroutine grid_mech_spectral_basic_init call DMDAVecRestoreArrayF90(da,solution_vec,F,ierr); CHKERRQ(ierr) ! 
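The per-rank restart file name is now built with a single internal write instead of concatenating a separately formatted rank string. A minimal sketch of that construction; jobName and the length 256 are placeholders, while getSolverJobName and worldrank come from the code base:

program filename_demo
  implicit none
  integer, parameter :: pStringLen = 256                 ! assumed value for this sketch
  character(len=*), parameter :: jobName = 'job'         ! stands in for getSolverJobName()
  character(len=pStringLen) :: fileName
  integer :: worldrank = 3

  ! replaces trim(getSolverJobName())//trim(rankStr)//'.hdf5'
  write(fileName,'(a,a,i0,a)') trim(jobName), '_', worldrank, '.hdf5'
  write(6,'(a)') trim(fileName)                          ! prints: job_3.hdf5
end program filename_demo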
deassociate pointer restartRead2: if (interface_restartInc > 0) then - write(6,'(/,a,'//IO_intOut(interface_restartInc)//',a)') & - 'reading more values of increment ', interface_restartInc, ' from file' + write(6,'(/,a,i0,a)') ' reading more restart data of increment ', interface_restartInc, ' from file' call HDF5_read(groupHandle,C_volAvg, 'C_volAvg') call HDF5_read(groupHandle,C_volAvgLastInc,'C_volAvgLastInc') @@ -286,7 +282,7 @@ subroutine grid_mech_spectral_basic_forward(cutBack,guess,timeinc,timeinc_old,lo F_aimDot = merge(stress_BC%maskFloat*(F_aim-F_aim_lastInc)/timeinc_old, 0.0_pReal, guess) F_aim_lastInc = F_aim - !-------------------------------------------------------------------------------------------------- + !----------------------------------------------------------------------------------------------- ! calculate rate for aim if (deformation_BC%myType=='l') then ! calculate F_aimDot from given L and current F F_aimDot = & @@ -320,7 +316,7 @@ end subroutine grid_mech_spectral_basic_forward !-------------------------------------------------------------------------------------------------- !> @brief Age !-------------------------------------------------------------------------------------------------- -subroutine grid_mech_spectral_basic_updateCoords() +subroutine grid_mech_spectral_basic_updateCoords PetscErrorCode :: ierr PetscScalar, dimension(:,:,:,:), pointer :: F @@ -335,19 +331,19 @@ end subroutine grid_mech_spectral_basic_updateCoords !-------------------------------------------------------------------------------------------------- !> @brief Write current solver and constitutive data for restart to file !-------------------------------------------------------------------------------------------------- -subroutine grid_mech_spectral_basic_restartWrite() +subroutine grid_mech_spectral_basic_restartWrite PetscErrorCode :: ierr + integer(HID_T) :: fileHandle, groupHandle PetscScalar, dimension(:,:,:,:), pointer :: F - integer(HID_T) :: fileHandle, groupHandle - character(len=32) :: rankStr + character(len=pStringLen) :: fileName call DMDAVecGetArrayF90(da,solution_vec,F,ierr); CHKERRQ(ierr) - write(6,'(a)') ' writing solver data required for restart to file';flush(6) + write(6,'(a)') ' writing solver data required for restart to file'; flush(6) - write(rankStr,'(a1,i0)')'_',worldrank - fileHandle = HDF5_openFile(trim(getSolverJobName())//trim(rankStr)//'.hdf5','w') + write(fileName,'(a,a,i0,a)') trim(getSolverJobName()),'_',worldrank,'.hdf5' + fileHandle = HDF5_openFile(fileName,'w') groupHandle = HDF5_addGroup(fileHandle,'solver') call HDF5_write(groupHandle,F_aim, 'F_aim') @@ -366,7 +362,7 @@ subroutine grid_mech_spectral_basic_restartWrite() if (num%update_gamma) call utilities_saveReferenceStiffness call DMDAVecRestoreArrayF90(da,solution_vec,F,ierr); CHKERRQ(ierr) - + end subroutine grid_mech_spectral_basic_restartWrite @@ -442,8 +438,7 @@ subroutine formResidual(in, F, & ! 
begin of new iteration newIteration: if (totalIter <= PETScIter) then totalIter = totalIter + 1 - write(6,'(1x,a,3(a,'//IO_intOut(itmax)//'))') & - trim(incInfo), ' @ Iteration ', itmin, '≤',totalIter, '≤', itmax + write(6,'(1x,a,3(a,i0))') trim(incInfo), ' @ Iteration ', itmin, '≤',totalIter, '≤', itmax if (iand(debug_level(debug_spectral),debug_spectralRotation) /= 0) & write(6,'(/,a,/,3(3(f12.7,1x)/))',advance='no') & ' deformation gradient aim (lab) =', transpose(params%rotation_BC%rotTensor2(F_aim,active=.true.)) diff --git a/src/grid/grid_mech_spectral_polarisation.f90 b/src/grid/grid_mech_spectral_polarisation.f90 index 07d9a5afc..bdc65a8c5 100644 --- a/src/grid/grid_mech_spectral_polarisation.f90 +++ b/src/grid/grid_mech_spectral_polarisation.f90 @@ -16,13 +16,11 @@ module grid_mech_spectral_polarisation use math use rotations use spectral_utilities - use IO use FEsolving use config use numerics use homogenization use mesh_grid - use CPFEM2 use debug implicit none @@ -61,7 +59,7 @@ module grid_mech_spectral_polarisation F_av = 0.0_pReal, & !< average incompatible def grad field P_av = 0.0_pReal !< average 1st Piola--Kirchhoff stress - character(len=1024), private :: incInfo !< time and increment information + character(len=pStringLen), private :: incInfo !< time and increment information real(pReal), private, dimension(3,3,3,3) :: & C_volAvg = 0.0_pReal, & !< current volume average stiffness C_volAvgLastInc = 0.0_pReal, & !< previous volume average stiffness @@ -102,12 +100,12 @@ subroutine grid_mech_spectral_polarisation_init FandF_tau, & ! overall pointer to solution data F, & ! specific (sub)pointer F_tau ! specific (sub)pointer - PetscInt, dimension(worldsize) :: localK - integer(HID_T) :: fileHandle, groupHandle - integer :: fileUnit - character(len=1024) :: rankStr + PetscInt, dimension(0:worldsize-1) :: localK + integer(HID_T) :: fileHandle, groupHandle + integer :: fileUnit + character(len=pStringLen) :: fileName - write(6,'(/,a)') ' <<<+- grid_mech_spectral_polarisation init -+>>>' + write(6,'(/,a)') ' <<<+- grid_mech_spectral_polarisation init -+>>>'; flush(6) write(6,'(/,a)') ' Shanthraj et al., International Journal of Plasticity 66:31–45, 2015' write(6,'(a)') ' https://doi.org/10.1016/j.ijplas.2014.02.006' @@ -132,8 +130,8 @@ subroutine grid_mech_spectral_polarisation_init ! initialize solver specific parts of PETSc call SNESCreate(PETSC_COMM_WORLD,snes,ierr); CHKERRQ(ierr) call SNESSetOptionsPrefix(snes,'mech_',ierr);CHKERRQ(ierr) - localK = 0 - localK(worldrank+1) = grid3 + localK = 0 + localK(worldrank) = grid3 call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,PETSC_COMM_WORLD,ierr) call DMDACreate3d(PETSC_COMM_WORLD, & DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, & ! 
cut off stencil at boundary @@ -161,11 +159,10 @@ subroutine grid_mech_spectral_polarisation_init F_tau => FandF_tau(9:17,:,:,:) restartRead: if (interface_restartInc > 0) then - write(6,'(/,a,'//IO_intOut(interface_restartInc)//',a)') & - ' reading values of increment ', interface_restartInc, ' from file' - - write(rankStr,'(a1,i0)')'_',worldrank - fileHandle = HDF5_openFile(trim(getSolverJobName())//trim(rankStr)//'.hdf5') + write(6,'(/,a,i0,a)') ' reading restart data of increment ', interface_restartInc, ' from file' + + write(fileName,'(a,a,i0,a)') trim(getSolverJobName()),'_',worldrank,'.hdf5' + fileHandle = HDF5_openFile(fileName) groupHandle = HDF5_openGroup(fileHandle,'solver') call HDF5_read(groupHandle,F_aim, 'F_aim') @@ -191,8 +188,7 @@ subroutine grid_mech_spectral_polarisation_init call DMDAVecRestoreArrayF90(da,solution_vec,FandF_tau,ierr); CHKERRQ(ierr) ! deassociate pointer restartRead2: if (interface_restartInc > 0) then - write(6,'(/,a,'//IO_intOut(interface_restartInc)//',a)') & - ' reading more values of increment ', interface_restartInc, ' from file' + write(6,'(/,a,i0,a)') ' reading more restart data of increment ', interface_restartInc, ' from file' call HDF5_read(groupHandle,C_volAvg, 'C_volAvg') call HDF5_read(groupHandle,C_volAvgLastInc,'C_volAvgLastInc') @@ -364,7 +360,7 @@ end subroutine grid_mech_spectral_polarisation_forward !-------------------------------------------------------------------------------------------------- !> @brief Age !-------------------------------------------------------------------------------------------------- -subroutine grid_mech_spectral_polarisation_updateCoords() +subroutine grid_mech_spectral_polarisation_updateCoords PetscErrorCode :: ierr PetscScalar, dimension(:,:,:,:), pointer :: FandF_tau @@ -379,21 +375,21 @@ end subroutine grid_mech_spectral_polarisation_updateCoords !-------------------------------------------------------------------------------------------------- !> @brief Write current solver and constitutive data for restart to file !-------------------------------------------------------------------------------------------------- -subroutine grid_mech_spectral_polarisation_restartWrite() +subroutine grid_mech_spectral_polarisation_restartWrite PetscErrorCode :: ierr + integer(HID_T) :: fileHandle, groupHandle PetscScalar, dimension(:,:,:,:), pointer :: FandF_tau, F, F_tau - integer(HID_T) :: fileHandle, groupHandle - character(len=32) :: rankStr + character(len=pStringLen) :: fileName call DMDAVecGetArrayF90(da,solution_vec,FandF_tau,ierr); CHKERRQ(ierr) F => FandF_tau(0: 8,:,:,:) F_tau => FandF_tau(9:17,:,:,:) - write(6,'(a)') ' writing solver data required for restart to file';flush(6) + write(6,'(a)') ' writing solver data required for restart to file'; flush(6) - write(rankStr,'(a1,i0)')'_',worldrank - fileHandle = HDF5_openFile(trim(getSolverJobName())//trim(rankStr)//'.hdf5','w') + write(fileName,'(a,a,i0,a)') trim(getSolverJobName()),'_',worldrank,'.hdf5' + fileHandle = HDF5_openFile(fileName,'w') groupHandle = HDF5_addGroup(fileHandle,'solver') call HDF5_write(groupHandle,F_aim, 'F_aim') @@ -511,8 +507,7 @@ subroutine formResidual(in, FandF_tau, & ! 
begin of new iteration newIteration: if (totalIter <= PETScIter) then totalIter = totalIter + 1 - write(6,'(1x,a,3(a,'//IO_intOut(itmax)//'))') & - trim(incInfo), ' @ Iteration ', itmin, '≤',totalIter, '≤', itmax + write(6,'(1x,a,3(a,i0))') trim(incInfo), ' @ Iteration ', itmin, '≤',totalIter, '≤', itmax if (iand(debug_level(debug_spectral),debug_spectralRotation) /= 0) & write(6,'(/,a,/,3(3(f12.7,1x)/))',advance='no') & ' deformation gradient aim (lab) =', transpose(params%rotation_BC%rotTensor2(F_aim,active=.true.)) diff --git a/src/grid/grid_thermal_spectral.f90 b/src/grid/grid_thermal_spectral.f90 index c7f886f13..2d69f075e 100644 --- a/src/grid/grid_thermal_spectral.f90 +++ b/src/grid/grid_thermal_spectral.f90 @@ -14,8 +14,8 @@ module grid_thermal_spectral use spectral_utilities use mesh_grid use thermal_conduction - use material use numerics + use material implicit none private @@ -55,7 +55,7 @@ contains !-------------------------------------------------------------------------------------------------- subroutine grid_thermal_spectral_init - PetscInt, dimension(worldsize) :: localK + PetscInt, dimension(0:worldsize-1) :: localK integer :: i, j, k, cell DM :: thermal_grid PetscScalar, dimension(:,:,:), pointer :: x_scal @@ -77,8 +77,8 @@ subroutine grid_thermal_spectral_init ! initialize solver specific parts of PETSc call SNESCreate(PETSC_COMM_WORLD,thermal_snes,ierr); CHKERRQ(ierr) call SNESSetOptionsPrefix(thermal_snes,'thermal_',ierr);CHKERRQ(ierr) - localK = 0 - localK(worldrank+1) = grid3 + localK = 0 + localK(worldrank) = grid3 call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,PETSC_COMM_WORLD,ierr) call DMDACreate3D(PETSC_COMM_WORLD, & DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, & ! cut off stencil at boundary diff --git a/src/grid/spectral_utilities.f90 b/src/grid/spectral_utilities.f90 index a1265556a..68c372b26 100644 --- a/src/grid/spectral_utilities.f90 +++ b/src/grid/spectral_utilities.f90 @@ -700,7 +700,7 @@ function utilities_maskedCompliance(rot_BC,mask_stress,C) c_reduced, & !< reduced stiffness (depending on number of stress BC) sTimesC !< temp variable to check inversion logical :: errmatinv - character(len=1024):: formatString + character(len=pStringLen):: formatString mask_stressVector = reshape(transpose(mask_stress), [9]) size_reduced = count(mask_stressVector) @@ -1123,7 +1123,7 @@ subroutine utilities_saveReferenceStiffness fileUnit if (worldrank == 0) then - write(6,'(a)') ' writing reference stiffness data required for restart to file';flush(6) + write(6,'(a)') ' writing reference stiffness data required for restart to file'; flush(6) fileUnit = IO_open_jobFile_binary('C_ref','w') write(fileUnit) C_ref close(fileUnit) diff --git a/src/homogenization.f90 b/src/homogenization.f90 index 0a61cbd3a..ee1e689e5 100644 --- a/src/homogenization.f90 +++ b/src/homogenization.f90 @@ -35,12 +35,6 @@ module homogenization materialpoint_P !< first P--K stress of IP real(pReal), dimension(:,:,:,:,:,:), allocatable, public :: & materialpoint_dPdF !< tangent of first P--K stress at IP - real(pReal), dimension(:,:,:), allocatable, public :: & - materialpoint_results !< results array of material point - integer, public, protected :: & - materialpoint_sizeResults, & - thermal_maxSizePostResults, & - damage_maxSizePostResults real(pReal), dimension(:,:,:,:), allocatable :: & materialpoint_subF0, & !< def grad of IP at beginning of homogenization increment @@ -125,7 +119,6 @@ module homogenization public :: & homogenization_init, & 
materialpoint_stressAndItsTangent, & - materialpoint_postResults, & homogenization_results contains @@ -136,14 +129,6 @@ contains !-------------------------------------------------------------------------------------------------- subroutine homogenization_init - integer, parameter :: FILEUNIT = 200 - integer :: e,i,p - integer, dimension(:,:), pointer :: thisSize - integer, dimension(:) , pointer :: thisNoutput - character(len=64), dimension(:,:), pointer :: thisOutput - character(len=32) :: outputName !< name of output, intermediate fix until HDF5 output is ready - logical :: valid - if (any(homogenization_type == HOMOGENIZATION_NONE_ID)) call mech_none_init if (any(homogenization_type == HOMOGENIZATION_ISOSTRAIN_ID)) call mech_isostrain_init if (any(homogenization_type == HOMOGENIZATION_RGC_ID)) call mech_RGC_init @@ -156,80 +141,6 @@ subroutine homogenization_init if (any(damage_type == DAMAGE_local_ID)) call damage_local_init if (any(damage_type == DAMAGE_nonlocal_ID)) call damage_nonlocal_init -!-------------------------------------------------------------------------------------------------- -! write description file for homogenization output - mainProcess: if (worldrank == 0) then - call IO_write_jobFile(FILEUNIT,'outputHomogenization') - do p = 1,size(config_homogenization) - if (any(material_homogenizationAt == p)) then - write(FILEUNIT,'(/,a,/)') '['//trim(config_name_homogenization(p))//']' - write(FILEUNIT,'(a)') '(type) n/a' - write(FILEUNIT,'(a,i4)') '(ngrains)'//char(9),homogenization_Ngrains(p) - - i = thermal_typeInstance(p) ! which instance of this thermal type - valid = .true. ! assume valid - select case(thermal_type(p)) ! split per thermal type - case (THERMAL_isothermal_ID) - outputName = THERMAL_isothermal_label - thisNoutput => null() - thisOutput => null() - thisSize => null() - case (THERMAL_adiabatic_ID) - outputName = THERMAL_adiabatic_label - thisNoutput => thermal_adiabatic_Noutput - thisOutput => thermal_adiabatic_output - thisSize => thermal_adiabatic_sizePostResult - case (THERMAL_conduction_ID) - outputName = THERMAL_conduction_label - thisNoutput => thermal_conduction_Noutput - thisOutput => thermal_conduction_output - thisSize => thermal_conduction_sizePostResult - case default - valid = .false. - end select - if (valid) then - write(FILEUNIT,'(a)') '(thermal)'//char(9)//trim(outputName) - if (thermal_type(p) /= THERMAL_isothermal_ID) then - do e = 1,thisNoutput(i) - write(FILEUNIT,'(a,i4)') trim(thisOutput(e,i))//char(9),thisSize(e,i) - enddo - endif - endif - - i = damage_typeInstance(p) ! which instance of this damage type - valid = .true. ! assume valid - select case(damage_type(p)) ! split per damage type - case (DAMAGE_none_ID) - outputName = DAMAGE_none_label - thisNoutput => null() - thisOutput => null() - thisSize => null() - case (DAMAGE_local_ID) - outputName = DAMAGE_local_label - thisNoutput => damage_local_Noutput - thisOutput => damage_local_output - thisSize => damage_local_sizePostResult - case (DAMAGE_nonlocal_ID) - outputName = DAMAGE_nonlocal_label - thisNoutput => damage_nonlocal_Noutput - thisOutput => damage_nonlocal_output - thisSize => damage_nonlocal_sizePostResult - case default - valid = .false. 
- end select - if (valid) then - write(FILEUNIT,'(a)') '(damage)'//char(9)//trim(outputName) - if (damage_type(p) /= DAMAGE_none_ID) then - do e = 1,thisNoutput(i) - write(FILEUNIT,'(a,i4)') trim(thisOutput(e,i))//char(9),thisSize(e,i) - enddo - endif - endif - endif - enddo - close(FILEUNIT) - endif mainProcess - call config_deallocate('material.config/homogenization') !-------------------------------------------------------------------------------------------------- @@ -249,23 +160,7 @@ subroutine homogenization_init allocate(materialpoint_converged(discretization_nIP,discretization_nElem), source=.true.) allocate(materialpoint_doneAndHappy(2,discretization_nIP,discretization_nElem), source=.true.) -!-------------------------------------------------------------------------------------------------- -! allocate and initialize global state and postresutls variables - thermal_maxSizePostResults = 0 - damage_maxSizePostResults = 0 - do p = 1,size(config_homogenization) - thermal_maxSizePostResults = max(thermal_maxSizePostResults, thermalState (p)%sizePostResults) - damage_maxSizePostResults = max(damage_maxSizePostResults ,damageState (p)%sizePostResults) - enddo - - materialpoint_sizeResults = 1 & ! grain count - + 1 + thermal_maxSizePostResults & - + damage_maxSizePostResults & - + homogenization_maxNgrains * ( 1 & ! crystallite size - + 1 + constitutive_source_maxSizePostResults) - allocate(materialpoint_results(materialpoint_sizeResults,discretization_nIP,discretization_nElem)) - - write(6,'(/,a)') ' <<<+- homogenization init -+>>>' + write(6,'(/,a)') ' <<<+- homogenization init -+>>>'; flush(6) if (iand(debug_level(debug_homogenization), debug_levelBasic) /= 0) then write(6,'(a32,1x,7(i8,1x))') 'materialpoint_dPdF: ', shape(materialpoint_dPdF) @@ -581,52 +476,6 @@ subroutine materialpoint_stressAndItsTangent(updateJaco,dt) end subroutine materialpoint_stressAndItsTangent -!-------------------------------------------------------------------------------------------------- -!> @brief parallelized calculation of result array at material points -!-------------------------------------------------------------------------------------------------- -subroutine materialpoint_postResults - - integer :: & - thePos, & - theSize, & - myNgrains, & - g, & !< grain number - i, & !< integration point number - e !< element number - - !$OMP PARALLEL DO PRIVATE(myNgrains,thePos,theSize) - elementLooping: do e = FEsolving_execElem(1),FEsolving_execElem(2) - myNgrains = homogenization_Ngrains(material_homogenizationAt(e)) - IpLooping: do i = FEsolving_execIP(1,e),FEsolving_execIP(2,e) - thePos = 0 - - theSize = thermalState (material_homogenizationAt(e))%sizePostResults & - + damageState (material_homogenizationAt(e))%sizePostResults - materialpoint_results(thePos+1,i,e) = real(theSize,pReal) ! tell size of homogenization results - thePos = thePos + 1 - - if (theSize > 0) then ! any homogenization results to mention? - materialpoint_results(thePos+1:thePos+theSize,i,e) = postResults(i,e) - thePos = thePos + theSize - endif - - materialpoint_results(thePos+1,i,e) = real(myNgrains,pReal) ! tell number of grains at materialpoint - thePos = thePos + 1 - - grainLooping :do g = 1,myNgrains - theSize = 1 + & - 1 + plasticState (material_phaseAt(g,e))%sizePostResults + & - sum(sourceState(material_phaseAt(g,e))%p(:)%sizePostResults) - materialpoint_results(thePos+1:thePos+theSize,i,e) = crystallite_postResults(g,i,e) ! 
tell crystallite results - thePos = thePos + theSize - enddo grainLooping - enddo IpLooping - enddo elementLooping - !$OMP END PARALLEL DO - -end subroutine materialpoint_postResults - - !-------------------------------------------------------------------------------------------------- !> @brief partition material point def grad onto constituents !-------------------------------------------------------------------------------------------------- @@ -738,90 +587,58 @@ subroutine averageStressAndItsTangent(ip,el) end subroutine averageStressAndItsTangent -!-------------------------------------------------------------------------------------------------- -!> @brief return array of homogenization results for post file inclusion. call only, -!> if homogenization_sizePostResults(i,e) > 0 !! -!-------------------------------------------------------------------------------------------------- -function postResults(ip,el) - - integer, intent(in) :: & - ip, & !< integration point - el !< element number - real(pReal), dimension( thermalState (material_homogenizationAt(el))%sizePostResults & - + damageState (material_homogenizationAt(el))%sizePostResults) :: & - postResults - integer :: & - startPos, endPos ,& - homog - - - postResults = 0.0_pReal - startPos = 1 - endPos = thermalState(material_homogenizationAt(el))%sizePostResults - chosenThermal: select case (thermal_type(material_homogenizationAt(el))) - - case (THERMAL_adiabatic_ID) chosenThermal - homog = material_homogenizationAt(el) - postResults(startPos:endPos) = & - thermal_adiabatic_postResults(homog,thermal_typeInstance(homog),thermalMapping(homog)%p(ip,el)) - case (THERMAL_conduction_ID) chosenThermal - homog = material_homogenizationAt(el) - postResults(startPos:endPos) = & - thermal_conduction_postResults(homog,thermal_typeInstance(homog),thermalMapping(homog)%p(ip,el)) - - end select chosenThermal - - startPos = endPos + 1 - endPos = endPos + damageState(material_homogenizationAt(el))%sizePostResults - chosenDamage: select case (damage_type(material_homogenizationAt(el))) - - case (DAMAGE_local_ID) chosenDamage - postResults(startPos:endPos) = damage_local_postResults(ip, el) - case (DAMAGE_nonlocal_ID) chosenDamage - postResults(startPos:endPos) = damage_nonlocal_postResults(ip, el) - - end select chosenDamage - -end function postResults - - !-------------------------------------------------------------------------------------------------- !> @brief writes homogenization results to HDF5 output file !-------------------------------------------------------------------------------------------------- subroutine homogenization_results -#if defined(PETSc) || defined(DAMASK_HDF5) use material, only: & material_homogenization_type => homogenization_type integer :: p - character(len=256) :: group + character(len=pStringLen) :: group_base,group !real(pReal), dimension(:,:,:), allocatable :: temp do p=1,size(config_name_homogenization) - group = trim('current/materialpoint')//'/'//trim(config_name_homogenization(p)) + group_base = 'current/materialpoint/'//trim(config_name_homogenization(p)) + call results_closeGroup(results_addGroup(group_base)) + + group = trim(group_base)//'/generic' call results_closeGroup(results_addGroup(group)) - - group = trim(group)//'/mech' - - call results_closeGroup(results_addGroup(group)) - select case(material_homogenization_type(p)) - case(HOMOGENIZATION_rgc_ID) - call mech_RGC_results(homogenization_typeInstance(p),group) - end select - - group = 
trim('current/materialpoint')//'/'//trim(config_name_homogenization(p))//'/generic' - call results_closeGroup(results_addGroup(group)) - !temp = reshape(materialpoint_F,[3,3,discretization_nIP*discretization_nElem]) !call results_writeDataset(group,temp,'F',& ! 'deformation gradient','1') !temp = reshape(materialpoint_P,[3,3,discretization_nIP*discretization_nElem]) !call results_writeDataset(group,temp,'P',& ! '1st Piola-Kirchoff stress','Pa') + + group = trim(group_base)//'/mech' + call results_closeGroup(results_addGroup(group)) + select case(material_homogenization_type(p)) + case(HOMOGENIZATION_rgc_ID) + call mech_RGC_results(homogenization_typeInstance(p),group) + end select + + group = trim(group_base)//'/damage' + call results_closeGroup(results_addGroup(group)) + select case(damage_type(p)) + case(DAMAGE_LOCAL_ID) + call damage_local_results(p,group) + case(DAMAGE_NONLOCAL_ID) + call damage_nonlocal_results(p,group) + end select + + group = trim(group_base)//'/thermal' + call results_closeGroup(results_addGroup(group)) + select case(thermal_type(p)) + case(THERMAL_ADIABATIC_ID) + call thermal_adiabatic_results(p,group) + case(THERMAL_CONDUCTION_ID) + call thermal_conduction_results(p,group) + end select + + enddo - enddo -#endif end subroutine homogenization_results end module homogenization diff --git a/src/homogenization_mech_RGC.f90 b/src/homogenization_mech_RGC.f90 index 61a1997cd..c493c4190 100644 --- a/src/homogenization_mech_RGC.f90 +++ b/src/homogenization_mech_RGC.f90 @@ -74,12 +74,10 @@ module subroutine mech_RGC_init NofMyHomog, & sizeState, nIntFaceTot - character(len=65536), dimension(0), parameter :: emptyStringArray = [character(len=65536)::] - integer(kind(undefined_ID)) :: & outputID - character(len=65536), dimension(:), allocatable :: & + character(len=pStringLen), dimension(:), allocatable :: & outputs write(6,'(/,a)') ' <<<+- homogenization_'//HOMOGENIZATION_RGC_label//' init -+>>>' @@ -928,7 +926,6 @@ end subroutine mech_RGC_averageStressAndItsTangent !> @brief writes results to HDF5 output file !-------------------------------------------------------------------------------------------------- module subroutine mech_RGC_results(instance,group) -#if defined(PETSc) || defined(DAMASK_HDF5) integer, intent(in) :: instance character(len=*), intent(in) :: group @@ -962,11 +959,6 @@ module subroutine mech_RGC_results(instance,group) enddo outputsLoop end associate -#else - integer, intent(in) :: instance - character(len=*), intent(in) :: group -#endif - end subroutine mech_RGC_results diff --git a/src/homogenization_mech_isostrain.f90 b/src/homogenization_mech_isostrain.f90 index cdc078925..9345d1eda 100644 --- a/src/homogenization_mech_isostrain.f90 +++ b/src/homogenization_mech_isostrain.f90 @@ -33,7 +33,7 @@ module subroutine mech_isostrain_init Ninstance, & h, & NofMyHomog - character(len=65536) :: & + character(len=pStringLen) :: & tag = '' write(6,'(/,a)') ' <<<+- homogenization_'//HOMOGENIZATION_ISOSTRAIN_label//' init -+>>>' diff --git a/src/kinematics_cleavage_opening.f90 b/src/kinematics_cleavage_opening.f90 index eec8a1986..6b060a8d9 100644 --- a/src/kinematics_cleavage_opening.f90 +++ b/src/kinematics_cleavage_opening.f90 @@ -65,7 +65,7 @@ subroutine kinematics_cleavage_opening_init integer :: maxNinstance,p,instance - write(6,'(/,a)') ' <<<+- kinematics_'//KINEMATICS_cleavage_opening_LABEL//' init -+>>>' + write(6,'(/,a)') ' <<<+- kinematics_'//KINEMATICS_cleavage_opening_LABEL//' init -+>>>'; flush(6) maxNinstance = count(phase_kinematics == 
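homogenization_results now nests one subgroup per physics under each material point group. The resulting HDF5 path layout, sketched with a placeholder homogenization name:

program results_group_demo
  implicit none
  character(len=*), parameter :: homogName = 'SX'        ! placeholder for config_name_homogenization(p)
  character(len=7), parameter :: physics(4) = ['generic','mech   ','damage ','thermal']
  character(len=:), allocatable :: group_base
  integer :: i

  group_base = 'current/materialpoint/'//homogName
  ! one subgroup per physics, as created by homogenization_results
  do i = 1, size(physics)
    write(6,'(a)') group_base//'/'//trim(physics(i))
  end do
end program results_group_demo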
KINEMATICS_cleavage_opening_ID) if (maxNinstance == 0) return diff --git a/src/kinematics_slipplane_opening.f90 b/src/kinematics_slipplane_opening.f90 index c0f198985..2c94448bd 100644 --- a/src/kinematics_slipplane_opening.f90 +++ b/src/kinematics_slipplane_opening.f90 @@ -51,7 +51,7 @@ subroutine kinematics_slipplane_opening_init integer :: maxNinstance,p,instance - write(6,'(/,a)') ' <<<+- kinematics_'//KINEMATICS_slipplane_opening_LABEL//' init -+>>>' + write(6,'(/,a)') ' <<<+- kinematics_'//KINEMATICS_slipplane_opening_LABEL//' init -+>>>'; flush(6) maxNinstance = count(phase_kinematics == KINEMATICS_slipplane_opening_ID) if (maxNinstance == 0) return @@ -134,51 +134,35 @@ subroutine kinematics_slipplane_opening_LiAndItsTangent(Ld, dLd_dTstar, S, ipc, dLd_dTstar = 0.0_pReal do i = 1, prm%totalNslip - projection_d = math_outer(prm%slip_direction(1:3,i),prm%slip_normal(1:3,i)) + projection_d = math_outer(prm%slip_direction(1:3,i), prm%slip_normal(1:3,i)) projection_t = math_outer(prm%slip_transverse(1:3,i),prm%slip_normal(1:3,i)) - projection_n = math_outer(prm%slip_normal(1:3,i),prm%slip_normal(1:3,i)) + projection_n = math_outer(prm%slip_normal(1:3,i), prm%slip_normal(1:3,i)) traction_d = math_mul33xx33(S,projection_d) traction_t = math_mul33xx33(S,projection_t) traction_n = math_mul33xx33(S,projection_n) - traction_crit = prm%critLoad(i)* damage(homog)%p(damageOffset) ! degrading critical load carrying capacity by damage + traction_crit = prm%critLoad(i)* damage(homog)%p(damageOffset) ! degrading critical load carrying capacity by damage - udotd = sign(1.0_pReal,traction_d)* & - prm%sdot0* & - (abs(traction_d)/traction_crit - & - abs(traction_d)/prm%critLoad(i))**prm%n - if (abs(udotd) > tol_math_check) then - Ld = Ld + udotd*projection_d - dudotd_dt = udotd*prm%n/traction_d - forall (k=1:3,l=1:3,m=1:3,n=1:3) & - dLd_dTstar(k,l,m,n) = dLd_dTstar(k,l,m,n) + & - dudotd_dt*projection_d(k,l)*projection_d(m,n) - endif - - udott = sign(1.0_pReal,traction_t)* & - prm%sdot0* & - (abs(traction_t)/traction_crit - & - abs(traction_t)/prm%critLoad(i))**prm%n - if (abs(udott) > tol_math_check) then - Ld = Ld + udott*projection_t - dudott_dt = udott*prm%n/traction_t - forall (k=1:3,l=1:3,m=1:3,n=1:3) & - dLd_dTstar(k,l,m,n) = dLd_dTstar(k,l,m,n) + & - dudott_dt*projection_t(k,l)*projection_t(m,n) - endif + udotd = sign(1.0_pReal,traction_d)* prm%sdot0* ( abs(traction_d)/traction_crit & + - abs(traction_d)/prm%critLoad(i))**prm%n + udott = sign(1.0_pReal,traction_t)* prm%sdot0* ( abs(traction_t)/traction_crit & + - abs(traction_t)/prm%critLoad(i))**prm%n + udotn = prm%sdot0* ( max(0.0_pReal,traction_n)/traction_crit & + - max(0.0_pReal,traction_n)/prm%critLoad(i))**prm%n - udotn = & - prm%sdot0* & - (max(0.0_pReal,traction_n)/traction_crit - & - max(0.0_pReal,traction_n)/prm%critLoad(i))**prm%n - if (abs(udotn) > tol_math_check) then - Ld = Ld + udotn*projection_n - dudotn_dt = udotn*prm%n/traction_n - forall (k=1:3,l=1:3,m=1:3,n=1:3) & - dLd_dTstar(k,l,m,n) = dLd_dTstar(k,l,m,n) + & - dudotn_dt*projection_n(k,l)*projection_n(m,n) - endif + dudotd_dt = udotd*prm%n/traction_d + dudott_dt = udott*prm%n/traction_t + dudotn_dt = udotn*prm%n/traction_n + + forall (k=1:3,l=1:3,m=1:3,n=1:3) & + dLd_dTstar(k,l,m,n) = dLd_dTstar(k,l,m,n) + dudotd_dt*projection_d(k,l)*projection_d(m,n) & + + dudott_dt*projection_t(k,l)*projection_t(m,n) & + + dudotn_dt*projection_n(k,l)*projection_n(m,n) + + Ld = Ld + udotd*projection_d & + + udott*projection_t & + + udotn*projection_n enddo end associate diff --git 
a/src/kinematics_thermal_expansion.f90 b/src/kinematics_thermal_expansion.f90 index 814d604ed..7f7994959 100644 --- a/src/kinematics_thermal_expansion.f90 +++ b/src/kinematics_thermal_expansion.f90 @@ -42,7 +42,7 @@ subroutine kinematics_thermal_expansion_init real(pReal), dimension(:), allocatable :: & temp - write(6,'(/,a)') ' <<<+- kinematics_'//KINEMATICS_thermal_expansion_LABEL//' init -+>>>' + write(6,'(/,a)') ' <<<+- kinematics_'//KINEMATICS_thermal_expansion_LABEL//' init -+>>>'; flush(6) Ninstance = count(phase_kinematics == KINEMATICS_thermal_expansion_ID) diff --git a/src/lattice.f90 b/src/lattice.f90 index dfeac6ce1..36a682ab3 100644 --- a/src/lattice.f90 +++ b/src/lattice.f90 @@ -371,7 +371,7 @@ module lattice 1,-1, 1, -2,-1, 1, & -1, 1, 1, -1,-2, 1, & 1, 1, 1, 1,-2, 1 & - ],pReal),[ 3 + 3,LATTICE_BCT_NSLIP]) !< slip systems for bct sorted by Bieler + ],pReal),shape(LATTICE_BCT_SYSTEMSLIP)) !< slip systems for bct sorted by Bieler !-------------------------------------------------------------------------------------------------- ! isotropic @@ -387,7 +387,7 @@ module lattice 0, 1, 0, 1, 0, 0, & 0, 0, 1, 0, 1, 0, & 1, 0, 0, 0, 0, 1 & - ],pReal),[ 3 + 3,LATTICE_ISO_NCLEAVAGE]) + ],pReal),shape(LATTICE_ISO_SYSTEMCLEAVAGE)) !-------------------------------------------------------------------------------------------------- @@ -404,7 +404,7 @@ module lattice 0, 1, 0, 1, 0, 0, & 0, 0, 1, 0, 1, 0, & 1, 0, 0, 0, 0, 1 & - ],pReal),[ 3 + 3,LATTICE_ORT_NCLEAVAGE]) + ],pReal),shape(LATTICE_ORT_SYSTEMCLEAVAGE)) @@ -487,18 +487,18 @@ module lattice lattice_labels_twin contains + !-------------------------------------------------------------------------------------------------- !> @brief Module initialization !-------------------------------------------------------------------------------------------------- subroutine lattice_init - integer :: Nphases - character(len=65536) :: & + integer :: Nphases, p + character(len=pStringLen) :: & tag = '' - integer :: i,p + real(pReal) :: CoverA real(pReal), dimension(:), allocatable :: & - temp, & - CoverA !< c/a ratio for low symmetry type lattice + temp write(6,'(/,a)') ' <<<+- lattice init -+>>>' @@ -516,15 +516,13 @@ subroutine lattice_init allocate(lattice_specificHeat ( Nphases), source=0.0_pReal) allocate(lattice_referenceTemperature ( Nphases), source=300.0_pReal) - allocate(lattice_mu(Nphases), source=0.0_pReal) - allocate(lattice_nu(Nphases), source=0.0_pReal) + allocate(lattice_mu(Nphases), source=0.0_pReal) + allocate(lattice_nu(Nphases), source=0.0_pReal) allocate(lattice_Scleavage(3,3,3,lattice_maxNcleavage,Nphases),source=0.0_pReal) allocate(lattice_NcleavageSystem(lattice_maxNcleavageFamily,Nphases),source=0) - allocate(CoverA(Nphases),source=0.0_pReal) - do p = 1, size(config_phase) tag = config_phase(p)%getString('lattice_structure') select case(trim(tag(1:3))) @@ -554,7 +552,7 @@ subroutine lattice_init lattice_C66(6,6,p) = config_phase(p)%getFloat('c66',defaultVal=0.0_pReal) - CoverA(p) = config_phase(p)%getFloat('c/a',defaultVal=0.0_pReal) + CoverA = config_phase(p)%getFloat('c/a',defaultVal=0.0_pReal) lattice_thermalConductivity33(1,1,p) = config_phase(p)%getFloat('thermal_conductivity11',defaultVal=0.0_pReal) lattice_thermalConductivity33(2,2,p) = config_phase(p)%getFloat('thermal_conductivity22',defaultVal=0.0_pReal) @@ -574,14 +572,12 @@ subroutine lattice_init lattice_DamageDiffusion33(2,2,p) = config_phase(p)%getFloat( 'damage_diffusion22',defaultVal=0.0_pReal) lattice_DamageDiffusion33(3,3,p) = config_phase(p)%getFloat( 
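The reorganized kinematics_slipplane_opening evaluates the opening rates unconditionally before accumulating Ld and its tangent; the power law itself is unchanged, with the critical load degraded by damage. A standalone evaluation of the slip-direction rate with made-up numbers:

program opening_rate_demo
  implicit none
  integer, parameter :: pReal = selected_real_kind(15)
  real(pReal) :: traction_d, traction_crit, critLoad, sdot0, n, udotd

  sdot0         = 1.0e-4_pReal            ! reference opening rate (assumed value)
  n             = 5.0_pReal               ! rate sensitivity exponent (assumed value)
  critLoad      = 1.0e8_pReal             ! undamaged critical load
  traction_crit = 0.5_pReal*critLoad      ! critical load degraded by a damage value of 0.5
  traction_d    = 6.0e7_pReal             ! resolved traction on the slip direction

  ! slip-direction opening rate; the normal component uses max(0,traction_n) instead,
  ! so only tensile normal tractions contribute
  udotd = sign(1.0_pReal,traction_d)*sdot0*( abs(traction_d)/traction_crit &
                                           - abs(traction_d)/critLoad )**n
  write(6,'(a,es12.5)') 'udotd = ', udotd
end program opening_rate_demo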
'damage_diffusion33',defaultVal=0.0_pReal) lattice_DamageMobility(p) = config_phase(p)%getFloat( 'damage_mobility',defaultVal=0.0_pReal) - enddo - do i = 1,Nphases - if ((CoverA(i) < 1.0_pReal .or. CoverA(i) > 2.0_pReal) & - .and. lattice_structure(i) == LATTICE_hex_ID) call IO_error(131,el=i) ! checking physical significance of c/a - if ((CoverA(i) > 2.0_pReal) & - .and. lattice_structure(i) == LATTICE_bct_ID) call IO_error(131,el=i) ! checking physical significance of c/a - call lattice_initializeStructure(i, CoverA(i)) + if ((CoverA < 1.0_pReal .or. CoverA > 2.0_pReal) & + .and. lattice_structure(p) == LATTICE_hex_ID) call IO_error(131,el=p) ! checking physical significance of c/a + if ((CoverA > 2.0_pReal) & + .and. lattice_structure(p) == LATTICE_bct_ID) call IO_error(131,el=p) ! checking physical significance of c/a + call lattice_initializeStructure(p, CoverA) enddo end subroutine lattice_init @@ -1956,7 +1952,7 @@ function lattice_labels_slip(Nslip,structure) result(labels) if (any(Nslip < 0)) & call IO_error(144,ext_msg='Nslip '//trim(structure)) - labels = getLabels(Nslip,NslipMax,slipSystems,structure) + labels = getLabels(Nslip,NslipMax,slipSystems) end function lattice_labels_slip @@ -1997,7 +1993,7 @@ function lattice_labels_twin(Ntwin,structure) result(labels) if (any(Ntwin < 0)) & call IO_error(144,ext_msg='Ntwin '//trim(structure)) - labels = getLabels(Ntwin,NtwinMax,twinSystems,structure) + labels = getLabels(Ntwin,NtwinMax,twinSystems) end function lattice_labels_twin @@ -2120,8 +2116,8 @@ end function buildInteraction function buildCoordinateSystem(active,potential,system,structure,cOverA) integer, dimension(:), intent(in) :: & - active, & - potential + active, & !< # of active systems per family + potential !< # of potential systems per family real(pReal), dimension(:,:), intent(in) :: & system character(len=*), intent(in) :: & @@ -2312,14 +2308,13 @@ end subroutine buildTransformationSystem !-------------------------------------------------------------------------------------------------- !> @brief select active systems as strings !-------------------------------------------------------------------------------------------------- -function getlabels(active,potential,system,structure) result(labels) +function getlabels(active,potential,system) result(labels) - integer, dimension(:), intent(in) :: & - active, & - potential - real(pReal), dimension(:,:), intent(in) :: & + integer, dimension(:), intent(in) :: & + active, & !< # of active systems per family + potential !< # of potential systems per family + real(pReal), dimension(:,:), intent(in) :: & system - character(len=*), intent(in) :: structure !< lattice structure character(len=:), dimension(:), allocatable :: labels character(len=:), allocatable :: label @@ -2341,15 +2336,16 @@ function getlabels(active,potential,system,structure) result(labels) p = sum(potential(1:f-1))+s i = 1 - label(i:i) = merge('[','<',structure(1:3) /= 'bct') + label(i:i) = '[' direction: do j = 1, size(system,1)/2 write(label(i+1:i+2),"(I2.1)") int(system(j,p)) label(i+3:i+3) = ' ' i = i + 3 enddo direction label(i:i) = ']' + i = i +1 - label(i:i) = merge('(','{',structure(1:3) /= 'bct') + label(i:i) = '(' normal: do j = size(system,1)/2+1, size(system,1) write(label(i+1:i+2),"(I2.1)") int(system(j,p)) label(i+3:i+3) = ' ' diff --git a/src/list.f90 b/src/list.f90 index 79eafc964..689227545 100644 --- a/src/list.f90 +++ b/src/list.f90 @@ -261,7 +261,7 @@ end function getInt !! error unless default is given. 
If raw is true, the the complete string is returned, otherwise !! the individual chunks are returned !-------------------------------------------------------------------------------------------------- -character(len=65536) function getString(this,key,defaultVal,raw) +character(len=pStringLen) function getString(this,key,defaultVal,raw) class(tPartitionedStringList), target, intent(in) :: this character(len=*), intent(in) :: key @@ -400,13 +400,13 @@ end function getInts !-------------------------------------------------------------------------------------------------- function getStrings(this,key,defaultVal,raw) - character(len=65536),dimension(:), allocatable :: getStrings + character(len=pStringLen),dimension(:), allocatable :: getStrings class(tPartitionedStringList),target, intent(in) :: this character(len=*), intent(in) :: key character(len=*), dimension(:), intent(in), optional :: defaultVal logical, intent(in), optional :: raw type(tPartitionedStringList), pointer :: item - character(len=65536) :: str + character(len=pStringLen) :: str integer :: i logical :: found, & whole, & diff --git a/src/material.f90 b/src/material.f90 index 2f70fe97b..a4494ed6e 100644 --- a/src/material.f90 +++ b/src/material.f90 @@ -354,12 +354,10 @@ subroutine material_init call config_deallocate('material.config/microstructure') call config_deallocate('material.config/texture') -#if defined(PETSc) || defined(DAMASK_HDF5) call results_openJobFile call results_mapping_constituent(material_phaseAt,material_phaseMemberAt,config_name_phase) call results_mapping_materialpoint(material_homogenizationAt,material_homogenizationMemberAt,config_name_homogenization) call results_closeJobFile -#endif !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! @@ -393,8 +391,8 @@ end subroutine material_init !-------------------------------------------------------------------------------------------------- subroutine material_parseHomogenization - integer :: h - character(len=65536) :: tag + integer :: h + character(len=pStringLen) :: tag allocate(homogenization_type(size(config_homogenization)), source=HOMOGENIZATION_undefined_ID) allocate(thermal_type(size(config_homogenization)), source=THERMAL_isothermal_ID) @@ -484,11 +482,11 @@ end subroutine material_parseHomogenization !-------------------------------------------------------------------------------------------------- subroutine material_parseMicrostructure - character(len=65536), dimension(:), allocatable :: & + character(len=pStringLen), dimension(:), allocatable :: & strings integer, allocatable, dimension(:) :: chunkPos integer :: e, m, c, i - character(len=65536) :: & + character(len=pStringLen) :: & tag allocate(microstructure_Nconstituents(size(config_microstructure)), source=0) @@ -542,7 +540,7 @@ end subroutine material_parseMicrostructure subroutine material_parsePhase integer :: sourceCtr, kinematicsCtr, stiffDegradationCtr, p - character(len=65536), dimension(:), allocatable :: str + character(len=pStringLen), dimension(:), allocatable :: str allocate(phase_elasticity(size(config_phase)),source=ELASTICITY_undefined_ID) @@ -596,9 +594,9 @@ subroutine material_parsePhase #if defined(__GFORTRAN__) || defined(__PGI) str = ['GfortranBug86277'] str = config_phase(p)%getStrings('(source)',defaultVal=str) - if (str(1) == 'GfortranBug86277') str = [character(len=65536)::] + if (str(1) == 'GfortranBug86277') str = [character(len=pStringLen)::] #else - str = config_phase(p)%getStrings('(source)',defaultVal=[character(len=65536)::]) + str = 
config_phase(p)%getStrings('(source)',defaultVal=[character(len=pStringLen)::]) #endif do sourceCtr = 1, size(str) select case (trim(str(sourceCtr))) @@ -620,9 +618,9 @@ subroutine material_parsePhase #if defined(__GFORTRAN__) || defined(__PGI) str = ['GfortranBug86277'] str = config_phase(p)%getStrings('(kinematics)',defaultVal=str) - if (str(1) == 'GfortranBug86277') str = [character(len=65536)::] + if (str(1) == 'GfortranBug86277') str = [character(len=pStringLen)::] #else - str = config_phase(p)%getStrings('(kinematics)',defaultVal=[character(len=65536)::]) + str = config_phase(p)%getStrings('(kinematics)',defaultVal=[character(len=pStringLen)::]) #endif do kinematicsCtr = 1, size(str) select case (trim(str(kinematicsCtr))) @@ -637,9 +635,9 @@ subroutine material_parsePhase #if defined(__GFORTRAN__) || defined(__PGI) str = ['GfortranBug86277'] str = config_phase(p)%getStrings('(stiffness_degradation)',defaultVal=str) - if (str(1) == 'GfortranBug86277') str = [character(len=65536)::] + if (str(1) == 'GfortranBug86277') str = [character(len=pStringLen)::] #else - str = config_phase(p)%getStrings('(stiffness_degradation)',defaultVal=[character(len=65536)::]) + str = config_phase(p)%getStrings('(stiffness_degradation)',defaultVal=[character(len=pStringLen)::]) #endif do stiffDegradationCtr = 1, size(str) select case (trim(str(stiffDegradationCtr))) @@ -665,8 +663,8 @@ end subroutine material_parsePhase !-------------------------------------------------------------------------------------------------- subroutine material_parseTexture - integer :: j, t - character(len=65536), dimension(:), allocatable :: strings ! Values for given key in material config + integer :: j,t + character(len=pStringLen), dimension(:), allocatable :: strings ! Values for given key in material config integer, dimension(:), allocatable :: chunkPos real(pReal), dimension(3,3) :: transformation ! maps texture to microstructure coordinate system real(pReal), dimension(3) :: Eulers ! Euler angles in degrees from file @@ -702,29 +700,27 @@ subroutine material_parseTexture do j = 1, 3 ! look for "x", "y", and "z" entries select case (strings(j)) case('x', '+x') - transformation(j,1:3) = [ 1.0_pReal, 0.0_pReal, 0.0_pReal] ! original axis is now +x-axis + transformation(j,1:3) = [ 1.0_pReal, 0.0_pReal, 0.0_pReal] ! original axis is now +x-axis case('-x') - transformation(j,1:3) = [-1.0_pReal, 0.0_pReal, 0.0_pReal] ! original axis is now -x-axis + transformation(j,1:3) = [-1.0_pReal, 0.0_pReal, 0.0_pReal] ! original axis is now -x-axis case('y', '+y') - transformation(j,1:3) = [ 0.0_pReal, 1.0_pReal, 0.0_pReal] ! original axis is now +y-axis + transformation(j,1:3) = [ 0.0_pReal, 1.0_pReal, 0.0_pReal] ! original axis is now +y-axis case('-y') - transformation(j,1:3) = [ 0.0_pReal,-1.0_pReal, 0.0_pReal] ! original axis is now -y-axis + transformation(j,1:3) = [ 0.0_pReal,-1.0_pReal, 0.0_pReal] ! original axis is now -y-axis case('z', '+z') - transformation(j,1:3) = [ 0.0_pReal, 0.0_pReal, 1.0_pReal] ! original axis is now +z-axis + transformation(j,1:3) = [ 0.0_pReal, 0.0_pReal, 1.0_pReal] ! original axis is now +z-axis case('-z') - transformation(j,1:3) = [ 0.0_pReal, 0.0_pReal,-1.0_pReal] ! original axis is now -z-axis + transformation(j,1:3) = [ 0.0_pReal, 0.0_pReal,-1.0_pReal] ! 
original axis is now -z-axis case default call IO_error(157,t) end select enddo - if(dNeq(math_det33(transformation),1.0_pReal)) call IO_error(157,t) call transformation_%fromMatrix(transformation) texture_orientation(t) = texture_orientation(t) * transformation_ endif enddo - end subroutine material_parseTexture @@ -732,26 +728,19 @@ end subroutine material_parseTexture !> @brief allocates the plastic state of a phase !-------------------------------------------------------------------------------------------------- subroutine material_allocatePlasticState(phase,NofMyPhase,& - sizeState,sizeDotState,sizeDeltaState,& - Nslip,Ntwin,Ntrans) + sizeState,sizeDotState,sizeDeltaState) integer, intent(in) :: & phase, & NofMyPhase, & sizeState, & sizeDotState, & - sizeDeltaState, & - Nslip, & - Ntwin, & - Ntrans + sizeDeltaState plasticState(phase)%sizeState = sizeState plasticState(phase)%sizeDotState = sizeDotState plasticState(phase)%sizeDeltaState = sizeDeltaState plasticState(phase)%offsetDeltaState = sizeState-sizeDeltaState ! deltaState occupies latter part of state by definition - plasticState(phase)%Nslip = Nslip - plasticState(phase)%Ntwin = Ntwin - plasticState(phase)%Ntrans= Ntrans allocate(plasticState(phase)%aTolState (sizeState), source=0.0_pReal) allocate(plasticState(phase)%state0 (sizeState,NofMyPhase), source=0.0_pReal) diff --git a/src/math.f90 b/src/math.f90 index 0b06c9186..62c22b6d8 100644 --- a/src/math.f90 +++ b/src/math.f90 @@ -398,22 +398,22 @@ pure function math_exp33(A,n) real(pReal), dimension(3,3), intent(in) :: A real(pReal), dimension(3,3) :: B, math_exp33 real(pReal) :: invFac - integer :: order - - B = math_I3 ! init - invFac = 1.0_pReal ! 0! - math_exp33 = B ! A^0 = eye2 + integer :: n_ if (present(n)) then - order = n + n_ = n else - order = 5 + n_ = 5 endif - - do i = 1, order - invFac = invFac/real(i,pReal) ! invfac = 1/i! + + invFac = 1.0_pReal ! 0! + B = math_I3 + math_exp33 = math_I3 ! A^0 = I + + do i = 1, n_ + invFac = invFac/real(i,pReal) ! invfac = 1/(i!) B = matmul(B,A) - math_exp33 = math_exp33 + invFac*B ! exp = SUM (A^i)/i! + math_exp33 = math_exp33 + invFac*B ! exp = SUM (A^i)/(i!) enddo end function math_exp33 @@ -489,8 +489,8 @@ function math_invSym3333(A) real(pReal), dimension(6*(64+2)) :: work logical :: error external :: & - dgetrf, & - dgetri + dgetrf, & + dgetri temp66 = math_sym3333to66(A) call dgetrf(6,6,temp66,6,ipiv6,ierr) @@ -519,8 +519,8 @@ subroutine math_invert(InvA, error, A) real(pReal), dimension(size(A,1)*(64+2)) :: work integer :: ierr external :: & - dgetrf, & - dgetri + dgetrf, & + dgetri invA = A call dgetrf(size(A,1),size(A,1),invA,size(A,1),ipiv,ierr) @@ -882,16 +882,20 @@ real(pReal) function math_sampleGaussVar(meanvalue, stddev, width) real(pReal), intent(in), optional :: width ! width of considered values as multiples of standard deviation real(pReal), dimension(2) :: rnd ! random numbers real(pReal) :: scatter, & ! normalized scatter around meanvalue - myWidth + width_ if (abs(stddev) < tol_math_check) then math_sampleGaussVar = meanvalue else - myWidth = merge(width,3.0_pReal,present(width)) ! use +-3*sigma as default value for scatter if not given - + if (present(width)) then + width_ = width + else + width_ = 3.0_pReal ! use +-3*sigma as default scatter + endif + do call random_number(rnd) - scatter = myWidth * (2.0_pReal * rnd(1) - 1.0_pReal) + scatter = width_ * (2.0_pReal * rnd(1) - 1.0_pReal) if (rnd(2) <= exp(-0.5_pReal * scatter ** 2.0_pReal)) exit ! 
test if scattered value is drawn enddo diff --git a/src/mesh/DAMASK_FEM.f90 b/src/mesh/DAMASK_FEM.f90 index 3d4c332a0..9b9b95b91 100644 --- a/src/mesh/DAMASK_FEM.f90 +++ b/src/mesh/DAMASK_FEM.f90 @@ -27,7 +27,7 @@ program DAMASK_FEM integer, allocatable, dimension(:) :: chunkPos ! this is longer than needed for geometry parsing integer :: & N_def = 0 !< # of rate of deformation specifiers found in load case file - character(len=65536) :: & + character(len=pStringLen) :: & line !-------------------------------------------------------------------------------------------------- @@ -73,7 +73,7 @@ program DAMASK_FEM !-------------------------------------------------------------------------------------------------- ! init DAMASK (all modules) call CPFEM_initAll - write(6,'(/,a)') ' <<<+- DAMASK_FEM init -+>>>' + write(6,'(/,a)') ' <<<+- DAMASK_FEM init -+>>>'; flush(6) ! reading basic information from load case file and allocate data structure containing load cases call DMGetDimension(geomMesh,dimPlex,ierr); CHKERRA(ierr) !< dimension of mesh (2D or 3D) @@ -296,19 +296,12 @@ program DAMASK_FEM !-------------------------------------------------------------------------------------------------- ! report begin of new step write(6,'(/,a)') ' ###########################################################################' - write(6,'(1x,a,es12.5'//& - ',a,'//IO_intOut(inc)//',a,'//IO_intOut(loadCases(currentLoadCase)%incs)//& - ',a,'//IO_intOut(stepFraction)//',a,'//IO_intOut(subStepFactor**cutBackLevel)//& - ',a,'//IO_intOut(currentLoadCase)//',a,'//IO_intOut(size(loadCases))//')') & + write(6,'(1x,a,es12.5,6(a,i0))')& 'Time', time, & 's: Increment ', inc, '/', loadCases(currentLoadCase)%incs,& '-', stepFraction, '/', subStepFactor**cutBackLevel,& ' of load case ', currentLoadCase,'/',size(loadCases) - write(incInfo,& - '(a,'//IO_intOut(totalIncsCounter)//& - ',a,'//IO_intOut(sum(loadCases%incs))//& - ',a,'//IO_intOut(stepFraction)//& - ',a,'//IO_intOut(subStepFactor**cutBackLevel)//')') & + write(incInfo,'(4(a,i0))') & 'Increment ',totalIncsCounter,'/',sum(loadCases%incs),& '-',stepFraction, '/', subStepFactor**cutBackLevel flush(6) @@ -373,11 +366,9 @@ program DAMASK_FEM cutBackLevel = max(0, cutBackLevel - 1) ! try half number of subincs next inc if (all(solres(:)%converged)) then - write(6,'(/,a,'//IO_intOut(totalIncsCounter)//',a)') & ! report converged inc - ' increment ', totalIncsCounter, ' converged' + write(6,'(/,a,i0,a)') ' increment ', totalIncsCounter, ' converged' else - write(6,'(/,a,'//IO_intOut(totalIncsCounter)//',a)') & ! report non-converged inc - ' increment ', totalIncsCounter, ' NOT converged' + write(6,'(/,a,i0,a)') ' increment ', totalIncsCounter, ' NOT converged' endif; flush(6) if (mod(inc,loadCases(currentLoadCase)%outputFrequency) == 0) then ! 
at output frequency diff --git a/src/mesh_FEM.f90 b/src/mesh_FEM.f90 index 4b4ff18aa..28d09a9f5 100644 --- a/src/mesh_FEM.f90 +++ b/src/mesh_FEM.f90 @@ -20,7 +20,6 @@ module mesh use FEsolving use FEM_Zoo use prec - use mesh_base implicit none private @@ -53,18 +52,6 @@ module mesh PetscInt, dimension(:), allocatable, public, protected :: & mesh_boundaries - - type, public, extends(tMesh) :: tMesh_FEM - - - contains - procedure, pass(self) :: tMesh_FEM_init - generic, public :: init => tMesh_FEM_init - end type tMesh_FEM - - type(tMesh_FEM), public, protected :: theMesh - - public :: & mesh_init, & mesh_FEM_build_ipVolumes, & @@ -72,24 +59,6 @@ module mesh contains -subroutine tMesh_FEM_init(self,dimen,order,nodes) - - integer, intent(in) :: dimen - integer, intent(in) :: order - real(pReal), intent(in), dimension(:,:) :: nodes - class(tMesh_FEM) :: self - - if (dimen == 2) then - if (order == 1) call self%tMesh%init('mesh',1,nodes) - if (order == 2) call self%tMesh%init('mesh',2,nodes) - elseif(dimen == 3) then - if (order == 1) call self%tMesh%init('mesh',6,nodes) - if (order == 2) call self%tMesh%init('mesh',8,nodes) - endif - - end subroutine tMesh_FEM_init - - !-------------------------------------------------------------------------------------------------- !> @brief initializes the mesh by calling all necessary private routines the mesh module @@ -217,8 +186,6 @@ subroutine mesh_init forall (j = 1:mesh_NcpElems) FEsolving_execIP(2,j) = FE_Nips(FE_geomtype(mesh_element(2,j))) ! ...up to own IP count for each element allocate(mesh_node0(3,mesh_Nnodes),source=0.0_pReal) - call theMesh%init(dimplex,integrationOrder,mesh_node0) - call theMesh%setNelems(mesh_NcpElems) call discretization_init(mesh_element(3,:),mesh_element(4,:),& reshape(mesh_ipCoordinates,[3,mesh_maxNips*mesh_NcpElems]), & diff --git a/src/mesh_abaqus.f90 b/src/mesh_abaqus.f90 index 15332b3fb..f4f5113e8 100644 --- a/src/mesh_abaqus.f90 +++ b/src/mesh_abaqus.f90 @@ -7,7 +7,6 @@ !-------------------------------------------------------------------------------------------------- module mesh use prec - use mesh_base use geometry_plastic_nonlocal use discretization use math diff --git a/src/mesh_base.f90 b/src/mesh_base.f90 deleted file mode 100644 index dab7059ee..000000000 --- a/src/mesh_base.f90 +++ /dev/null @@ -1,74 +0,0 @@ -!-------------------------------------------------------------------------------------------------- -!> @author Franz Roters, Max-Planck-Institut für Eisenforschung GmbH -!> @author Philip Eisenlohr, Max-Planck-Institut für Eisenforschung GmbH -!> @author Christoph Koords, Max-Planck-Institut für Eisenforschung GmbH -!> @author Martin Diehl, Max-Planck-Institut für Eisenforschung GmbH -!> @brief Sets up the mesh for the solvers MSC.Marc,FEM, Abaqus and the spectral solver -!-------------------------------------------------------------------------------------------------- -module mesh_base - - use prec - use element - - implicit none - -!--------------------------------------------------------------------------------------------------- -!> Properties of a whole mesh (consisting of one type of elements) -!--------------------------------------------------------------------------------------------------- - type, public :: tMesh - type(tElement) :: & - elem - real(pReal), dimension(:,:), allocatable, public :: & - ipVolume, & !< volume associated with each IP (initially!) 
- node_0, & !< node x,y,z coordinates (initially) - node !< node x,y,z coordinates (deformed) - integer(pInt), dimension(:,:), allocatable, public :: & - cellnodeParent !< cellnode's parent element ID, cellnode's intra-element ID - character(pStringLen) :: type = "n/a" - integer(pInt) :: & - Nnodes, & !< total number of nodes in mesh - Nelems = -1_pInt, & - elemType, & - Ncells, & - nIPneighbors, & - NcellNodes - integer(pInt), dimension(:,:), allocatable, public :: & - connectivity - contains - procedure, pass(self) :: tMesh_base_init - procedure :: setNelems => tMesh_base_setNelems ! not needed once we compute the cells from the connectivity - generic, public :: init => tMesh_base_init - end type tMesh - -contains - -subroutine tMesh_base_init(self,meshType,elemType,nodes) - - class(tMesh) :: self - character(len=*), intent(in) :: meshType - integer(pInt), intent(in) :: elemType - real(pReal), dimension(:,:), intent(in) :: nodes - - write(6,'(/,a)') ' <<<+- mesh_base_init -+>>>' - - write(6,*)' mesh type ',meshType - write(6,*)' # node ',size(nodes,2) - - self%type = meshType - call self%elem%init(elemType) - self%node_0 = nodes - self%nNodes = size(nodes,2) - -end subroutine tMesh_base_init - - -subroutine tMesh_base_setNelems(self,Nelems) - - class(tMesh) :: self - integer(pInt), intent(in) :: Nelems - - self%Nelems = Nelems - -end subroutine tMesh_base_setNelems - -end module mesh_base diff --git a/src/mesh_grid.f90 b/src/mesh_grid.f90 index 2b337f047..a24fee9bc 100644 --- a/src/mesh_grid.f90 +++ b/src/mesh_grid.f90 @@ -27,9 +27,8 @@ module mesh_grid integer, public, protected :: & grid3, & !< (local) grid in 3rd direction grid3Offset !< (local) grid offset in 3rd direction - real(pReal), dimension(3), public, protected :: & - geomSize + geomSize !< (global) physical size real(pReal), public, protected :: & size3, & !< (local) size in 3rd direction size3offset !< (local) size offset in 3rd direction @@ -49,7 +48,8 @@ subroutine mesh_init(ip,el) include 'fftw3-mpi.f03' real(pReal), dimension(3) :: & - mySize !< domain size of this process + mySize, & !< domain size of this process + origin !< (global) distance to origin integer, dimension(3) :: & myGrid !< domain grid of this process @@ -61,9 +61,9 @@ subroutine mesh_init(ip,el) integer(C_INTPTR_T) :: & devNull, z, z_offset - write(6,'(/,a)') ' <<<+- mesh_grid init -+>>>' + write(6,'(/,a)') ' <<<+- mesh_grid init -+>>>'; flush(6) - call readGeom(grid,geomSize,microstructureAt,homogenizationAt) + call readGeom(grid,geomSize,origin,microstructureAt,homogenizationAt) !-------------------------------------------------------------------------------------------------- ! grid solver specific quantities @@ -104,8 +104,9 @@ subroutine mesh_init(ip,el) ! store geometry information for post processing call results_openJobFile call results_closeGroup(results_addGroup('geometry')) - call results_addAttribute('grid',grid,'geometry') - call results_addAttribute('size',geomSize,'geometry') + call results_addAttribute('grid', grid, 'geometry') + call results_addAttribute('size', geomSize,'geometry') + call results_addAttribute('origin',origin, 'geometry') call results_closeJobFile !-------------------------------------------------------------------------------------------------- @@ -129,10 +130,13 @@ end subroutine mesh_init !> @details important variables have an implicit "save" attribute. Therefore, this function is ! supposed to be called only once! 
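! [Editor's sketch, not part of the patch] The readGeom hunk below adds an
! 'origin' keyword to the geometry header. Assuming the usual keyword/value
! layout of *.geom headers (the other keywords are shown only for context), an
! accepted header could read:
!
!   4 header
!   grid    a 16 b 16 c 16
!   size    x 1.0 y 1.0 z 1.0
!   origin  x 0.0 y 0.0 z 0.0
!   homogenization 1
!
! which yields grid = [16,16,16], geomSize = [1.0,1.0,1.0], origin = [0.0,0.0,0.0];
! origin falls back to 0.0_pReal when the keyword is absent.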
!-------------------------------------------------------------------------------------------------- -subroutine readGeom(grid,geomSize,microstructure,homogenization) +subroutine readGeom(grid,geomSize,origin,microstructure,homogenization) - integer, dimension(3), intent(out) :: grid ! grid (for all processes!) - real(pReal), dimension(3), intent(out) :: geomSize ! size (for all processes!) + integer, dimension(3), intent(out) :: & + grid ! grid (for all processes!) + real(pReal), dimension(3), intent(out) :: & + geomSize, & ! size (for all processes!) + origin ! origin (for all processes!) integer, dimension(:), intent(out), allocatable :: & microstructure, & homogenization @@ -169,7 +173,7 @@ subroutine readGeom(grid,geomSize,microstructure,homogenization) !-------------------------------------------------------------------------------------------------- ! get header length endPos = index(rawData,new_line('')) - if(endPos <= index(rawData,'head')) then + if(endPos <= index(rawData,'head')) then ! ToDo: Should be 'header' startPos = len(rawData) call IO_error(error_ID=841, ext_msg='readGeom') else @@ -181,6 +185,7 @@ subroutine readGeom(grid,geomSize,microstructure,homogenization) !-------------------------------------------------------------------------------------------------- ! read and interprete header + origin = 0.0_pReal l = 0 do while (l < headerLength .and. startPos < len(rawData)) endPos = startPos + index(rawData(startPos:),new_line('')) - 1 @@ -221,8 +226,23 @@ subroutine readGeom(grid,geomSize,microstructure,homogenization) enddo endif + case ('origin') + if (chunkPos(1) > 6) then + do j = 2,6,2 + select case (IO_lc(IO_stringValue(line,chunkPos,j))) + case('x') + origin(1) = IO_floatValue(line,chunkPos,j+1) + case('y') + origin(2) = IO_floatValue(line,chunkPos,j+1) + case('z') + origin(3) = IO_floatValue(line,chunkPos,j+1) + end select + enddo + endif + case ('homogenization') if (chunkPos(1) > 1) h = IO_intValue(line,chunkPos,2) + end select enddo @@ -257,13 +277,13 @@ subroutine readGeom(grid,geomSize,microstructure,homogenization) compression: if (IO_lc(IO_stringValue(line,chunkPos,2)) == 'of') then c = IO_intValue(line,chunkPos,1) microstructure(e:e+c-1) = [(IO_intValue(line,chunkPos,3),i = 1,IO_intValue(line,chunkPos,1))] - else if (IO_lc(IO_stringValue(line,chunkPos,2)) == 'to') then compression + else if (IO_lc(IO_stringValue(line,chunkPos,2)) == 'to') then compression c = abs(IO_intValue(line,chunkPos,3) - IO_intValue(line,chunkPos,1)) + 1 o = merge(+1, -1, IO_intValue(line,chunkPos,3) > IO_intValue(line,chunkPos,1)) microstructure(e:e+c-1) = [(i, i = IO_intValue(line,chunkPos,1),IO_intValue(line,chunkPos,3),o)] else compression c = chunkPos(1) - microstructure(e:e+c-1) = [(IO_intValue(line,chunkPos,i+1), i=0, c-1)] + microstructure(e:e+c-1) = [(IO_intValue(line,chunkPos,i+1), i=0, c-1)] endif compression endif noCompression @@ -276,7 +296,7 @@ end subroutine readGeom !--------------------------------------------------------------------------------------------------- -!> @brief Calculate undeformed position of IPs/cell centres (pretend to be an element) +!> @brief Calculate undeformed position of IPs/cell centers (pretend to be an element) !--------------------------------------------------------------------------------------------------- function IPcoordinates0(grid,geomSize,grid3Offset) @@ -347,7 +367,7 @@ pure function cellEdgeNormal(nElems) integer, intent(in) :: nElems - real, dimension(3,6,1,nElems) :: cellEdgeNormal + real(pReal), dimension(3,6,1,nElems) :: 
cellEdgeNormal cellEdgeNormal(1:3,1,1,:) = spread([+1.0_pReal, 0.0_pReal, 0.0_pReal],2,nElems) cellEdgeNormal(1:3,2,1,:) = spread([-1.0_pReal, 0.0_pReal, 0.0_pReal],2,nElems) diff --git a/src/mesh_marc.f90 b/src/mesh_marc.f90 index b550f4229..00fd76326 100644 --- a/src/mesh_marc.f90 +++ b/src/mesh_marc.f90 @@ -9,7 +9,6 @@ module mesh use IO use prec use math - use mesh_base use DAMASK_interface use IO use debug @@ -37,12 +36,6 @@ module mesh mesh_mapFEtoCPelem, & !< [sorted FEid, corresponding CPid] mesh_mapFEtoCPnode !< [sorted FEid, corresponding CPid] -!-------------------------------------------------------------------------------------------------- -! DEPRECATED - real(pReal), dimension(:,:,:), allocatable, public :: & - mesh_ipCoordinates !< IP x,y,z coordinates (after deformation!) -!-------------------------------------------------------------------------------------------------- - public :: & mesh_init, & mesh_FEasCP @@ -97,8 +90,6 @@ subroutine mesh_init(ip,el) calcMode(ip,mesh_FEasCP('elem',el)) = .true. ! first ip,el needs to be already pingponged to "calc" - allocate(mesh_ipCoordinates(3,elem%nIPs,nElems),source=0.0_pReal) ! deprecated - allocate(cellNodeDefinition(elem%nNodes-1)) allocate(connectivity_cell(elem%NcellNodesPerCell,elem%nIPs,nElems)) call buildCells(connectivity_cell,cellNodeDefinition,& @@ -149,7 +140,6 @@ subroutine writeGeometry(elemType, & real(pReal), dimension(:,:), allocatable :: & coordinates_temp -#if defined(DAMASK_HDF5) call results_openJobFile call results_closeGroup(results_addGroup('geometry')) @@ -170,7 +160,6 @@ subroutine writeGeometry(elemType, & 'coordinates of the material points','m') call results_closeJobFile -#endif end subroutine writeGeometry @@ -499,8 +488,7 @@ subroutine inputRead_mapNodes(fileContent) chunkPos = IO_stringPos(fileContent(l)) if( IO_lc(IO_stringValue(fileContent(l),chunkPos,1)) == 'coordinates' ) then do i = 1,size(mesh_mapFEtoCPnode,2) - mesh_mapFEtoCPnode(1,i) = IO_fixedIntValue (fileContent(l+1+i),[0,10],1) - mesh_mapFEtoCPnode(2,i) = i + mesh_mapFEtoCPnode(1:2,i) = [IO_fixedIntValue (fileContent(l+1+i),[0,10],1),i] ! ToDo: use IO_intValue enddo exit endif @@ -531,9 +519,9 @@ subroutine inputRead_elemNodes(nodes, & chunkPos = IO_stringPos(fileContent(l)) if( IO_lc(IO_stringValue(fileContent(l),chunkPos,1)) == 'coordinates' ) then do i=1,nNode - m = mesh_FEasCP('node',IO_fixedIntValue(fileContent(l+1+i),node_ends,1)) + m = mesh_FEasCP('node',IO_fixedIntValue(fileContent(l+1+i),node_ends,1)) !ToDo: use IO_intValue do j = 1,3 - nodes(j,m) = mesh_unitlength * IO_fixedNoEFloatValue(fileContent(l+1+i),node_ends,j+1) + nodes(j,m) = mesh_unitlength * IO_fixedNoEFloatValue(fileContent(l+1+i),node_ends,j+1) !ToDo: use IO_floatValue enddo enddo exit diff --git a/src/prec.f90 b/src/prec.f90 index f0475e9b6..4c4449551 100644 --- a/src/prec.f90 +++ b/src/prec.f90 @@ -42,8 +42,7 @@ module prec sizeState = 0, & !< size of state sizeDotState = 0, & !< size of dot state, i.e. state(1:sizeDot) follows time evolution by dotState rates offsetDeltaState = 0, & !< index offset of delta state - sizeDeltaState = 0, & !< size of delta state, i.e. state(offset+1:offset+sizeDelta) follows time evolution by deltaState increments - sizePostResults = 0 !< size of output data + sizeDeltaState = 0 !< size of delta state, i.e. state(offset+1:offset+sizeDelta) follows time evolution by deltaState increments real(pReal), pointer, dimension(:), contiguous :: & atolState real(pReal), pointer, dimension(:,:), contiguous :: & ! 
a pointer is needed here because we might point to state/doState. However, they will never point to something, but are rather allocated and, hence, contiguous @@ -62,10 +61,6 @@ module prec end type type, extends(tState), public :: tPlasticState - integer :: & - nSlip = 0, & - nTwin = 0, & - nTrans = 0 logical :: & nonlocal = .false. real(pReal), pointer, dimension(:,:) :: & @@ -84,6 +79,13 @@ module prec real(pReal), private, parameter :: PREAL_EPSILON = epsilon(0.0_pReal) !< minimum positive number such that 1.0 + EPSILON /= 1.0. real(pReal), private, parameter :: PREAL_MIN = tiny(0.0_pReal) !< smallest normalized floating point number + integer, dimension(0), parameter, public :: & + emptyIntArray = [integer::] + real(pReal), dimension(0), parameter, public :: & + emptyRealArray = [real(pReal)::] + character(len=pStringLen), dimension(0), parameter, public :: & + emptyStringArray = [character(len=pStringLen)::] + private :: & unitTest diff --git a/src/quaternions.f90 b/src/quaternions.f90 index 8efb985ed..0ca404880 100644 --- a/src/quaternions.f90 +++ b/src/quaternions.f90 @@ -1,37 +1,9 @@ -! ################################################################### -! Copyright (c) 2013-2015, Marc De Graef/Carnegie Mellon University -! Modified 2017-2019, Martin Diehl/Max-Planck-Institut für Eisenforschung GmbH -! All rights reserved. -! -! Redistribution and use in source and binary forms, with or without modification, are -! permitted provided that the following conditions are met: -! -! - Redistributions of source code must retain the above copyright notice, this list -! of conditions and the following disclaimer. -! - Redistributions in binary form must reproduce the above copyright notice, this -! list of conditions and the following disclaimer in the documentation and/or -! other materials provided with the distribution. -! - Neither the names of Marc De Graef, Carnegie Mellon University nor the names -! of its contributors may be used to endorse or promote products derived from -! this software without specific prior written permission. -! -! THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -! AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -! IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -! ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE -! LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -! DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -! SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -! CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -! OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE -! USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -! ################################################################### - !--------------------------------------------------------------------------------------------------- -!> @author Marc De Graef, Carnegie Mellon University !> @author Martin Diehl, Max-Planck-Institut für Eisenforschung GmbH +!> @author Philip Eisenlohr, Michigan State University !> @brief general quaternion math, not limited to unit quaternions -!> @details w is the real part, (x, y, z) are the imaginary parts. +!> @details w is the real part, (x, y, z) are the imaginary parts. 
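! [Editor's note, a sketch not contained in the patch] For q = (w, x, y, z) with
! imaginary part v = (x, y, z), |v| = norm2(v) and |q| = abs(q), the routines
! below implement
!
!   conjg(q)   = ( w, -v )
!   inverse(q) = conjg(q) / |q|**2
!   exp(q)     = exp(w) * ( cos(|v|), v/|v| * sin(|v|) )
!   log(q)     = ( log(|q|), v/|v| * acos(w/|q|) )
!
! with exp and log returning an IEEE signaling NaN for |v| = 0 (see the merge
! guards further down).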
+!> @details https://en.wikipedia.org/wiki/Quaternion !--------------------------------------------------------------------------------------------------- module quaternions use prec @@ -78,15 +50,16 @@ module quaternions procedure, public :: abs__ procedure, public :: dot_product__ - procedure, public :: conjg__ procedure, public :: exp__ procedure, public :: log__ - - procedure, public :: homomorphed => quat_homomorphed - procedure, public :: asArray + procedure, public :: conjg => conjg__ procedure, public :: real => real__ procedure, public :: aimag => aimag__ + procedure, public :: homomorphed + procedure, public :: asArray + procedure, public :: inverse + end type interface assignment (=) @@ -117,6 +90,14 @@ module quaternions interface log module procedure log__ end interface log + + interface real + module procedure real__ + end interface real + + interface aimag + module procedure aimag__ + end interface aimag private :: & unitTest @@ -125,49 +106,46 @@ contains !-------------------------------------------------------------------------------------------------- -!> @brief doing self test +!> @brief do self test !-------------------------------------------------------------------------------------------------- subroutine quaternions_init - write(6,'(/,a)') ' <<<+- quaternions init -+>>>' + write(6,'(/,a)') ' <<<+- quaternions init -+>>>'; flush(6) call unitTest end subroutine quaternions_init !--------------------------------------------------------------------------------------------------- -!> constructor for a quaternion from a 4-vector +!> construct a quaternion from a 4-vector !--------------------------------------------------------------------------------------------------- type(quaternion) pure function init__(array) real(pReal), intent(in), dimension(4) :: array - init__%w=array(1) - init__%x=array(2) - init__%y=array(3) - init__%z=array(4) + init__%w = array(1) + init__%x = array(2) + init__%y = array(3) + init__%z = array(4) end function init__ !--------------------------------------------------------------------------------------------------- -!> assing a quaternion +!> assign a quaternion !--------------------------------------------------------------------------------------------------- elemental pure subroutine assign_quat__(self,other) type(quaternion), intent(out) :: self type(quaternion), intent(in) :: other - self%w = other%w - self%x = other%x - self%y = other%y - self%z = other%z - + self = [other%w,other%x,other%y,other%z] + end subroutine assign_quat__ !--------------------------------------------------------------------------------------------------- -!> assing a 4-vector +!> assign a 4-vector !--------------------------------------------------------------------------------------------------- pure subroutine assign_vec__(self,other) @@ -183,67 +161,57 @@ end subroutine assign_vec__ !--------------------------------------------------------------------------------------------------- -!> addition of two quaternions +!> add a quaternion !--------------------------------------------------------------------------------------------------- type(quaternion) elemental pure function add__(self,other) class(quaternion), intent(in) :: self,other - add__%w = self%w + other%w - add__%x = self%x + other%x - add__%y = self%y + other%y - add__%z = self%z + other%z - + add__ = [ self%w, self%x, self%y ,self%z] & + + [other%w, other%x, other%y,other%z] + end function add__ !--------------------------------------------------------------------------------------------------- -!> 
unary positive operator +!> return (unary positive operator) !--------------------------------------------------------------------------------------------------- type(quaternion) elemental pure function pos__(self) class(quaternion), intent(in) :: self - pos__%w = self%w - pos__%x = self%x - pos__%y = self%y - pos__%z = self%z - + pos__ = self * (+1.0_pReal) + end function pos__ !--------------------------------------------------------------------------------------------------- -!> subtraction of two quaternions +!> subtract a quaternion !--------------------------------------------------------------------------------------------------- type(quaternion) elemental pure function sub__(self,other) class(quaternion), intent(in) :: self,other - sub__%w = self%w - other%w - sub__%x = self%x - other%x - sub__%y = self%y - other%y - sub__%z = self%z - other%z - + sub__ = [ self%w, self%x, self%y ,self%z] & + - [other%w, other%x, other%y,other%z] + end function sub__ !--------------------------------------------------------------------------------------------------- -!> unary positive operator +!> negate (unary negative operator) !--------------------------------------------------------------------------------------------------- type(quaternion) elemental pure function neg__(self) class(quaternion), intent(in) :: self - neg__%w = -self%w - neg__%x = -self%x - neg__%y = -self%y - neg__%z = -self%z - + neg__ = self * (-1.0_pReal) + end function neg__ !--------------------------------------------------------------------------------------------------- -!> multiplication of two quaternions +!> multiply with a quaternion !--------------------------------------------------------------------------------------------------- type(quaternion) elemental pure function mul_quat__(self,other) @@ -258,23 +226,20 @@ end function mul_quat__ !--------------------------------------------------------------------------------------------------- -!> multiplication of quaternions with scalar +!> multiply with a scalar !--------------------------------------------------------------------------------------------------- type(quaternion) elemental pure function mul_scal__(self,scal) class(quaternion), intent(in) :: self - real(pReal), intent(in) :: scal - - mul_scal__%w = self%w*scal - mul_scal__%x = self%x*scal - mul_scal__%y = self%y*scal - mul_scal__%z = self%z*scal + real(pReal), intent(in) :: scal + mul_scal__ = [self%w,self%x,self%y,self%z]*scal + end function mul_scal__ !--------------------------------------------------------------------------------------------------- -!> division of two quaternions +!> divide by a quaternion !--------------------------------------------------------------------------------------------------- type(quaternion) elemental pure function div_quat__(self,other) @@ -286,12 +251,12 @@ end function div_quat__ !--------------------------------------------------------------------------------------------------- -!> divisiont of quaternions by scalar +!> divide by a scalar !--------------------------------------------------------------------------------------------------- type(quaternion) elemental pure function div_scal__(self,scal) class(quaternion), intent(in) :: self - real(pReal), intent(in) :: scal + real(pReal), intent(in) :: scal div_scal__ = [self%w,self%x,self%y,self%z]/scal @@ -299,7 +264,7 @@ end function div_scal__ !--------------------------------------------------------------------------------------------------- -!> equality of two quaternions +!> test equality 
!--------------------------------------------------------------------------------------------------- logical elemental pure function eq__(self,other) @@ -312,7 +277,7 @@ end function eq__ !--------------------------------------------------------------------------------------------------- -!> inequality of two quaternions +!> test inequality !--------------------------------------------------------------------------------------------------- logical elemental pure function neq__(self,other) @@ -324,20 +289,7 @@ end function neq__ !--------------------------------------------------------------------------------------------------- -!> quaternion to the power of a scalar -!--------------------------------------------------------------------------------------------------- -type(quaternion) elemental pure function pow_scal__(self,expon) - - class(quaternion), intent(in) :: self - real(pReal), intent(in) :: expon - - pow_scal__ = exp(log(self)*expon) - -end function pow_scal__ - - -!--------------------------------------------------------------------------------------------------- -!> quaternion to the power of a quaternion +!> raise to the power of a quaternion !--------------------------------------------------------------------------------------------------- type(quaternion) elemental pure function pow_quat__(self,expon) @@ -350,45 +302,60 @@ end function pow_quat__ !--------------------------------------------------------------------------------------------------- -!> exponential of a quaternion -!> ToDo: Lacks any check for invalid operations +!> raise to the power of a scalar +!--------------------------------------------------------------------------------------------------- +type(quaternion) elemental pure function pow_scal__(self,expon) + + class(quaternion), intent(in) :: self + real(pReal), intent(in) :: expon + + pow_scal__ = exp(log(self)*expon) + +end function pow_scal__ + + +!--------------------------------------------------------------------------------------------------- +!> take exponential !--------------------------------------------------------------------------------------------------- type(quaternion) elemental pure function exp__(self) class(quaternion), intent(in) :: self real(pReal) :: absImag - absImag = norm2([self%x, self%y, self%z]) + absImag = norm2(aimag(self)) - exp__ = exp(self%w) * [ cos(absImag), & - self%x/absImag * sin(absImag), & - self%y/absImag * sin(absImag), & - self%z/absImag * sin(absImag)] + exp__ = merge(exp(self%w) * [ cos(absImag), & + self%x/absImag * sin(absImag), & + self%y/absImag * sin(absImag), & + self%z/absImag * sin(absImag)], & + IEEE_value(1.0_pReal,IEEE_SIGNALING_NAN), & + dNeq0(absImag)) end function exp__ !--------------------------------------------------------------------------------------------------- -!> logarithm of a quaternion -!> ToDo: Lacks any check for invalid operations +!> take logarithm !--------------------------------------------------------------------------------------------------- type(quaternion) elemental pure function log__(self) class(quaternion), intent(in) :: self real(pReal) :: absImag - absImag = norm2([self%x, self%y, self%z]) + absImag = norm2(aimag(self)) - log__ = [log(abs(self)), & - self%x/absImag * acos(self%w/abs(self)), & - self%y/absImag * acos(self%w/abs(self)), & - self%z/absImag * acos(self%w/abs(self))] + log__ = merge([log(abs(self)), & + self%x/absImag * acos(self%w/abs(self)), & + self%y/absImag * acos(self%w/abs(self)), & + self%z/absImag * acos(self%w/abs(self))], & + 
IEEE_value(1.0_pReal,IEEE_SIGNALING_NAN), & + dNeq0(absImag)) end function log__ !--------------------------------------------------------------------------------------------------- -!> norm of a quaternion +!> return norm !--------------------------------------------------------------------------------------------------- real(pReal) elemental pure function abs__(a) @@ -400,7 +367,7 @@ end function abs__ !--------------------------------------------------------------------------------------------------- -!> dot product of two quaternions +!> calculate dot product !--------------------------------------------------------------------------------------------------- real(pReal) elemental pure function dot_product__(a,b) @@ -412,31 +379,31 @@ end function dot_product__ !--------------------------------------------------------------------------------------------------- -!> conjugate complex of a quaternion +!> take conjugate complex !--------------------------------------------------------------------------------------------------- type(quaternion) elemental pure function conjg__(a) class(quaternion), intent(in) :: a - conjg__ = quaternion([a%w, -a%x, -a%y, -a%z]) + conjg__ = [a%w, -a%x, -a%y, -a%z] end function conjg__ !--------------------------------------------------------------------------------------------------- -!> homomorphed quaternion of a quaternion +!> homomorph !--------------------------------------------------------------------------------------------------- -type(quaternion) elemental pure function quat_homomorphed(self) +type(quaternion) elemental pure function homomorphed(self) class(quaternion), intent(in) :: self - quat_homomorphed = quaternion(-[self%w,self%x,self%y,self%z]) + homomorphed = - self -end function quat_homomorphed +end function homomorphed !--------------------------------------------------------------------------------------------------- -!> quaternion as plain array +!> return as plain array !--------------------------------------------------------------------------------------------------- pure function asArray(self) @@ -449,7 +416,7 @@ end function asArray !--------------------------------------------------------------------------------------------------- -!> real part of a quaternion +!> real part (scalar) !--------------------------------------------------------------------------------------------------- pure function real__(self) @@ -462,7 +429,7 @@ end function real__ !--------------------------------------------------------------------------------------------------- -!> imaginary part of a quaternion +!> imaginary part (3-vector) !--------------------------------------------------------------------------------------------------- pure function aimag__(self) @@ -474,46 +441,87 @@ pure function aimag__(self) end function aimag__ +!--------------------------------------------------------------------------------------------------- +!> inverse +!--------------------------------------------------------------------------------------------------- +type(quaternion) elemental pure function inverse(self) + + class(quaternion), intent(in) :: self + + inverse = conjg(self)/abs(self)**2.0_pReal + +end function inverse + + !-------------------------------------------------------------------------------------------------- !> @brief check correctness of (some) quaternions functions !-------------------------------------------------------------------------------------------------- subroutine unitTest real(pReal), dimension(4) :: qu - type(quaternion) :: q, q_2 call 
random_number(qu) - q = qu + qu = (qu-0.5_pReal) * 2.0_pReal + q = quaternion(qu) - q_2 = q + q - if(any(dNeq(q_2%asArray(),2.0_pReal*qu))) call IO_error(401,ext_msg='add__') - - q_2 = q - q - if(any(dNeq0(q_2%asArray()))) call IO_error(401,ext_msg='sub__') - - q_2 = q * 5.0_preal - if(any(dNeq(q_2%asArray(),5.0_pReal*qu))) call IO_error(401,ext_msg='mul__') - - q_2 = q / 0.5_preal - if(any(dNeq(q_2%asArray(),2.0_pReal*qu))) call IO_error(401,ext_msg='div__') - - q_2 = q - if(q_2 /= q) call IO_error(401,ext_msg='eq__') + q_2= qu + if(any(dNeq(q%asArray(),q_2%asArray()))) call IO_error(401,ext_msg='assign_vec__') - if(any(dNeq(q%asArray(),qu))) call IO_error(401,ext_msg='eq__') - if(dNeq(q%real(), qu(1))) call IO_error(401,ext_msg='real()') - if(any(dNeq(q%aimag(), qu(2:4)))) call IO_error(401,ext_msg='aimag()') + q_2 = q + q + if(any(dNeq(q_2%asArray(),2.0_pReal*qu))) call IO_error(401,ext_msg='add__') + + q_2 = q - q + if(any(dNeq0(q_2%asArray()))) call IO_error(401,ext_msg='sub__') + + q_2 = q * 5.0_pReal + if(any(dNeq(q_2%asArray(),5.0_pReal*qu))) call IO_error(401,ext_msg='mul__') + + q_2 = q / 0.5_pReal + if(any(dNeq(q_2%asArray(),2.0_pReal*qu))) call IO_error(401,ext_msg='div__') + q_2 = q * 0.3_pReal + if(dNeq0(abs(q)) .and. q_2 == q) call IO_error(401,ext_msg='eq__') + + q_2 = q + if(q_2 /= q) call IO_error(401,ext_msg='neq__') + + if(dNeq(abs(q),norm2(qu))) call IO_error(401,ext_msg='abs__') + if(dNeq(abs(q)**2.0_pReal, real(q*q%conjg()),1.0e-14_pReal)) & + call IO_error(401,ext_msg='abs__/*conjg') + + if(any(dNeq(q%asArray(),qu))) call IO_error(401,ext_msg='eq__') + if(dNeq(q%real(), qu(1))) call IO_error(401,ext_msg='real()') + if(any(dNeq(q%aimag(), qu(2:4)))) call IO_error(401,ext_msg='aimag()') + q_2 = q%homomorphed() - if(q /= q_2* (-1.0_pReal)) call IO_error(401,ext_msg='homomorphed') - if(dNeq(q_2%real(), qu(1)* (-1.0_pReal))) call IO_error(401,ext_msg='homomorphed/real') - if(any(dNeq(q_2%aimag(),qu(2:4)*(-1.0_pReal)))) call IO_error(401,ext_msg='homomorphed/aimag') - + if(q /= q_2* (-1.0_pReal)) call IO_error(401,ext_msg='homomorphed') + if(dNeq(q_2%real(), qu(1)* (-1.0_pReal))) call IO_error(401,ext_msg='homomorphed/real') + if(any(dNeq(q_2%aimag(),qu(2:4)*(-1.0_pReal)))) call IO_error(401,ext_msg='homomorphed/aimag') + q_2 = conjg(q) - if(dNeq(q_2%real(), q%real())) call IO_error(401,ext_msg='conjg/real') - if(any(dNeq(q_2%aimag(),q%aimag()*(-1.0_pReal)))) call IO_error(401,ext_msg='conjg/aimag') - + if(dNeq(abs(q),abs(q_2))) call IO_error(401,ext_msg='conjg/abs') + if(q /= conjg(q_2)) call IO_error(401,ext_msg='conjg/involution') + if(dNeq(q_2%real(), q%real())) call IO_error(401,ext_msg='conjg/real') + if(any(dNeq(q_2%aimag(),q%aimag()*(-1.0_pReal)))) call IO_error(401,ext_msg='conjg/aimag') + + if(abs(q) > 0.0_pReal) then + q_2 = q * q%inverse() + if( dNeq(real(q_2), 1.0_pReal,1.0e-15_pReal)) call IO_error(401,ext_msg='inverse/real') + if(any(dNeq0(aimag(q_2), 1.0e-15_pReal))) call IO_error(401,ext_msg='inverse/aimag') + + q_2 = q/abs(q) + q_2 = conjg(q_2) - inverse(q_2) + if(any(dNeq0(q_2%asArray(),1.0e-15_pReal))) call IO_error(401,ext_msg='inverse/conjg') + endif + +#if !(defined(__GFORTRAN__) && __GNUC__ < 9) + if (norm2(aimag(q)) > 0.0_pReal) then + if (dNeq0(abs(q-exp(log(q))),1.0e-13_pReal)) call IO_error(401,ext_msg='exp/log') + if (dNeq0(abs(q-log(exp(q))),1.0e-13_pReal)) call IO_error(401,ext_msg='log/exp') + endif +#endif + end subroutine unitTest diff --git a/src/quit.f90 b/src/quit.f90 index 146071600..5c421c86a 100644 --- a/src/quit.f90 +++ 
b/src/quit.f90 @@ -2,8 +2,7 @@ !> @author Martin Diehl, Max-Planck-Institut für Eisenforschung GmbH !> @brief quit subroutine !> @details exits the program and reports current time and duration. Exit code 0 signals -!> everything is fine. Exit code 1 signals an error, message according to IO_error. Exit code -!> 2 signals no severe problems, but some increments did not converge +!> everything is fine. Exit code 1 signals an error, message according to IO_error. !-------------------------------------------------------------------------------------------------- subroutine quit(stop_id) #include @@ -11,7 +10,7 @@ subroutine quit(stop_id) #ifdef _OPENMP use MPI #endif - use hdf5 + use HDF5 implicit none integer, intent(in) :: stop_id diff --git a/src/results.f90 b/src/results.f90 index 93355b6c4..ca2a868fb 100644 --- a/src/results.f90 +++ b/src/results.f90 @@ -16,7 +16,6 @@ module results implicit none private -#if defined(PETSc) || defined(DAMASK_HDF5) integer(HID_T) :: resultsFile interface results_writeDataset @@ -48,6 +47,7 @@ module results results_openJobFile, & results_closeJobFile, & results_addIncrement, & + results_finalizeIncrement, & results_addGroup, & results_openGroup, & results_closeGroup, & @@ -69,15 +69,14 @@ subroutine results_init write(6,'(a)') ' https://doi.org/10.1007/s40192-017-0084-5' resultsFile = HDF5_openFile(trim(getSolverJobName())//'.hdf5','w',.true.) - call HDF5_addAttribute(resultsFile,'DADF5-version',0.3_pReal) - call HDF5_addAttribute(resultsFile,'DADF5-major',0) - call HDF5_addAttribute(resultsFile,'DADF5-minor',3) - call HDF5_addAttribute(resultsFile,'DAMASK',DAMASKVERSION) + call results_addAttribute('DADF5_version_major',0) + call results_addAttribute('DADF5_version_minor',5) + call results_addAttribute('DAMASK_version',DAMASKVERSION) call get_command(commandLine) - call HDF5_addAttribute(resultsFile,'call',trim(commandLine)) - call HDF5_closeGroup(results_addGroup('mapping')) - call HDF5_closeGroup(results_addGroup('mapping/cellResults')) - call HDF5_closeFile(resultsFile) + call results_addAttribute('call',trim(commandLine)) + call results_closeGroup(results_addGroup('mapping')) + call results_closeGroup(results_addGroup('mapping/cellResults')) + call results_closeJobFile end subroutine results_init @@ -111,17 +110,27 @@ subroutine results_addIncrement(inc,time) real(pReal), intent(in) :: time character(len=pStringLen) :: incChar - write(incChar,'(i5.5)') inc ! 
allow up to 99999 increments - call HDF5_closeGroup(results_addGroup(trim('inc'//trim(adjustl(incChar))))) + write(incChar,'(i10)') inc + call results_closeGroup(results_addGroup(trim('inc'//trim(adjustl(incChar))))) call results_setLink(trim('inc'//trim(adjustl(incChar))),'current') - call HDF5_addAttribute(resultsFile,'time/s',time,trim('inc'//trim(adjustl(incChar)))) - - call HDF5_closeGroup(results_addGroup('current/constituent')) - call HDF5_closeGroup(results_addGroup('current/materialpoint')) + call results_addAttribute('time/s',time,trim('inc'//trim(adjustl(incChar)))) + call results_closeGroup(results_addGroup('current/constituent')) + call results_closeGroup(results_addGroup('current/materialpoint')) end subroutine results_addIncrement +!-------------------------------------------------------------------------------------------------- +!> @brief finalize increment +!> @details remove soft link +!-------------------------------------------------------------------------------------------------- +subroutine results_finalizeIncrement + + call results_removeLink('current') + +end subroutine results_finalizeIncrement + + !-------------------------------------------------------------------------------------------------- !> @brief open a group from the results file !-------------------------------------------------------------------------------------------------- @@ -175,9 +184,14 @@ end subroutine results_setLink !-------------------------------------------------------------------------------------------------- subroutine results_addAttribute_str(attrLabel,attrValue,path) - character(len=*), intent(in) :: attrLabel, attrValue, path + character(len=*), intent(in) :: attrLabel, attrValue + character(len=*), intent(in), optional :: path - call HDF5_addAttribute(resultsFile,attrLabel, attrValue, path) + if (present(path)) then + call HDF5_addAttribute(resultsFile,attrLabel, attrValue, path) + else + call HDF5_addAttribute(resultsFile,attrLabel, attrValue) + endif end subroutine results_addAttribute_str @@ -187,10 +201,15 @@ end subroutine results_addAttribute_str !-------------------------------------------------------------------------------------------------- subroutine results_addAttribute_int(attrLabel,attrValue,path) - character(len=*), intent(in) :: attrLabel, path - integer, intent(in) :: attrValue + character(len=*), intent(in) :: attrLabel + integer, intent(in) :: attrValue + character(len=*), intent(in), optional :: path - call HDF5_addAttribute(resultsFile,attrLabel, attrValue, path) + if (present(path)) then + call HDF5_addAttribute(resultsFile,attrLabel, attrValue, path) + else + call HDF5_addAttribute(resultsFile,attrLabel, attrValue) + endif end subroutine results_addAttribute_int @@ -200,10 +219,15 @@ end subroutine results_addAttribute_int !-------------------------------------------------------------------------------------------------- subroutine results_addAttribute_real(attrLabel,attrValue,path) - character(len=*), intent(in) :: attrLabel, path - real(pReal), intent(in) :: attrValue + character(len=*), intent(in) :: attrLabel + real(pReal), intent(in) :: attrValue + character(len=*), intent(in), optional :: path - call HDF5_addAttribute(resultsFile,attrLabel, attrValue, path) + if (present(path)) then + call HDF5_addAttribute(resultsFile,attrLabel, attrValue, path) + else + call HDF5_addAttribute(resultsFile,attrLabel, attrValue) + endif end subroutine results_addAttribute_real @@ -213,10 +237,15 @@ end subroutine results_addAttribute_real 
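! [Editor's sketch, not part of the patch] With 'path' now optional in every
! results_addAttribute_* variant, the same generic call covers file-root and
! group-level attributes; the group name 'inc42' is hypothetical:
!
!   call results_addAttribute('DADF5_version_minor', 5)      ! attribute at the file root
!   call results_addAttribute('time/s', time, 'inc42')       ! attribute attached to a group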
!-------------------------------------------------------------------------------------------------- subroutine results_addAttribute_int_array(attrLabel,attrValue,path) - character(len=*), intent(in) :: attrLabel, path + character(len=*), intent(in) :: attrLabel integer, intent(in), dimension(:) :: attrValue + character(len=*), intent(in), optional :: path - call HDF5_addAttribute(resultsFile,attrLabel, attrValue, path) + if (present(path)) then + call HDF5_addAttribute(resultsFile,attrLabel, attrValue, path) + else + call HDF5_addAttribute(resultsFile,attrLabel, attrValue) + endif end subroutine results_addAttribute_int_array @@ -226,10 +255,15 @@ end subroutine results_addAttribute_int_array !-------------------------------------------------------------------------------------------------- subroutine results_addAttribute_real_array(attrLabel,attrValue,path) - character(len=*), intent(in) :: attrLabel, path + character(len=*), intent(in) :: attrLabel real(pReal), intent(in), dimension(:) :: attrValue + character(len=*), intent(in), optional :: path - call HDF5_addAttribute(resultsFile,attrLabel, attrValue, path) + if (present(path)) then + call HDF5_addAttribute(resultsFile,attrLabel, attrValue, path) + else + call HDF5_addAttribute(resultsFile,attrLabel, attrValue) + endif end subroutine results_addAttribute_real_array @@ -453,9 +487,9 @@ end subroutine results_writeScalarDataset_rotation !-------------------------------------------------------------------------------------------------- subroutine results_mapping_constituent(phaseAt,memberAt,label) - integer, dimension(:,:), intent(in) :: phaseAt !< phase section at (constituent,element) - integer, dimension(:,:,:), intent(in) :: memberAt !< phase member at (constituent,IP,element) - character(len=64), dimension(:), intent(in) :: label !< label of each phase section + integer, dimension(:,:), intent(in) :: phaseAt !< phase section at (constituent,element) + integer, dimension(:,:,:), intent(in) :: memberAt !< phase member at (constituent,IP,element) + character(len=pStringLen), dimension(:), intent(in) :: label !< label of each phase section integer, dimension(size(memberAt,1),size(memberAt,2),size(memberAt,3)) :: & phaseAt_perIP, & @@ -588,9 +622,9 @@ end subroutine results_mapping_constituent !-------------------------------------------------------------------------------------------------- subroutine results_mapping_materialpoint(homogenizationAt,memberAt,label) - integer, dimension(:), intent(in) :: homogenizationAt !< homogenization section at (element) - integer, dimension(:,:), intent(in) :: memberAt !< homogenization member at (IP,element) - character(len=64), dimension(:), intent(in) :: label !< label of each homogenization section + integer, dimension(:), intent(in) :: homogenizationAt !< homogenization section at (element) + integer, dimension(:,:), intent(in) :: memberAt !< homogenization member at (IP,element) + character(len=pStringLen), dimension(:), intent(in) :: label !< label of each homogenization section integer, dimension(size(memberAt,1),size(memberAt,2)) :: & homogenizationAt_perIP, & @@ -722,7 +756,6 @@ end subroutine results_mapping_materialpoint !!> @brief adds the backward mapping from spatial position and constituent ID to results !!-------------------------------------------------------------------------------------------------- !subroutine HDF5_backwardMappingPhase(material_phase,phasememberat,phase_name,dataspace_size,mpiOffset,mpiOffset_phase) -! use hdf5 ! 
integer(pInt), intent(in), dimension(:,:,:) :: material_phase, phasememberat ! character(len=*), intent(in), dimension(:) :: phase_name @@ -836,7 +869,6 @@ end subroutine results_mapping_materialpoint !!> @brief adds the backward mapping from spatial position and constituent ID to results !!-------------------------------------------------------------------------------------------------- !subroutine HDF5_backwardMappingHomog(material_homog,homogmemberat,homogenization_name,dataspace_size,mpiOffset,mpiOffset_homog) -! use hdf5 ! integer(pInt), intent(in), dimension(:,:) :: material_homog, homogmemberat ! character(len=*), intent(in), dimension(:) :: homogenization_name @@ -943,7 +975,6 @@ end subroutine results_mapping_materialpoint !!> @brief adds the unique cell to node mapping !!-------------------------------------------------------------------------------------------------- !subroutine HDF5_mappingCells(mapping) -! use hdf5 ! integer(pInt), intent(in), dimension(:) :: mapping @@ -979,5 +1010,4 @@ end subroutine results_mapping_materialpoint !end subroutine HDF5_mappingCells -#endif end module results diff --git a/src/rotations.f90 b/src/rotations.f90 index a4a0bac88..5deb02a20 100644 --- a/src/rotations.f90 +++ b/src/rotations.f90 @@ -596,7 +596,7 @@ function om2ax(om) result(ax) else call dgeev('N','V',3,om_,3,Wr,Wi,devNull,3,VR,3,work,size(work,1),ierr) if (ierr /= 0) call IO_error(0,ext_msg='Error in om2ax: DGEEV return not zero') -#if defined(__GFORTRAN__) && __GNUC__ < 9 || __INTEL_COMPILER < 1800 +#if defined(__GFORTRAN__) && __GNUC__<9 || defined(__INTEL_COMPILER) && INTEL_COMPILER<1800 i = maxloc(merge(1,0,cEq(cmplx(Wr,Wi,pReal),cmplx(1.0_pReal,0.0_pReal,pReal),tol=1.0e-14_pReal)),dim=1) #else i = findloc(cEq(cmplx(Wr,Wi,pReal),cmplx(1.0_pReal,0.0_pReal,pReal),tol=1.0e-14_pReal),.true.,dim=1) !find eigenvalue (1,0) diff --git a/src/source_damage_anisoBrittle.f90 b/src/source_damage_anisoBrittle.f90 index 9997f81e5..e5ed05799 100644 --- a/src/source_damage_anisoBrittle.f90 +++ b/src/source_damage_anisoBrittle.f90 @@ -13,19 +13,14 @@ module source_damage_anisoBrittle use discretization use config use lattice + use results implicit none private - integer, dimension(:), allocatable, public, protected :: & + integer, dimension(:), allocatable :: & source_damage_anisoBrittle_offset, & !< which source is my current source mechanism? 
source_damage_anisoBrittle_instance !< instance of source mechanism - - integer, dimension(:,:), allocatable, target, public :: & - source_damage_anisoBrittle_sizePostResult !< size of each post result output - - character(len=64), dimension(:,:), allocatable, target, public :: & - source_damage_anisoBrittle_output !< name of each post result output integer, dimension(:,:), allocatable :: & source_damage_anisoBrittle_Ncleavage !< number of cleavage systems per family @@ -61,7 +56,7 @@ module source_damage_anisoBrittle source_damage_anisoBrittle_init, & source_damage_anisoBrittle_dotState, & source_damage_anisobrittle_getRateAndItsTangent, & - source_damage_anisoBrittle_postResults + source_damage_anisoBrittle_results contains @@ -74,17 +69,15 @@ subroutine source_damage_anisoBrittle_init integer :: Ninstance,phase,instance,source,sourceOffset integer :: NofMyPhase,p ,i - integer, dimension(0), parameter :: emptyIntArray = [integer::] - character(len=65536), dimension(0), parameter :: emptyStringArray = [character(len=65536)::] integer(kind(undefined_ID)) :: & outputID character(len=pStringLen) :: & extmsg = '' - character(len=65536), dimension(:), allocatable :: & + character(len=pStringLen), dimension(:), allocatable :: & outputs - write(6,'(/,a)') ' <<<+- source_'//SOURCE_DAMAGE_ANISOBRITTLE_LABEL//' init -+>>>' + write(6,'(/,a)') ' <<<+- source_'//SOURCE_DAMAGE_ANISOBRITTLE_LABEL//' init -+>>>'; flush(6) Ninstance = count(phase_source == SOURCE_damage_anisoBrittle_ID) if (Ninstance == 0) return @@ -102,10 +95,6 @@ subroutine source_damage_anisoBrittle_init enddo enddo - allocate(source_damage_anisoBrittle_sizePostResult(maxval(phase_Noutput),Ninstance), source=0) - allocate(source_damage_anisoBrittle_output(maxval(phase_Noutput),Ninstance)) - source_damage_anisoBrittle_output = '' - allocate(source_damage_anisoBrittle_Ncleavage(lattice_maxNcleavageFamily,Ninstance), source=0) allocate(param(Ninstance)) @@ -154,8 +143,6 @@ subroutine source_damage_anisoBrittle_init select case(outputs(i)) case ('anisobrittle_drivingforce') - source_damage_anisoBrittle_sizePostResult(i,source_damage_anisoBrittle_instance(p)) = 1 - source_damage_anisoBrittle_output(i,source_damage_anisoBrittle_instance(p)) = outputs(i) prm%outputID = [prm%outputID, damage_drivingforce_ID] end select @@ -171,7 +158,6 @@ subroutine source_damage_anisoBrittle_init call material_allocateSourceState(phase,sourceOffset,NofMyPhase,1,1,0) - sourceState(phase)%p(sourceOffset)%sizePostResults = sum(source_damage_anisoBrittle_sizePostResult(:,instance)) sourceState(phase)%p(sourceOffset)%aTolState=param(instance)%aTol @@ -262,39 +248,30 @@ subroutine source_damage_anisobrittle_getRateAndItsTangent(localphiDot, dLocalph dLocalphiDot_dPhi = -sourceState(phase)%p(sourceOffset)%state(1,constituent) -end subroutine source_damage_anisobrittle_getRateAndItsTangent +end subroutine source_damage_anisoBrittle_getRateAndItsTangent !-------------------------------------------------------------------------------------------------- -!> @brief return array of local damage results +!> @brief writes results to HDF5 output file !-------------------------------------------------------------------------------------------------- -function source_damage_anisoBrittle_postResults(phase, constituent) +subroutine source_damage_anisoBrittle_results(phase,group) - integer, intent(in) :: & - phase, & - constituent - - real(pReal), dimension(sum(source_damage_anisoBrittle_sizePostResult(:, & - source_damage_anisoBrittle_instance(phase)))) :: & - 
source_damage_anisoBrittle_postResults - - integer :: & - instance, sourceOffset, o, c - - instance = source_damage_anisoBrittle_instance(phase) + integer, intent(in) :: phase + character(len=*), intent(in) :: group + integer :: sourceOffset, o, instance + + instance = source_damage_anisoBrittle_instance(phase) sourceOffset = source_damage_anisoBrittle_offset(phase) - c = 0 - - do o = 1,size(param(instance)%outputID) - select case(param(instance)%outputID(o)) + associate(prm => param(instance), stt => sourceState(phase)%p(sourceOffset)%state) + outputsLoop: do o = 1,size(prm%outputID) + select case(prm%outputID(o)) case (damage_drivingforce_ID) - source_damage_anisoBrittle_postResults(c+1) = & - sourceState(phase)%p(sourceOffset)%state(1,constituent) - c = c + 1 - + call results_writeDataset(group,stt,'tbd','driving force','tbd') end select - enddo -end function source_damage_anisoBrittle_postResults + enddo outputsLoop + end associate + +end subroutine source_damage_anisoBrittle_results end module source_damage_anisoBrittle diff --git a/src/source_damage_anisoDuctile.f90 b/src/source_damage_anisoDuctile.f90 index 409466e48..fef897914 100644 --- a/src/source_damage_anisoDuctile.f90 +++ b/src/source_damage_anisoDuctile.f90 @@ -12,21 +12,15 @@ module source_damage_anisoDuctile use discretization use material use config + use results implicit none private - integer, dimension(:), allocatable, public, protected :: & + integer, dimension(:), allocatable :: & source_damage_anisoDuctile_offset, & !< which source is my current damage mechanism? source_damage_anisoDuctile_instance !< instance of damage source mechanism - integer, dimension(:,:), allocatable, target, public :: & - source_damage_anisoDuctile_sizePostResult !< size of each post result output - - character(len=64), dimension(:,:), allocatable, target, public :: & - source_damage_anisoDuctile_output !< name of each post result output - - enum, bind(c) enumerator :: undefined_ID, & damage_drivingforce_ID @@ -54,7 +48,7 @@ module source_damage_anisoDuctile source_damage_anisoDuctile_init, & source_damage_anisoDuctile_dotState, & source_damage_anisoDuctile_getRateAndItsTangent, & - source_damage_anisoDuctile_postResults + source_damage_anisoDuctile_results contains @@ -68,17 +62,15 @@ subroutine source_damage_anisoDuctile_init integer :: Ninstance,phase,instance,source,sourceOffset integer :: NofMyPhase,p ,i - integer, dimension(0), parameter :: emptyIntArray = [integer::] - character(len=65536), dimension(0), parameter :: emptyStringArray = [character(len=65536)::] integer(kind(undefined_ID)) :: & outputID character(len=pStringLen) :: & extmsg = '' - character(len=65536), dimension(:), allocatable :: & + character(len=pStringLen), dimension(:), allocatable :: & outputs - write(6,'(/,a)') ' <<<+- source_'//SOURCE_DAMAGE_ANISODUCTILE_LABEL//' init -+>>>' + write(6,'(/,a)') ' <<<+- source_'//SOURCE_DAMAGE_ANISODUCTILE_LABEL//' init -+>>>'; flush(6) Ninstance = count(phase_source == SOURCE_damage_anisoDuctile_ID) if (Ninstance == 0) return @@ -95,11 +87,6 @@ subroutine source_damage_anisoDuctile_init source_damage_anisoDuctile_offset(phase) = source enddo enddo - - allocate(source_damage_anisoDuctile_sizePostResult(maxval(phase_Noutput),Ninstance),source=0) - allocate(source_damage_anisoDuctile_output(maxval(phase_Noutput),Ninstance)) - source_damage_anisoDuctile_output = '' - allocate(param(Ninstance)) @@ -139,8 +126,6 @@ subroutine source_damage_anisoDuctile_init select case(outputs(i)) case ('anisoductile_drivingforce') - 
source_damage_anisoDuctile_sizePostResult(i,source_damage_anisoDuctile_instance(p)) = 1 - source_damage_anisoDuctile_output(i,source_damage_anisoDuctile_instance(p)) = outputs(i) prm%outputID = [prm%outputID, damage_drivingforce_ID] end select @@ -156,8 +141,7 @@ subroutine source_damage_anisoDuctile_init sourceOffset = source_damage_anisoDuctile_offset(phase) call material_allocateSourceState(phase,sourceOffset,NofMyPhase,1,1,0) - sourceState(phase)%p(sourceOffset)%sizePostResults = sum(source_damage_anisoDuctile_sizePostResult(:,instance)) - sourceState(phase)%p(sourceOffset)%aTolState=param(instance)%aTol + sourceState(phase)%p(sourceOffset)%aTolState=param(instance)%aTol enddo @@ -226,35 +210,26 @@ end subroutine source_damage_anisoDuctile_getRateAndItsTangent !-------------------------------------------------------------------------------------------------- -!> @brief return array of local damage results +!> @brief writes results to HDF5 output file !-------------------------------------------------------------------------------------------------- -function source_damage_anisoDuctile_postResults(phase, constituent) +subroutine source_damage_anisoDuctile_results(phase,group) - integer, intent(in) :: & - phase, & - constituent - real(pReal), dimension(sum(source_damage_anisoDuctile_sizePostResult(:, & - source_damage_anisoDuctile_instance(phase)))) :: & - source_damage_anisoDuctile_postResults - - integer :: & - instance, sourceOffset, o, c - - instance = source_damage_anisoDuctile_instance(phase) + integer, intent(in) :: phase + character(len=*), intent(in) :: group + integer :: sourceOffset, o, instance + + instance = source_damage_anisoDuctile_instance(phase) sourceOffset = source_damage_anisoDuctile_offset(phase) - - c = 0 - - do o = 1,size(param(instance)%outputID) - select case(param(instance)%outputID(o)) - case (damage_drivingforce_ID) - source_damage_anisoDuctile_postResults(c+1) = & - sourceState(phase)%p(sourceOffset)%state(1,constituent) - c = c + 1 - - end select - enddo -end function source_damage_anisoDuctile_postResults + associate(prm => param(instance), stt => sourceState(phase)%p(sourceOffset)%state) + outputsLoop: do o = 1,size(prm%outputID) + select case(prm%outputID(o)) + case (damage_drivingforce_ID) + call results_writeDataset(group,stt,'tbd','driving force','tbd') + end select + enddo outputsLoop + end associate + +end subroutine source_damage_anisoDuctile_results end module source_damage_anisoDuctile diff --git a/src/source_damage_isoBrittle.f90 b/src/source_damage_isoBrittle.f90 index 89f5a038c..53c0b77a7 100644 --- a/src/source_damage_isoBrittle.f90 +++ b/src/source_damage_isoBrittle.f90 @@ -12,16 +12,13 @@ module source_damage_isoBrittle use discretization use material use config + use results implicit none private - integer, dimension(:), allocatable, public, protected :: & + integer, dimension(:), allocatable :: & source_damage_isoBrittle_offset, & source_damage_isoBrittle_instance - integer, dimension(:,:), allocatable, target, public :: & - source_damage_isoBrittle_sizePostResult - character(len=64), dimension(:,:), allocatable, target, public :: & - source_damage_isoBrittle_output enum, bind(c) enumerator :: & @@ -46,7 +43,7 @@ module source_damage_isoBrittle source_damage_isoBrittle_init, & source_damage_isoBrittle_deltaState, & source_damage_isoBrittle_getRateAndItsTangent, & - source_damage_isoBrittle_postResults + source_damage_isoBrittle_Results contains @@ -59,16 +56,15 @@ subroutine source_damage_isoBrittle_init integer :: 
Ninstance,phase,instance,source,sourceOffset integer :: NofMyPhase,p,i - character(len=65536), dimension(0), parameter :: emptyStringArray = [character(len=65536)::] integer(kind(undefined_ID)) :: & outputID character(len=pStringLen) :: & extmsg = '' - character(len=65536), dimension(:), allocatable :: & + character(len=pStringLen), dimension(:), allocatable :: & outputs - write(6,'(/,a)') ' <<<+- source_'//SOURCE_DAMAGE_ISOBRITTLE_LABEL//' init -+>>>' + write(6,'(/,a)') ' <<<+- source_'//SOURCE_DAMAGE_ISOBRITTLE_LABEL//' init -+>>>'; flush(6) Ninstance = count(phase_source == SOURCE_damage_isoBrittle_ID) if (Ninstance == 0) return @@ -85,10 +81,6 @@ subroutine source_damage_isoBrittle_init source_damage_isoBrittle_offset(phase) = source enddo enddo - - allocate(source_damage_isoBrittle_sizePostResult(maxval(phase_Noutput),Ninstance),source=0) - allocate(source_damage_isoBrittle_output(maxval(phase_Noutput),Ninstance)) - source_damage_isoBrittle_output = '' allocate(param(Ninstance)) @@ -122,8 +114,6 @@ subroutine source_damage_isoBrittle_init select case(outputs(i)) case ('isobrittle_drivingforce') - source_damage_isoBrittle_sizePostResult(i,source_damage_isoBrittle_instance(p)) = 1 - source_damage_isoBrittle_output(i,source_damage_isoBrittle_instance(p)) = outputs(i) prm%outputID = [prm%outputID, damage_drivingforce_ID] end select @@ -139,7 +129,6 @@ subroutine source_damage_isoBrittle_init sourceOffset = source_damage_isoBrittle_offset(phase) call material_allocateSourceState(phase,sourceOffset,NofMyPhase,1,1,1) - sourceState(phase)%p(sourceOffset)%sizePostResults = sum(source_damage_isoBrittle_sizePostResult(:,instance)) sourceState(phase)%p(sourceOffset)%aTolState=param(instance)%aTol enddo @@ -214,35 +203,29 @@ subroutine source_damage_isoBrittle_getRateAndItsTangent(localphiDot, dLocalphiD - sourceState(phase)%p(sourceOffset)%state(1,constituent) end subroutine source_damage_isoBrittle_getRateAndItsTangent - -!-------------------------------------------------------------------------------------------------- -!> @brief return array of local damage results -!-------------------------------------------------------------------------------------------------- -function source_damage_isoBrittle_postResults(phase, constituent) - integer, intent(in) :: & - phase, & - constituent - real(pReal), dimension(sum(source_damage_isoBrittle_sizePostResult(:, & - source_damage_isoBrittle_instance(phase)))) :: & - source_damage_isoBrittle_postResults - integer :: & - instance, sourceOffset, o, c - - instance = source_damage_isoBrittle_instance(phase) +!-------------------------------------------------------------------------------------------------- +!> @brief writes results to HDF5 output file +!-------------------------------------------------------------------------------------------------- +subroutine source_damage_isoBrittle_results(phase,group) + + integer, intent(in) :: phase + character(len=*), intent(in) :: group + integer :: sourceOffset, o, instance + + instance = source_damage_isoBrittle_instance(phase) sourceOffset = source_damage_isoBrittle_offset(phase) - c = 0 - - do o = 1,size(param(instance)%outputID) - select case(param(instance)%outputID(o)) + associate(prm => param(instance), stt => sourceState(phase)%p(sourceOffset)%state) + outputsLoop: do o = 1,size(prm%outputID) + select case(prm%outputID(o)) case (damage_drivingforce_ID) - source_damage_isoBrittle_postResults(c+1) = sourceState(phase)%p(sourceOffset)%state(1,constituent) - c = c + 1 - + call 
results_writeDataset(group,stt,'tbd','driving force','tbd') end select - enddo -end function source_damage_isoBrittle_postResults + enddo outputsLoop + end associate + +end subroutine source_damage_isoBrittle_results end module source_damage_isoBrittle diff --git a/src/source_damage_isoDuctile.f90 b/src/source_damage_isoDuctile.f90 index 65930cd07..6ee588d0c 100644 --- a/src/source_damage_isoDuctile.f90 +++ b/src/source_damage_isoDuctile.f90 @@ -11,24 +11,18 @@ module source_damage_isoDuctile use discretization use material use config + use results implicit none private - integer, dimension(:), allocatable, public, protected :: & + integer, dimension(:), allocatable :: & source_damage_isoDuctile_offset, & !< which source is my current damage mechanism? source_damage_isoDuctile_instance !< instance of damage source mechanism - integer, dimension(:,:), allocatable, target, public :: & - source_damage_isoDuctile_sizePostResult !< size of each post result output - - character(len=64), dimension(:,:), allocatable, target, public :: & - source_damage_isoDuctile_output !< name of each post result output - - enum, bind(c) enumerator :: undefined_ID, & damage_drivingforce_ID - end enum !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!11 ToDo + end enum !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! ToDo type, private :: tParameters !< container type for internal constitutive parameters real(pReal) :: & @@ -46,7 +40,7 @@ module source_damage_isoDuctile source_damage_isoDuctile_init, & source_damage_isoDuctile_dotState, & source_damage_isoDuctile_getRateAndItsTangent, & - source_damage_isoDuctile_postResults + source_damage_isoDuctile_Results contains @@ -59,13 +53,12 @@ subroutine source_damage_isoDuctile_init integer :: Ninstance,phase,instance,source,sourceOffset integer :: NofMyPhase,p,i - character(len=65536), dimension(0), parameter :: emptyStringArray = [character(len=65536)::] integer(kind(undefined_ID)) :: & outputID character(len=pStringLen) :: & extmsg = '' - character(len=65536), dimension(:), allocatable :: & + character(len=pStringLen), dimension(:), allocatable :: & outputs write(6,'(/,a)') ' <<<+- source_'//SOURCE_DAMAGE_ISODUCTILE_LABEL//' init -+>>>' @@ -85,10 +78,6 @@ subroutine source_damage_isoDuctile_init source_damage_isoDuctile_offset(phase) = source enddo enddo - - allocate(source_damage_isoDuctile_sizePostResult(maxval(phase_Noutput),Ninstance),source=0) - allocate(source_damage_isoDuctile_output(maxval(phase_Noutput),Ninstance)) - source_damage_isoDuctile_output = '' allocate(param(Ninstance)) @@ -122,8 +111,6 @@ subroutine source_damage_isoDuctile_init select case(outputs(i)) case ('isoductile_drivingforce') - source_damage_isoDuctile_sizePostResult(i,source_damage_isoDuctile_instance(p)) = 1 - source_damage_isoDuctile_output(i,source_damage_isoDuctile_instance(p)) = outputs(i) prm%outputID = [prm%outputID, damage_drivingforce_ID] end select @@ -138,9 +125,7 @@ subroutine source_damage_isoDuctile_init sourceOffset = source_damage_isoDuctile_offset(phase) call material_allocateSourceState(phase,sourceOffset,NofMyPhase,1,1,0) - sourceState(phase)%p(sourceOffset)%sizePostResults = sum(source_damage_isoDuctile_sizePostResult(:,instance)) sourceState(phase)%p(sourceOffset)%aTolState=param(instance)%aTol - enddo @@ -196,35 +181,30 @@ subroutine source_damage_isoDuctile_getRateAndItsTangent(localphiDot, dLocalphiD dLocalphiDot_dPhi = -sourceState(phase)%p(sourceOffset)%state(1,constituent) end subroutine source_damage_isoDuctile_getRateAndItsTangent - 
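The isoBrittle conversion just above, like the other damage sources in this patch, replaces a *_postResults function that filled a flat array sized by the removed sizePostResult bookkeeping with a *_results(phase,group) subroutine that writes named datasets through results_writeDataset. A hedged sketch of how a driver routine might dispatch to these new routines; the driver name, the group layout and the use of config_name_phase are assumptions for illustration only, not part of this patch:

  subroutine constitutive_results_sketch                       ! hypothetical caller, illustration only
    integer :: p
    character(len=pStringLen) :: group
    do p = 1, size(config_name_phase)
      group = trim('current/constituent/')//trim(config_name_phase(p))//'/sources'   ! assumed layout
      call results_closeGroup(results_addGroup(group))
      if (any(phase_source(:,p) == SOURCE_damage_isoDuctile_ID)) &
        call source_damage_isoDuctile_results(p,group)
      ! ... analogous calls for the other source modules converted in this patch ...
    enddo
  end subroutine constitutive_results_sketch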
-!-------------------------------------------------------------------------------------------------- -!> @brief return array of local damage results -!-------------------------------------------------------------------------------------------------- -function source_damage_isoDuctile_postResults(phase, constituent) - integer, intent(in) :: & - phase, & - constituent - real(pReal), dimension(sum(source_damage_isoDuctile_sizePostResult(:, & - source_damage_isoDuctile_instance(phase)))) :: & - source_damage_isoDuctile_postResults - integer :: & - instance, sourceOffset, o, c +!-------------------------------------------------------------------------------------------------- +!> @brief writes results to HDF5 output file +!-------------------------------------------------------------------------------------------------- +subroutine source_damage_isoDuctile_results(phase,group) + + integer, intent(in) :: phase + character(len=*), intent(in) :: group + integer :: sourceOffset, o, instance - instance = source_damage_isoDuctile_instance(phase) - sourceOffset = source_damage_isoDuctile_offset(phase) + instance = source_damage_isoDuctile_instance(phase) + sourceOffset = source_damage_isoDuctile_offset(phase) - c = 0 + associate(prm => param(instance), stt => sourceState(phase)%p(sourceOffset)%state) + outputsLoop: do o = 1,size(prm%outputID) + select case(prm%outputID(o)) + case (damage_drivingforce_ID) + call results_writeDataset(group,stt,'tbd','driving force','tbd') + end select + enddo outputsLoop + end associate - do o = 1,size(param(instance)%outputID) - select case(param(instance)%outputID(o)) - case (damage_drivingforce_ID) - source_damage_isoDuctile_postResults(c+1) = sourceState(phase)%p(sourceOffset)%state(1,constituent) - c = c + 1 +end subroutine source_damage_isoDuctile_results - end select - enddo -end function source_damage_isoDuctile_postResults end module source_damage_isoDuctile diff --git a/src/source_thermal_dissipation.f90 b/src/source_thermal_dissipation.f90 index 9b18efef5..e13742a90 100644 --- a/src/source_thermal_dissipation.f90 +++ b/src/source_thermal_dissipation.f90 @@ -14,16 +14,10 @@ module source_thermal_dissipation implicit none private - integer, dimension(:), allocatable, public, protected :: & + integer, dimension(:), allocatable :: & source_thermal_dissipation_offset, & !< which source is my current thermal dissipation mechanism? 
source_thermal_dissipation_instance !< instance of thermal dissipation source mechanism - integer, dimension(:,:), allocatable, target, public :: & - source_thermal_dissipation_sizePostResult !< size of each post result output - - character(len=64), dimension(:,:), allocatable, target, public :: & - source_thermal_dissipation_output !< name of each post result output - type :: tParameters !< container type for internal constitutive parameters real(pReal) :: & kappa @@ -45,10 +39,9 @@ contains !-------------------------------------------------------------------------------------------------- subroutine source_thermal_dissipation_init - integer :: Ninstance,instance,source,sourceOffset - integer :: NofMyPhase,p + integer :: Ninstance,instance,source,sourceOffset,NofMyPhase,p - write(6,'(/,a)') ' <<<+- source_'//SOURCE_thermal_dissipation_label//' init -+>>>' + write(6,'(/,a)') ' <<<+- source_'//SOURCE_thermal_dissipation_label//' init -+>>>'; flush(6) Ninstance = count(phase_source == SOURCE_thermal_dissipation_ID) @@ -68,10 +61,6 @@ subroutine source_thermal_dissipation_init enddo enddo - allocate(source_thermal_dissipation_sizePostResult(maxval(phase_Noutput),Ninstance),source=0) - allocate(source_thermal_dissipation_output (maxval(phase_Noutput),Ninstance)) - source_thermal_dissipation_output = '' - do p=1, size(config_phase) if (all(phase_source(:,p) /= SOURCE_THERMAL_DISSIPATION_ID)) cycle instance = source_thermal_dissipation_instance(p) diff --git a/src/source_thermal_externalheat.f90 b/src/source_thermal_externalheat.f90 index 1b9d03529..7ae37f037 100644 --- a/src/source_thermal_externalheat.f90 +++ b/src/source_thermal_externalheat.f90 @@ -14,18 +14,9 @@ module source_thermal_externalheat implicit none private - integer, dimension(:), allocatable, public, protected :: & - source_thermal_externalheat_offset, & !< which source is my current thermal dissipation mechanism? - source_thermal_externalheat_instance !< instance of thermal dissipation source mechanism - - integer, dimension(:,:), allocatable, target, public :: & - source_thermal_externalheat_sizePostResult !< size of each post result output - - character(len=64), dimension(:,:), allocatable, target, public :: & - source_thermal_externalheat_output !< name of each post result output - - integer, dimension(:), allocatable, target, public :: & - source_thermal_externalheat_Noutput !< number of outputs per instance of this source + integer, dimension(:), allocatable :: & + source_thermal_externalheat_offset, & !< which source is my current thermal dissipation mechanism? 
+ source_thermal_externalheat_instance !< instance of thermal dissipation source mechanism type :: tParameters !< container type for internal constitutive parameters real(pReal), dimension(:), allocatable :: & @@ -52,9 +43,9 @@ contains !-------------------------------------------------------------------------------------------------- subroutine source_thermal_externalheat_init - integer :: maxNinstance,instance,source,sourceOffset,NofMyPhase,p + integer :: maxNinstance,instance,source,sourceOffset,NofMyPhase,p - write(6,'(/,a)') ' <<<+- source_'//SOURCE_thermal_externalheat_label//' init -+>>>' + write(6,'(/,a)') ' <<<+- source_'//SOURCE_thermal_externalheat_label//' init -+>>>'; flush(6) maxNinstance = count(phase_source == SOURCE_thermal_externalheat_ID) @@ -73,11 +64,6 @@ subroutine source_thermal_externalheat_init enddo enddo - allocate(source_thermal_externalheat_sizePostResult(maxval(phase_Noutput),maxNinstance),source=0) - allocate(source_thermal_externalheat_output (maxval(phase_Noutput),maxNinstance)) - source_thermal_externalheat_output = '' - allocate(source_thermal_externalheat_Noutput(maxNinstance), source=0) - allocate(param(maxNinstance)) do p=1, size(config_phase) @@ -117,6 +103,7 @@ subroutine source_thermal_externalheat_dotState(phase, of) end subroutine source_thermal_externalheat_dotState + !-------------------------------------------------------------------------------------------------- !> @brief returns local heat generation rate !-------------------------------------------------------------------------------------------------- diff --git a/src/system_routines.f90 b/src/system_routines.f90 index 0611c96db..932eefeb6 100644 --- a/src/system_routines.f90 +++ b/src/system_routines.f90 @@ -93,21 +93,24 @@ end function isDirectory !-------------------------------------------------------------------------------------------------- !> @brief gets the current working directory !-------------------------------------------------------------------------------------------------- -character(len=1024) function getCWD() +function getCWD() character(kind=C_CHAR), dimension(1024) :: charArray ! C string is an array + character(len=:), allocatable :: getCWD integer(C_INT) :: stat integer :: i call getCurrentWorkDir_C(charArray,stat) + if (stat /= 0_C_INT) then getCWD = 'Error occured when getting currend working directory' else - getCWD = repeat('',len(getCWD)) + allocate(character(len=1024)::getCWD) arrayToString: do i=1,len(getCWD) if (charArray(i) /= C_NULL_CHAR) then getCWD(i:i)=charArray(i) else + getCWD = getCWD(:i-1) exit endif enddo arrayToString @@ -119,21 +122,24 @@ end function getCWD !-------------------------------------------------------------------------------------------------- !> @brief gets the current host name !-------------------------------------------------------------------------------------------------- -character(len=1024) function getHostName() +function getHostName() character(kind=C_CHAR), dimension(1024) :: charArray ! 
C string is an array + character(len=:), allocatable :: getHostName integer(C_INT) :: stat integer :: i call getHostName_C(charArray,stat) + if (stat /= 0_C_INT) then getHostName = 'Error occured when getting host name' else - getHostName = repeat('',len(getHostName)) + allocate(character(len=1024)::getHostName) arrayToString: do i=1,len(getHostName) if (charArray(i) /= C_NULL_CHAR) then getHostName(i:i)=charArray(i) else + getHostName = getHostName(:i-1) exit endif enddo arrayToString diff --git a/src/thermal_adiabatic.f90 b/src/thermal_adiabatic.f90 index 2aa69bec5..d96604e59 100644 --- a/src/thermal_adiabatic.f90 +++ b/src/thermal_adiabatic.f90 @@ -7,6 +7,7 @@ module thermal_adiabatic use config use numerics use material + use results use source_thermal_dissipation use source_thermal_externalheat use crystallite @@ -14,22 +15,19 @@ module thermal_adiabatic implicit none private - - integer, dimension(:,:), allocatable, target, public :: & - thermal_adiabatic_sizePostResult !< size of each post result output - character(len=64), dimension(:,:), allocatable, target, public :: & - thermal_adiabatic_output !< name of each post result output - - integer, dimension(:), allocatable, target, public :: & - thermal_adiabatic_Noutput !< number of outputs per instance of this thermal model - + enum, bind(c) enumerator :: undefined_ID, & temperature_ID end enum - integer(kind(undefined_ID)), dimension(:,:), allocatable :: & - thermal_adiabatic_outputID !< ID of each post result output - + + type :: tParameters + integer(kind(undefined_ID)), dimension(:), allocatable :: & + outputID + end type tParameters + + type(tparameters), dimension(:), allocatable :: & + param public :: & thermal_adiabatic_init, & @@ -37,7 +35,7 @@ module thermal_adiabatic thermal_adiabatic_getSourceAndItsTangent, & thermal_adiabatic_getSpecificHeat, & thermal_adiabatic_getMassDensity, & - thermal_adiabatic_postResults + thermal_adiabatic_results contains @@ -48,53 +46,45 @@ contains !-------------------------------------------------------------------------------------------------- subroutine thermal_adiabatic_init - integer :: maxNinstance,section,instance,i,sizeState,NofMyHomog - character(len=65536), dimension(0), parameter :: emptyStringArray = [character(len=65536)::] - character(len=65536), dimension(:), allocatable :: outputs + integer :: maxNinstance,o,h,NofMyHomog + character(len=pStringLen), dimension(:), allocatable :: outputs - write(6,'(/,a)') ' <<<+- thermal_'//THERMAL_ADIABATIC_label//' init -+>>>' + write(6,'(/,a)') ' <<<+- thermal_'//THERMAL_ADIABATIC_label//' init -+>>>'; flush(6) maxNinstance = count(thermal_type == THERMAL_adiabatic_ID) if (maxNinstance == 0) return - allocate(thermal_adiabatic_sizePostResult (maxval(homogenization_Noutput),maxNinstance),source=0) - allocate(thermal_adiabatic_output (maxval(homogenization_Noutput),maxNinstance)) - thermal_adiabatic_output = '' - allocate(thermal_adiabatic_outputID (maxval(homogenization_Noutput),maxNinstance),source=undefined_ID) - allocate(thermal_adiabatic_Noutput (maxNinstance), source=0) - + allocate(param(maxNinstance)) - initializeInstances: do section = 1, size(thermal_type) - if (thermal_type(section) /= THERMAL_adiabatic_ID) cycle - NofMyHomog=count(material_homogenizationAt==section) - instance = thermal_typeInstance(section) - outputs = config_homogenization(section)%getStrings('(output)',defaultVal=emptyStringArray) - do i=1, size(outputs) - select case(outputs(i)) + do h = 1, size(thermal_type) + if (thermal_type(h) /= THERMAL_adiabatic_ID) 
cycle + associate(prm => param(thermal_typeInstance(h)),config => config_homogenization(h)) + + outputs = config%getStrings('(output)',defaultVal=emptyStringArray) + allocate(prm%outputID(0)) + + do o=1, size(outputs) + select case(outputs(o)) case('temperature') - thermal_adiabatic_Noutput(instance) = thermal_adiabatic_Noutput(instance) + 1 - thermal_adiabatic_outputID(thermal_adiabatic_Noutput(instance),instance) = temperature_ID - thermal_adiabatic_output(thermal_adiabatic_Noutput(instance),instance) = outputs(i) - thermal_adiabatic_sizePostResult(thermal_adiabatic_Noutput(instance),instance) = 1 + prm%outputID = [prm%outputID, temperature_ID] end select enddo + + NofMyHomog=count(material_homogenizationAt==h) + thermalState(h)%sizeState = 1 + allocate(thermalState(h)%state0 (1,NofMyHomog), source=thermal_initialT(h)) + allocate(thermalState(h)%subState0(1,NofMyHomog), source=thermal_initialT(h)) + allocate(thermalState(h)%state (1,NofMyHomog), source=thermal_initialT(h)) - ! allocate state arrays - sizeState = 1 - thermalState(section)%sizeState = sizeState - thermalState(section)%sizePostResults = sum(thermal_adiabatic_sizePostResult(:,instance)) - allocate(thermalState(section)%state0 (sizeState,NofMyHomog), source=thermal_initialT(section)) - allocate(thermalState(section)%subState0(sizeState,NofMyHomog), source=thermal_initialT(section)) - allocate(thermalState(section)%state (sizeState,NofMyHomog), source=thermal_initialT(section)) - - nullify(thermalMapping(section)%p) - thermalMapping(section)%p => mappingHomogenization(1,:,:) - deallocate(temperature(section)%p) - temperature(section)%p => thermalState(section)%state(1,:) - deallocate(temperatureRate(section)%p) - allocate (temperatureRate(section)%p(NofMyHomog), source=0.0_pReal) - - enddo initializeInstances + nullify(thermalMapping(h)%p) + thermalMapping(h)%p => mappingHomogenization(1,:,:) + deallocate(temperature(h)%p) + temperature(h)%p => thermalState(h)%state(1,:) + deallocate(temperatureRate(h)%p) + allocate (temperatureRate(h)%p(NofMyHomog), source=0.0_pReal) + + end associate + enddo end subroutine thermal_adiabatic_init @@ -252,32 +242,26 @@ end function thermal_adiabatic_getMassDensity !-------------------------------------------------------------------------------------------------- -!> @brief return array of thermal results +!> @brief writes results to HDF5 output file !-------------------------------------------------------------------------------------------------- -function thermal_adiabatic_postResults(homog,instance,of) result(postResults) - - integer, intent(in) :: & - homog, & - instance, & - of - - real(pReal), dimension(sum(thermal_adiabatic_sizePostResult(:,instance))) :: & - postResults - - integer :: & - o, c - - c = 0 - - do o = 1,thermal_adiabatic_Noutput(instance) - select case(thermal_adiabatic_outputID(o,instance)) +subroutine thermal_adiabatic_results(homog,group) + + integer, intent(in) :: homog + character(len=*), intent(in) :: group + integer :: o - case (temperature_ID) - postResults(c+1) = temperature(homog)%p(of) - c = c + 1 - end select - enddo - -end function thermal_adiabatic_postResults + associate(prm => param(damage_typeInstance(homog))) + + outputsLoop: do o = 1,size(prm%outputID) + select case(prm%outputID(o)) + + case (temperature_ID) + call results_writeDataset(group,temperature(homog)%p,'T',& + 'temperature','K') + end select + enddo outputsLoop + end associate + +end subroutine thermal_adiabatic_results end module thermal_adiabatic diff --git a/src/thermal_conduction.f90 
b/src/thermal_conduction.f90 index e513d709f..537b0366b 100644 --- a/src/thermal_conduction.f90 +++ b/src/thermal_conduction.f90 @@ -7,28 +7,27 @@ module thermal_conduction use material use config use lattice + use results use crystallite use source_thermal_dissipation use source_thermal_externalheat implicit none private - - integer, dimension(:,:), allocatable, target, public :: & - thermal_conduction_sizePostResult !< size of each post result output - character(len=64), dimension(:,:), allocatable, target, public :: & - thermal_conduction_output !< name of each post result output - - integer, dimension(:), allocatable, target, public :: & - thermal_conduction_Noutput !< number of outputs per instance of this damage - + enum, bind(c) - enumerator :: undefined_ID, & - temperature_ID + enumerator :: & + undefined_ID, & + temperature_ID end enum - integer(kind(undefined_ID)), dimension(:,:), allocatable, private :: & - thermal_conduction_outputID !< ID of each post result output - + + type :: tParameters + integer(kind(undefined_ID)), dimension(:), allocatable :: & + outputID + end type tParameters + + type(tparameters), dimension(:), allocatable :: & + param public :: & thermal_conduction_init, & @@ -37,7 +36,7 @@ module thermal_conduction thermal_conduction_getSpecificHeat, & thermal_conduction_getMassDensity, & thermal_conduction_putTemperatureAndItsRate, & - thermal_conduction_postResults + thermal_conduction_results contains @@ -49,56 +48,45 @@ contains subroutine thermal_conduction_init - integer :: maxNinstance,section,instance,i - integer :: sizeState - integer :: NofMyHomog - character(len=65536), dimension(0), parameter :: emptyStringArray = [character(len=65536)::] - character(len=65536), dimension(:), allocatable :: outputs + integer :: maxNinstance,o,NofMyHomog,h + character(len=pStringLen), dimension(:), allocatable :: outputs - write(6,'(/,a)') ' <<<+- thermal_'//THERMAL_CONDUCTION_label//' init -+>>>' + write(6,'(/,a)') ' <<<+- thermal_'//THERMAL_CONDUCTION_label//' init -+>>>'; flush(6) maxNinstance = count(thermal_type == THERMAL_conduction_ID) if (maxNinstance == 0) return - allocate(thermal_conduction_sizePostResult (maxval(homogenization_Noutput),maxNinstance),source=0) - allocate(thermal_conduction_output (maxval(homogenization_Noutput),maxNinstance)) - thermal_conduction_output = '' - allocate(thermal_conduction_outputID (maxval(homogenization_Noutput),maxNinstance),source=undefined_ID) - allocate(thermal_conduction_Noutput (maxNinstance), source=0) - + allocate(param(maxNinstance)) - initializeInstances: do section = 1, size(thermal_type) - if (thermal_type(section) /= THERMAL_conduction_ID) cycle - NofMyHomog=count(material_homogenizationAt==section) - instance = thermal_typeInstance(section) - outputs = config_homogenization(section)%getStrings('(output)',defaultVal=emptyStringArray) - do i=1, size(outputs) - select case(outputs(i)) + do h = 1, size(thermal_type) + if (thermal_type(h) /= THERMAL_conduction_ID) cycle + associate(prm => param(thermal_typeInstance(h)),config => config_homogenization(h)) + + outputs = config%getStrings('(output)',defaultVal=emptyStringArray) + allocate(prm%outputID(0)) + + do o=1, size(outputs) + select case(outputs(o)) case('temperature') - thermal_conduction_Noutput(instance) = thermal_conduction_Noutput(instance) + 1 - thermal_conduction_outputID(thermal_conduction_Noutput(instance),instance) = temperature_ID - thermal_conduction_output(thermal_conduction_Noutput(instance),instance) = outputs(i) - 
thermal_conduction_sizePostResult(thermal_conduction_Noutput(instance),instance) = 1 + prm%outputID = [prm%outputID, temperature_ID] end select enddo + + NofMyHomog=count(material_homogenizationAt==h) + thermalState(h)%sizeState = 0 + allocate(thermalState(h)%state0 (0,NofMyHomog)) + allocate(thermalState(h)%subState0(0,NofMyHomog)) + allocate(thermalState(h)%state (0,NofMyHomog)) - - ! allocate state arrays - sizeState = 0 - thermalState(section)%sizeState = sizeState - thermalState(section)%sizePostResults = sum(thermal_conduction_sizePostResult(:,instance)) - allocate(thermalState(section)%state0 (sizeState,NofMyHomog)) - allocate(thermalState(section)%subState0(sizeState,NofMyHomog)) - allocate(thermalState(section)%state (sizeState,NofMyHomog)) - - nullify(thermalMapping(section)%p) - thermalMapping(section)%p => mappingHomogenization(1,:,:) - deallocate(temperature (section)%p) - allocate (temperature (section)%p(NofMyHomog), source=thermal_initialT(section)) - deallocate(temperatureRate(section)%p) - allocate (temperatureRate(section)%p(NofMyHomog), source=0.0_pReal) - - enddo initializeInstances + nullify(thermalMapping(h)%p) + thermalMapping(h)%p => mappingHomogenization(1,:,:) + deallocate(temperature (h)%p) + allocate (temperature (h)%p(NofMyHomog), source=thermal_initialT(h)) + deallocate(temperatureRate(h)%p) + allocate (temperatureRate(h)%p(NofMyHomog), source=0.0_pReal) + + end associate + enddo end subroutine thermal_conduction_init @@ -213,7 +201,8 @@ function thermal_conduction_getSpecificHeat(ip,el) thermal_conduction_getSpecificHeat/real(homogenization_Ngrains(material_homogenizationAt(el)),pReal) end function thermal_conduction_getSpecificHeat - + + !-------------------------------------------------------------------------------------------------- !> @brief returns homogenized mass density !-------------------------------------------------------------------------------------------------- @@ -263,33 +252,28 @@ subroutine thermal_conduction_putTemperatureAndItsRate(T,Tdot,ip,el) end subroutine thermal_conduction_putTemperatureAndItsRate - + !-------------------------------------------------------------------------------------------------- -!> @brief return array of thermal results +!> @brief writes results to HDF5 output file !-------------------------------------------------------------------------------------------------- -function thermal_conduction_postResults(homog,instance,of) result(postResults) - - integer, intent(in) :: & - homog, & - instance, & - of - - real(pReal), dimension(sum(thermal_conduction_sizePostResult(:,instance))) :: & - postResults - - integer :: & - o, c - - c = 0 - do o = 1,thermal_conduction_Noutput(instance) - select case(thermal_conduction_outputID(o,instance)) +subroutine thermal_conduction_results(homog,group) + + integer, intent(in) :: homog + character(len=*), intent(in) :: group + integer :: o - case (temperature_ID) - postResults(c+1) = temperature(homog)%p(of) - c = c + 1 - end select - enddo - -end function thermal_conduction_postResults + associate(prm => param(damage_typeInstance(homog))) + + outputsLoop: do o = 1,size(prm%outputID) + select case(prm%outputID(o)) + + case (temperature_ID) + call results_writeDataset(group,temperature(homog)%p,'T',& + 'temperature','K') + end select + enddo outputsLoop + end associate + +end subroutine thermal_conduction_results end module thermal_conduction diff --git a/src/thermal_isothermal.f90 b/src/thermal_isothermal.f90 index f06239944..3cafc6402 100644 --- a/src/thermal_isothermal.f90 +++ 
b/src/thermal_isothermal.f90 @@ -3,7 +3,6 @@ !> @brief material subroutine for isothermal temperature field !-------------------------------------------------------------------------------------------------- module thermal_isothermal - use prec use config use material @@ -20,27 +19,25 @@ contains !-------------------------------------------------------------------------------------------------- subroutine thermal_isothermal_init - integer :: & - homog, & - NofMyHomog + integer :: h,NofMyHomog - write(6,'(/,a)') ' <<<+- thermal_'//THERMAL_isothermal_label//' init -+>>>' + write(6,'(/,a)') ' <<<+- thermal_'//THERMAL_isothermal_label//' init -+>>>'; flush(6) - initializeInstances: do homog = 1, material_Nhomogenization - - if (thermal_type(homog) /= THERMAL_isothermal_ID) cycle - NofMyHomog = count(material_homogenizationAt == homog) - thermalState(homog)%sizeState = 0 - allocate(thermalState(homog)%state0 (0,NofMyHomog), source=0.0_pReal) - allocate(thermalState(homog)%subState0(0,NofMyHomog), source=0.0_pReal) - allocate(thermalState(homog)%state (0,NofMyHomog), source=0.0_pReal) + do h = 1, size(config_homogenization) + if (thermal_type(h) /= THERMAL_isothermal_ID) cycle + + NofMyHomog = count(material_homogenizationAt == h) + thermalState(h)%sizeState = 0 + allocate(thermalState(h)%state0 (0,NofMyHomog)) + allocate(thermalState(h)%subState0(0,NofMyHomog)) + allocate(thermalState(h)%state (0,NofMyHomog)) - deallocate(temperature (homog)%p) - allocate (temperature (homog)%p(1), source=thermal_initialT(homog)) - deallocate(temperatureRate(homog)%p) - allocate (temperatureRate(homog)%p(1), source=0.0_pReal) + deallocate(temperature (h)%p) + allocate (temperature (h)%p(1), source=thermal_initialT(h)) + deallocate(temperatureRate(h)%p) + allocate (temperatureRate(h)%p(1)) - enddo initializeInstances + enddo end subroutine thermal_isothermal_init
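As a closing note on the system_routines changes earlier in this patch: getCWD and getHostName now return deferred-length allocatable strings instead of fixed character(len=1024) results, shrinking the result at the first C null character. A generic sketch of that C-string-to-Fortran-string conversion pattern; the routine name is invented for illustration and is not added by this patch:

  function c_to_f_string(charArray) result(str)                ! illustration only
    use, intrinsic :: ISO_C_Binding, only: C_CHAR, C_NULL_CHAR
    character(kind=C_CHAR), dimension(:), intent(in) :: charArray
    character(len=:), allocatable :: str
    integer :: i
    allocate(character(len=size(charArray))::str)              ! start at the maximum possible length
    do i = 1, len(str)
      if (charArray(i) /= C_NULL_CHAR) then
        str(i:i) = charArray(i)
      else
        str = str(:i-1)                                        ! shrink to the actual length
        exit
      endif
    enddo
  end function c_to_f_string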