Merge remote-tracking branch 'origin/development' into initial-eigenstrain
This commit is contained in: f4f0163d5e

@@ -0,0 +1,183 @@
name: Grid and Mesh Solver
on: [push]

env:
  HOMEBREW_NO_ANALYTICS: "ON"    # Make Homebrew installation a little quicker
  HOMEBREW_NO_AUTO_UPDATE: "ON"
  HOMEBREW_NO_BOTTLE_SOURCE_FALLBACK: "ON"
  HOMEBREW_NO_GITHUB_API: "ON"
  HOMEBREW_NO_INSTALL_CLEANUP: "ON"

jobs:

  gcc:

    runs-on: ${{ matrix.os }}

    strategy:
      matrix:
        os: [ubuntu-latest, macos-latest]
        gcc_v: [9, 10, 11]   # Version of GCC compilers

    env:
      GCC_V: ${{ matrix.gcc_v }}

    steps:

      - uses: actions/checkout@v2

      - name: GCC - Install (Linux)
        if: contains( matrix.os, 'ubuntu')
        run: |
          sudo add-apt-repository ppa:ubuntu-toolchain-r/test
          sudo apt-get update
          sudo apt-get install -y gcc-${GCC_V} gfortran-${GCC_V} g++-${GCC_V}
          sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-${GCC_V} 100 \
            --slave /usr/bin/gfortran gfortran /usr/bin/gfortran-${GCC_V} \
            --slave /usr/bin/g++ g++ /usr/bin/g++-${GCC_V} \
            --slave /usr/bin/gcov gcov /usr/bin/gcov-${GCC_V}

      - name: GCC - Install (macOS)
        if: contains( matrix.os, 'macos')
        run: |
          brew install gcc@${GCC_V} || brew upgrade gcc@${GCC_V} || true
          brew link gcc@${GCC_V}

      - name: PETSc - Cache download
        id: petsc-download
        uses: actions/cache@v2
        with:
          path: download
          key: petsc-3.16.0.tar.gz

      - name: PETSc - Download
        if: steps.petsc-download.outputs.cache-hit != 'true'
        run: |
          wget -q https://ftp.mcs.anl.gov/pub/petsc/release-snapshots/petsc-3.16.0.tar.gz -P download

      - name: PETSc - Prepare
        run: |
          tar -xf download/petsc-3.16.0.tar.gz -C .
          export PETSC_DIR=${PWD}/petsc-3.16.0
          export PETSC_ARCH=gcc${GCC_V}
          printenv >> $GITHUB_ENV

      - name: PETSc - Cache installation
        id: petsc-install
        uses: actions/cache@v2
        with:
          path: petsc-3.16.0
          key: petsc-3.16.0-${{ matrix.os }}-gcc${{ matrix.gcc_v }}-${{ hashFiles('**/petscversion.h') }}

      - name: PETSc - Install (Linux)
        if: contains( matrix.os, 'ubuntu')
        run: |
          cd petsc-3.16.0
          ./configure --with-fc=gfortran --with-cc=gcc --with-cxx=g++ \
            --download-mpich --download-fftw --download-hdf5 --download-hdf5-fortran-bindings=1 --download-zlib \
            --with-mpi-f90module-visibility=0
          make all

      - name: PETSc - Install (macOS)
        if: contains( matrix.os, 'macos')
        run: |
          cd petsc-3.16.0
          ./configure --with-fc=gfortran-${GCC_V} --with-cc=gcc-${GCC_V} --with-cxx=g++-${GCC_V} \
            --download-openmpi --download-fftw --download-hdf5 --download-hdf5-fortran-bindings=1 --download-zlib
          make all

      - name: DAMASK - Compile
        run: |
          cmake -B build/grid -DDAMASK_SOLVER=grid -DCMAKE_INSTALL_PREFIX=${PWD}
          cmake --build build/grid --parallel
          cmake --install build/grid
          cmake -B build/mesh -DDAMASK_SOLVER=mesh -DCMAKE_INSTALL_PREFIX=${PWD}
          cmake --build build/mesh --parallel
          cmake --install build/mesh

      - name: DAMASK - Run
        run: |
          ./bin/DAMASK_grid -l tensionX.yaml -g 20grains16x16x16.vti -w examples/grid

  intel:

    runs-on: [ubuntu-latest]

    strategy:
      matrix:
        intel_v: [classic, llvm]   # Variant of Intel compilers

    env:
      INTEL_V: ${{ matrix.intel_v }}

    steps:
      - uses: actions/checkout@v2

      - name: Intel - Install
        run: |
          wget -q https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2023.PUB
          sudo apt-key add GPG-PUB-KEY-INTEL-SW-PRODUCTS-2023.PUB
          rm GPG-PUB-KEY-INTEL-SW-PRODUCTS-2023.PUB
          echo "deb https://apt.repos.intel.com/oneapi all main" | sudo tee /etc/apt/sources.list.d/oneAPI.list
          sudo apt-get update
          sudo apt-get install \
            intel-basekit \
            intel-oneapi-compiler-dpcpp-cpp-and-cpp-classic intel-oneapi-compiler-fortran \
            intel-oneapi-openmp intel-oneapi-mkl-devel
          source /opt/intel/oneapi/setvars.sh
          printenv >> $GITHUB_ENV

      - name: PETSc - Cache download
        id: petsc-download
        uses: actions/cache@v2
        with:
          path: download
          key: petsc-3.16.0.tar.gz

      - name: PETSc - Download
        if: steps.petsc-download.outputs.cache-hit != 'true'
        run: |
          wget -q https://ftp.mcs.anl.gov/pub/petsc/release-snapshots/petsc-3.16.0.tar.gz -P download

      - name: PETSc - Prepare
        run: |
          tar -xf download/petsc-3.16.0.tar.gz -C .
          export PETSC_DIR=${PWD}/petsc-3.16.0
          export PETSC_ARCH=intel-${INTEL_V}
          printenv >> $GITHUB_ENV

      - name: PETSc - Cache installation
        id: petsc-install
        uses: actions/cache@v2
        with:
          path: petsc-3.16.0
          key: petsc-3.16.0-intel-${{ matrix.intel_v }}-${{ hashFiles('**/petscversion.h') }}

      - name: PETSc - Install (classic)
        if: contains( matrix.intel_v, 'classic')
        run: |
          cd petsc-3.16.0
          ./configure --with-fc=mpiifort --with-cc=mpiicc --with-cxx=mpiicpc \
            --download-fftw --download-hdf5 --download-hdf5-fortran-bindings=1 --download-zlib
          make all

      - name: PETSc - Install (LLVM)
        if: contains( matrix.intel_v, 'llvm')
        run: |
          cd petsc-3.16.0
          ./configure --with-fc=mpiifort --with-cc="mpiicc -cc=icx" --with-cxx="mpiicpc -cxx=icpx" \
            --download-fftw --download-hdf5 --download-hdf5-fortran-bindings=1 --download-zlib
          make all

      - name: DAMASK - Compile
        run: |
          cmake -B build/grid -DDAMASK_SOLVER=grid -DCMAKE_INSTALL_PREFIX=${PWD}
          cmake --build build/grid --parallel
          cmake --install build/grid
          cmake -B build/mesh -DDAMASK_SOLVER=mesh -DCMAKE_INSTALL_PREFIX=${PWD}
          cmake --build build/mesh --parallel
          cmake --install build/mesh

      - name: DAMASK - Run
        run: |
          ./bin/DAMASK_grid -l tensionX.yaml -g 20grains16x16x16.vti -w examples/grid
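Not part of the commit: the gcc job above can be replayed on a local Linux machine. A minimal sketch, assuming GCC/gfortran 10 on PATH and a DAMASK checkout as working directory; every command is taken from the workflow steps.

  # Sketch only: local replay of the CI steps (GCC 10 assumed).
  wget -q https://ftp.mcs.anl.gov/pub/petsc/release-snapshots/petsc-3.16.0.tar.gz -P download
  tar -xf download/petsc-3.16.0.tar.gz -C .
  export PETSC_DIR=${PWD}/petsc-3.16.0
  export PETSC_ARCH=gcc10
  ( cd petsc-3.16.0 && \
    ./configure --with-fc=gfortran --with-cc=gcc --with-cxx=g++ \
      --download-mpich --download-fftw --download-hdf5 --download-hdf5-fortran-bindings=1 --download-zlib \
      --with-mpi-f90module-visibility=0 && \
    make all )
  cmake -B build/grid -DDAMASK_SOLVER=grid -DCMAKE_INSTALL_PREFIX=${PWD}
  cmake --build build/grid --parallel
  cmake --install build/grid
  ./bin/DAMASK_grid -l tensionX.yaml -g 20grains16x16x16.vti -w examples/grid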
@@ -0,0 +1,54 @@
name: Processing Tools
on: [push]

jobs:

  pip:

    runs-on: ${{ matrix.os }}

    strategy:
      matrix:
        python-version: ['3.7', '3.8', '3.9']   #, '3.10']
        os: [ubuntu-latest, macos-latest]

    steps:
      - uses: actions/checkout@v2

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install pytest pandas scipy h5py vtk matplotlib pyyaml

      - name: Install and run unit tests
        run: |
          python -m pip install ./python --no-deps -vv --use-feature=in-tree-build
          COLUMNS=256 pytest python

  apt:

    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v2

      - name: Install pytest
        run: |
          python -m pip install --upgrade pip
          pip install pytest

      - name: Install dependencies
        run: >
          sudo apt-get update &&
          sudo apt-get install python3-pip python3-pytest python3-pandas python3-scipy
          python3-h5py python3-vtk7 python3-matplotlib python3-yaml -y

      - name: Run unit tests
        run: |
          export PYTHONPATH=${PWD}/python
          COLUMNS=256 python -m pytest python
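Not part of the commit: for local iteration on the Python tools, an editable install avoids reinstalling after every change. A minimal sketch, assuming a Python 3 environment that already has the packages listed in the pip job:

  # Sketch only: editable install of the damask Python package, then the same test invocation.
  python -m pip install --upgrade pip
  python -m pip install -e ./python --no-deps
  COLUMNS=256 python -m pytest python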
@@ -46,29 +46,18 @@ variables:
# Names of module files to load
# ===============================================================================================
# ++++++++++++ Compiler +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
IntelCompiler19_1: "Compiler/Intel/19.1.2 Libraries/IMKL/2020"
GNUCompiler10: "Compiler/GNU/10"
# ------------ Defaults ----------------------------------------------
IntelCompiler: "$IntelCompiler19_1"
GNUCompiler: "$GNUCompiler10"
IntelCompiler: "Compiler/Intel/19.1.2 Libraries/IMKL/2020"
GNUCompiler: "Compiler/GNU/10"
# ++++++++++++ MPI ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
IMPI2020Intel19_1: "MPI/Intel/19.1.2/IntelMPI/2019"
OMPI4_0GNU10: "MPI/GNU/10/OpenMPI/4.0.5"
# ------------ Defaults ----------------------------------------------
MPI_Intel: "$IMPI2020Intel19_1"
MPI_GNU: "$OMPI4_0GNU10"
MPI_Intel: "MPI/Intel/19.1.2/IntelMPI/2019"
MPI_GNU: "MPI/GNU/10/OpenMPI/4.1.1"
# ++++++++++++ PETSc ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
PETSc3_14_2IMPI2020Intel19_1: "Libraries/PETSc/3.14.2/Intel-19.1.2-IntelMPI-2019"
PETSc3_14_2OMPI4_0GNU10: "Libraries/PETSc/3.14.2/GNU-10-OpenMPI-4.0.5"
# ------------ Defaults ----------------------------------------------
PETSc_Intel: "$PETSc3_14_2IMPI2020Intel19_1"
PETSc_GNU: "$PETSc3_14_2OMPI4_0GNU10"
# ++++++++++++ commercial FEM ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
MSC2021: "FEM/MSC/2021.2"
# ------------ Defaults ----------------------------------------------
MSC: "$MSC2021"
IntelMarc: "$IntelCompiler19_1"
HDF5Marc: "HDF5/1.12.0/Intel-19.1.2"
PETSc_Intel: "Libraries/PETSc/3.16.0/Intel-19.1.2-IntelMPI-2019"
PETSc_GNU: "Libraries/PETSc/3.16.0/GNU-10-OpenMPI-4.1.1"
# ++++++++++++ MSC Marc +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
MSC: "FEM/MSC/2021.2"
IntelMarc: "Compiler/Intel/19.1.2 Libraries/IMKL/2020"
HDF5Marc: "HDF5/1.12.1/Intel-19.1.2"

###################################################################################################
checkout:

@@ -102,13 +91,6 @@ processing:
- master
- release

###################################################################################################
preprocessing_deprecated:
stage: python
script: PreProcessing/test.py
except:
- master
- release

###################################################################################################
compile_grid_Intel:

@@ -216,7 +198,7 @@ SpectralRuntime:
- cd $LOCAL_HOME/performance # location of old results
- git checkout . # undo any changes (i.e. run time data from non-development branch)
- cd $DAMASKROOT/PRIVATE/testing
- SpectralAll_runtime/test.py -d $LOCAL_HOME/performance
- ./runtime.py --results ${LOCAL_HOME}/performance --damask_root ${DAMASKROOT} --tag ${CI_COMMIT_SHA}
except:
- master
- release

@@ -226,16 +208,7 @@ source_distribution:
stage: deploy
script:
- cd $(mktemp -d)
- $DAMASKROOT/PRIVATE/releasing/deploy.sh $DAMASKROOT $CI_COMMIT_SHA
except:
- master
- release

library_documentation:
stage: deploy
script:
- cd $DAMASKROOT/PRIVATE/documenting/sphinx
- make html
- $DAMASKROOT/PRIVATE/releasing/tar.xz/create.sh $DAMASKROOT $CI_COMMIT_SHA
except:
- master
- release

@@ -246,9 +219,7 @@ backup_runtime_measurement:
script:
- cd $LOCAL_HOME/performance # location of new runtime results
- git commit -am"${CI_PIPELINE_ID}_${CI_COMMIT_SHA}"
- mkdir $BACKUP/${CI_PIPELINE_ID}_${CI_COMMIT_SHA}
- mv $LOCAL_HOME/performance/time.png $BACKUP/${CI_PIPELINE_ID}_${CI_COMMIT_SHA}/
- mv $LOCAL_HOME/performance/memory.png $BACKUP/${CI_PIPELINE_ID}_${CI_COMMIT_SHA}/
- git push
only:
- development

@@ -259,9 +230,10 @@ merge_into_master:
- cd $DAMASKROOT
- export TESTEDREV=$(git describe) # might be detached from development branch
- echo $TESTEDREV > python/damask/VERSION
- git add python/damask/VERSION
- >
  git diff-index --quiet HEAD ||
  git commit python/damask/VERSION -m "[skip ci] updated version information after successful test of $TESTEDREV"
  git commit -m "[skip ci] updated version information after successful test of $TESTEDREV"
- export UPDATEDREV=$(git describe) # tested state + 1 commit
- git checkout master
- git merge $UPDATEDREV -s recursive -X ours # conflicts occur only for inconsistent state
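Not part of the commit: the merge_into_master job relies on the "commit only if something actually changed" idiom. A minimal standalone sketch of that idiom:

  # Sketch only: git diff-index exits non-zero when the index differs from HEAD,
  # so the commit only runs when VERSION really changed.
  git add python/damask/VERSION
  git diff-index --quiet HEAD || git commit -m "[skip ci] updated version information"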
@@ -1,5 +1,5 @@
[submodule "PRIVATE"]
	path = PRIVATE
	url = ../PRIVATE.git
	branch = master
	branch = development
	shallow = true
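Not part of the commit: with branch = development and shallow = true in .gitmodules, the PRIVATE submodule can be fetched as a single-commit clone of that branch. A minimal sketch:

  # Sketch only: shallow checkout of the PRIVATE submodule as configured above.
  git submodule sync PRIVATE
  git submodule update --init --depth 1 PRIVATE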
CMakeLists.txt

@@ -1,116 +1,117 @@
cmake_minimum_required (VERSION 3.10.0)
include (FindPkgConfig REQUIRED)
cmake_minimum_required(VERSION 3.12.0)
include(FindPkgConfig REQUIRED)

if (DEFINED ENV{PETSC_DIR})
message ("PETSC_DIR:\n$ENV{PETSC_DIR}\n")
else ()
message (FATAL_ERROR "PETSc location (PETSC_DIR) is not defined")
endif ()
if(DEFINED ENV{PETSC_DIR})
message("PETSC_DIR:\n$ENV{PETSC_DIR}\n")
else()
message(FATAL_ERROR "PETSc location (PETSC_DIR) is not defined")
endif()

# Dummy project to determine compiler names and version
project (Prerequisites LANGUAGES)
project(Prerequisites LANGUAGES)
set(ENV{PKG_CONFIG_PATH} "$ENV{PETSC_DIR}/$ENV{PETSC_ARCH}/lib/pkgconfig")
pkg_check_modules (PETSC REQUIRED PETSc>=3.12.0 PETSc<3.16.0)
pkg_get_variable (CMAKE_Fortran_COMPILER PETSc fcompiler)
pkg_get_variable (CMAKE_C_COMPILER PETSc ccompiler)
pkg_check_modules(PETSC REQUIRED PETSc>=3.12.0 PETSc<3.17.0)
pkg_get_variable(CMAKE_Fortran_COMPILER PETSc fcompiler)
pkg_get_variable(CMAKE_C_COMPILER PETSc ccompiler)

# Solver determines name of project
string(TOUPPER "${DAMASK_SOLVER}" DAMASK_SOLVER)
if (DAMASK_SOLVER STREQUAL "GRID")
project (damask-grid HOMEPAGE_URL https://damask.mpie.de LANGUAGES Fortran C)
elseif (DAMASK_SOLVER STREQUAL "MESH")
project (damask-mesh HOMEPAGE_URL https://damask.mpie.de LANGUAGES Fortran C)
else ()
message (FATAL_ERROR "Invalid solver: DAMASK_SOLVER=${DAMASK_SOLVER}")
endif ()
add_definitions ("-D${DAMASK_SOLVER}")
if(DAMASK_SOLVER STREQUAL "GRID")
project(damask-grid HOMEPAGE_URL https://damask.mpie.de LANGUAGES Fortran C)
elseif(DAMASK_SOLVER STREQUAL "MESH")
project(damask-mesh HOMEPAGE_URL https://damask.mpie.de LANGUAGES Fortran C)
else()
message(FATAL_ERROR "Invalid solver: DAMASK_SOLVER=${DAMASK_SOLVER}")
endif()
add_definitions("-D${DAMASK_SOLVER}")

file (STRINGS ${PROJECT_SOURCE_DIR}/VERSION DAMASK_VERSION)
file(STRINGS ${PROJECT_SOURCE_DIR}/VERSION DAMASK_VERSION)

message ("\nBuilding ${CMAKE_PROJECT_NAME} ${DAMASK_VERSION}\n")
message("\nBuilding ${CMAKE_PROJECT_NAME} ${DAMASK_VERSION}\n")

add_definitions (-DPETSC)
add_definitions (-DDAMASKVERSION="${DAMASK_VERSION}")
add_definitions(-DPETSC)
add_definitions(-DDAMASKVERSION="${DAMASK_VERSION}")
add_definitions(-DCMAKE_SYSTEM="${CMAKE_SYSTEM}")

if (CMAKE_BUILD_TYPE STREQUAL "")
set (CMAKE_BUILD_TYPE "RELEASE")
endif ()
if(CMAKE_BUILD_TYPE STREQUAL "")
set(CMAKE_BUILD_TYPE "RELEASE")
endif()
string(TOUPPER "${CMAKE_BUILD_TYPE}" CMAKE_BUILD_TYPE)

# Predefined sets for OPTIMIZATION/OPENMP based on BUILD_TYPE
if (CMAKE_BUILD_TYPE STREQUAL "DEBUG" OR CMAKE_BUILD_TYPE STREQUAL "SYNTAXONLY")
set (DEBUG_FLAGS "${DEBUG_FLAGS} -DDEBUG")
set (PARALLEL "OFF")
set (OPTI "OFF")
elseif (CMAKE_BUILD_TYPE STREQUAL "RELEASE")
set (PARALLEL "ON")
set (OPTI "DEFENSIVE")
elseif (CMAKE_BUILD_TYPE STREQUAL "DEBUGRELEASE")
set (DEBUG_FLAGS "${DEBUG_FLAGS} -DDEBUG")
set (PARALLEL "ON")
set (OPTI "DEFENSIVE")
elseif (CMAKE_BUILD_TYPE STREQUAL "PERFORMANCE")
set (PARALLEL "ON")
set (OPTI "AGGRESSIVE")
else ()
message (FATAL_ERROR "Invalid build type: CMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}")
endif ()
if(CMAKE_BUILD_TYPE STREQUAL "DEBUG" OR CMAKE_BUILD_TYPE STREQUAL "SYNTAXONLY")
set(DEBUG_FLAGS "${DEBUG_FLAGS} -DDEBUG")
set(PARALLEL "OFF")
set(OPTI "OFF")
elseif(CMAKE_BUILD_TYPE STREQUAL "RELEASE")
set(PARALLEL "ON")
set(OPTI "DEFENSIVE")
elseif(CMAKE_BUILD_TYPE STREQUAL "DEBUGRELEASE")
set(DEBUG_FLAGS "${DEBUG_FLAGS} -DDEBUG")
set(PARALLEL "ON")
set(OPTI "DEFENSIVE")
elseif(CMAKE_BUILD_TYPE STREQUAL "PERFORMANCE")
set(PARALLEL "ON")
set(OPTI "AGGRESSIVE")
else()
message(FATAL_ERROR "Invalid build type: CMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}")
endif()

# $OPTIMIZATION takes precedence over $BUILD_TYPE defaults
if (OPTIMIZATION STREQUAL "" OR NOT DEFINED OPTIMIZATION)
set (OPTIMIZATION "${OPTI}")
else ()
set (OPTIMIZATION "${OPTIMIZATION}")
endif ()
if(OPTIMIZATION STREQUAL "" OR NOT DEFINED OPTIMIZATION)
set(OPTIMIZATION "${OPTI}")
else()
set(OPTIMIZATION "${OPTIMIZATION}")
endif()

# $OPENMP takes precedence over $BUILD_TYPE defaults
if (OPENMP STREQUAL "" OR NOT DEFINED OPENMP)
set (OPENMP "${PARALLEL}")
else ()
if(OPENMP STREQUAL "" OR NOT DEFINED OPENMP)
set(OPENMP "${PARALLEL}")
else()
set(OPENMP "${OPENMP}")
endif ()
endif()

# syntax check only (mainly for pre-receive hook)
if (CMAKE_BUILD_TYPE STREQUAL "SYNTAXONLY")
set (BUILDCMD_POST "${BUILDCMD_POST} -fsyntax-only")
endif ()
# syntax check only(mainly for pre-receive hook)
if(CMAKE_BUILD_TYPE STREQUAL "SYNTAXONLY")
set(BUILDCMD_POST "${BUILDCMD_POST} -fsyntax-only")
endif()


list(APPEND CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake)
if (CMAKE_Fortran_COMPILER_ID STREQUAL "Intel")
include (Compiler-Intel)
include(Compiler-Intel)
elseif(CMAKE_Fortran_COMPILER_ID STREQUAL "GNU")
include (Compiler-GNU)
else ()
message (FATAL_ERROR "Compiler type (CMAKE_Fortran_COMPILER_ID) not recognized")
endif ()
include(Compiler-GNU)
else()
message(FATAL_ERROR "Compiler type(CMAKE_Fortran_COMPILER_ID) not recognized")
endif()

file (STRINGS "$ENV{PETSC_DIR}/$ENV{PETSC_ARCH}/lib/petsc/conf/petscvariables" PETSC_EXTERNAL_LIB REGEX "PETSC_WITH_EXTERNAL_LIB = .*$?")
string (REGEX MATCHALL "-[lLW]([^\" ]+)" PETSC_EXTERNAL_LIB "${PETSC_EXTERNAL_LIB}")
list (REMOVE_DUPLICATES PETSC_EXTERNAL_LIB)
string (REPLACE ";" " " PETSC_EXTERNAL_LIB "${PETSC_EXTERNAL_LIB}")
message ("PETSC_EXTERNAL_LIB:\n${PETSC_EXTERNAL_LIB}\n")
file(STRINGS "$ENV{PETSC_DIR}/$ENV{PETSC_ARCH}/lib/petsc/conf/petscvariables" PETSC_EXTERNAL_LIB REGEX "PETSC_WITH_EXTERNAL_LIB = .*$?")
string(REGEX MATCHALL "-[lLW]([^\" ]+)" PETSC_EXTERNAL_LIB "${PETSC_EXTERNAL_LIB}")
list(REMOVE_DUPLICATES PETSC_EXTERNAL_LIB)
string(REPLACE ";" " " PETSC_EXTERNAL_LIB "${PETSC_EXTERNAL_LIB}")
message("PETSC_EXTERNAL_LIB:\n${PETSC_EXTERNAL_LIB}\n")

file (STRINGS "$ENV{PETSC_DIR}/$ENV{PETSC_ARCH}/lib/petsc/conf/petscvariables" PETSC_INCLUDES REGEX "PETSC_FC_INCLUDES = .*$?")
string (REGEX MATCHALL "-I([^\" ]+)" PETSC_INCLUDES "${PETSC_INCLUDES}")
list (REMOVE_DUPLICATES PETSC_INCLUDES)
string (REPLACE ";" " " PETSC_INCLUDES "${PETSC_INCLUDES}")
message ("PETSC_INCLUDES:\n${PETSC_INCLUDES}\n")
file(STRINGS "$ENV{PETSC_DIR}/$ENV{PETSC_ARCH}/lib/petsc/conf/petscvariables" PETSC_INCLUDES REGEX "PETSC_FC_INCLUDES = .*$?")
string(REGEX MATCHALL "-I([^\" ]+)" PETSC_INCLUDES "${PETSC_INCLUDES}")
list(REMOVE_DUPLICATES PETSC_INCLUDES)
string(REPLACE ";" " " PETSC_INCLUDES "${PETSC_INCLUDES}")
message("PETSC_INCLUDES:\n${PETSC_INCLUDES}\n")

set (CMAKE_Fortran_FLAGS_${CMAKE_BUILD_TYPE} "${BUILDCMD_PRE} ${OPENMP_FLAGS} ${STANDARD_CHECK} ${OPTIMIZATION_FLAGS} ${COMPILE_FLAGS} ${PRECISION_FLAGS}")
set (CMAKE_Fortran_LINK_EXECUTABLE "${BUILDCMD_PRE} ${CMAKE_Fortran_COMPILER} ${OPENMP_FLAGS} ${OPTIMIZATION_FLAGS} ${LINKER_FLAGS}")
set(CMAKE_Fortran_FLAGS_${CMAKE_BUILD_TYPE} "${BUILDCMD_PRE} ${OPENMP_FLAGS} ${STANDARD_CHECK} ${OPTIMIZATION_FLAGS} ${COMPILE_FLAGS} ${PRECISION_FLAGS}")
set(CMAKE_Fortran_LINK_EXECUTABLE "${BUILDCMD_PRE} ${CMAKE_Fortran_COMPILER} ${OPENMP_FLAGS} ${OPTIMIZATION_FLAGS} ${LINKER_FLAGS}")

if (CMAKE_BUILD_TYPE STREQUAL "DEBUG")
set (CMAKE_Fortran_FLAGS_${CMAKE_BUILD_TYPE} "${CMAKE_Fortran_FLAGS_${CMAKE_BUILD_TYPE}} ${DEBUG_FLAGS}")
set (CMAKE_Fortran_LINK_EXECUTABLE "${CMAKE_Fortran_LINK_EXECUTABLE} ${DEBUG_FLAGS}")
endif ()
if(CMAKE_BUILD_TYPE STREQUAL "DEBUG")
set(CMAKE_Fortran_FLAGS_${CMAKE_BUILD_TYPE} "${CMAKE_Fortran_FLAGS_${CMAKE_BUILD_TYPE}} ${DEBUG_FLAGS}")
set(CMAKE_Fortran_LINK_EXECUTABLE "${CMAKE_Fortran_LINK_EXECUTABLE} ${DEBUG_FLAGS}")
endif()

set (CMAKE_Fortran_FLAGS_${CMAKE_BUILD_TYPE} "${CMAKE_Fortran_FLAGS_${CMAKE_BUILD_TYPE}} ${PETSC_INCLUDES} ${BUILDCMD_POST}")
set (CMAKE_Fortran_LINK_EXECUTABLE "${CMAKE_Fortran_LINK_EXECUTABLE} <OBJECTS> -o <TARGET> <LINK_LIBRARIES> ${PETSC_EXTERNAL_LIB} -lz ${BUILDCMD_POST}")
set(CMAKE_Fortran_FLAGS_${CMAKE_BUILD_TYPE} "${CMAKE_Fortran_FLAGS_${CMAKE_BUILD_TYPE}} ${PETSC_INCLUDES} ${BUILDCMD_POST}")
set(CMAKE_Fortran_LINK_EXECUTABLE "${CMAKE_Fortran_LINK_EXECUTABLE} <OBJECTS> -o <TARGET> <LINK_LIBRARIES> ${PETSC_EXTERNAL_LIB} -lz ${BUILDCMD_POST}")

message ("Fortran Compiler Flags:\n${CMAKE_Fortran_FLAGS_${CMAKE_BUILD_TYPE}}\n")
message ("C Compiler Flags:\n${CMAKE_C_FLAGS_${CMAKE_BUILD_TYPE}}\n")
message ("Fortran Linker Command:\n${CMAKE_Fortran_LINK_EXECUTABLE}\n")
message("Fortran Compiler Flags:\n${CMAKE_Fortran_FLAGS_${CMAKE_BUILD_TYPE}}\n")
message("C Compiler Flags:\n${CMAKE_C_FLAGS_${CMAKE_BUILD_TYPE}}\n")
message("Fortran Linker Command:\n${CMAKE_Fortran_LINK_EXECUTABLE}\n")

# location of code
add_subdirectory (src)
add_subdirectory(src)
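Not part of the commit: pkg_check_modules resolves PETSc via pkg-config, so the lookup can be sanity-checked outside CMake. A minimal sketch, assuming PETSC_DIR and PETSC_ARCH are exported and that the PETSc installation ships a PETSc.pc file, as the call above implies:

  # Sketch only: confirm pkg-config sees the same PETSc that CMake will pick up.
  export PKG_CONFIG_PATH=$PETSC_DIR/$PETSC_ARCH/lib/pkgconfig
  pkg-config --modversion PETSc          # must fall in the accepted range (>=3.12.0, <3.17.0)
  pkg-config --variable=fcompiler PETSc  # Fortran compiler recorded by PETSc's configure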
@@ -74,7 +74,7 @@ echo PETSC_ARCH: $PETSC_ARCH
echo PETSC_DIR: $PETSC_DIR
echo
echo $PETSC_DIR/$PETSC_ARCH/lib:
/s $PETSC_DIR/$PETSC_ARCH/lib
ls $PETSC_DIR/$PETSC_ARCH/lib
echo
echo $PETSC_DIR/$PETSC_ARCH/lib/petsc/conf/petscvariables:
cat $PETSC_DIR/$PETSC_ARCH/lib/petsc/conf/petscvariables

@@ -103,11 +103,16 @@ for EXECUTABLE in gcc g++ gfortran ;do
  getDetails $EXECUTABLE '--version'
done

firstLevel "Intel Compiler Suite"
firstLevel "Intel Compiler Suite (classic)"
for EXECUTABLE in icc icpc ifort ;do
  getDetails $EXECUTABLE '--version'
done

firstLevel "Intel Compiler Suite (LLVM)"
for EXECUTABLE in icx icpx ifx ;do
  getDetails $EXECUTABLE '--version'
done

firstLevel "MPI Wrappers"
for EXECUTABLE in mpicc mpiCC mpiicc mpic++ mpiicpc mpicxx mpifort mpiifort mpif90 mpif77; do
  getDetails $EXECUTABLE '-show'
PRIVATE

@@ -1 +1 @@
Subproject commit 9699f20f21f8a5f532c735a1aa9daeba395da94d
Subproject commit fabe69749425e8a7aceb3b7c2758b40d97d8b809
README

@@ -10,4 +10,4 @@ Germany

damask@mpie.de
https://damask.mpie.de
https://magit1.mpie.de
https://git.damask.mpie.de
@@ -12,9 +12,9 @@ endif ()
if (OPTIMIZATION STREQUAL "OFF")
  set (OPTIMIZATION_FLAGS "-O0")
elseif (OPTIMIZATION STREQUAL "DEFENSIVE")
  set (OPTIMIZATION_FLAGS "-O2")
  set (OPTIMIZATION_FLAGS "-O2 -mtune=generic")
elseif (OPTIMIZATION STREQUAL "AGGRESSIVE")
  set (OPTIMIZATION_FLAGS "-O3 -ffast-math -funroll-loops -ftree-vectorize")
  set (OPTIMIZATION_FLAGS "-O3 -march=native -ffast-math -funroll-loops -ftree-vectorize")
endif ()

set (STANDARD_CHECK "-std=f2018 -pedantic-errors" )

@@ -25,7 +25,7 @@ set (LINKER_FLAGS "${LINKER_FLAGS},-undefined,dynamic_lookup" )

#------------------------------------------------------------------------------------------------
# Fine tuning compilation options
set (COMPILE_FLAGS "${COMPILE_FLAGS} -xf95-cpp-input")
set (COMPILE_FLAGS "${COMPILE_FLAGS} -cpp")
# preprocessor

set (COMPILE_FLAGS "${COMPILE_FLAGS} -fPIC -fPIE")
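Not part of the commit: the DEFENSIVE/AGGRESSIVE split above trades portable binaries (-mtune=generic) against host-specific ones (-march=native). To inspect what -march=native resolves to on a given machine, something along these lines can be used (a sketch; the exact option listing depends on the GCC build):

  # Sketch only: show the target options implied by -march=native on this host.
  gcc -march=native -Q --help=target | grep -E 'march|mtune'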
@@ -60,7 +60,7 @@ if [ ! -z "$PS1" ]; then
echo -n "heap size "
[[ "$(ulimit -d)" == "unlimited" ]] \
&& echo "unlimited" \
|| echo $(python -c \
|| echo $(python3 -c \
"import math; \
size=$(( 1024*$(ulimit -d) )); \
print('{:.4g} {}'.format(size / (1 << ((int(math.log(size,2) / 10) if size else 0) * 10)), \

@@ -68,7 +68,7 @@ if [ ! -z "$PS1" ]; then
echo -n "stack size "
[[ "$(ulimit -s)" == "unlimited" ]] \
&& echo "unlimited" \
|| echo $(python -c \
|| echo $(python3 -c \
"import math; \
size=$(( 1024*$(ulimit -s) )); \
print('{:.4g} {}'.format(size / (1 << ((int(math.log(size,2) / 10) if size else 0) * 10)), \


@@ -50,7 +50,7 @@ if [ ! -z "$PS1" ]; then
echo -n "heap size "
[[ "$(ulimit -d)" == "unlimited" ]] \
&& echo "unlimited" \
|| echo $(python -c \
|| echo $(python3 -c \
"import math; \
size=$(( 1024*$(ulimit -d) )); \
print('{:.4g} {}'.format(size / (1 << ((int(math.log(size,2) / 10) if size else 0) * 10)), \

@@ -58,7 +58,7 @@ if [ ! -z "$PS1" ]; then
echo -n "stack size "
[[ "$(ulimit -s)" == "unlimited" ]] \
&& echo "unlimited" \
|| echo $(python -c \
|| echo $(python3 -c \
"import math; \
size=$(( 1024*$(ulimit -s) )); \
print('{:.4g} {}'.format(size / (1 << ((int(math.log(size,2) / 10) if size else 0) * 10)), \
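Not part of the commit: the heap/stack report above formats byte counts in binary units, and the tail of the print() call is cut off by the hunk. A standalone sketch of the same idea (the unit list is an assumption, not taken from the file):

  # Sketch only: pretty-print a ulimit value in binary units, as the snippet above does.
  # Guard against 'unlimited' before using this, as the original script does.
  size=$(( 1024 * $(ulimit -s) ))   # ulimit reports KiB; convert to bytes
  python3 -c "import math
size = $size
exp  = int(math.log(size, 2) / 10) if size else 0
print('{:.4g} {}'.format(size / (1 << (exp * 10)), ['bytes','KiB','MiB','GiB','TiB'][exp]))"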
|
@@ -24,8 +24,403 @@ phase:
|
|||
xi_inf_sl: [63.e+6]
|
||||
|
||||
material:
|
||||
- homogenization: SX
|
||||
constituents:
|
||||
- phase: Aluminum
|
||||
- constituents:
|
||||
- O: [0.31638628373524325, 0.4606971763404367, -0.25136671882289513, 0.7902357900300152]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
O: [0.9330127018922194, 0.25, 0.06698729810778066, 0.25]
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.17926942151539643, -0.8129164299504208, -0.5453207208299451, -0.09825814907531387]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.11156578191572807, -0.4904242197947781, -0.8051447086471791, 0.3142915192646224]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.050875167730042026, -0.4676541613791777, -0.3231762099798638, -0.8211385022980162]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.5736388730744205, 0.030011807121272376, 0.1793738934298104, -0.7986630961094017]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.3524596363123286, 0.8260984090345517, 0.4361208241824434, 0.05596650851705724]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.1475195192105493, -0.2681290123533707, -0.8885681859138441, -0.3417475722928759]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.3878621882257487, -0.4133490094014299, -0.5962575110690821, 0.5684914246189594]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.44765430421636465, 0.1688301032261743, 0.5590033642770855, 0.6772128608407416]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.3154783988579777, -0.893128078628195, 0.126738882437621, 0.294504449369408]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.01177697706652547, -0.8423157700616575, 0.4660610852557732, -0.2704672089677829]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.6023412506865766, 0.33897759335409144, -0.587639839755177, 0.42066450724741294]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.1457746940340264, 0.33010317541439926, 0.7204157567665017, 0.592269169857055]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.10885124011262147, 0.38223867611365064, -0.5398450127934588, -0.7420325896959369]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.3329465044039982, 0.005520408719519113, 0.4218135102429913, 0.843320527954356]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.2773927866814888, 0.282254093261412, -0.9094550709020325, -0.12758268983226237]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.22508894523834683, 0.3481870269276267, -0.6119961977184769, -0.673469683793499]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.7972172843203117, -0.42474780085647684, -0.2632560619322889, 0.3387183979420616]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.1856727896017474, 0.5407410320424911, 0.8064864929236231, 0.15067942194898976]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.35572128347826554, -0.21063165012009927, 0.7164748021511587, -0.561925737380588]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.951689791582057, -0.18424932026485139, 0.24330606914616992, 0.03377699360630425]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.7518830912041409, -0.6350086418080308, 0.03666967302842633, -0.17346808660504054]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.45893176203555247, -0.10107069709110554, -0.8532524342056044, -0.22611199770616278]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.6863555826979106, 0.7132418012703317, -0.12068837363804946, -0.07507638436064179]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.5031196491913161, 0.7534072343296819, -0.418000862383123, 0.0672565008327974]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.5354367280219723, 0.1489275079865293, -0.5066200327507001, 0.6591390218047527]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.7481103019200436, -0.6384221488364733, 0.14256832672505068, -0.11145585785834745]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.02565565294273664, 0.5032076562445432, -0.10524431346049941, -0.8573490984734187]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.09677483527453862, 0.42211465960588607, 0.39550590793620377, -0.8099561236208737]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.5682732488475628, -0.018709507415836685, 0.5596636589678777, 0.6029030252098423]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.520651625785196, 0.5331055583395244, -0.1753591314180096, 0.6434046341634921]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.7323990700263593, 0.5135195038892626, -0.28947480564659256, -0.34072519461542217]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.2379144266672964, 0.9451799147482833, -0.022386636015155, 0.2225544716870999]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.6929401940723844, 0.6921675787458842, 0.04806193711709397, 0.196030560302569]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.114679536930033, -0.02786128070118354, -0.2458076367959361, -0.9621075607955694]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.5536359330028813, 0.3398797644491804, 0.6552731815916284, -0.3854686198548249]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.9501825140749718, -0.17348508997627773, -0.023693401768133945, 0.2578657329207251]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.5269902873788485, -0.38600076480335854, 0.7335400729523406, -0.18762624537269798]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.16353281877366127, -0.8502634877258836, 0.2921682908502614, -0.4061363175656595]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.2506727462997232, 0.38078481221063915, -0.8812340677720474, -0.12487040822466101]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.53797656304294, 0.04453446570800863, -0.73466834243862, -0.41092618023082744]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.5147765687773858, -0.012009003671292302, 0.8506194553313438, -0.10633630744205957]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.45770418863630163, 0.46122438992768267, -0.5413625109006552, 0.5335780820385708]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.12028049877433303, 0.5509971760365859, 0.5424247126754088, 0.6226637493007807]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.1208751835697386, -0.11646202949704858, 0.1842663733100575, -0.9684377570859003]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.24961872240151486, 0.29610154171987574, -0.885460753706652, 0.2568533123457741]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.4494732523235404, 0.8130366919200476, -0.22342614248113637, -0.2950015116798619]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.3566054427707518, 0.6009999195769142, 0.6204413194609187, 0.35592727341468655]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.041340362458678996, -0.7766310224212297, -0.446615292586641, 0.4423460295656439]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.3143906305984617, 0.637462215667549, -0.06250872515926072, -0.7006376483369167]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.15960096982054908, 0.0154505822997579, -0.6938445646590042, 0.7020459600568152]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.10937013835716297, 0.005361991476876583, 0.07892487169799395, -0.9908482661389645]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.5566919673747123, 0.12234937037008195, 0.03295758799282205, 0.8209984667611823]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.03242357741517866, -0.1003019572329824, -0.25727891603352054, -0.9605705535604615]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.07439180141351488, 0.222039714860086, 0.9710557663706901, 0.04706297382800665]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.386406745139845, -0.3485065110202708, 0.0213726326755233, -0.8536839284298527]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.9148730990764601, 0.0859699947503276, -0.2653710064737939, 0.29188114278237975]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.05351534255347124, -0.47484306303499174, -0.4424245873225889, -0.7588943655946909]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.1850990827256794, -0.7850365894615515, 0.5790701003651098, 0.11888524569444774]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.2293488176462691, 0.8155102586104775, 0.36252844203460916, -0.3884781418063178]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.6824605981608404, -0.07237666863890763, 0.6154543161215582, 0.38758887311431783]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.7240310183899645, -0.1492281437355668, -0.5793271457602446, 0.3433512832533411]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.4130306435878869, -0.08991141120131982, -0.8934593257803132, 0.15182904455126872]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.9217038188689652, -0.2551303946186847, 0.2760910380891145, 0.09562578475994342]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.49248590418849286, 0.7555385277692129, 0.01782522408264428, -0.4316264920256593]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.288763491302084, 0.26595000602129804, -0.8721581902166229, 0.29193549223478765]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.18741690177717063, -0.959586229086916, -0.01605960190298382, -0.2093114021302156]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.6141655879532604, 0.44351951295059505, 0.35530824864623534, 0.5475829806066271]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.2814752417920179, 0.7638077896809081, -0.5255180392616715, 0.24738661865884956]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.16603578951305883, -0.6913575628365758, -0.6767106315334661, -0.1911009107226411]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.7317089905438434, -0.4610621713555634, -0.01149547471101715, -0.5018879171322728]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.12911750380868442, -0.775968622433847, -0.5524437669202766, -0.27569412688569794]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.18347252569039887, -0.3000323311682173, -0.9120086722006003, -0.21108911483411225]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.2453327661435727, -0.041601186144862225, -0.967732952958631, 0.039675016391321906]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.9885220121542625, 0.08409037295425872, -0.06115390693360882, -0.1096049284004023]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.3506668690363713, 0.8300197131399097, 0.3314704911076744, -0.2796915019878287]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.43424700094705143, -0.6863040633023977, -0.3990882505417852, -0.4256111407642043]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.09516090822469872, -0.09694359326006573, 0.7244026181255996, -0.6758603318174947]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.6299976256081414, -0.6188326478138776, 0.4105304204739873, 0.22718697056217957]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
- constituents:
|
||||
- O: [0.1624065072613073, -0.6001819140771016, 0.5096769212668724, -0.5946723739521216]
|
||||
phase: Aluminum
|
||||
v: 1.0
|
||||
homogenization: SX
|
||||
|
|
|
@@ -1,5 +1,5 @@
|
|||
title r-value
|
||||
$....MARC input file produced by Marc Mentat 2019 (64bit)
|
||||
title R-Wert
|
||||
$....MARC input file produced by Marc Mentat 2021.2 (64bit)
|
||||
$...................................
|
||||
$....input file using extended precision
|
||||
extended
|
||||
|
@@ -7,11 +7,11 @@ $...................................
|
|||
sizing 0 80 165 0
|
||||
alloc 25
|
||||
elements 7
|
||||
version 14 1 0 1
|
||||
version 15 1 0 1
|
||||
table 0 0 2 1 1 0 0 1
|
||||
processor 1 1 1 0
|
||||
$no list
|
||||
large stra 2 1 0 0 0 0 0
|
||||
large stra 2 1 0 0 0 0 0 0
|
||||
all points
|
||||
no echo 1 2 3 4
|
||||
state vars 2
|
||||
|
@@ -269,8 +269,6 @@ coordinates
|
|||
163 3.999999999999999+1 9.999999999999996+0-5.000000000000000-1
|
||||
164 3.999999999999999+1 9.999999999999996+0 0.000000000000000+0
|
||||
165 3.999999999999999+1 9.999999999999996+0 5.000000000000000-1
|
||||
define element set DAMASK_elements
|
||||
1 to 80
|
||||
define node set unten_y_nodes
|
||||
2 5 8 11 14
|
||||
define node set oben_y_nodes
|
||||
|
@@ -283,6 +281,166 @@ define node set unten_z_nodes
|
|||
7 to 9
|
||||
define node set oben_z_nodes
|
||||
157 to 159
|
||||
define element set material0_elements
|
||||
1
|
||||
define element set material1_elements
|
||||
2
|
||||
define element set material2_elements
|
||||
3
|
||||
define element set material3_elements
|
||||
4
|
||||
define element set material4_elements
|
||||
5
|
||||
define element set material5_elements
|
||||
6
|
||||
define element set material6_elements
|
||||
7
|
||||
define element set material7_elements
|
||||
8
|
||||
define element set material8_elements
|
||||
9
|
||||
define element set material9_elements
|
||||
10
|
||||
define element set material10_elements
|
||||
11
|
||||
define element set material11_elements
|
||||
12
|
||||
define element set material12_elements
|
||||
13
|
||||
define element set material13_elements
|
||||
14
|
||||
define element set material14_elements
|
||||
15
|
||||
define element set material15_elements
|
||||
16
|
||||
define element set material16_elements
|
||||
17
|
||||
define element set material17_elements
|
||||
18
|
||||
define element set material18_elements
|
||||
19
|
||||
define element set material19_elements
|
||||
20
|
||||
define element set material20_elements
|
||||
21
|
||||
define element set material21_elements
|
||||
22
|
||||
define element set material22_elements
|
||||
23
|
||||
define element set material23_elements
|
||||
24
|
||||
define element set material24_elements
|
||||
25
|
||||
define element set material25_elements
|
||||
26
|
||||
define element set material26_elements
|
||||
27
|
||||
define element set material27_elements
|
||||
28
|
||||
define element set material28_elements
|
||||
29
|
||||
define element set material29_elements
|
||||
30
|
||||
define element set material30_elements
|
||||
31
|
||||
define element set material31_elements
|
||||
32
|
||||
define element set material32_elements
|
||||
33
|
||||
define element set material33_elements
|
||||
34
|
||||
define element set material34_elements
|
||||
35
|
||||
define element set material35_elements
|
||||
36
|
||||
define element set material36_elements
|
||||
37
|
||||
define element set material37_elements
|
||||
38
|
||||
define element set material38_elements
|
||||
39
|
||||
define element set material39_elements
|
||||
40
|
||||
define element set material40_elements
|
||||
41
|
||||
define element set material41_elements
|
||||
42
|
||||
define element set material42_elements
|
||||
43
|
||||
define element set material43_elements
|
||||
44
|
||||
define element set material44_elements
|
||||
45
|
||||
define element set material45_elements
|
||||
46
|
||||
define element set material46_elements
|
||||
47
|
||||
define element set material47_elements
|
||||
48
|
||||
define element set material48_elements
|
||||
49
|
||||
define element set material49_elements
|
||||
50
|
||||
define element set material50_elements
|
||||
51
|
||||
define element set material51_elements
|
||||
52
|
||||
define element set material52_elements
|
||||
53
|
||||
define element set material53_elements
|
||||
54
|
||||
define element set material54_elements
|
||||
55
|
||||
define element set material55_elements
|
||||
56
|
||||
define element set material56_elements
|
||||
57
|
||||
define element set material57_elements
|
||||
58
|
||||
define element set material58_elements
|
||||
59
|
||||
define element set material59_elements
|
||||
60
|
||||
define element set material60_elements
|
||||
61
|
||||
define element set material61_elements
|
||||
62
|
||||
define element set material62_elements
|
||||
63
|
||||
define element set material63_elements
|
||||
64
|
||||
define element set material64_elements
|
||||
65
|
||||
define element set material65_elements
|
||||
66
|
||||
define element set material66_elements
|
||||
67
|
||||
define element set material67_elements
|
||||
68
|
||||
define element set material68_elements
|
||||
69
|
||||
define element set material69_elements
|
||||
70
|
||||
define element set material70_elements
|
||||
71
|
||||
define element set material71_elements
|
||||
72
|
||||
define element set material72_elements
|
||||
73
|
||||
define element set material73_elements
|
||||
74
|
||||
define element set material74_elements
|
||||
75
|
||||
define element set material75_elements
|
||||
76
|
||||
define element set material76_elements
|
||||
77
|
||||
define element set material77_elements
|
||||
78
|
||||
define element set material78_elements
|
||||
79
|
||||
define element set material79_elements
|
||||
80
|
||||
hypoelastic
|
||||
|
||||
1 0 1 0 1TKS 0
|
||||
|
@@ -294,13 +452,13 @@ mat color
|
|||
1 1 230 0 0
|
||||
table weg_x
|
||||
1 1 0 0 2
|
||||
1 2 2 0 0 2 0 0 2 0 0 2
|
||||
1 2 2 0 0 2 0 0 2 0 0 2 0 0 0 0
|
||||
0.000000000000000+0 0.000000000000000+0
|
||||
2.000000000000000+2 1.600000000000000+1
|
||||
geometry
|
||||
0 0 2
|
||||
1 9 1 230 0 0
|
||||
r-value-sample
|
||||
r-wert-probe
|
||||
0.000000000000000+0 0.000000000000000+0 0.000000000000000+0 0.000000000000000+0 0.000000000000000+0 0.000000000000000+0 0.000000000000000+0
|
||||
|
||||
usdata 1
|
||||
|
@@ -344,20 +502,655 @@ unten_fest_nodes
|
|||
oben_ziehen_nodes
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0DAMASK
|
||||
2 6 1 0 0 0material0
|
||||
0.000000000000000+0
|
||||
0
|
||||
1
|
||||
DAMASK_elements
|
||||
loadcase r-value
|
||||
5
|
||||
DAMASK
|
||||
material0_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material1
|
||||
1.000000000000000+0
|
||||
0
|
||||
1
|
||||
material1_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material2
|
||||
2.000000000000000+0
|
||||
0
|
||||
1
|
||||
material2_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material3
|
||||
3.000000000000000+0
|
||||
0
|
||||
1
|
||||
material3_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material4
|
||||
4.000000000000000+0
|
||||
0
|
||||
1
|
||||
material4_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material5
|
||||
5.000000000000000+0
|
||||
0
|
||||
1
|
||||
material5_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material6
|
||||
6.000000000000000+0
|
||||
0
|
||||
1
|
||||
material6_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material7
|
||||
7.000000000000000+0
|
||||
0
|
||||
1
|
||||
material7_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material8
|
||||
8.000000000000000+0
|
||||
0
|
||||
1
|
||||
material8_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material9
|
||||
9.000000000000000+0
|
||||
0
|
||||
1
|
||||
material9_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material10
|
||||
10.000000000000000+0
|
||||
0
|
||||
1
|
||||
material10_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material11
|
||||
11.000000000000000+0
|
||||
0
|
||||
1
|
||||
material11_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material12
|
||||
12.000000000000000+0
|
||||
0
|
||||
1
|
||||
material12_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material13
|
||||
13.000000000000000+0
|
||||
0
|
||||
1
|
||||
material13_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material14
|
||||
14.000000000000000+0
|
||||
0
|
||||
1
|
||||
material14_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material15
|
||||
15.000000000000000+0
|
||||
0
|
||||
1
|
||||
material15_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material16
|
||||
16.000000000000000+0
|
||||
0
|
||||
1
|
||||
material16_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material17
|
||||
17.000000000000000+0
|
||||
0
|
||||
1
|
||||
material17_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material18
|
||||
18.000000000000000+0
|
||||
0
|
||||
1
|
||||
material18_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material19
|
||||
19.000000000000000+0
|
||||
0
|
||||
1
|
||||
material19_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material20
|
||||
20.000000000000000+0
|
||||
0
|
||||
1
|
||||
material20_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material21
|
||||
21.000000000000000+0
|
||||
0
|
||||
1
|
||||
material21_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material22
|
||||
22.000000000000000+0
|
||||
0
|
||||
1
|
||||
material22_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material23
|
||||
23.000000000000000+0
|
||||
0
|
||||
1
|
||||
material23_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material24
|
||||
24.000000000000000+0
|
||||
0
|
||||
1
|
||||
material24_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material25
|
||||
25.000000000000000+0
|
||||
0
|
||||
1
|
||||
material25_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material26
|
||||
26.000000000000000+0
|
||||
0
|
||||
1
|
||||
material26_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material27
|
||||
27.000000000000000+0
|
||||
0
|
||||
1
|
||||
material27_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material28
|
||||
28.000000000000000+0
|
||||
0
|
||||
1
|
||||
material28_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material29
|
||||
29.000000000000000+0
|
||||
0
|
||||
1
|
||||
material29_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material30
|
||||
30.000000000000000+0
|
||||
0
|
||||
1
|
||||
material30_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material31
|
||||
31.000000000000000+0
|
||||
0
|
||||
1
|
||||
material31_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material32
|
||||
32.000000000000000+0
|
||||
0
|
||||
1
|
||||
material32_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material33
|
||||
33.000000000000000+0
|
||||
0
|
||||
1
|
||||
material33_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material34
|
||||
34.000000000000000+0
|
||||
0
|
||||
1
|
||||
material34_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material35
|
||||
35.000000000000000+0
|
||||
0
|
||||
1
|
||||
material35_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material36
|
||||
36.000000000000000+0
|
||||
0
|
||||
1
|
||||
material36_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material37
|
||||
37.000000000000000+0
|
||||
0
|
||||
1
|
||||
material37_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material38
|
||||
38.000000000000000+0
|
||||
0
|
||||
1
|
||||
material38_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material39
|
||||
39.000000000000000+0
|
||||
0
|
||||
1
|
||||
material39_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material40
|
||||
40.000000000000000+0
|
||||
0
|
||||
1
|
||||
material40_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material41
|
||||
41.000000000000000+0
|
||||
0
|
||||
1
|
||||
material41_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material42
|
||||
42.000000000000000+0
|
||||
0
|
||||
1
|
||||
material42_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material43
|
||||
43.000000000000000+0
|
||||
0
|
||||
1
|
||||
material43_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material44
|
||||
44.000000000000000+0
|
||||
0
|
||||
1
|
||||
material44_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material45
|
||||
45.000000000000000+0
|
||||
0
|
||||
1
|
||||
material45_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material46
|
||||
46.000000000000000+0
|
||||
0
|
||||
1
|
||||
material46_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material47
|
||||
47.000000000000000+0
|
||||
0
|
||||
1
|
||||
material47_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material48
|
||||
48.000000000000000+0
|
||||
0
|
||||
1
|
||||
material48_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material49
|
||||
49.000000000000000+0
|
||||
0
|
||||
1
|
||||
material49_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material50
|
||||
50.000000000000000+0
|
||||
0
|
||||
1
|
||||
material50_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material51
|
||||
51.000000000000000+0
|
||||
0
|
||||
1
|
||||
material51_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material52
|
||||
52.000000000000000+0
|
||||
0
|
||||
1
|
||||
material52_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material53
|
||||
53.000000000000000+0
|
||||
0
|
||||
1
|
||||
material53_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material54
|
||||
54.000000000000000+0
|
||||
0
|
||||
1
|
||||
material54_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material55
|
||||
55.000000000000000+0
|
||||
0
|
||||
1
|
||||
material55_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material56
|
||||
56.000000000000000+0
|
||||
0
|
||||
1
|
||||
material56_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material57
|
||||
57.000000000000000+0
|
||||
0
|
||||
1
|
||||
material57_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material58
|
||||
58.000000000000000+0
|
||||
0
|
||||
1
|
||||
material58_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material59
|
||||
59.000000000000000+0
|
||||
0
|
||||
1
|
||||
material59_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material60
|
||||
60.000000000000000+0
|
||||
0
|
||||
1
|
||||
material60_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material61
|
||||
61.000000000000000+0
|
||||
0
|
||||
1
|
||||
material61_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material62
|
||||
62.000000000000000+0
|
||||
0
|
||||
1
|
||||
material62_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material63
|
||||
63.000000000000000+0
|
||||
0
|
||||
1
|
||||
material63_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material64
|
||||
64.000000000000000+0
|
||||
0
|
||||
1
|
||||
material64_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material65
|
||||
65.000000000000000+0
|
||||
0
|
||||
1
|
||||
material65_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material66
|
||||
66.000000000000000+0
|
||||
0
|
||||
1
|
||||
material66_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material67
|
||||
67.000000000000000+0
|
||||
0
|
||||
1
|
||||
material67_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material68
|
||||
68.000000000000000+0
|
||||
0
|
||||
1
|
||||
material68_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material69
|
||||
69.000000000000000+0
|
||||
0
|
||||
1
|
||||
material69_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material70
|
||||
70.000000000000000+0
|
||||
0
|
||||
1
|
||||
material70_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material71
|
||||
71.000000000000000+0
|
||||
0
|
||||
1
|
||||
material71_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material72
|
||||
72.000000000000000+0
|
||||
0
|
||||
1
|
||||
material72_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material73
|
||||
73.000000000000000+0
|
||||
0
|
||||
1
|
||||
material73_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material74
|
||||
74.000000000000000+0
|
||||
0
|
||||
1
|
||||
material74_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material75
|
||||
75.000000000000000+0
|
||||
0
|
||||
1
|
||||
material75_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material76
|
||||
76.000000000000000+0
|
||||
0
|
||||
1
|
||||
material76_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material77
|
||||
77.000000000000000+0
|
||||
0
|
||||
1
|
||||
material77_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material78
|
||||
78.000000000000000+0
|
||||
0
|
||||
1
|
||||
material78_elements
|
||||
initial state
|
||||
|
||||
2 6 1 0 0 0material79
|
||||
79.000000000000000+0
|
||||
0
|
||||
1
|
||||
material79_elements
|
||||
loadcase R-Wert
|
||||
83
|
||||
material0
|
||||
material1
|
||||
material2
|
||||
material3
|
||||
material4
|
||||
material5
|
||||
material6
|
||||
material7
|
||||
material8
|
||||
material9
|
||||
material10
|
||||
material11
|
||||
material12
|
||||
material13
|
||||
material14
|
||||
material15
|
||||
material16
|
||||
material17
|
||||
material18
|
||||
material19
|
||||
material20
|
||||
material21
|
||||
material22
|
||||
material23
|
||||
material24
|
||||
material25
|
||||
material26
|
||||
material27
|
||||
material28
|
||||
material29
|
||||
material30
|
||||
material31
|
||||
material32
|
||||
material33
|
||||
material34
|
||||
material35
|
||||
material36
|
||||
material37
|
||||
material38
|
||||
material39
|
||||
material40
|
||||
material41
|
||||
material42
|
||||
material43
|
||||
material44
|
||||
material45
|
||||
material46
|
||||
material47
|
||||
material48
|
||||
material49
|
||||
material50
|
||||
material51
|
||||
material52
|
||||
material53
|
||||
material54
|
||||
material55
|
||||
material56
|
||||
material57
|
||||
material58
|
||||
material59
|
||||
material60
|
||||
material61
|
||||
material62
|
||||
material63
|
||||
material64
|
||||
material65
|
||||
material66
|
||||
material67
|
||||
material68
|
||||
material69
|
||||
material70
|
||||
material71
|
||||
material72
|
||||
material73
|
||||
material74
|
||||
material75
|
||||
material76
|
||||
material77
|
||||
material78
|
||||
material79
|
||||
unten_z
unten_y
unten_fest
no print
post
6 16 17 0 0 19 20 0 1 0 0 0 0 0 0 0
3 16 17 0 0 19 20 0 1 0 0 0 0 0 0 0
17 0
301 0
311 0
parameters
1.000000000000000+0 1.000000000000000+9 1.000000000000000+2 1.000000000000000+6 2.500000000000000-1 5.000000000000000-1 1.500000000000000+0-5.000000000000000-1
8.625000000000000+0 2.000000000000000+1 1.000000000000000-4 1.000000000000000-6 1.000000000000000+0 1.000000000000000-4

@ -367,9 +1160,9 @@ parameters
3.000000000000000+0 4.000000000000000-1
end option
$...................
$....start of loadcase Tensile
title Tensile
loadcase Tensile
$....start of loadcase Ziehen
title Ziehen
loadcase Ziehen
6
unten_z
unten_y

@ -392,5 +1185,5 @@ auto load
time step
2.000000000000000+0
continue
$....end of loadcase Tensile
$....end of loadcase Ziehen
$...................
|
|
@ -4,6 +4,9 @@ references:
Acta Metallurgica 8(3):187-199, 1960,
https://doi.org/10.1016/0001-6160(60)90127-9,
fitted from Fig. 5
- U.F. Kocks,
Metallurgical and Materials Transactions B 1:1121–1143, 1970,
https://doi.org/10.1007/BF02900224
output: [xi_sl, gamma_sl]
N_sl: [12]
n_sl: 20

@ -12,4 +15,4 @@ h_0_sl-sl: 1.7e+8
xi_0_sl: [5.0e+6]
xi_inf_sl: [37.5e+6]
h_sl-sl: [1, 1, 1.4, 1.4, 1.4, 1.4, 1.4]
dot_gamma_0_sl: 4.5e-3
dot_gamma_0_sl: 7.5e-5
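For orientation, these parameters feed a phenomenological power-law description of slip: dot_gamma_0_sl and n_sl set the rate sensitivity of shear on each slip system, while xi_0_sl, xi_inf_sl and h_0_sl-sl control the initial slip resistance, its saturation value and the hardening rate. A minimal numpy sketch of that kind of law follows; it is illustrative only, not DAMASK's implementation, and the hardening exponent a is an assumption not present in the excerpt above.

import numpy as np

def dot_gamma(tau, xi, dot_gamma_0=7.5e-5, n=20.0):
    # shear rate on one slip system from resolved shear stress tau and slip resistance xi
    return dot_gamma_0 * np.abs(tau/xi)**n * np.sign(tau)

def dot_xi(xi, dot_gamma_sum, h_0=1.7e+8, xi_inf=37.5e+6, a=2.0):
    # Kocks-type saturation hardening of the slip resistance (exponent a assumed)
    return h_0 * np.abs(1.0 - xi/xi_inf)**a * np.sign(1.0 - xi/xi_inf) * dot_gamma_sum

xi = 5.0e+6                                     # xi_0_sl
print(dot_gamma(tau=4.0e+6, xi=xi))             # stress below the resistance: shear rate is tiny
print(dot_xi(xi, abs(dot_gamma(6.0e+6, xi))))   # hardening rate at a stress slightly above xi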
|
|
|
@ -6,6 +6,9 @@ references:
- I. Kovács and G.Vörös,
International Journal of Plasticity 12:35-43, 1996,
https://doi.org/10.1016/S0749-6419(95)00043-7
- U.F. Kocks,
Metallurgical and Materials Transactions B 1:1121–1143, 1970,
https://doi.org/10.1007/BF02900224
output: [xi_sl, gamma_sl]
N_sl: [12]
n_sl: 83.3
|
|
|
@ -4,6 +4,9 @@ references:
Transactions of the Japan Institute of Metals 16(10):629-640, 1975,
https://doi.org/10.2320/matertrans1960.16.629,
fitted from Fig. 3b
- U.F. Kocks,
Metallurgical and Materials Transactions B 1:1121–1143, 1970,
https://doi.org/10.1007/BF02900224
output: [xi_sl, gamma_sl]
N_sl: [12]
n_sl: 20
|
|
|
@ -3,6 +3,9 @@ references:
- C.C. Tasan et al.,
Acta Materialia 81:386-400, 2014,
https://doi.org/10.1016/j.actamat.2014.07.071
- U.F. Kocks,
Metallurgical and Materials Transactions B 1:1121–1143, 1970,
https://doi.org/10.1007/BF02900224
output: [xi_sl, gamma_sl]
N_sl: [12, 12]
n_sl: 20
|
|
|
@ -1,117 +0,0 @@
#!/usr/bin/env python3

import os
import sys
from io import StringIO
from optparse import OptionParser

import numpy as np

import damask


scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID   = ' '.join([scriptName,damask.version])

slipSystems = {
    'fcc': damask.lattice.kinematics['cF']['slip'][:12],
    'bcc': damask.lattice.kinematics['cI']['slip'],
    'hex': damask.lattice.kinematics['hP']['slip'],
}

# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------

parser = OptionParser(usage='%prog options [ASCIItable(s)]', description = """
Add columns listing Schmid factors (and optional trace vector of selected system) for given Euler angles.

""", version = scriptID)

lattice_choices = list(slipSystems.keys())
parser.add_option('-l',
                  '--lattice',
                  dest = 'lattice', type = 'choice', choices = lattice_choices, metavar='string',
                  help = 'type of lattice structure [%default] {}'.format(lattice_choices))
parser.add_option('--covera',
                  dest = 'CoverA', type = 'float', metavar = 'float',
                  help = 'C over A ratio for hexagonal systems [%default]')
parser.add_option('-f',
                  '--force',
                  dest = 'force',
                  type = 'float', nargs = 3, metavar = 'float float float',
                  help = 'force direction in lab frame [%default]')
parser.add_option('-n',
                  '--normal',
                  dest = 'normal',
                  type = 'float', nargs = 3, metavar = 'float float float',
                  help = 'stress plane normal in lab frame, by default perpendicular to the force')
parser.add_option('-o',
                  '--orientation',
                  dest = 'quaternion',
                  metavar = 'string',
                  help = 'label of crystal orientation given as unit quaternion [%default]')

parser.set_defaults(force = (0.0,0.0,1.0),
                    quaternion='orientation',
                    normal = None,
                    lattice = lattice_choices[0],
                    CoverA = np.sqrt(8./3.),
                   )

(options, filenames) = parser.parse_args()
if filenames == []: filenames = [None]

force = np.array(options.force)/np.linalg.norm(options.force)

if options.normal is not None:
    normal = np.array(options.normal)/np.linalg.norm(options.normal)
    if abs(np.dot(force,normal)) > 1e-3:
        parser.error('stress plane normal not orthogonal to force direction')
else:
    normal = force


if options.lattice in ['bcc','fcc']:
    slip_direction = slipSystems[options.lattice][:,:3]
    slip_normal    = slipSystems[options.lattice][:,3:]
elif options.lattice == 'hex':
    slip_direction = np.zeros((len(slipSystems['hex']),3),'d')
    slip_normal    = np.zeros_like(slip_direction)
    # convert 4 Miller index notation of hex to orthogonal 3 Miller index notation
    for i in range(len(slip_direction)):
        slip_direction[i] = np.array([slipSystems['hex'][i,0]*1.5,
                                      (slipSystems['hex'][i,0] + 2.*slipSystems['hex'][i,1])*0.5*np.sqrt(3),
                                      slipSystems['hex'][i,3]*options.CoverA,
                                     ])
        slip_normal[i]    = np.array([slipSystems['hex'][i,4],
                                      (slipSystems['hex'][i,4] + 2.*slipSystems['hex'][i,5])/np.sqrt(3),
                                      slipSystems['hex'][i,7]/options.CoverA,
                                     ])

slip_direction /= np.linalg.norm(slip_direction,axis=1,keepdims=True)
slip_normal    /= np.linalg.norm(slip_normal,   axis=1,keepdims=True)

labels = ['S[{direction[0]:.1g}_{direction[1]:.1g}_{direction[2]:.1g}]'
          '({normal[0]:.1g}_{normal[1]:.1g}_{normal[2]:.1g})'\
          .format(normal = theNormal, direction = theDirection,
                 ) for theNormal,theDirection in zip(slip_normal,slip_direction)]

for name in filenames:
    damask.util.report(scriptName,name)

    table = damask.Table.load(StringIO(''.join(sys.stdin.read())) if name is None else name)

    o = damask.Rotation.from_quaternion(table.get(options.quaternion))

    force          = np.broadcast_to(force,         o.shape+(3,))
    normal         = np.broadcast_to(normal,        o.shape+(3,))
    slip_direction = np.broadcast_to(slip_direction,o.shape+slip_direction.shape)
    slip_normal    = np.broadcast_to(slip_normal,   o.shape+slip_normal.shape)
    S = np.abs(np.einsum('ijk,ik->ij',slip_direction,(o@force))*
               np.einsum('ijk,ik->ij',slip_normal,   (o@normal)))

    for i,label in enumerate(labels):
        table = table.add(label,S[:,i],scriptID+' '+' '.join(sys.argv[1:]))

    table.save((sys.stdout if name is None else name))
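The removed script boils down to the classic Schmid factor m = |(d·f)(n·f)| per slip system. A self-contained numpy sketch of that projection, independent of the damask table machinery:

import numpy as np

# one fcc slip system: direction d = [0 1 -1], plane normal n = (1 1 1), normalized below
d = np.array([0., 1., -1.])
n = np.array([1., 1., 1.])
f = np.array([0., 0., 1.])                    # loading (force) direction in the lab frame

d /= np.linalg.norm(d)
n /= np.linalg.norm(n)
f /= np.linalg.norm(f)

m = abs(np.dot(d, f) * np.dot(n, f))          # Schmid factor of this system
print(f'Schmid factor: {m:.3f}')              # 0.408 for this combination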
|
|
@ -1 +1 @@
v3.0.0-alpha4-221-g4a8c83611
v3.0.0-alpha5-31-gddb25ad0e
|
|
|
@ -14,10 +14,10 @@ from . import tensor # noqa
from . import mechanics # noqa
from . import solver # noqa
from . import grid_filters # noqa
from . import lattice # noqa
# Modules that contain only one class (of the same name) are prefixed by a '_'.
# For example, '_colormap' contains a class called 'Colormap' which is imported as 'damask.Colormap'.
from ._rotation import Rotation # noqa
from ._crystal import Crystal # noqa
from ._orientation import Orientation # noqa
from ._table import Table # noqa
from ._vtk import VTK # noqa

@ -26,6 +26,3 @@ from ._config import Config # noqa
from ._configmaterial import ConfigMaterial # noqa
from ._grid import Grid # noqa
from ._result import Result # noqa

# deprecated
from ._test import Test # noqa
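The comment above describes the package layout: single-class modules carry a leading underscore and their class is re-exported at package level. A small illustration, using only names from the imports above:

import numpy as np
import damask

r = damask.Rotation(np.array([1., 0., 0., 0.]))   # identity rotation; the class lives in damask/_rotation.py
print(type(r).__module__)                         # prints 'damask._rotation'
print(damask.Colormap.__module__)                 # prints 'damask._colormap'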
|
|
|
@ -1,6 +1,7 @@
import os
import json
import functools
import colorsys

import numpy as np
import matplotlib as mpl
|
||||
|
@ -439,109 +440,26 @@ class Colormap(mpl.colors.ListedColormap):
|
|||
|
||||
@staticmethod
|
||||
def _hsv2rgb(hsv):
|
||||
"""
|
||||
H(ue) S(aturation) V(alue) to R(red) G(reen) B(lue).
|
||||
|
||||
References
|
||||
----------
|
||||
https://www.rapidtables.com/convert/color/hsv-to-rgb.html
|
||||
|
||||
"""
|
||||
sextant = np.clip(int(hsv[0]/60.),0,5)
|
||||
c = hsv[1]*hsv[2]
|
||||
x = c*(1.0 - abs((hsv[0]/60.)%2 - 1.))
|
||||
|
||||
return np.array([
|
||||
[c, x, 0],
|
||||
[x, c, 0],
|
||||
[0, c, x],
|
||||
[0, x, c],
|
||||
[x, 0, c],
|
||||
[c, 0, x],
|
||||
])[sextant] + hsv[2] - c
|
||||
"""H(ue) S(aturation) V(alue) to R(red) G(reen) B(lue)."""
|
||||
return np.array(colorsys.hsv_to_rgb(hsv[0]/360.,hsv[1],hsv[2]))
|
||||
|
||||
@staticmethod
|
||||
def _rgb2hsv(rgb):
|
||||
"""
|
||||
R(ed) G(reen) B(lue) to H(ue) S(aturation) V(alue).
|
||||
|
||||
References
|
||||
----------
|
||||
https://www.rapidtables.com/convert/color/rgb-to-hsv.html
|
||||
|
||||
"""
|
||||
C_max = rgb.max()
|
||||
C_min = rgb.min()
|
||||
Delta = C_max - C_min
|
||||
|
||||
v = C_max
|
||||
s = 0. if np.isclose(C_max,0.) else Delta/C_max
|
||||
if np.isclose(Delta,0.):
|
||||
h = 0.
|
||||
elif rgb.argmax() == 0:
|
||||
h = (rgb[1]-rgb[2])/Delta%6
|
||||
elif rgb.argmax() == 1:
|
||||
h = (rgb[2]-rgb[0])/Delta + 2.
|
||||
elif rgb.argmax() == 2:
|
||||
h = (rgb[0]-rgb[1])/Delta + 4.
|
||||
|
||||
h = np.clip(h,0.,6.) * 60.
|
||||
|
||||
return np.array([h,s,v])
|
||||
"""R(ed) G(reen) B(lue) to H(ue) S(aturation) V(alue)."""
|
||||
h,s,v = colorsys.rgb_to_hsv(rgb[0],rgb[1],rgb[2])
|
||||
return np.array([h*360,s,v])
|
||||
|
||||
|
||||
@staticmethod
|
||||
def _hsl2rgb(hsl):
|
||||
"""
|
||||
H(ue) S(aturation) L(uminance) to R(red) G(reen) B(lue).
|
||||
|
||||
References
|
||||
----------
|
||||
https://www.rapidtables.com/convert/color/hsl-to-rgb.html
|
||||
|
||||
"""
|
||||
sextant = np.clip(int(hsl[0]/60.),0,5)
|
||||
c = (1.0 - abs(2.0 * hsl[2] - 1.))*hsl[1]
|
||||
x = c*(1.0 - abs((hsl[0]/60.)%2 - 1.))
|
||||
m = hsl[2] - 0.5*c
|
||||
|
||||
return np.array([
|
||||
[c+m, x+m, m],
|
||||
[x+m, c+m, m],
|
||||
[m, c+m, x+m],
|
||||
[m, x+m, c+m],
|
||||
[x+m, m, c+m],
|
||||
[c+m, m, x+m],
|
||||
])[sextant]
|
||||
"""H(ue) S(aturation) L(uminance) to R(red) G(reen) B(lue)."""
|
||||
return np.array(colorsys.hls_to_rgb(hsl[0]/360.,hsl[2],hsl[1]))
|
||||
|
||||
@staticmethod
|
||||
def _rgb2hsl(rgb):
|
||||
"""
|
||||
R(ed) G(reen) B(lue) to H(ue) S(aturation) L(uminance).
|
||||
|
||||
References
|
||||
----------
|
||||
https://www.rapidtables.com/convert/color/rgb-to-hsl.html
|
||||
|
||||
"""
|
||||
C_max = rgb.max()
|
||||
C_min = rgb.min()
|
||||
Delta = C_max - C_min
|
||||
|
||||
l = np.clip((C_max + C_min)*.5,0.,1.) # noqa
|
||||
s = 0. if np.isclose(C_max,C_min) else Delta/(1.-np.abs(2*l-1.))
|
||||
if np.isclose(Delta,0.):
|
||||
h = 0.
|
||||
elif rgb.argmax() == 0:
|
||||
h = (rgb[1]-rgb[2])/Delta%6
|
||||
elif rgb.argmax() == 1:
|
||||
h = (rgb[2]-rgb[0])/Delta + 2.
|
||||
elif rgb.argmax() == 2:
|
||||
h = (rgb[0]-rgb[1])/Delta + 4.
|
||||
|
||||
h = np.clip(h,0.,6.) * 60.
|
||||
|
||||
return np.array([h,s,l])
|
||||
"""R(ed) G(reen) B(lue) to H(ue) S(aturation) L(uminance)."""
|
||||
h,l,s = colorsys.rgb_to_hls(rgb[0],rgb[1],rgb[2])
|
||||
return np.array([h*360,s,l])
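The refactoring above replaces the hand-written sextant arithmetic with the standard library; only the hue scale differs (the class uses 0–360°, colorsys uses 0–1), hence the division and multiplication by 360. A stand-alone round-trip check of that convention (illustrative only):

import colorsys
import numpy as np

def hsv2rgb(hsv):
    # hue in degrees (0-360), saturation and value in [0,1] -> RGB in [0,1]
    return np.array(colorsys.hsv_to_rgb(hsv[0]/360., hsv[1], hsv[2]))

def rgb2hsv(rgb):
    # RGB in [0,1] -> hue in degrees (0-360), saturation and value in [0,1]
    h, s, v = colorsys.rgb_to_hsv(rgb[0], rgb[1], rgb[2])
    return np.array([h*360., s, v])

hsv = np.array([200., .3, .8])
print(np.allclose(rgb2hsv(hsv2rgb(hsv)), hsv))    # the round trip reproduces the input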
|
||||
|
||||
|
||||
@staticmethod
|
||||
|
|
|
@ -146,12 +146,6 @@ class Config(dict):
if 'sort_keys' not in kwargs:
kwargs['sort_keys'] = False

def array_representer(dumper, data):
"""Convert numpy array to list of native types."""
return dumper.represent_list([d.item() for d in data])

NiceDumper.add_representer(np.ndarray, array_representer)

try:
fhandle.write(yaml.dump(self,Dumper=NiceDumper,**kwargs))
except TypeError: # compatibility with old pyyaml
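The removed array_representer used the standard PyYAML hook for teaching a Dumper about a foreign type. A minimal stand-alone version of the same idea, using yaml.SafeDumper instead of the DAMASK-internal NiceDumper:

import numpy as np
import yaml

def array_representer(dumper, data):
    # convert a numpy array into a plain list of native Python scalars
    return dumper.represent_list([d.item() for d in data])

yaml.SafeDumper.add_representer(np.ndarray, array_representer)

print(yaml.dump({'origin': np.array([0., 0., 0.])}, Dumper=yaml.SafeDumper, sort_keys=False))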
|
|
|
@ -0,0 +1,876 @@
|
|||
import numpy as np
|
||||
|
||||
from . import util
|
||||
from . import Rotation
|
||||
|
||||
lattice_symmetries = {
|
||||
'aP': 'triclinic',
|
||||
|
||||
'mP': 'monoclinic',
|
||||
'mS': 'monoclinic',
|
||||
|
||||
'oP': 'orthorhombic',
|
||||
'oS': 'orthorhombic',
|
||||
'oI': 'orthorhombic',
|
||||
'oF': 'orthorhombic',
|
||||
|
||||
'tP': 'tetragonal',
|
||||
'tI': 'tetragonal',
|
||||
|
||||
'hP': 'hexagonal',
|
||||
|
||||
'cP': 'cubic',
|
||||
'cI': 'cubic',
|
||||
'cF': 'cubic',
|
||||
}
|
||||
|
||||
|
||||
class Crystal():
|
||||
"""Crystal lattice."""
|
||||
|
||||
def __init__(self,*,
|
||||
family = None,
|
||||
lattice = None,
|
||||
a = None,b = None,c = None,
|
||||
alpha = None,beta = None,gamma = None,
|
||||
degrees = False):
|
||||
"""
|
||||
Representation of crystal in terms of crystal family or Bravais lattice.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
family : {'triclinic', 'monoclinic', 'orthorhombic', 'tetragonal', 'hexagonal', 'cubic'}, optional.
|
||||
Name of the crystal family.
|
||||
Will be inferred if 'lattice' is given.
|
||||
lattice : {'aP', 'mP', 'mS', 'oP', 'oS', 'oI', 'oF', 'tP', 'tI', 'hP', 'cP', 'cI', 'cF'}, optional.
|
||||
Name of the Bravais lattice in Pearson notation.
|
||||
a : float, optional
|
||||
Length of lattice parameter 'a'.
|
||||
b : float, optional
|
||||
Length of lattice parameter 'b'.
|
||||
c : float, optional
|
||||
Length of lattice parameter 'c'.
|
||||
alpha : float, optional
|
||||
Angle between b and c lattice basis.
|
||||
beta : float, optional
|
||||
Angle between c and a lattice basis.
|
||||
gamma : float, optional
|
||||
Angle between a and b lattice basis.
|
||||
degrees : bool, optional
|
||||
Angles are given in degrees. Defaults to False.
|
||||
|
||||
"""
|
||||
if family not in [None] + list(lattice_symmetries.values()):
|
||||
raise KeyError(f'invalid crystal family "{family}"')
|
||||
if lattice is not None and family is not None and family != lattice_symmetries[lattice]:
|
||||
raise KeyError(f'incompatible family "{family}" for lattice "{lattice}"')
|
||||
|
||||
self.family = lattice_symmetries[lattice] if family is None else family
|
||||
self.lattice = lattice
|
||||
|
||||
if self.lattice is not None:
|
||||
self.a = 1 if a is None else a
|
||||
self.b = b
|
||||
self.c = c
|
||||
self.a = float(self.a) if self.a is not None else \
|
||||
(self.b / self.ratio['b'] if self.b is not None and self.ratio['b'] is not None else
|
||||
self.c / self.ratio['c'] if self.c is not None and self.ratio['c'] is not None else None)
|
||||
self.b = float(self.b) if self.b is not None else \
|
||||
(self.a * self.ratio['b'] if self.a is not None and self.ratio['b'] is not None else
|
||||
self.c / self.ratio['c'] * self.ratio['b']
|
||||
if self.c is not None and self.ratio['b'] is not None and self.ratio['c'] is not None else None)
|
||||
self.c = float(self.c) if self.c is not None else \
|
||||
(self.a * self.ratio['c'] if self.a is not None and self.ratio['c'] is not None else
|
||||
self.b / self.ratio['b'] * self.ratio['c']
|
||||
if self.c is not None and self.ratio['b'] is not None and self.ratio['c'] is not None else None)
|
||||
|
||||
self.alpha = np.radians(alpha) if degrees and alpha is not None else alpha
|
||||
self.beta = np.radians(beta) if degrees and beta is not None else beta
|
||||
self.gamma = np.radians(gamma) if degrees and gamma is not None else gamma
|
||||
if self.alpha is None and 'alpha' in self.immutable: self.alpha = self.immutable['alpha']
|
||||
if self.beta is None and 'beta' in self.immutable: self.beta = self.immutable['beta']
|
||||
if self.gamma is None and 'gamma' in self.immutable: self.gamma = self.immutable['gamma']
|
||||
|
||||
if \
|
||||
(self.a is None) \
|
||||
or (self.b is None or ('b' in self.immutable and self.b != self.immutable['b'] * self.a)) \
|
||||
or (self.c is None or ('c' in self.immutable and self.c != self.immutable['c'] * self.b)) \
|
||||
or (self.alpha is None or ('alpha' in self.immutable and self.alpha != self.immutable['alpha'])) \
|
||||
or (self.beta is None or ('beta' in self.immutable and self.beta != self.immutable['beta'])) \
|
||||
or (self.gamma is None or ('gamma' in self.immutable and self.gamma != self.immutable['gamma'])):
|
||||
raise ValueError (f'incompatible parameters {self.parameters} for crystal family {self.family}')
|
||||
|
||||
if np.any(np.array([self.alpha,self.beta,self.gamma]) <= 0):
|
||||
raise ValueError ('lattice angles must be positive')
|
||||
if np.any([np.roll([self.alpha,self.beta,self.gamma],r)[0]
|
||||
>= np.sum(np.roll([self.alpha,self.beta,self.gamma],r)[1:]) for r in range(3)]):
|
||||
raise ValueError ('each lattice angle must be less than sum of others')
|
||||
else:
|
||||
self.a = self.b = self.c = None
|
||||
self.alpha = self.beta = self.gamma = None
|
||||
|
||||
|
||||
def __repr__(self):
|
||||
"""Represent."""
|
||||
return '\n'.join([f'Crystal family {self.family}']
|
||||
+ ([] if self.lattice is None else [f'Bravais lattice {self.lattice}']+
|
||||
list(map(lambda x:f'{x[0]}: {x[1]:.5g}',
|
||||
zip(['a','b','c','α','β','γ',],
|
||||
self.parameters))))
|
||||
)
|
||||
|
||||
def __eq__(self,other):
|
||||
"""
|
||||
Equal to other.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
other : Crystal
|
||||
Crystal to check for equality.
|
||||
|
||||
"""
|
||||
return self.lattice == other.lattice and \
|
||||
self.parameters == other.parameters and \
|
||||
self.family == other.family
|
||||
|
||||
@property
|
||||
def parameters(self):
|
||||
"""Return lattice parameters a, b, c, alpha, beta, gamma."""
|
||||
return (self.a,self.b,self.c,self.alpha,self.beta,self.gamma)
|
||||
|
||||
|
||||
@property
|
||||
def immutable(self):
|
||||
"""Return immutable lattice parameters."""
|
||||
_immutable = {
|
||||
'cubic': {
|
||||
'b': 1.0,
|
||||
'c': 1.0,
|
||||
'alpha': np.pi/2.,
|
||||
'beta': np.pi/2.,
|
||||
'gamma': np.pi/2.,
|
||||
},
|
||||
'hexagonal': {
|
||||
'b': 1.0,
|
||||
'alpha': np.pi/2.,
|
||||
'beta': np.pi/2.,
|
||||
'gamma': 2.*np.pi/3.,
|
||||
},
|
||||
'tetragonal': {
|
||||
'b': 1.0,
|
||||
'alpha': np.pi/2.,
|
||||
'beta': np.pi/2.,
|
||||
'gamma': np.pi/2.,
|
||||
},
|
||||
'orthorhombic': {
|
||||
'alpha': np.pi/2.,
|
||||
'beta': np.pi/2.,
|
||||
'gamma': np.pi/2.,
|
||||
},
|
||||
'monoclinic': {
|
||||
'alpha': np.pi/2.,
|
||||
'gamma': np.pi/2.,
|
||||
},
|
||||
'triclinic': {}
|
||||
}
|
||||
return _immutable[self.family]
|
||||
|
||||
|
||||
@property
|
||||
def standard_triangle(self):
|
||||
"""
|
||||
Corners of the standard triangle.
|
||||
|
||||
Notes
|
||||
-----
|
||||
Not yet defined for monoclinic.
|
||||
|
||||
|
||||
References
|
||||
----------
|
||||
Bases are computed from
|
||||
|
||||
>>> basis = {
|
||||
... 'cubic' : np.linalg.inv(np.array([[0.,0.,1.], # direction of red
|
||||
... [1.,0.,1.]/np.sqrt(2.), # green
|
||||
... [1.,1.,1.]/np.sqrt(3.)]).T), # blue
|
||||
... 'hexagonal' : np.linalg.inv(np.array([[0.,0.,1.], # direction of red
|
||||
... [1.,0.,0.], # green
|
||||
... [np.sqrt(3.),1.,0.]/np.sqrt(4.)]).T), # blue
|
||||
... 'tetragonal' : np.linalg.inv(np.array([[0.,0.,1.], # direction of red
|
||||
... [1.,0.,0.], # green
|
||||
... [1.,1.,0.]/np.sqrt(2.)]).T), # blue
|
||||
... 'orthorhombic': np.linalg.inv(np.array([[0.,0.,1.], # direction of red
|
||||
... [1.,0.,0.], # green
|
||||
... [0.,1.,0.]]).T), # blue
|
||||
... }
|
||||
|
||||
"""
|
||||
_basis = {
|
||||
'cubic': {'improper':np.array([ [-1. , 0. , 1. ],
|
||||
[ np.sqrt(2.) , -np.sqrt(2.) , 0. ],
|
||||
[ 0. , np.sqrt(3.) , 0. ] ]),
|
||||
'proper':np.array([ [ 0. , -1. , 1. ],
|
||||
[-np.sqrt(2.) , np.sqrt(2.) , 0. ],
|
||||
[ np.sqrt(3.) , 0. , 0. ] ]),
|
||||
},
|
||||
'hexagonal':
|
||||
{'improper':np.array([ [ 0. , 0. , 1. ],
|
||||
[ 1. , -np.sqrt(3.) , 0. ],
|
||||
[ 0. , 2. , 0. ] ]),
|
||||
'proper':np.array([ [ 0. , 0. , 1. ],
|
||||
[-1. , np.sqrt(3.) , 0. ],
|
||||
[ np.sqrt(3.) , -1. , 0. ] ]),
|
||||
},
|
||||
'tetragonal':
|
||||
{'improper':np.array([ [ 0. , 0. , 1. ],
|
||||
[ 1. , -1. , 0. ],
|
||||
[ 0. , np.sqrt(2.) , 0. ] ]),
|
||||
'proper':np.array([ [ 0. , 0. , 1. ],
|
||||
[-1. , 1. , 0. ],
|
||||
[ np.sqrt(2.) , 0. , 0. ] ]),
|
||||
},
|
||||
'orthorhombic':
|
||||
{'improper':np.array([ [ 0., 0., 1.],
|
||||
[ 1., 0., 0.],
|
||||
[ 0., 1., 0.] ]),
|
||||
'proper':np.array([ [ 0., 0., 1.],
|
||||
[-1., 0., 0.],
|
||||
[ 0., 1., 0.] ]),
|
||||
}}
|
||||
return _basis.get(self.family,None)
|
||||
|
||||
|
||||
@property
|
||||
def ratio(self):
|
||||
"""Return axes ratios of own lattice."""
|
||||
_ratio = { 'hexagonal': {'c': np.sqrt(8./3.)}}
|
||||
|
||||
return dict(b = self.immutable['b']
|
||||
if 'b' in self.immutable else
|
||||
_ratio[self.family]['b'] if self.family in _ratio and 'b' in _ratio[self.family] else None,
|
||||
c = self.immutable['c']
|
||||
if 'c' in self.immutable else
|
||||
_ratio[self.family]['c'] if self.family in _ratio and 'c' in _ratio[self.family] else None,
|
||||
)
|
||||
|
||||
|
||||
@property
|
||||
def basis_real(self):
|
||||
"""
|
||||
Return orthogonal real space crystal basis.
|
||||
|
||||
References
|
||||
----------
|
||||
C.T. Young and J.L. Lytton, Journal of Applied Physics 43:1408–1417, 1972
|
||||
https://doi.org/10.1063/1.1661333
|
||||
|
||||
"""
|
||||
if None in self.parameters:
|
||||
raise KeyError('missing crystal lattice parameters')
|
||||
return np.array([
|
||||
[1,0,0],
|
||||
[np.cos(self.gamma),np.sin(self.gamma),0],
|
||||
[np.cos(self.beta),
|
||||
(np.cos(self.alpha)-np.cos(self.beta)*np.cos(self.gamma)) /np.sin(self.gamma),
|
||||
np.sqrt(1 - np.cos(self.alpha)**2 - np.cos(self.beta)**2 - np.cos(self.gamma)**2
|
||||
+ 2 * np.cos(self.alpha) * np.cos(self.beta) * np.cos(self.gamma))/np.sin(self.gamma)],
|
||||
],dtype=float).T \
|
||||
* np.array([self.a,self.b,self.c])
|
||||
|
||||
|
||||
@property
|
||||
def basis_reciprocal(self):
|
||||
"""Return reciprocal (dual) crystal basis."""
|
||||
return np.linalg.inv(self.basis_real.T)
|
||||
|
||||
|
||||
@property
|
||||
def lattice_points(self):
|
||||
"""Return lattice points."""
|
||||
_lattice_points = {
|
||||
'P': [
|
||||
],
|
||||
'S': [
|
||||
[0.5,0.5,0],
|
||||
],
|
||||
'I': [
|
||||
[0.5,0.5,0.5],
|
||||
],
|
||||
'F': [
|
||||
[0.0,0.5,0.5],
|
||||
[0.5,0.0,0.5],
|
||||
[0.5,0.5,0.0],
|
||||
],
|
||||
'hP': [
|
||||
[2./3.,1./3.,0.5],
|
||||
],
|
||||
}
|
||||
|
||||
if self.lattice is None: raise KeyError('no lattice type specified')
|
||||
return np.array([[0,0,0]]
|
||||
+ _lattice_points.get(self.lattice if self.lattice == 'hP' else \
|
||||
self.lattice[-1],None),dtype=float)
|
||||
|
||||
def to_lattice(self,*,direction=None,plane=None):
|
||||
"""
|
||||
Calculate lattice vector corresponding to crystal frame direction or plane normal.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
direction|plane : numpy.ndarray of shape (...,3)
|
||||
Vector along direction or plane normal.
|
||||
|
||||
Returns
|
||||
-------
|
||||
Miller : numpy.ndarray of shape (...,3)
|
||||
Lattice vector of direction or plane.
|
||||
Use util.scale_to_coprime to convert to (integer) Miller indices.
|
||||
|
||||
"""
|
||||
if (direction is not None) ^ (plane is None):
|
||||
raise KeyError('specify either "direction" or "plane"')
|
||||
axis,basis = (np.array(direction),self.basis_reciprocal.T) \
|
||||
if plane is None else \
|
||||
(np.array(plane),self.basis_real.T)
|
||||
return np.einsum('il,...l',basis,axis)
|
||||
|
||||
|
||||
def to_frame(self,*,uvw=None,hkl=None):
|
||||
"""
|
||||
Calculate crystal frame vector along lattice direction [uvw] or plane normal (hkl).
|
||||
|
||||
Parameters
|
||||
----------
|
||||
uvw|hkl : numpy.ndarray of shape (...,3)
|
||||
Miller indices of crystallographic direction or plane normal.
|
||||
|
||||
Returns
|
||||
-------
|
||||
vector : numpy.ndarray of shape (...,3)
|
||||
Crystal frame vector along [uvw] direction or (hkl) plane normal.
|
||||
|
||||
"""
|
||||
if (uvw is not None) ^ (hkl is None):
|
||||
raise KeyError('specify either "uvw" or "hkl"')
|
||||
axis,basis = (np.array(uvw),self.basis_real) \
|
||||
if hkl is None else \
|
||||
(np.array(hkl),self.basis_reciprocal)
|
||||
return np.einsum('il,...l',basis,axis)
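Since Crystal, to_frame and to_lattice are all introduced in this changeset, the following usage sketch relies only on the API defined above and is meant as illustration; the lattice constants are made up:

import numpy as np
from damask import Crystal

c = Crystal(lattice='hP', a=3.2e-10, c=5.2e-10)             # hexagonal lattice with assumed a and c
v = c.to_frame(uvw=[1, 0, 0])                               # crystal-frame vector along the [100] lattice direction
print(np.allclose(c.to_lattice(direction=v), [1, 0, 0]))    # converting back recovers the Miller indices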
|
||||
|
||||
|
||||
def kinematics(self,mode):
|
||||
"""
|
||||
Return crystal kinematics systems.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
mode : {'slip','twin'}
|
||||
Deformation mode.
|
||||
|
||||
Returns
|
||||
-------
|
||||
direction_plane : dictionary
|
||||
Directions and planes of deformation mode families.
|
||||
|
||||
"""
|
||||
_kinematics = {
|
||||
'cF': {
|
||||
'slip' :[np.array([
|
||||
[+0,+1,-1, +1,+1,+1],
|
||||
[-1,+0,+1, +1,+1,+1],
|
||||
[+1,-1,+0, +1,+1,+1],
|
||||
[+0,-1,-1, -1,-1,+1],
|
||||
[+1,+0,+1, -1,-1,+1],
|
||||
[-1,+1,+0, -1,-1,+1],
|
||||
[+0,-1,+1, +1,-1,-1],
|
||||
[-1,+0,-1, +1,-1,-1],
|
||||
[+1,+1,+0, +1,-1,-1],
|
||||
[+0,+1,+1, -1,+1,-1],
|
||||
[+1,+0,-1, -1,+1,-1],
|
||||
[-1,-1,+0, -1,+1,-1]]),
|
||||
np.array([
|
||||
[+1,+1,+0, +1,-1,+0],
|
||||
[+1,-1,+0, +1,+1,+0],
|
||||
[+1,+0,+1, +1,+0,-1],
|
||||
[+1,+0,-1, +1,+0,+1],
|
||||
[+0,+1,+1, +0,+1,-1],
|
||||
[+0,+1,-1, +0,+1,+1]])],
|
||||
'twin' :[np.array([
|
||||
[-2, 1, 1, 1, 1, 1],
|
||||
[ 1,-2, 1, 1, 1, 1],
|
||||
[ 1, 1,-2, 1, 1, 1],
|
||||
[ 2,-1, 1, -1,-1, 1],
|
||||
[-1, 2, 1, -1,-1, 1],
|
||||
[-1,-1,-2, -1,-1, 1],
|
||||
[-2,-1,-1, 1,-1,-1],
|
||||
[ 1, 2,-1, 1,-1,-1],
|
||||
[ 1,-1, 2, 1,-1,-1],
|
||||
[ 2, 1,-1, -1, 1,-1],
|
||||
[-1,-2,-1, -1, 1,-1],
|
||||
[-1, 1, 2, -1, 1,-1]])]
|
||||
},
|
||||
'cI': {
|
||||
'slip' :[np.array([
|
||||
[+1,-1,+1, +0,+1,+1],
|
||||
[-1,-1,+1, +0,+1,+1],
|
||||
[+1,+1,+1, +0,-1,+1],
|
||||
[-1,+1,+1, +0,-1,+1],
|
||||
[-1,+1,+1, +1,+0,+1],
|
||||
[-1,-1,+1, +1,+0,+1],
|
||||
[+1,+1,+1, -1,+0,+1],
|
||||
[+1,-1,+1, -1,+0,+1],
|
||||
[-1,+1,+1, +1,+1,+0],
|
||||
[-1,+1,-1, +1,+1,+0],
|
||||
[+1,+1,+1, -1,+1,+0],
|
||||
[+1,+1,-1, -1,+1,+0]]),
|
||||
np.array([
|
||||
[-1,+1,+1, +2,+1,+1],
|
||||
[+1,+1,+1, -2,+1,+1],
|
||||
[+1,+1,-1, +2,-1,+1],
|
||||
[+1,-1,+1, +2,+1,-1],
|
||||
[+1,-1,+1, +1,+2,+1],
|
||||
[+1,+1,-1, -1,+2,+1],
|
||||
[+1,+1,+1, +1,-2,+1],
|
||||
[-1,+1,+1, +1,+2,-1],
|
||||
[+1,+1,-1, +1,+1,+2],
|
||||
[+1,-1,+1, -1,+1,+2],
|
||||
[-1,+1,+1, +1,-1,+2],
|
||||
[+1,+1,+1, +1,+1,-2]]),
|
||||
np.array([
|
||||
[+1,+1,-1, +1,+2,+3],
|
||||
[+1,-1,+1, -1,+2,+3],
|
||||
[-1,+1,+1, +1,-2,+3],
|
||||
[+1,+1,+1, +1,+2,-3],
|
||||
[+1,-1,+1, +1,+3,+2],
|
||||
[+1,+1,-1, -1,+3,+2],
|
||||
[+1,+1,+1, +1,-3,+2],
|
||||
[-1,+1,+1, +1,+3,-2],
|
||||
[+1,+1,-1, +2,+1,+3],
|
||||
[+1,-1,+1, -2,+1,+3],
|
||||
[-1,+1,+1, +2,-1,+3],
|
||||
[+1,+1,+1, +2,+1,-3],
|
||||
[+1,-1,+1, +2,+3,+1],
|
||||
[+1,+1,-1, -2,+3,+1],
|
||||
[+1,+1,+1, +2,-3,+1],
|
||||
[-1,+1,+1, +2,+3,-1],
|
||||
[-1,+1,+1, +3,+1,+2],
|
||||
[+1,+1,+1, -3,+1,+2],
|
||||
[+1,+1,-1, +3,-1,+2],
|
||||
[+1,-1,+1, +3,+1,-2],
|
||||
[-1,+1,+1, +3,+2,+1],
|
||||
[+1,+1,+1, -3,+2,+1],
|
||||
[+1,+1,-1, +3,-2,+1],
|
||||
[+1,-1,+1, +3,+2,-1]])],
|
||||
'twin' :[np.array([
|
||||
[-1, 1, 1, 2, 1, 1],
|
||||
[ 1, 1, 1, -2, 1, 1],
|
||||
[ 1, 1,-1, 2,-1, 1],
|
||||
[ 1,-1, 1, 2, 1,-1],
|
||||
[ 1,-1, 1, 1, 2, 1],
|
||||
[ 1, 1,-1, -1, 2, 1],
|
||||
[ 1, 1, 1, 1,-2, 1],
|
||||
[-1, 1, 1, 1, 2,-1],
|
||||
[ 1, 1,-1, 1, 1, 2],
|
||||
[ 1,-1, 1, -1, 1, 2],
|
||||
[-1, 1, 1, 1,-1, 2],
|
||||
[ 1, 1, 1, 1, 1,-2]])]
|
||||
},
|
||||
'hP': {
|
||||
'slip' :[np.array([
|
||||
[+2,-1,-1,+0, +0,+0,+0,+1],
|
||||
[-1,+2,-1,+0, +0,+0,+0,+1],
|
||||
[-1,-1,+2,+0, +0,+0,+0,+1]]),
|
||||
np.array([
|
||||
[+2,-1,-1,+0, +0,+1,-1,+0],
|
||||
[-1,+2,-1,+0, -1,+0,+1,+0],
|
||||
[-1,-1,+2,+0, +1,-1,+0,+0]]),
|
||||
np.array([
|
||||
[-1,+1,+0,+0, +1,+1,-2,+0],
|
||||
[+0,-1,+1,+0, -2,+1,+1,+0],
|
||||
[+1,+0,-1,+0, +1,-2,+1,+0]]),
|
||||
np.array([
|
||||
[-1,+2,-1,+0, +1,+0,-1,+1],
|
||||
[-2,+1,+1,+0, +0,+1,-1,+1],
|
||||
[-1,-1,+2,+0, -1,+1,+0,+1],
|
||||
[+1,-2,+1,+0, -1,+0,+1,+1],
|
||||
[+2,-1,-1,+0, +0,-1,+1,+1],
|
||||
[+1,+1,-2,+0, +1,-1,+0,+1]]),
|
||||
np.array([
|
||||
[-2,+1,+1,+3, +1,+0,-1,+1],
|
||||
[-1,-1,+2,+3, +1,+0,-1,+1],
|
||||
[-1,-1,+2,+3, +0,+1,-1,+1],
|
||||
[+1,-2,+1,+3, +0,+1,-1,+1],
|
||||
[+1,-2,+1,+3, -1,+1,+0,+1],
|
||||
[+2,-1,-1,+3, -1,+1,+0,+1],
|
||||
[+2,-1,-1,+3, -1,+0,+1,+1],
|
||||
[+1,+1,-2,+3, -1,+0,+1,+1],
|
||||
[+1,+1,-2,+3, +0,-1,+1,+1],
|
||||
[-1,+2,-1,+3, +0,-1,+1,+1],
|
||||
[-1,+2,-1,+3, +1,-1,+0,+1],
|
||||
[-2,+1,+1,+3, +1,-1,+0,+1]]),
|
||||
np.array([
|
||||
[-1,-1,+2,+3, +1,+1,-2,+2],
|
||||
[+1,-2,+1,+3, -1,+2,-1,+2],
|
||||
[+2,-1,-1,+3, -2,+1,+1,+2],
|
||||
[+1,+1,-2,+3, -1,-1,+2,+2],
|
||||
[-1,+2,-1,+3, +1,-2,+1,+2],
|
||||
[-2,+1,+1,+3, +2,-1,-1,+2]])],
|
||||
'twin' :[np.array([
|
||||
[-1, 0, 1, 1, 1, 0,-1, 2], # shear = (3-(c/a)^2)/(sqrt(3) c/a) <-10.1>{10.2}
|
||||
[ 0,-1, 1, 1, 0, 1,-1, 2],
|
||||
[ 1,-1, 0, 1, -1, 1, 0, 2],
|
||||
[ 1, 0,-1, 1, -1, 0, 1, 2],
|
||||
[ 0, 1,-1, 1, 0,-1, 1, 2],
|
||||
[-1, 1, 0, 1, 1,-1, 0, 2]]),
|
||||
np.array([
|
||||
[-1,-1, 2, 6, 1, 1,-2, 1], # shear = 1/(c/a) <11.6>{-1-1.1}
|
||||
[ 1,-2, 1, 6, -1, 2,-1, 1],
|
||||
[ 2,-1,-1, 6, -2, 1, 1, 1],
|
||||
[ 1, 1,-2, 6, -1,-1, 2, 1],
|
||||
[-1, 2,-1, 6, 1,-2, 1, 1],
|
||||
[-2, 1, 1, 6, 2,-1,-1, 1]]),
|
||||
np.array([
|
||||
[ 1, 0,-1,-2, 1, 0,-1, 1], # shear = (4(c/a)^2-9)/(4 sqrt(3) c/a) <10.-2>{10.1}
|
||||
[ 0, 1,-1,-2, 0, 1,-1, 1],
|
||||
[-1, 1, 0,-2, -1, 1, 0, 1],
|
||||
[-1, 0, 1,-2, -1, 0, 1, 1],
|
||||
[ 0,-1, 1,-2, 0,-1, 1, 1],
|
||||
[ 1,-1, 0,-2, 1,-1, 0, 1]]),
|
||||
np.array([
|
||||
[ 1, 1,-2,-3, 1, 1,-2, 2], # shear = 2((c/a)^2-2)/(3 c/a) <11.-3>{11.2}
|
||||
[-1, 2,-1,-3, -1, 2,-1, 2],
|
||||
[-2, 1, 1,-3, -2, 1, 1, 2],
|
||||
[-1,-1, 2,-3, -1,-1, 2, 2],
|
||||
[ 1,-2, 1,-3, 1,-2, 1, 2],
|
||||
[ 2,-1,-1,-3, 2,-1,-1, 2]])]
|
||||
},
|
||||
}
|
||||
master = _kinematics[self.lattice][mode]
|
||||
if self.lattice == 'hP':
|
||||
return {'direction':[util.Bravais_to_Miller(uvtw=m[:,0:4]) for m in master],
|
||||
'plane': [util.Bravais_to_Miller(hkil=m[:,4:8]) for m in master]}
|
||||
else:
|
||||
return {'direction':[m[:,0:3] for m in master],
|
||||
'plane': [m[:,3:6] for m in master]}
|
||||
|
||||
|
||||
def relation_operations(self,model):
|
||||
"""
|
||||
Crystallographic orientation relationships for phase transformations.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
model : str
|
||||
Name of orientation relationship.
|
||||
|
||||
Returns
|
||||
-------
|
||||
operations : (string, damask.Rotation)
|
||||
Resulting lattice and rotations characterizing the orientation relationship.
|
||||
|
||||
References
|
||||
----------
|
||||
S. Morito et al., Journal of Alloys and Compounds 577:s587-s592, 2013
|
||||
https://doi.org/10.1016/j.jallcom.2012.02.004
|
||||
|
||||
K. Kitahara et al., Acta Materialia 54(5):1279-1288, 2006
|
||||
https://doi.org/10.1016/j.actamat.2005.11.001
|
||||
|
||||
Y. He et al., Journal of Applied Crystallography 39:72-81, 2006
|
||||
https://doi.org/10.1107/S0021889805038276
|
||||
|
||||
H. Kitahara et al., Materials Characterization 54(4-5):378-386, 2005
|
||||
https://doi.org/10.1016/j.matchar.2004.12.015
|
||||
|
||||
Y. He et al., Acta Materialia 53(4):1179-1190, 2005
|
||||
https://doi.org/10.1016/j.actamat.2004.11.021
|
||||
|
||||
"""
|
||||
_orientation_relationships = {
|
||||
'KS': {
|
||||
'cF' : np.array([
|
||||
[[-1, 0, 1],[ 1, 1, 1]],
|
||||
[[-1, 0, 1],[ 1, 1, 1]],
|
||||
[[ 0, 1,-1],[ 1, 1, 1]],
|
||||
[[ 0, 1,-1],[ 1, 1, 1]],
|
||||
[[ 1,-1, 0],[ 1, 1, 1]],
|
||||
[[ 1,-1, 0],[ 1, 1, 1]],
|
||||
[[ 1, 0,-1],[ 1,-1, 1]],
|
||||
[[ 1, 0,-1],[ 1,-1, 1]],
|
||||
[[-1,-1, 0],[ 1,-1, 1]],
|
||||
[[-1,-1, 0],[ 1,-1, 1]],
|
||||
[[ 0, 1, 1],[ 1,-1, 1]],
|
||||
[[ 0, 1, 1],[ 1,-1, 1]],
|
||||
[[ 0,-1, 1],[-1, 1, 1]],
|
||||
[[ 0,-1, 1],[-1, 1, 1]],
|
||||
[[-1, 0,-1],[-1, 1, 1]],
|
||||
[[-1, 0,-1],[-1, 1, 1]],
|
||||
[[ 1, 1, 0],[-1, 1, 1]],
|
||||
[[ 1, 1, 0],[-1, 1, 1]],
|
||||
[[-1, 1, 0],[ 1, 1,-1]],
|
||||
[[-1, 1, 0],[ 1, 1,-1]],
|
||||
[[ 0,-1,-1],[ 1, 1,-1]],
|
||||
[[ 0,-1,-1],[ 1, 1,-1]],
|
||||
[[ 1, 0, 1],[ 1, 1,-1]],
|
||||
[[ 1, 0, 1],[ 1, 1,-1]],
|
||||
],dtype=float),
|
||||
'cI' : np.array([
|
||||
[[-1,-1, 1],[ 0, 1, 1]],
|
||||
[[-1, 1,-1],[ 0, 1, 1]],
|
||||
[[-1,-1, 1],[ 0, 1, 1]],
|
||||
[[-1, 1,-1],[ 0, 1, 1]],
|
||||
[[-1,-1, 1],[ 0, 1, 1]],
|
||||
[[-1, 1,-1],[ 0, 1, 1]],
|
||||
[[-1,-1, 1],[ 0, 1, 1]],
|
||||
[[-1, 1,-1],[ 0, 1, 1]],
|
||||
[[-1,-1, 1],[ 0, 1, 1]],
|
||||
[[-1, 1,-1],[ 0, 1, 1]],
|
||||
[[-1,-1, 1],[ 0, 1, 1]],
|
||||
[[-1, 1,-1],[ 0, 1, 1]],
|
||||
[[-1,-1, 1],[ 0, 1, 1]],
|
||||
[[-1, 1,-1],[ 0, 1, 1]],
|
||||
[[-1,-1, 1],[ 0, 1, 1]],
|
||||
[[-1, 1,-1],[ 0, 1, 1]],
|
||||
[[-1,-1, 1],[ 0, 1, 1]],
|
||||
[[-1, 1,-1],[ 0, 1, 1]],
|
||||
[[-1,-1, 1],[ 0, 1, 1]],
|
||||
[[-1, 1,-1],[ 0, 1, 1]],
|
||||
[[-1,-1, 1],[ 0, 1, 1]],
|
||||
[[-1, 1,-1],[ 0, 1, 1]],
|
||||
[[-1,-1, 1],[ 0, 1, 1]],
|
||||
[[-1, 1,-1],[ 0, 1, 1]],
|
||||
],dtype=float),
|
||||
},
|
||||
'GT': {
|
||||
'cF' : np.array([
|
||||
[[ -5,-12, 17],[ 1, 1, 1]],
|
||||
[[ 17, -5,-12],[ 1, 1, 1]],
|
||||
[[-12, 17, -5],[ 1, 1, 1]],
|
||||
[[ 5, 12, 17],[ -1, -1, 1]],
|
||||
[[-17, 5,-12],[ -1, -1, 1]],
|
||||
[[ 12,-17, -5],[ -1, -1, 1]],
|
||||
[[ -5, 12,-17],[ -1, 1, 1]],
|
||||
[[ 17, 5, 12],[ -1, 1, 1]],
|
||||
[[-12,-17, 5],[ -1, 1, 1]],
|
||||
[[ 5,-12,-17],[ 1, -1, 1]],
|
||||
[[-17, -5, 12],[ 1, -1, 1]],
|
||||
[[ 12, 17, 5],[ 1, -1, 1]],
|
||||
[[ -5, 17,-12],[ 1, 1, 1]],
|
||||
[[-12, -5, 17],[ 1, 1, 1]],
|
||||
[[ 17,-12, -5],[ 1, 1, 1]],
|
||||
[[ 5,-17,-12],[ -1, -1, 1]],
|
||||
[[ 12, 5, 17],[ -1, -1, 1]],
|
||||
[[-17, 12, -5],[ -1, -1, 1]],
|
||||
[[ -5,-17, 12],[ -1, 1, 1]],
|
||||
[[-12, 5,-17],[ -1, 1, 1]],
|
||||
[[ 17, 12, 5],[ -1, 1, 1]],
|
||||
[[ 5, 17, 12],[ 1, -1, 1]],
|
||||
[[ 12, -5,-17],[ 1, -1, 1]],
|
||||
[[-17,-12, 5],[ 1, -1, 1]],
|
||||
],dtype=float),
|
||||
'cI' : np.array([
|
||||
[[-17, -7, 17],[ 1, 0, 1]],
|
||||
[[ 17,-17, -7],[ 1, 1, 0]],
|
||||
[[ -7, 17,-17],[ 0, 1, 1]],
|
||||
[[ 17, 7, 17],[ -1, 0, 1]],
|
||||
[[-17, 17, -7],[ -1, -1, 0]],
|
||||
[[ 7,-17,-17],[ 0, -1, 1]],
|
||||
[[-17, 7,-17],[ -1, 0, 1]],
|
||||
[[ 17, 17, 7],[ -1, 1, 0]],
|
||||
[[ -7,-17, 17],[ 0, 1, 1]],
|
||||
[[ 17, -7,-17],[ 1, 0, 1]],
|
||||
[[-17,-17, 7],[ 1, -1, 0]],
|
||||
[[ 7, 17, 17],[ 0, -1, 1]],
|
||||
[[-17, 17, -7],[ 1, 1, 0]],
|
||||
[[ -7,-17, 17],[ 0, 1, 1]],
|
||||
[[ 17, -7,-17],[ 1, 0, 1]],
|
||||
[[ 17,-17, -7],[ -1, -1, 0]],
|
||||
[[ 7, 17, 17],[ 0, -1, 1]],
|
||||
[[-17, 7,-17],[ -1, 0, 1]],
|
||||
[[-17,-17, 7],[ -1, 1, 0]],
|
||||
[[ -7, 17,-17],[ 0, 1, 1]],
|
||||
[[ 17, 7, 17],[ -1, 0, 1]],
|
||||
[[ 17, 17, 7],[ 1, -1, 0]],
|
||||
[[ 7,-17,-17],[ 0, -1, 1]],
|
||||
[[-17, -7, 17],[ 1, 0, 1]],
|
||||
],dtype=float),
|
||||
},
|
||||
'GT_prime': {
|
||||
'cF' : np.array([
|
||||
[[ 0, 1, -1],[ 7, 17, 17]],
|
||||
[[ -1, 0, 1],[ 17, 7, 17]],
|
||||
[[ 1, -1, 0],[ 17, 17, 7]],
|
||||
[[ 0, -1, -1],[ -7,-17, 17]],
|
||||
[[ 1, 0, 1],[-17, -7, 17]],
|
||||
[[ 1, -1, 0],[-17,-17, 7]],
|
||||
[[ 0, 1, -1],[ 7,-17,-17]],
|
||||
[[ 1, 0, 1],[ 17, -7,-17]],
|
||||
[[ -1, -1, 0],[ 17,-17, -7]],
|
||||
[[ 0, -1, -1],[ -7, 17,-17]],
|
||||
[[ -1, 0, 1],[-17, 7,-17]],
|
||||
[[ -1, -1, 0],[-17, 17, -7]],
|
||||
[[ 0, -1, 1],[ 7, 17, 17]],
|
||||
[[ 1, 0, -1],[ 17, 7, 17]],
|
||||
[[ -1, 1, 0],[ 17, 17, 7]],
|
||||
[[ 0, 1, 1],[ -7,-17, 17]],
|
||||
[[ -1, 0, -1],[-17, -7, 17]],
|
||||
[[ -1, 1, 0],[-17,-17, 7]],
|
||||
[[ 0, -1, 1],[ 7,-17,-17]],
|
||||
[[ -1, 0, -1],[ 17, -7,-17]],
|
||||
[[ 1, 1, 0],[ 17,-17, -7]],
|
||||
[[ 0, 1, 1],[ -7, 17,-17]],
|
||||
[[ 1, 0, -1],[-17, 7,-17]],
|
||||
[[ 1, 1, 0],[-17, 17, -7]],
|
||||
],dtype=float),
|
||||
'cI' : np.array([
|
||||
[[ 1, 1, -1],[ 12, 5, 17]],
|
||||
[[ -1, 1, 1],[ 17, 12, 5]],
|
||||
[[ 1, -1, 1],[ 5, 17, 12]],
|
||||
[[ -1, -1, -1],[-12, -5, 17]],
|
||||
[[ 1, -1, 1],[-17,-12, 5]],
|
||||
[[ 1, -1, -1],[ -5,-17, 12]],
|
||||
[[ -1, 1, -1],[ 12, -5,-17]],
|
||||
[[ 1, 1, 1],[ 17,-12, -5]],
|
||||
[[ -1, -1, 1],[ 5,-17,-12]],
|
||||
[[ 1, -1, -1],[-12, 5,-17]],
|
||||
[[ -1, -1, 1],[-17, 12, -5]],
|
||||
[[ -1, -1, -1],[ -5, 17,-12]],
|
||||
[[ 1, -1, 1],[ 12, 17, 5]],
|
||||
[[ 1, 1, -1],[ 5, 12, 17]],
|
||||
[[ -1, 1, 1],[ 17, 5, 12]],
|
||||
[[ -1, 1, 1],[-12,-17, 5]],
|
||||
[[ -1, -1, -1],[ -5,-12, 17]],
|
||||
[[ -1, 1, -1],[-17, -5, 12]],
|
||||
[[ -1, -1, 1],[ 12,-17, -5]],
|
||||
[[ -1, 1, -1],[ 5,-12,-17]],
|
||||
[[ 1, 1, 1],[ 17, -5,-12]],
|
||||
[[ 1, 1, 1],[-12, 17, -5]],
|
||||
[[ 1, -1, -1],[ -5, 12,-17]],
|
||||
[[ 1, 1, -1],[-17, 5,-12]],
|
||||
],dtype=float),
|
||||
},
|
||||
'NW': {
|
||||
'cF' : np.array([
|
||||
[[ 2, -1, -1],[ 1, 1, 1]],
|
||||
[[ -1, 2, -1],[ 1, 1, 1]],
|
||||
[[ -1, -1, 2],[ 1, 1, 1]],
|
||||
[[ -2, -1, -1],[ -1, 1, 1]],
|
||||
[[ 1, 2, -1],[ -1, 1, 1]],
|
||||
[[ 1, -1, 2],[ -1, 1, 1]],
|
||||
[[ 2, 1, -1],[ 1, -1, 1]],
|
||||
[[ -1, -2, -1],[ 1, -1, 1]],
|
||||
[[ -1, 1, 2],[ 1, -1, 1]],
|
||||
[[ 2, -1, 1],[ -1, -1, 1]],
|
||||
[[ -1, 2, 1],[ -1, -1, 1]],
|
||||
[[ -1, -1, -2],[ -1, -1, 1]],
|
||||
],dtype=float),
|
||||
'cI' : np.array([
|
||||
[[ 0, -1, 1],[ 0, 1, 1]],
|
||||
[[ 0, -1, 1],[ 0, 1, 1]],
|
||||
[[ 0, -1, 1],[ 0, 1, 1]],
|
||||
[[ 0, -1, 1],[ 0, 1, 1]],
|
||||
[[ 0, -1, 1],[ 0, 1, 1]],
|
||||
[[ 0, -1, 1],[ 0, 1, 1]],
|
||||
[[ 0, -1, 1],[ 0, 1, 1]],
|
||||
[[ 0, -1, 1],[ 0, 1, 1]],
|
||||
[[ 0, -1, 1],[ 0, 1, 1]],
|
||||
[[ 0, -1, 1],[ 0, 1, 1]],
|
||||
[[ 0, -1, 1],[ 0, 1, 1]],
|
||||
[[ 0, -1, 1],[ 0, 1, 1]],
|
||||
],dtype=float),
|
||||
},
|
||||
'Pitsch': {
|
||||
'cF' : np.array([
|
||||
[[ 1, 0, 1],[ 0, 1, 0]],
|
||||
[[ 1, 1, 0],[ 0, 0, 1]],
|
||||
[[ 0, 1, 1],[ 1, 0, 0]],
|
||||
[[ 0, 1, -1],[ 1, 0, 0]],
|
||||
[[ -1, 0, 1],[ 0, 1, 0]],
|
||||
[[ 1, -1, 0],[ 0, 0, 1]],
|
||||
[[ 1, 0, -1],[ 0, 1, 0]],
|
||||
[[ -1, 1, 0],[ 0, 0, 1]],
|
||||
[[ 0, -1, 1],[ 1, 0, 0]],
|
||||
[[ 0, 1, 1],[ 1, 0, 0]],
|
||||
[[ 1, 0, 1],[ 0, 1, 0]],
|
||||
[[ 1, 1, 0],[ 0, 0, 1]],
|
||||
],dtype=float),
|
||||
'cI' : np.array([
|
||||
[[ 1, -1, 1],[ -1, 0, 1]],
|
||||
[[ 1, 1, -1],[ 1, -1, 0]],
|
||||
[[ -1, 1, 1],[ 0, 1, -1]],
|
||||
[[ -1, 1, -1],[ 0, -1, -1]],
|
||||
[[ -1, -1, 1],[ -1, 0, -1]],
|
||||
[[ 1, -1, -1],[ -1, -1, 0]],
|
||||
[[ 1, -1, -1],[ -1, 0, -1]],
|
||||
[[ -1, 1, -1],[ -1, -1, 0]],
|
||||
[[ -1, -1, 1],[ 0, -1, -1]],
|
||||
[[ -1, 1, 1],[ 0, -1, 1]],
|
||||
[[ 1, -1, 1],[ 1, 0, -1]],
|
||||
[[ 1, 1, -1],[ -1, 1, 0]],
|
||||
],dtype=float),
|
||||
},
|
||||
'Bain': {
|
||||
'cF' : np.array([
|
||||
[[ 0, 1, 0],[ 1, 0, 0]],
|
||||
[[ 0, 0, 1],[ 0, 1, 0]],
|
||||
[[ 1, 0, 0],[ 0, 0, 1]],
|
||||
],dtype=float),
|
||||
'cI' : np.array([
|
||||
[[ 0, 1, 1],[ 1, 0, 0]],
|
||||
[[ 1, 0, 1],[ 0, 1, 0]],
|
||||
[[ 1, 1, 0],[ 0, 0, 1]],
|
||||
],dtype=float),
|
||||
},
|
||||
'Burgers' : {
|
||||
'cI' : np.array([
|
||||
[[ -1, 1, 1],[ 1, 1, 0]],
|
||||
[[ -1, 1, -1],[ 1, 1, 0]],
|
||||
[[ 1, 1, 1],[ 1, -1, 0]],
|
||||
[[ 1, 1, -1],[ 1, -1, 0]],
|
||||
|
||||
[[ 1, 1, -1],[ 1, 0, 1]],
|
||||
[[ -1, 1, 1],[ 1, 0, 1]],
|
||||
[[ 1, 1, 1],[ -1, 0, 1]],
|
||||
[[ 1, -1, 1],[ -1, 0, 1]],
|
||||
|
||||
[[ -1, 1, -1],[ 0, 1, 1]],
|
||||
[[ 1, 1, -1],[ 0, 1, 1]],
|
||||
[[ -1, 1, 1],[ 0, -1, 1]],
|
||||
[[ 1, 1, 1],[ 0, -1, 1]],
|
||||
],dtype=float),
|
||||
'hP' : np.array([
|
||||
[[ -1, 2, -1, 0],[ 0, 0, 0, 1]],
|
||||
[[ -1, -1, 2, 0],[ 0, 0, 0, 1]],
|
||||
[[ -1, 2, -1, 0],[ 0, 0, 0, 1]],
|
||||
[[ -1, -1, 2, 0],[ 0, 0, 0, 1]],
|
||||
|
||||
[[ -1, 2, -1, 0],[ 0, 0, 0, 1]],
|
||||
[[ -1, -1, 2, 0],[ 0, 0, 0, 1]],
|
||||
[[ -1, 2, -1, 0],[ 0, 0, 0, 1]],
|
||||
[[ -1, -1, 2, 0],[ 0, 0, 0, 1]],
|
||||
|
||||
[[ -1, 2, -1, 0],[ 0, 0, 0, 1]],
|
||||
[[ -1, -1, 2, 0],[ 0, 0, 0, 1]],
|
||||
[[ -1, 2, -1, 0],[ 0, 0, 0, 1]],
|
||||
[[ -1, -1, 2, 0],[ 0, 0, 0, 1]],
|
||||
],dtype=float),
|
||||
},
|
||||
}
|
||||
orientation_relationships = {k:v for k,v in _orientation_relationships.items() if self.lattice in v}
|
||||
if model not in orientation_relationships:
|
||||
raise KeyError(f'unknown orientation relationship "{model}"')
|
||||
r = orientation_relationships[model]
|
||||
|
||||
sl = self.lattice
|
||||
ol = (set(r)-{sl}).pop()
|
||||
m = r[sl]
|
||||
o = r[ol]
|
||||
|
||||
p_,_p = np.zeros(m.shape[:-1]+(3,)),np.zeros(o.shape[:-1]+(3,))
|
||||
p_[...,0,:] = m[...,0,:] if m.shape[-1] == 3 else util.Bravais_to_Miller(uvtw=m[...,0,0:4])
|
||||
p_[...,1,:] = m[...,1,:] if m.shape[-1] == 3 else util.Bravais_to_Miller(hkil=m[...,1,0:4])
|
||||
_p[...,0,:] = o[...,0,:] if o.shape[-1] == 3 else util.Bravais_to_Miller(uvtw=o[...,0,0:4])
|
||||
_p[...,1,:] = o[...,1,:] if o.shape[-1] == 3 else util.Bravais_to_Miller(hkil=o[...,1,0:4])
|
||||
|
||||
return (ol,Rotation.from_parallel(p_,_p))
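A usage sketch for the orientation-relationship table above, again relying only on the API introduced in this changeset: the method returns the Bravais lattice of the related phase together with the Rotation set that maps the parent onto its variants.

from damask import Crystal

austenite = Crystal(lattice='cF', a=3.6e-10)               # fcc parent; lattice constant assumed
daughter, variants = austenite.relation_operations('KS')   # Kurdjumov-Sachs
print(daughter)                                            # 'cI'
print(variants.shape)                                      # (24,): one rotation per KS variant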
|
|
@ -270,7 +270,8 @@ class Grid:
cells = np.array(v.vtk_data.GetDimensions())-1
bbox = np.array(v.vtk_data.GetBounds()).reshape(3,2).T

return Grid(v.get('MaterialId').reshape(cells,order='F') - 1, bbox[1] - bbox[0], bbox[0],
return Grid(v.get('MaterialId').reshape(cells,order='F').astype('int32',casting='unsafe') - 1,
bbox[1] - bbox[0], bbox[0],
util.execution_stamp('Grid','load_Neper'))
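The added astype call presumably guards against Neper storing the MaterialId field as a float or 64-bit integer array; the relevant numpy behaviour in isolation:

import numpy as np

ids = np.array([1., 2., 2., 3.])                      # material IDs as they might come out of the VTK file
print(ids.astype('int32', casting='unsafe') - 1)      # [0 1 1 2]: zero-based int32 indices, as Grid expects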
|
||||
|
||||
|
||||
|
|
File diff suppressed because it is too large
|
@ -567,9 +567,13 @@ class Result:
formula = kwargs['formula']
for d in re.findall(r'#(.*?)#',formula):
formula = formula.replace(f'#{d}#',f"kwargs['{d}']['data']")
data = eval(formula)

if not hasattr(data,'shape') or data.shape[0] != kwargs[d]['data'].shape[0]:
raise ValueError("'{}' results in invalid shape".format(kwargs['formula']))

return {
'data': eval(formula),
'data': data,
'label': kwargs['label'],
'meta': {
'unit': kwargs['unit'],
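The '#label#' substitution above is a plain regex replace followed by eval on the assembled expression, and the new shape check ensures the result still has one entry per material point. Stripped of the Result plumbing, the mechanism looks like this (illustrative only):

import re
import numpy as np

kwargs = {'sigma': {'data': np.random.rand(5, 3, 3)}}       # stand-in for a dataset read from the file

formula = 'np.linalg.det(#sigma#)'                          # user-facing formula referencing '#sigma#'
for d in re.findall(r'#(.*?)#', formula):
    formula = formula.replace(f'#{d}#', f"kwargs['{d}']['data']")

data = eval(formula)                                        # evaluates np.linalg.det(kwargs['sigma']['data'])
print(data.shape)                                           # (5,): one value per point, as the shape check expects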
|
||||
|
@ -981,47 +985,39 @@ class Result:
|
|||
self._add_generic_pointwise(self._add_stress_second_Piola_Kirchhoff,{'P':P,'F':F})
|
||||
|
||||
|
||||
# The add_pole functionality needs discussion.
|
||||
# The new Crystal object can perform such a calculation but the outcome depends on the lattice parameters
|
||||
# as well as on whether a direction or plane is concerned (see the DAMASK_examples/pole_figure notebook).
|
||||
# Below code appears to be too simplistic.
|
||||
|
||||
# @staticmethod
|
||||
# def _add_pole(q,p,polar):
|
||||
# pole = np.array(p)
|
||||
# unit_pole = pole/np.linalg.norm(pole)
|
||||
# m = util.scale_to_coprime(pole)
|
||||
# rot = Rotation(q['data'].view(np.double).reshape(-1,4))
|
||||
#
|
||||
# rotatedPole = rot @ np.broadcast_to(unit_pole,rot.shape+(3,)) # rotate pole according to crystal orientation
|
||||
# xy = rotatedPole[:,0:2]/(1.+abs(unit_pole[2])) # stereographic projection
|
||||
# coords = xy if not polar else \
|
||||
# np.block([np.sqrt(xy[:,0:1]*xy[:,0:1]+xy[:,1:2]*xy[:,1:2]),np.arctan2(xy[:,1:2],xy[:,0:1])])
|
||||
# return {
|
||||
# 'data': coords,
|
||||
# 'label': 'p^{}_[{} {} {})'.format(u'rφ' if polar else 'xy',*m),
|
||||
# 'meta' : {
|
||||
# 'unit': '1',
|
||||
# 'description': '{} coordinates of stereographic projection of pole (direction/plane) in crystal frame'\
|
||||
# .format('Polar' if polar else 'Cartesian'),
|
||||
# 'creator': 'add_pole'
|
||||
# }
|
||||
# }
|
||||
# def add_pole(self,q,p,polar=False):
|
||||
# """
|
||||
# Add coordinates of stereographic projection of given pole in crystal frame.
|
||||
#
|
||||
# Parameters
|
||||
# ----------
|
||||
# q : str
|
||||
# Name of the dataset containing the crystallographic orientation as quaternions.
|
||||
# p : numpy.array of shape (3)
|
||||
# Crystallographic direction or plane.
|
||||
# polar : bool, optional
|
||||
# Give pole in polar coordinates. Defaults to False.
|
||||
#
|
||||
# """
|
||||
# self._add_generic_pointwise(self._add_pole,{'q':q},{'p':p,'polar':polar})
|
||||
@staticmethod
|
||||
def _add_pole(q,uvw,hkl,with_symmetry):
|
||||
c = q['meta']['c/a'] if 'c/a' in q['meta'] else 1
|
||||
pole = Orientation(q['data'],lattice=q['meta']['lattice'],a=1,c=c).to_pole(uvw=uvw,hkl=hkl,with_symmetry=with_symmetry)
|
||||
|
||||
return {
|
||||
'data': pole,
|
||||
'label': 'p^[{} {} {}]'.format(*uvw) if uvw else 'p^({} {} {})'.format(*hkl),
|
||||
'meta' : {
|
||||
'unit': '1',
|
||||
'description': 'lab frame vector along lattice ' \
|
||||
+ ('direction' if uvw else 'plane') \
|
||||
+ ('s' if with_symmetry else ''),
|
||||
'creator': 'add_pole'
|
||||
}
|
||||
}
|
||||
def add_pole(self,q='O',*,uvw=None,hkl=None,with_symmetry=False):
|
||||
"""
|
||||
Add lab frame vector along lattice direction [uvw] or plane normal (hkl).
|
||||
|
||||
Parameters
|
||||
----------
|
||||
q : str
|
||||
Name of the dataset containing the crystallographic orientation as quaternions.
|
||||
Defaults to 'O'.
|
||||
uvw|hkl : numpy.ndarray of shape (...,3)
|
||||
Miller indices of crystallographic direction or plane normal.
|
||||
with_symmetry : bool, optional
|
||||
Calculate all N symmetrically equivalent vectors.
|
||||
|
||||
"""
|
||||
self._add_generic_pointwise(self._add_pole,{'q':q},{'uvw':uvw,'hkl':hkl,'with_symmetry':with_symmetry})
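With the signature introduced above, a call could look as follows; 'my_results.hdf5' is a placeholder for an existing DAMASK result file:

import damask

r = damask.Result('my_results.hdf5')                 # placeholder file name
r.add_pole(uvw=[1, 0, 0], with_symmetry=True)        # lab-frame vectors along all symmetrically equivalent <100>
r.add_pole(hkl=[1, 1, 1])                            # lab-frame vector along the (111) plane normal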
|
||||
|
||||
|
||||
@staticmethod
|
||||
|
@ -1266,7 +1262,7 @@ class Result:
|
|||
Arguments parsed to func.
|
||||
|
||||
"""
|
||||
if len(datasets) != 1 or self.N_constituents !=1:
|
||||
if len(datasets) != 1 or self.N_constituents != 1:
|
||||
raise NotImplementedError
|
||||
|
||||
at_cell_ph,in_data_ph,at_cell_ho,in_data_ho = self._mappings()
|
||||
|
@ -1313,7 +1309,8 @@ class Result:
|
|||
loc = f[group+'/'+label]
|
||||
datasets_in[arg]={'data' :loc[()],
|
||||
'label':label,
|
||||
'meta': {k:(v if h5py3 else v.decode()) for k,v in loc.attrs.items()}}
|
||||
'meta': {k:(v.decode() if not h5py3 and type(v) is bytes else v) \
|
||||
for k,v in loc.attrs.items()}}
|
||||
lock.release()
|
||||
r = func(**datasets_in,**args)
|
||||
return [group,r]
|
||||
|
@ -1366,22 +1363,24 @@ class Result:
|
|||
dataset[...] = result['data']
|
||||
dataset.attrs['overwritten'] = True
|
||||
else:
|
||||
if result['data'].size >= chunk_size*2:
|
||||
shape = result['data'].shape
|
||||
if result['data'].size >= chunk_size*2:
|
||||
chunks = (chunk_size//np.prod(shape[1:]),)+shape[1:]
|
||||
compression = ('gzip',6)
|
||||
else:
|
||||
chunks = shape
|
||||
compression = (None,None)
|
||||
dataset = f[group].create_dataset(result['label'],data=result['data'],
|
||||
maxshape=shape, chunks=chunks,
|
||||
compression='gzip', compression_opts=6,
|
||||
compression=compression[0], compression_opts=compression[1],
|
||||
shuffle=True,fletcher32=True)
|
||||
else:
|
||||
dataset = f[group].create_dataset(result['label'],data=result['data'])
|
||||
|
||||
now = datetime.datetime.now().astimezone()
|
||||
dataset.attrs['created'] = now.strftime('%Y-%m-%d %H:%M:%S%z') if h5py3 else \
|
||||
now.strftime('%Y-%m-%d %H:%M:%S%z').encode()
|
||||
|
||||
for l,v in result['meta'].items():
|
||||
dataset.attrs[l.lower()]=v if h5py3 else v.encode()
|
||||
dataset.attrs[l.lower()]=v.encode() if not h5py3 and type(v) is str else v
|
||||
creator = dataset.attrs['creator'] if h5py3 else \
|
||||
dataset.attrs['creator'].decode()
|
||||
dataset.attrs['creator'] = f'damask.Result.{creator} v{damask.version}' if h5py3 else \
|
||||
|
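The branch above only switches on chunking and gzip compression when a dataset is large enough to benefit; the corresponding plain h5py calls, as a stand-alone sketch with made-up names and an assumed chunk_size:

import h5py
import numpy as np

data = np.random.rand(100_000, 3)                       # large result: chunk and compress
chunk_size = 1024**2 // 8                               # target chunk size in elements (assumption)

with h5py.File('scratch.hdf5', 'w') as f:               # placeholder file name
    shape = data.shape
    if data.size >= chunk_size*2:
        chunks = (chunk_size//np.prod(shape[1:]),) + shape[1:]
        f.create_dataset('large', data=data, maxshape=shape, chunks=chunks,
                         compression='gzip', compression_opts=6,
                         shuffle=True, fletcher32=True)
    else:
        f.create_dataset('large', data=data)            # small data: plain contiguous dataset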
@ -1770,7 +1769,7 @@ class Result:
|
|||
if type(obj) == h5py.Dataset and _match(output,[name]):
|
||||
d = obj.attrs['description'] if h5py3 else obj.attrs['description'].decode()
|
||||
if not Path(name).exists() or overwrite:
|
||||
with open(name,'w') as f_out: f_out.write(obj[()].decode())
|
||||
with open(name,'w') as f_out: f_out.write(obj[0].decode())
|
||||
print(f"Exported {d} to '{name}'.")
|
||||
else:
|
||||
print(f"'{name}' exists, {d} not exported.")
|
||||
|
|
|
@ -120,7 +120,7 @@ class Rotation:
|
|||
Parameters
|
||||
----------
|
||||
other : Rotation
|
||||
Rotation to check for equality.
|
||||
Rotation to check for inequality.
|
||||
|
||||
"""
|
||||
return np.logical_not(self==other)
|
||||
|
@ -236,7 +236,7 @@ class Rotation:
|
|||
|
||||
Parameters
|
||||
----------
|
||||
other : Rotation of shape(self.shape)
|
||||
other : Rotation of shape (self.shape)
|
||||
Rotation for composition.
|
||||
|
||||
Returns
|
||||
|
@ -262,7 +262,7 @@ class Rotation:
|
|||
|
||||
Parameters
|
||||
----------
|
||||
other : Rotation of shape(self.shape)
|
||||
other : Rotation of shape (self.shape)
|
||||
Rotation for composition.
|
||||
|
||||
"""
|
||||
|
@ -276,7 +276,7 @@ class Rotation:
|
|||
Parameters
|
||||
----------
|
||||
other : damask.Rotation of shape (self.shape)
|
||||
Rotation to inverse composition.
|
||||
Rotation to invert for composition.
|
||||
|
||||
Returns
|
||||
-------
|
||||
|
@ -296,7 +296,7 @@ class Rotation:
|
|||
Parameters
|
||||
----------
|
||||
other : Rotation of shape (self.shape)
|
||||
Rotation to inverse composition.
|
||||
Rotation to invert for composition.
|
||||
|
||||
"""
|
||||
return self/other
|
||||
|
@ -468,7 +468,7 @@ class Rotation:
|
|||
Misorientation.
|
||||
|
||||
"""
|
||||
return other*~self
|
||||
return other/self
|
||||
|
||||
|
||||
################################################################################################
|
||||
|
@ -481,7 +481,7 @@ class Rotation:
|
|||
Returns
|
||||
-------
|
||||
q : numpy.ndarray of shape (...,4)
|
||||
Unit quaternion in positive real hemisphere: (q_0, q_1, q_2, q_3), ǀqǀ=1, q_0 ≥ 0.
|
||||
Unit quaternion (q_0, q_1, q_2, q_3) in positive real hemisphere, i.e. ǀqǀ = 1, q_0 ≥ 0.
|
||||
|
||||
"""
|
||||
return self.quaternion.copy()
|
||||
|
@ -489,7 +489,7 @@ class Rotation:
|
|||
def as_Euler_angles(self,
|
||||
degrees = False):
|
||||
"""
|
||||
Represent as Bunge-Euler angles.
|
||||
Represent as Bunge Euler angles.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
|
@ -499,12 +499,16 @@ class Rotation:
|
|||
Returns
|
||||
-------
|
||||
phi : numpy.ndarray of shape (...,3)
|
||||
Bunge-Euler angles: (φ_1, ϕ, φ_2), φ_1 ∈ [0,2π], ϕ ∈ [0,π], φ_2 ∈ [0,2π]
|
||||
unless degrees == True: φ_1 ∈ [0,360], ϕ ∈ [0,180], φ_2 ∈ [0,360]
|
||||
Bunge Euler angles (φ_1 ∈ [0,2π], ϕ ∈ [0,π], φ_2 ∈ [0,2π])
|
||||
or (φ_1 ∈ [0,360], ϕ ∈ [0,180], φ_2 ∈ [0,360]) if degrees == True.
|
||||
|
||||
Notes
|
||||
-----
|
||||
Bunge Euler angles correspond to a rotation axis sequence of z–x'–z''.
|
||||
|
||||
Examples
|
||||
--------
|
||||
Cube orientation as Bunge-Euler angles.
|
||||
Cube orientation as Bunge Euler angles.
|
||||
|
||||
>>> import damask
|
||||
>>> import numpy as np
|
||||
|
@ -520,7 +524,7 @@ class Rotation:
|
|||
degrees = False,
|
||||
pair = False):
|
||||
"""
|
||||
Represent as axis angle pair.
|
||||
Represent as axis–angle pair.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
|
@ -531,19 +535,18 @@ class Rotation:
|
|||
|
||||
Returns
|
||||
-------
|
||||
axis_angle : numpy.ndarray of shape (...,4) unless pair == True:
|
||||
tuple containing numpy.ndarray of shapes (...,3) and (...)
|
||||
Axis angle pair: (n_1, n_2, n_3, ω), ǀnǀ = 1 and ω ∈ [0,π]
|
||||
unless degrees = True: ω ∈ [0,180].
|
||||
axis_angle : numpy.ndarray of shape (...,4) or tuple ((...,3), (...)) if pair == True
|
||||
Axis and angle [n_1, n_2, n_3, ω] with ǀnǀ = 1 and ω ∈ [0,π]
|
||||
or ω ∈ [0,180] if degrees == True.
|
||||
|
||||
Examples
|
||||
--------
|
||||
Cube orientation as axis angle pair.
|
||||
Cube orientation as axis–angle pair.
|
||||
|
||||
>>> import damask
|
||||
>>> import numpy as np
|
||||
>>> damask.Rotation(np.array([1,0,0,0])).as_axis_angle()
|
||||
array([0., 0., 1., 0.])
|
||||
>>> damask.Rotation(np.array([1,0,0,0])).as_axis_angle(pair=True)
|
||||
(array([0., 0., 1.]), array(0.))
|
||||
|
||||
"""
|
||||
ax = Rotation._qu2ax(self.quaternion)
|
||||
|
@ -557,7 +560,7 @@ class Rotation:
|
|||
Returns
|
||||
-------
|
||||
R : numpy.ndarray of shape (...,3,3)
|
||||
Rotation matrix R, det(R) = 1, R.T∙R=I.
|
||||
Rotation matrix R with det(R) = 1, R.T ∙ R = I.
|
||||
|
||||
Examples
|
||||
--------
|
||||
|
@ -576,25 +579,23 @@ class Rotation:
|
|||
def as_Rodrigues_vector(self,
|
||||
compact = False):
|
||||
"""
|
||||
Represent as Rodrigues-Frank vector with separated axis and angle argument.
|
||||
Represent as Rodrigues–Frank vector with separate axis and angle argument.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
compact : bool, optional
|
||||
Return as actual Rodrigues-Frank vector,
|
||||
Return three-component Rodrigues–Frank vector,
|
||||
i.e. axis and angle argument are not separated.
|
||||
|
||||
Returns
|
||||
-------
|
||||
rho : numpy.ndarray of shape (...,4) containing
|
||||
[n_1, n_2, n_3, tan(ω/2)], ǀnǀ = 1 and ω ∈ [0,π]
|
||||
unless compact == True:
|
||||
numpy.ndarray of shape (...,3) containing
|
||||
tan(ω/2) [n_1, n_2, n_3], ω ∈ [0,π].
|
||||
rho : numpy.ndarray of shape (...,4) or (...,3) if compact == True
|
||||
Rodrigues–Frank vector [n_1, n_2, n_3, tan(ω/2)] with ǀnǀ = 1 and ω ∈ [0,π]
|
||||
or [n_1, n_2, n_3] with ǀnǀ = tan(ω/2) and ω ∈ [0,π] if compact == True.
|
||||
|
||||
Examples
|
||||
--------
|
||||
Cube orientation as 'real' Rodrigues-Frank vector.
|
||||
Cube orientation as three-component Rodrigues–Frank vector.
|
||||
|
||||
>>> import damask
|
||||
>>> import numpy as np
|
||||
|
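A sketch of the two output forms described above (full four-component vector vs. compact three-component vector), assuming only the API shown in this hunk; a 60° rotation about z gives tan(ω/2) ≈ 0.577:

    import damask
    import numpy as np

    r = damask.Rotation.from_axis_angle(np.array([0.,0.,1.,60.]),degrees=True)
    r.as_Rodrigues_vector()              # -> [0., 0., 1., 0.577...]  axis plus tan(ω/2)
    r.as_Rodrigues_vector(compact=True)  # -> [0., 0., 0.577...]      axis scaled by tan(ω/2)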
@ -616,7 +617,7 @@ class Rotation:
|
|||
Returns
|
||||
-------
|
||||
h : numpy.ndarray of shape (...,3)
|
||||
Homochoric vector: (h_1, h_2, h_3), ǀhǀ < (3/4*π)^(1/3).
|
||||
Homochoric vector (h_1, h_2, h_3) with ǀhǀ < (3/4*π)^(1/3).
|
||||
|
||||
Examples
|
||||
--------
|
||||
|
@ -637,7 +638,7 @@ class Rotation:
|
|||
Returns
|
||||
-------
|
||||
x : numpy.ndarray of shape (...,3)
|
||||
Cubochoric vector: (x_1, x_2, x_3), max(x_i) < 1/2*π^(2/3).
|
||||
Cubochoric vector (x_1, x_2, x_3) with max(x_i) < 1/2*π^(2/3).
|
||||
|
||||
Examples
|
||||
--------
|
||||
|
@ -664,13 +665,12 @@ class Rotation:
|
|||
Parameters
|
||||
----------
|
||||
q : numpy.ndarray of shape (...,4)
|
||||
Unit quaternion in positive real hemisphere: (q_0, q_1, q_2, q_3),
|
||||
ǀqǀ=1, q_0 ≥ 0.
|
||||
Unit quaternion (q_0, q_1, q_2, q_3) in positive real hemisphere, i.e. ǀqǀ = 1, q_0 ≥ 0.
|
||||
accept_homomorph : boolean, optional
|
||||
Allow homomorphic variants, i.e. q_0 < 0 (negative real hemisphere).
|
||||
Defaults to False.
|
||||
P : int ∈ {-1,1}, optional
|
||||
Convention used. Defaults to -1.
|
||||
Sign convention. Defaults to -1.
|
||||
|
||||
"""
|
||||
qu = np.array(q,dtype=float)
|
||||
|
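Since q and -q describe the same rotation, the accept_homomorph flag documented above lets input from the negative real hemisphere be folded back to q_0 ≥ 0 instead of being rejected; a minimal sketch:

    import damask
    import numpy as np

    damask.Rotation.from_quaternion(np.array([-1.,0.,0.,0.]),accept_homomorph=True)
    # stored with q_0 >= 0, i.e. the same rotation as damask.Rotation(np.array([1.,0.,0.,0.]))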
@ -694,15 +694,19 @@ class Rotation:
|
|||
def from_Euler_angles(phi,
|
||||
degrees = False):
|
||||
"""
|
||||
Initialize from Bunge-Euler angles.
|
||||
Initialize from Bunge Euler angles.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
phi : numpy.ndarray of shape (...,3)
|
||||
Bunge-Euler angles: (φ_1, ϕ, φ_2), φ_1 ∈ [0,2π], ϕ ∈ [0,π], φ_2 ∈ [0,2π]
|
||||
unless degrees == True: φ_1 ∈ [0,360], ϕ ∈ [0,180], φ_2 ∈ [0,360].
|
||||
Euler angles (φ_1 ∈ [0,2π], ϕ ∈ [0,π], φ_2 ∈ [0,2π])
|
||||
or (φ_1 ∈ [0,360], ϕ ∈ [0,180], φ_2 ∈ [0,360]) if degrees == True.
|
||||
degrees : boolean, optional
|
||||
Bunge-Euler angles are given in degrees. Defaults to False.
|
||||
Euler angles are given in degrees. Defaults to False.
|
||||
|
||||
Notes
|
||||
-----
|
||||
Bunge Euler angles correspond to a rotation axis sequence of z–x'–z''.
|
||||
|
||||
"""
|
||||
eu = np.array(phi,dtype=float)
|
||||
|
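A usage sketch under the z–x'–z'' convention stated in the Notes above: with ϕ = φ_2 = 0, φ_1 is simply a rotation about the z axis.

    import damask
    import numpy as np

    r = damask.Rotation.from_Euler_angles(np.array([45.,0.,0.]),degrees=True)
    r.as_axis_angle(degrees=True)   # -> [0., 0., 1., 45.]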
@ -726,14 +730,14 @@ class Rotation:
|
|||
Parameters
|
||||
----------
|
||||
axis_angle : numpy.ndarray of shape (...,4)
|
||||
Axis angle pair: [n_1, n_2, n_3, ω], ǀnǀ = 1 and ω ∈ [0,π]
|
||||
unless degrees = True: ω ∈ [0,180].
|
||||
Axis and angle (n_1, n_2, n_3, ω) with ǀnǀ = 1 and ω ∈ [0,π]
|
||||
or ω ∈ [0,180] if degrees == True.
|
||||
degrees : boolean, optional
|
||||
Angle ω is given in degrees. Defaults to False.
|
||||
normalize: boolean, optional
|
||||
Allow ǀnǀ ≠ 1. Defaults to False.
|
||||
P : int ∈ {-1,1}, optional
|
||||
Convention used. Defaults to -1.
|
||||
Sign convention. Defaults to -1.
|
||||
|
||||
"""
|
||||
ax = np.array(axis_angle,dtype=float)
|
||||
|
@ -746,10 +750,10 @@ class Rotation:
|
|||
if degrees: ax[..., 3] = np.radians(ax[...,3])
|
||||
if normalize: ax[...,0:3] /= np.linalg.norm(ax[...,0:3],axis=-1,keepdims=True)
|
||||
if np.any(ax[...,3] < 0.0) or np.any(ax[...,3] > np.pi):
|
||||
raise ValueError('Axis angle rotation angle outside of [0..π].')
|
||||
raise ValueError('Axis–angle rotation angle outside of [0..π].')
|
||||
if not np.all(np.isclose(np.linalg.norm(ax[...,0:3],axis=-1), 1.0)):
|
||||
print(np.linalg.norm(ax[...,0:3],axis=-1))
|
||||
raise ValueError('Axis angle rotation axis is not of unit length.')
|
||||
raise ValueError('Axis–angle rotation axis is not of unit length.')
|
||||
|
||||
return Rotation(Rotation._ax2qu(ax))
|
||||
|
||||
|
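The normalize flag documented above relaxes the unit-length requirement on the axis; without it, the checks shown in this hunk raise a ValueError. A short sketch:

    import damask
    import numpy as np

    damask.Rotation.from_axis_angle(np.array([0.,0.,2.,90.]),degrees=True,normalize=True)  # axis rescaled to [0,0,1]
    # without normalize=True the same call would raise 'Axis–angle rotation axis is not of unit length.'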
@ -797,7 +801,7 @@ class Rotation:
|
|||
Parameters
|
||||
----------
|
||||
R : numpy.ndarray of shape (...,3,3)
|
||||
Rotation matrix: det(R) = 1, R.T∙R=I.
|
||||
Rotation matrix with det(R) = 1, R.T ∙ R = I.
|
||||
|
||||
"""
|
||||
return Rotation.from_basis(R)
|
||||
|
@ -836,16 +840,16 @@ class Rotation:
|
|||
normalize = False,
|
||||
P = -1):
|
||||
"""
|
||||
Initialize from Rodrigues-Frank vector (angle separated from axis).
|
||||
Initialize from Rodrigues–Frank vector (angle separated from axis).
|
||||
|
||||
Parameters
|
||||
----------
|
||||
rho : numpy.ndarray of shape (...,4)
|
||||
Rodrigues-Frank vector. (n_1, n_2, n_3, tan(ω/2)), ǀnǀ = 1 and ω ∈ [0,π].
|
||||
Rodrigues–Frank vector (n_1, n_2, n_3, tan(ω/2)) with ǀnǀ = 1 and ω ∈ [0,π].
|
||||
normalize : boolean, optional
|
||||
Allow ǀnǀ ≠ 1. Defaults to False.
|
||||
P : int ∈ {-1,1}, optional
|
||||
Convention used. Defaults to -1.
|
||||
Sign convention. Defaults to -1.
|
||||
|
||||
"""
|
||||
ro = np.array(rho,dtype=float)
|
||||
|
@ -857,7 +861,7 @@ class Rotation:
|
|||
ro[...,0:3] *= -P
|
||||
if normalize: ro[...,0:3] /= np.linalg.norm(ro[...,0:3],axis=-1,keepdims=True)
|
||||
if np.any(ro[...,3] < 0.0):
|
||||
raise ValueError('Rodrigues vector rotation angle not positive.')
|
||||
raise ValueError('Rodrigues vector rotation angle is negative.')
|
||||
if not np.all(np.isclose(np.linalg.norm(ro[...,0:3],axis=-1), 1.0)):
|
||||
raise ValueError('Rodrigues vector rotation axis is not of unit length.')
|
||||
|
||||
|
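A minimal sketch of the Rodrigues–Frank input form documented above: the fourth component is tan(ω/2), so a value of 1 together with axis [0,0,1] encodes a 90° rotation about z.

    import damask
    import numpy as np

    r = damask.Rotation.from_Rodrigues_vector(np.array([0.,0.,1.,1.]))
    r.as_axis_angle(degrees=True)   # -> [0., 0., 1., 90.]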
@ -872,9 +876,9 @@ class Rotation:
|
|||
Parameters
|
||||
----------
|
||||
h : numpy.ndarray of shape (...,3)
|
||||
Homochoric vector: (h_1, h_2, h_3), ǀhǀ < (3/4*π)^(1/3).
|
||||
Homochoric vector (h_1, h_2, h_3) with ǀhǀ < (3/4*π)^(1/3).
|
||||
P : int ∈ {-1,1}, optional
|
||||
Convention used. Defaults to -1.
|
||||
Sign convention. Defaults to -1.
|
||||
|
||||
"""
|
||||
ho = np.array(h,dtype=float)
|
||||
|
@ -899,9 +903,9 @@ class Rotation:
|
|||
Parameters
|
||||
----------
|
||||
x : numpy.ndarray of shape (...,3)
|
||||
Cubochoric vector: (x_1, x_2, x_3), max(x_i) < 1/2*π^(2/3).
|
||||
Cubochoric vector (x_1, x_2, x_3) with max(x_i) < 1/2*π^(2/3).
|
||||
P : int ∈ {-1,1}, optional
|
||||
Convention used. Defaults to -1.
|
||||
Sign convention. Defaults to -1.
|
||||
|
||||
"""
|
||||
cu = np.array(x,dtype=float)
|
||||
|
@ -922,18 +926,17 @@ class Rotation:
|
|||
def from_random(shape = None,
|
||||
rng_seed = None):
|
||||
"""
|
||||
Draw random rotation.
|
||||
Initialize with random rotation.
|
||||
|
||||
Rotations are uniformly distributed.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
shape : tuple of ints, optional
|
||||
Shape of the sample. Defaults to None which gives a
|
||||
single rotation
|
||||
Shape of the sample. Defaults to None, which gives a single rotation.
|
||||
rng_seed : {None, int, array_like[ints], SeedSequence, BitGenerator, Generator}, optional
|
||||
A seed to initialize the BitGenerator. Defaults to None.
|
||||
If None, then fresh, unpredictable entropy will be pulled from the OS.
|
||||
A seed to initialize the BitGenerator.
|
||||
Defaults to None, i.e. unpredictable entropy will be pulled from the OS.
|
||||
|
||||
"""
|
||||
rng = np.random.default_rng(rng_seed)
|
||||
|
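A usage sketch, assuming the Rotation class exposes the sample shape via its usual .shape attribute (not part of this hunk); a fixed rng_seed makes the uniformly distributed draw reproducible:

    import damask

    r = damask.Rotation.from_random(shape=(2,3),rng_seed=0)
    r.shape   # -> (2, 3), six independent uniformly distributed rotations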
@ -958,25 +961,25 @@ class Rotation:
|
|||
rng_seed = None,
|
||||
**kwargs):
|
||||
"""
|
||||
Sample discrete values from a binned ODF.
|
||||
Sample discrete values from a binned orientation distribution function (ODF).
|
||||
|
||||
Parameters
|
||||
----------
|
||||
weights : numpy.ndarray of shape (n)
|
||||
Texture intensity values (probability density or volume fraction) at Euler grid points.
|
||||
Texture intensity values (probability density or volume fraction) at Euler space grid points.
|
||||
phi : numpy.ndarray of shape (n,3)
|
||||
Grid coordinates in Euler space at which weights are defined.
|
||||
N : integer, optional
|
||||
Number of discrete orientations to be sampled from the given ODF.
|
||||
Defaults to 500.
|
||||
degrees : boolean, optional
|
||||
Euler grid values are in degrees. Defaults to True.
|
||||
Euler space grid coordinates are in degrees. Defaults to True.
|
||||
fractions : boolean, optional
|
||||
ODF values correspond to volume fractions, not probability density.
|
||||
ODF values correspond to volume fractions, not probability densities.
|
||||
Defaults to True.
|
||||
rng_seed: {None, int, array_like[ints], SeedSequence, BitGenerator, Generator}, optional
|
||||
A seed to initialize the BitGenerator. Defaults to None, i.e. unpredictable entropy
|
||||
will be pulled from the OS.
|
||||
A seed to initialize the BitGenerator.
|
||||
Defaults to None, i.e. unpredictable entropy will be pulled from the OS.
|
||||
|
||||
Returns
|
||||
-------
|
||||
|
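A hedged sketch of sampling from a binned ODF as documented above; the factory name from_ODF, the 10°-binned Euler grid, and the uniform weights are illustrative assumptions (a uniform ODF simply reproduces a random texture):

    import damask
    import numpy as np

    phi = np.array([(p1,P,p2) for p1 in range(5,360,10)
                              for P  in range(5,180,10)
                              for p2 in range(5,360,10)],dtype=float)   # bin centers in degrees
    weights = np.ones(len(phi))                                         # uniform intensity
    sample  = damask.Rotation.from_ODF(weights,phi,N=200,degrees=True,rng_seed=0)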
@ -1024,12 +1027,12 @@ class Rotation:
|
|||
sigma : float
|
||||
Standard deviation of (Gaussian) misorientation distribution.
|
||||
N : int, optional
|
||||
Number of samples, defaults to 500.
|
||||
Number of samples. Defaults to 500.
|
||||
degrees : boolean, optional
|
||||
sigma is given in degrees.
|
||||
sigma is given in degrees. Defaults to True.
|
||||
rng_seed : {None, int, array_like[ints], SeedSequence, BitGenerator, Generator}, optional
|
||||
A seed to initialize the BitGenerator. Defaults to None, i.e. unpredictable entropy
|
||||
will be pulled from the OS.
|
||||
A seed to initialize the BitGenerator.
|
||||
Defaults to None, i.e. unpredictable entropy will be pulled from the OS.
|
||||
|
||||
"""
|
||||
rng = np.random.default_rng(rng_seed)
|
||||
|
@ -1055,20 +1058,20 @@ class Rotation:
|
|||
|
||||
Parameters
|
||||
----------
|
||||
alpha : numpy.ndarray of size 2
|
||||
Polar coordinates (phi from x,theta from z) of fiber direction in crystal frame.
|
||||
beta : numpy.ndarray of size 2
|
||||
Polar coordinates (phi from x,theta from z) of fiber direction in sample frame.
|
||||
alpha : numpy.ndarray of shape (2)
|
||||
Polar coordinates (phi from x, theta from z) of fiber direction in crystal frame.
|
||||
beta : numpy.ndarray of shape (2)
|
||||
Polar coordinates (phi from x, theta from z) of fiber direction in sample frame.
|
||||
sigma : float, optional
|
||||
Standard deviation of (Gaussian) misorientation distribution.
|
||||
Defaults to 0.
|
||||
N : int, optional
|
||||
Number of samples, defaults to 500.
|
||||
Number of samples. Defaults to 500.
|
||||
degrees : boolean, optional
|
||||
sigma, alpha, and beta are given in degrees.
|
||||
rng_seed : {None, int, array_like[ints], SeedSequence, BitGenerator, Generator}, optional
|
||||
A seed to initialize the BitGenerator. Defaults to None, i.e. unpredictable entropy
|
||||
will be pulled from the OS.
|
||||
A seed to initialize the BitGenerator.
|
||||
Defaults to None, i.e. unpredictable entropy will be pulled from the OS.
|
||||
|
||||
"""
|
||||
rng = np.random.default_rng(rng_seed)
|
||||
|
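A hedged usage sketch for the fiber-component sampler documented above (the factory name from_fiber_component is assumed, since the hunk shows only its parameters): crystal and sample fiber axes both along z, scattered by a 5° Gaussian.

    import damask
    import numpy as np

    fiber = damask.Rotation.from_fiber_component(alpha=np.array([0.,0.]),   # (phi from x, theta from z), crystal frame
                                                 beta =np.array([0.,0.]),   # same direction in the sample frame
                                                 sigma=5.0,N=100,degrees=True,rng_seed=0)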
@ -1142,7 +1145,7 @@ class Rotation:
|
|||
|
||||
@staticmethod
|
||||
def _qu2eu(qu):
|
||||
"""Quaternion to Bunge-Euler angles."""
|
||||
"""Quaternion to Bunge Euler angles."""
|
||||
q02 = qu[...,0:1]*qu[...,2:3]
|
||||
q13 = qu[...,1:2]*qu[...,3:4]
|
||||
q01 = qu[...,0:1]*qu[...,1:2]
|
||||
|
@ -1171,9 +1174,9 @@ class Rotation:
|
|||
@staticmethod
|
||||
def _qu2ax(qu):
|
||||
"""
|
||||
Quaternion to axis angle pair.
|
||||
Quaternion to axis–angle pair.
|
||||
|
||||
Modified version of the original formulation, should be numerically more stable
|
||||
Modified version of the original formulation, should be numerically more stable.
|
||||
"""
|
||||
with np.errstate(invalid='ignore',divide='ignore'):
|
||||
s = np.sign(qu[...,0:1])/np.sqrt(qu[...,1:2]**2+qu[...,2:3]**2+qu[...,3:4]**2)
|
||||
|
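For reference, the relation implemented above (a sketch in the conventions of this class, where q_0 ≥ 0 is enforced) is ω = 2 arccos(q_0) and n = sgn(q_0)·(q_1, q_2, q_3)/√(q_1² + q_2² + q_3²); dividing the vector part by its own norm rather than by sin(ω/2) is presumably what avoids the ill-conditioned small-angle case the docstring alludes to.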
@ -1186,7 +1189,7 @@ class Rotation:
|
|||
|
||||
@staticmethod
|
||||
def _qu2ro(qu):
|
||||
"""Quaternion to Rodrigues-Frank vector."""
|
||||
"""Quaternion to Rodrigues–Frank vector."""
|
||||
with np.errstate(invalid='ignore',divide='ignore'):
|
||||
s = np.linalg.norm(qu[...,1:4],axis=-1,keepdims=True)
|
||||
ro = np.where(np.broadcast_to(np.abs(qu[...,0:1]) < 1.0e-12,qu.shape),
|
||||
|
@ -1260,7 +1263,7 @@ class Rotation:
|
|||
|
||||
@staticmethod
|
||||
def _om2eu(om):
|
||||
"""Rotation matrix to Bunge-Euler angles."""
|
||||
"""Rotation matrix to Bunge Euler angles."""
|
||||
with np.errstate(invalid='ignore',divide='ignore'):
|
||||
zeta = 1.0/np.sqrt(1.0-om[...,2,2:3]**2)
|
||||
eu = np.where(np.isclose(np.abs(om[...,2,2:3]),1.0,0.0),
|
||||
|
@ -1279,7 +1282,7 @@ class Rotation:
|
|||
|
||||
@staticmethod
|
||||
def _om2ax(om):
|
||||
"""Rotation matrix to axis angle pair."""
|
||||
"""Rotation matrix to axis–angle pair."""
|
||||
diag_delta = -_P*np.block([om[...,1,2:3]-om[...,2,1:2],
|
||||
om[...,2,0:1]-om[...,0,2:3],
|
||||
om[...,0,1:2]-om[...,1,0:1]
|
||||
|
@ -1300,7 +1303,7 @@ class Rotation:
|
|||
|
||||
@staticmethod
|
||||
def _om2ro(om):
|
||||
"""Rotation matrix to Rodrigues-Frank vector."""
|
||||
"""Rotation matrix to Rodrigues–Frank vector."""
|
||||
return Rotation._eu2ro(Rotation._om2eu(om))
|
||||
|
||||
@staticmethod
|
||||
|
@ -1314,10 +1317,10 @@ class Rotation:
|
|||
return Rotation._ho2cu(Rotation._om2ho(om))
|
||||
|
||||
|
||||
#---------- Bunge-Euler angles ----------
|
||||
#---------- Bunge Euler angles ----------
|
||||
@staticmethod
|
||||
def _eu2qu(eu):
|
||||
"""Bunge-Euler angles to quaternion."""
|
||||
"""Bunge Euler angles to quaternion."""
|
||||
ee = 0.5*eu
|
||||
cPhi = np.cos(ee[...,1:2])
|
||||
sPhi = np.sin(ee[...,1:2])
|
||||
|
@ -1330,7 +1333,7 @@ class Rotation:
|
|||
|
||||
@staticmethod
|
||||
def _eu2om(eu):
|
||||
"""Bunge-Euler angles to rotation matrix."""
|
||||
"""Bunge Euler angles to rotation matrix."""
|
||||
c = np.cos(eu)
|
||||
s = np.sin(eu)
|
||||
om = np.block([+c[...,0:1]*c[...,2:3]-s[...,0:1]*s[...,2:3]*c[...,1:2],
|
||||
|
@ -1348,7 +1351,7 @@ class Rotation:
|
|||
|
||||
@staticmethod
|
||||
def _eu2ax(eu):
|
||||
"""Bunge-Euler angles to axis angle pair."""
|
||||
"""Bunge Euler angles to axis–angle pair."""
|
||||
t = np.tan(eu[...,1:2]*0.5)
|
||||
sigma = 0.5*(eu[...,0:1]+eu[...,2:3])
|
||||
delta = 0.5*(eu[...,0:1]-eu[...,2:3])
|
||||
|
@ -1367,7 +1370,7 @@ class Rotation:
|
|||
|
||||
@staticmethod
|
||||
def _eu2ro(eu):
|
||||
"""Bunge-Euler angles to Rodrigues-Frank vector."""
|
||||
"""Bunge Euler angles to Rodrigues–Frank vector."""
|
||||
ax = Rotation._eu2ax(eu)
|
||||
ro = np.block([ax[...,:3],np.tan(ax[...,3:4]*.5)])
|
||||
ro[ax[...,3]>=np.pi,3] = np.inf
|
||||
|
@ -1376,19 +1379,19 @@ class Rotation:
|
|||
|
||||
@staticmethod
|
||||
def _eu2ho(eu):
|
||||
"""Bunge-Euler angles to homochoric vector."""
|
||||
"""Bunge Euler angles to homochoric vector."""
|
||||
return Rotation._ax2ho(Rotation._eu2ax(eu))
|
||||
|
||||
@staticmethod
|
||||
def _eu2cu(eu):
|
||||
"""Bunge-Euler angles to cubochoric vector."""
|
||||
"""Bunge Euler angles to cubochoric vector."""
|
||||
return Rotation._ho2cu(Rotation._eu2ho(eu))
|
||||
|
||||
|
||||
#---------- Axis angle pair ----------
|
||||
@staticmethod
|
||||
def _ax2qu(ax):
|
||||
"""Axis angle pair to quaternion."""
|
||||
"""Axis–angle pair to quaternion."""
|
||||
c = np.cos(ax[...,3:4]*.5)
|
||||
s = np.sin(ax[...,3:4]*.5)
|
||||
qu = np.where(np.abs(ax[...,3:4])<1.e-6,[1.0, 0.0, 0.0, 0.0],np.block([c, ax[...,:3]*s]))
|
||||
|
@ -1396,7 +1399,7 @@ class Rotation:
|
|||
|
||||
@staticmethod
|
||||
def _ax2om(ax):
|
||||
"""Axis angle pair to rotation matrix."""
|
||||
"""Axis-angle pair to rotation matrix."""
|
||||
c = np.cos(ax[...,3:4])
|
||||
s = np.sin(ax[...,3:4])
|
||||
omc = 1. -c
|
||||
|
@ -1418,7 +1421,7 @@ class Rotation:
|
|||
|
||||
@staticmethod
|
||||
def _ax2ro(ax):
|
||||
"""Axis angle pair to Rodrigues-Frank vector."""
|
||||
"""Axis–angle pair to Rodrigues–Frank vector."""
|
||||
ro = np.block([ax[...,:3],
|
||||
np.where(np.isclose(ax[...,3:4],np.pi,atol=1.e-15,rtol=.0),
|
||||
np.inf,
|
||||
|
@ -1429,36 +1432,36 @@ class Rotation:
|
|||
|
||||
@staticmethod
|
||||
def _ax2ho(ax):
|
||||
"""Axis angle pair to homochoric vector."""
|
||||
"""Axis–angle pair to homochoric vector."""
|
||||
f = (0.75 * ( ax[...,3:4] - np.sin(ax[...,3:4]) ))**(1.0/3.0)
|
||||
ho = ax[...,:3] * f
|
||||
return ho
|
||||
|
||||
@staticmethod
|
||||
def _ax2cu(ax):
|
||||
"""Axis angle pair to cubochoric vector."""
|
||||
"""Axis–angle pair to cubochoric vector."""
|
||||
return Rotation._ho2cu(Rotation._ax2ho(ax))
|
||||
|
||||
|
||||
#---------- Rodrigues-Frank vector ----------
|
||||
@staticmethod
|
||||
def _ro2qu(ro):
|
||||
"""Rodrigues-Frank vector to quaternion."""
|
||||
"""Rodrigues–Frank vector to quaternion."""
|
||||
return Rotation._ax2qu(Rotation._ro2ax(ro))
|
||||
|
||||
@staticmethod
|
||||
def _ro2om(ro):
|
||||
"""Rodgrigues-Frank vector to rotation matrix."""
|
||||
"""Rodgrigues–Frank vector to rotation matrix."""
|
||||
return Rotation._ax2om(Rotation._ro2ax(ro))
|
||||
|
||||
@staticmethod
|
||||
def _ro2eu(ro):
|
||||
"""Rodrigues-Frank vector to Bunge-Euler angles."""
|
||||
"""Rodrigues–Frank vector to Bunge Euler angles."""
|
||||
return Rotation._om2eu(Rotation._ro2om(ro))
|
||||
|
||||
@staticmethod
|
||||
def _ro2ax(ro):
|
||||
"""Rodrigues-Frank vector to axis angle pair."""
|
||||
"""Rodrigues–Frank vector to axis–angle pair."""
|
||||
with np.errstate(invalid='ignore',divide='ignore'):
|
||||
ax = np.where(np.isfinite(ro[...,3:4]),
|
||||
np.block([ro[...,0:3]*np.linalg.norm(ro[...,0:3],axis=-1,keepdims=True),2.*np.arctan(ro[...,3:4])]),
|
||||
|
@ -1468,7 +1471,7 @@ class Rotation:
|
|||
|
||||
@staticmethod
|
||||
def _ro2ho(ro):
|
||||
"""Rodrigues-Frank vector to homochoric vector."""
|
||||
"""Rodrigues–Frank vector to homochoric vector."""
|
||||
f = np.where(np.isfinite(ro[...,3:4]),2.0*np.arctan(ro[...,3:4]) -np.sin(2.0*np.arctan(ro[...,3:4])),np.pi)
|
||||
ho = np.where(np.broadcast_to(np.sum(ro[...,0:3]**2.0,axis=-1,keepdims=True) < 1.e-8,ro[...,0:3].shape),
|
||||
np.zeros(3), ro[...,0:3]* (0.75*f)**(1.0/3.0))
|
||||
|
@ -1476,7 +1479,7 @@ class Rotation:
|
|||
|
||||
@staticmethod
|
||||
def _ro2cu(ro):
|
||||
"""Rodrigues-Frank vector to cubochoric vector."""
|
||||
"""Rodrigues–Frank vector to cubochoric vector."""
|
||||
return Rotation._ho2cu(Rotation._ro2ho(ro))
|
||||
|
||||
|
||||
|
@ -1493,12 +1496,12 @@ class Rotation:
|
|||
|
||||
@staticmethod
|
||||
def _ho2eu(ho):
|
||||
"""Homochoric vector to Bunge-Euler angles."""
|
||||
"""Homochoric vector to Bunge Euler angles."""
|
||||
return Rotation._ax2eu(Rotation._ho2ax(ho))
|
||||
|
||||
@staticmethod
|
||||
def _ho2ax(ho):
|
||||
"""Homochoric vector to axis angle pair."""
|
||||
"""Homochoric vector to axis–angle pair."""
|
||||
tfit = np.array([+1.0000000000018852, -0.5000000002194847,
|
||||
-0.024999992127593126, -0.003928701544781374,
|
||||
-0.0008152701535450438, -0.0002009500426119712,
|
||||
|
@ -1521,7 +1524,7 @@ class Rotation:
|
|||
|
||||
@staticmethod
|
||||
def _ho2ro(ho):
|
||||
"""Axis angle pair to Rodrigues-Frank vector."""
|
||||
"""Axis–angle pair to Rodrigues–Frank vector."""
|
||||
return Rotation._ax2ro(Rotation._ho2ax(ho))
|
||||
|
||||
@staticmethod
|
||||
|
@ -1576,17 +1579,17 @@ class Rotation:
|
|||
|
||||
@staticmethod
|
||||
def _cu2eu(cu):
|
||||
"""Cubochoric vector to Bunge-Euler angles."""
|
||||
"""Cubochoric vector to Bunge Euler angles."""
|
||||
return Rotation._ho2eu(Rotation._cu2ho(cu))
|
||||
|
||||
@staticmethod
|
||||
def _cu2ax(cu):
|
||||
"""Cubochoric vector to axis angle pair."""
|
||||
"""Cubochoric vector to axis–angle pair."""
|
||||
return Rotation._ho2ax(Rotation._cu2ho(cu))
|
||||
|
||||
@staticmethod
|
||||
def _cu2ro(cu):
|
||||
"""Cubochoric vector to Rodrigues-Frank vector."""
|
||||
"""Cubochoric vector to Rodrigues–Frank vector."""
|
||||
return Rotation._ho2ro(Rotation._cu2ho(cu))
|
||||
|
||||
@staticmethod
|
||||
|
@ -1642,7 +1645,7 @@ class Rotation:
|
|||
Parameters
|
||||
----------
|
||||
xyz : numpy.ndarray
|
||||
coordinates of a point on a uniform refinable grid on a ball or
|
||||
Coordinates of a point on a uniform refinable grid on a ball or
|
||||
in a uniform refinable cubical grid.
|
||||
|
||||
References
|
||||
|
|
|
@ -1,269 +0,0 @@
|
|||
import os
|
||||
import sys
|
||||
import shutil
|
||||
import logging
|
||||
import logging.config
|
||||
from optparse import OptionParser
|
||||
from pathlib import Path
|
||||
|
||||
import damask
|
||||
|
||||
class Test:
|
||||
"""
|
||||
General class for testing.
|
||||
|
||||
Is sub-classed by the individual tests.
|
||||
"""
|
||||
|
||||
variants = []
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
"""New test."""
|
||||
defaults = {'description': '',
|
||||
'keep': False,
|
||||
'accept': False,
|
||||
'updateRequest': False,
|
||||
'show': False,
|
||||
'select': None,
|
||||
}
|
||||
for arg in defaults.keys():
|
||||
setattr(self,arg,kwargs.get(arg) if kwargs.get(arg) else defaults[arg])
|
||||
|
||||
fh = logging.FileHandler('test.log') # create file handler which logs even debug messages
|
||||
fh.setLevel(logging.DEBUG)
|
||||
fh.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s: \n%(message)s'))
|
||||
|
||||
ch = logging.StreamHandler(stream=sys.stdout) # create console handler with a higher log level
|
||||
ch.setLevel(logging.INFO)
|
||||
ch.setFormatter(logging.Formatter('%(message)s'))
|
||||
|
||||
logger = logging.getLogger()
|
||||
logger.addHandler(fh)
|
||||
logger.addHandler(ch)
|
||||
logger.setLevel(0)
|
||||
|
||||
logging.info('\n'.join(['+'*40,
|
||||
'-'*40,
|
||||
'| '+self.description,
|
||||
'-'*40,
|
||||
]))
|
||||
|
||||
self.dirBase = os.path.dirname(os.path.realpath(sys.modules[self.__class__.__module__].__file__))
|
||||
|
||||
self.parser = OptionParser(description = f'{self.description} (Test class version: {damask.version})',
|
||||
usage = './test.py [options]')
|
||||
self.parser.add_option("-k", "--keep",
|
||||
action = "store_true",
|
||||
dest = "keep",
|
||||
help = "keep current results, just run postprocessing")
|
||||
self.parser.add_option("--ok", "--accept",
|
||||
action = "store_true",
|
||||
dest = "accept",
|
||||
help = "calculate results but always consider test as successful")
|
||||
self.parser.add_option("-l", "--list",
|
||||
action = "store_true",
|
||||
dest = "show",
|
||||
help = "show all test variants without actual calculation")
|
||||
self.parser.add_option("-s", "--select",
|
||||
dest = "select",
|
||||
help = "run test(s) of given name only")
|
||||
self.parser.set_defaults(keep = self.keep,
|
||||
accept = self.accept,
|
||||
update = self.updateRequest,
|
||||
show = self.show,
|
||||
select = self.select,
|
||||
)
|
||||
|
||||
|
||||
def variantName(self,variant):
|
||||
"""Generate name of (numerical) variant."""
|
||||
return str(variant)
|
||||
|
||||
def execute(self):
|
||||
"""Run all variants and report first failure."""
|
||||
if not self.options.keep:
|
||||
self.clean()
|
||||
self.prepareAll()
|
||||
|
||||
for variant,object in enumerate(self.variants):
|
||||
name = self.variantName(variant)
|
||||
if self.options.show:
|
||||
logging.critical(f'{variant+1}: {name}')
|
||||
elif self.options.select is not None \
|
||||
and not (name in self.options.select or str(variant+1) in self.options.select):
|
||||
pass
|
||||
else:
|
||||
try:
|
||||
if not self.options.keep:
|
||||
self.prepare(variant)
|
||||
self.run(variant)
|
||||
|
||||
self.postprocess(variant)
|
||||
|
||||
if self.options.update:
|
||||
if self.update(variant) != 0: logging.critical(f'update for "{name}" failed.')
|
||||
elif not (self.options.accept or self.compare(variant)): # no update, do comparison
|
||||
return variant+1 # return culprit
|
||||
|
||||
except Exception as e:
|
||||
logging.critical(f'exception during variant execution: "{e}"')
|
||||
return variant+1 # return culprit
|
||||
return 0
|
||||
|
||||
def clean(self):
|
||||
"""Delete directory tree containing current results."""
|
||||
try:
|
||||
shutil.rmtree(self.dirCurrent())
|
||||
except FileNotFoundError:
|
||||
logging.warning(f'removal of directory "{self.dirCurrent()}" not possible...')
|
||||
|
||||
try:
|
||||
os.mkdir(self.dirCurrent())
|
||||
return True
|
||||
except FileExistsError:
|
||||
logging.critical(f'creation of directory "{self.dirCurrent()}" failed.')
|
||||
return False
|
||||
|
||||
def prepareAll(self):
|
||||
"""Do all necessary preparations for the whole test."""
|
||||
return True
|
||||
|
||||
def prepare(self,variant):
|
||||
"""Do all necessary preparations for the run of each test variant."""
|
||||
return True
|
||||
|
||||
|
||||
def run(self,variant):
|
||||
"""Execute the requested test variant."""
|
||||
return True
|
||||
|
||||
|
||||
def postprocess(self,variant):
|
||||
"""Perform post-processing of generated results for this test variant."""
|
||||
return True
|
||||
|
||||
|
||||
def compare(self,variant):
|
||||
"""Compare reference to current results."""
|
||||
return True
|
||||
|
||||
|
||||
def update(self,variant):
|
||||
"""Update reference with current results."""
|
||||
logging.critical('update not supported.')
|
||||
return 1
|
||||
|
||||
|
||||
def dirReference(self):
|
||||
"""Directory containing reference results of the test."""
|
||||
return os.path.normpath(os.path.join(self.dirBase,'reference/'))
|
||||
|
||||
|
||||
def dirCurrent(self):
|
||||
"""Directory containing current results of the test."""
|
||||
return os.path.normpath(os.path.join(self.dirBase,'current/'))
|
||||
|
||||
|
||||
def fileInRoot(self,dir,file):
|
||||
"""Path to a file in the root directory of DAMASK."""
|
||||
return str(Path(os.environ['DAMASK_ROOT'])/dir/file)
|
||||
|
||||
|
||||
def fileInReference(self,file):
|
||||
"""Path to a file in the refrence directory for the test."""
|
||||
return os.path.join(self.dirReference(),file)
|
||||
|
||||
|
||||
def fileInCurrent(self,file):
|
||||
"""Path to a file in the current results directory for the test."""
|
||||
return os.path.join(self.dirCurrent(),file)
|
||||
|
||||
|
||||
def copy(self, mapA, mapB,
|
||||
A = [], B = []):
|
||||
"""
|
||||
Copy list of files from (mapped) source to target.
|
||||
|
||||
mapA/B is one of self.fileInX.
|
||||
"""
|
||||
if not B or len(B) == 0: B = A
|
||||
|
||||
for source,target in zip(list(map(mapA,A)),list(map(mapB,B))):
|
||||
try:
|
||||
shutil.copy2(source,target)
|
||||
except FileNotFoundError:
|
||||
logging.critical(f'error copying {source} to {target}')
|
||||
raise FileNotFoundError
|
||||
|
||||
|
||||
def copy_Reference2Current(self,sourcefiles=[],targetfiles=[]):
|
||||
|
||||
if len(targetfiles) == 0: targetfiles = sourcefiles
|
||||
for i,f in enumerate(sourcefiles):
|
||||
try:
|
||||
shutil.copy2(self.fileInReference(f),self.fileInCurrent(targetfiles[i]))
|
||||
except FileNotFoundError:
|
||||
logging.critical(f'Reference2Current: Unable to copy file "{f}"')
|
||||
raise FileNotFoundError
|
||||
|
||||
|
||||
def copy_Base2Current(self,sourceDir,sourcefiles=[],targetfiles=[]):
|
||||
|
||||
source = os.path.normpath(os.path.join(self.dirBase,'../../..',sourceDir))
|
||||
if len(targetfiles) == 0: targetfiles = sourcefiles
|
||||
for i,f in enumerate(sourcefiles):
|
||||
try:
|
||||
shutil.copy2(os.path.join(source,f),self.fileInCurrent(targetfiles[i]))
|
||||
except FileNotFoundError:
|
||||
logging.error(os.path.join(source,f))
|
||||
logging.critical(f'Base2Current: Unable to copy file "{f}"')
|
||||
raise FileNotFoundError
|
||||
|
||||
|
||||
def copy_Current2Reference(self,sourcefiles=[],targetfiles=[]):
|
||||
|
||||
if len(targetfiles) == 0: targetfiles = sourcefiles
|
||||
for i,f in enumerate(sourcefiles):
|
||||
try:
|
||||
shutil.copy2(self.fileInCurrent(f),self.fileInReference(targetfiles[i]))
|
||||
except FileNotFoundError:
|
||||
logging.critical(f'Current2Reference: Unable to copy file "{f}"')
|
||||
raise FileNotFoundError
|
||||
|
||||
|
||||
def copy_Current2Current(self,sourcefiles=[],targetfiles=[]):
|
||||
|
||||
for i,f in enumerate(sourcefiles):
|
||||
try:
|
||||
shutil.copy2(self.fileInReference(f),self.fileInCurrent(targetfiles[i]))
|
||||
except FileNotFoundError:
|
||||
logging.critical(f'Current2Current: Unable to copy file "{f}"')
|
||||
raise FileNotFoundError
|
||||
|
||||
|
||||
def execute_inCurrentDir(self,cmd,env=None):
|
||||
|
||||
logging.info(cmd)
|
||||
out,error = damask.util.execute(cmd,self.dirCurrent())
|
||||
|
||||
logging.info(error)
|
||||
logging.debug(out)
|
||||
|
||||
return out,error
|
||||
|
||||
|
||||
def report_Success(self,culprit):
|
||||
|
||||
ret = culprit
|
||||
|
||||
if culprit == 0:
|
||||
count = len(self.variants) if self.options.select is None else len(self.options.select)
|
||||
msg = ('Test passed.' if count == 1 else f'All {count} tests passed.') + '\a\a\a'
|
||||
elif culprit == -1:
|
||||
msg = 'Warning: could not start test...'
|
||||
ret = 0
|
||||
else:
|
||||
msg = f'Test "{self.variantName(culprit-1)}" failed.'
|
||||
|
||||
logging.critical('\n'.join(['*'*40,msg,'*'*40]) + '\n')
|
||||
return ret
|
|
@ -121,7 +121,7 @@ class VTK:
|
|||
|
||||
"""
|
||||
vtk_nodes = vtk.vtkPoints()
|
||||
vtk_nodes.SetData(np_to_vtk(nodes))
|
||||
vtk_nodes.SetData(np_to_vtk(np.ascontiguousarray(nodes)))
|
||||
cells = vtk.vtkCellArray()
|
||||
cells.SetNumberOfCells(connectivity.shape[0])
|
||||
T = np.concatenate((np.ones((connectivity.shape[0],1),dtype=np.int64)*connectivity.shape[1],
|
||||
|
@ -157,7 +157,7 @@ class VTK:
|
|||
"""
|
||||
N = points.shape[0]
|
||||
vtk_points = vtk.vtkPoints()
|
||||
vtk_points.SetData(np_to_vtk(points))
|
||||
vtk_points.SetData(np_to_vtk(np.ascontiguousarray(points)))
|
||||
|
||||
vtk_cells = vtk.vtkCellArray()
|
||||
vtk_cells.SetNumberOfCells(N)
|
||||
|
|
|
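The added np.ascontiguousarray call matters because VTK's numpy bridge wraps the buffer of the passed array, which therefore has to be C-contiguous; slices or transposed views coming from upstream code would otherwise be rejected. A minimal sketch of the pattern, using only the public numpy/vtk API:

    import numpy as np
    from vtk.util.numpy_support import numpy_to_vtk

    points = np.random.rand(10,4)[:,:3]                          # slicing yields a non-contiguous view
    arr = numpy_to_vtk(np.ascontiguousarray(points),deep=True)   # deep copy, since the contiguous copy is a temporary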
@ -1,501 +0,0 @@
|
|||
import numpy as _np
|
||||
|
||||
|
||||
def Bravais_to_Miller(*,uvtw=None,hkil=None):
|
||||
"""
|
||||
Transform 4 Miller–Bravais indices to 3 Miller indices of crystal direction [uvw] or plane normal (hkl).
|
||||
|
||||
Parameters
|
||||
----------
|
||||
uvtw|hkil : numpy.ndarray of shape (...,4)
|
||||
Miller–Bravais indices of crystallographic direction [uvtw] or plane normal (hkil).
|
||||
|
||||
Returns
|
||||
-------
|
||||
uvw|hkl : numpy.ndarray of shape (...,3)
|
||||
Miller indices of [uvw] direction or (hkl) plane normal.
|
||||
|
||||
"""
|
||||
if (uvtw is not None) ^ (hkil is None):
|
||||
raise KeyError('Specify either "uvtw" or "hkil"')
|
||||
axis,basis = (_np.array(uvtw),_np.array([[1,0,-1,0],
|
||||
[0,1,-1,0],
|
||||
[0,0, 0,1]])) \
|
||||
if hkil is None else \
|
||||
(_np.array(hkil),_np.array([[1,0,0,0],
|
||||
[0,1,0,0],
|
||||
[0,0,0,1]]))
|
||||
return _np.einsum('il,...l',basis,axis)
|
||||
|
||||
|
||||
def Miller_to_Bravais(*,uvw=None,hkl=None):
|
||||
"""
|
||||
Transform 3 Miller indices to 4 Miller–Bravais indices of crystal direction [uvtw] or plane normal (hkil).
|
||||
|
||||
Parameters
|
||||
----------
|
||||
uvw|hkl : numpy.ndarray of shape (...,3)
|
||||
Miller indices of crystallographic direction [uvw] or plane normal (hkl).
|
||||
|
||||
Returns
|
||||
-------
|
||||
uvtw|hkil : numpy.ndarray of shape (...,4)
|
||||
Miller–Bravais indices of [uvtw] direction or (hkil) plane normal.
|
||||
|
||||
"""
|
||||
if (uvw is not None) ^ (hkl is None):
|
||||
raise KeyError('Specify either "uvw" or "hkl"')
|
||||
axis,basis = (_np.array(uvw),_np.array([[ 2,-1, 0],
|
||||
[-1, 2, 0],
|
||||
[-1,-1, 0],
|
||||
[ 0, 0, 3]])/3) \
|
||||
if hkl is None else \
|
||||
(_np.array(hkl),_np.array([[ 1, 0, 0],
|
||||
[ 0, 1, 0],
|
||||
[-1,-1, 0],
|
||||
[ 0, 0, 1]]))
|
||||
return _np.einsum('il,...l',basis,axis)
|
||||
|
||||
|
||||
kinematics = {
|
||||
'cF': {
|
||||
'slip' : _np.array([
|
||||
[+0,+1,-1 , +1,+1,+1],
|
||||
[-1,+0,+1 , +1,+1,+1],
|
||||
[+1,-1,+0 , +1,+1,+1],
|
||||
[+0,-1,-1 , -1,-1,+1],
|
||||
[+1,+0,+1 , -1,-1,+1],
|
||||
[-1,+1,+0 , -1,-1,+1],
|
||||
[+0,-1,+1 , +1,-1,-1],
|
||||
[-1,+0,-1 , +1,-1,-1],
|
||||
[+1,+1,+0 , +1,-1,-1],
|
||||
[+0,+1,+1 , -1,+1,-1],
|
||||
[+1,+0,-1 , -1,+1,-1],
|
||||
[-1,-1,+0 , -1,+1,-1],
|
||||
[+1,+1,+0 , +1,-1,+0],
|
||||
[+1,-1,+0 , +1,+1,+0],
|
||||
[+1,+0,+1 , +1,+0,-1],
|
||||
[+1,+0,-1 , +1,+0,+1],
|
||||
[+0,+1,+1 , +0,+1,-1],
|
||||
[+0,+1,-1 , +0,+1,+1],
|
||||
],'d'),
|
||||
'twin' : _np.array([
|
||||
[-2, 1, 1, 1, 1, 1],
|
||||
[ 1,-2, 1, 1, 1, 1],
|
||||
[ 1, 1,-2, 1, 1, 1],
|
||||
[ 2,-1, 1, -1,-1, 1],
|
||||
[-1, 2, 1, -1,-1, 1],
|
||||
[-1,-1,-2, -1,-1, 1],
|
||||
[-2,-1,-1, 1,-1,-1],
|
||||
[ 1, 2,-1, 1,-1,-1],
|
||||
[ 1,-1, 2, 1,-1,-1],
|
||||
[ 2, 1,-1, -1, 1,-1],
|
||||
[-1,-2,-1, -1, 1,-1],
|
||||
[-1, 1, 2, -1, 1,-1],
|
||||
],dtype=float),
|
||||
},
|
||||
'cI': {
|
||||
'slip' : _np.array([
|
||||
[+1,-1,+1 , +0,+1,+1],
|
||||
[-1,-1,+1 , +0,+1,+1],
|
||||
[+1,+1,+1 , +0,-1,+1],
|
||||
[-1,+1,+1 , +0,-1,+1],
|
||||
[-1,+1,+1 , +1,+0,+1],
|
||||
[-1,-1,+1 , +1,+0,+1],
|
||||
[+1,+1,+1 , -1,+0,+1],
|
||||
[+1,-1,+1 , -1,+0,+1],
|
||||
[-1,+1,+1 , +1,+1,+0],
|
||||
[-1,+1,-1 , +1,+1,+0],
|
||||
[+1,+1,+1 , -1,+1,+0],
|
||||
[+1,+1,-1 , -1,+1,+0],
|
||||
[-1,+1,+1 , +2,+1,+1],
|
||||
[+1,+1,+1 , -2,+1,+1],
|
||||
[+1,+1,-1 , +2,-1,+1],
|
||||
[+1,-1,+1 , +2,+1,-1],
|
||||
[+1,-1,+1 , +1,+2,+1],
|
||||
[+1,+1,-1 , -1,+2,+1],
|
||||
[+1,+1,+1 , +1,-2,+1],
|
||||
[-1,+1,+1 , +1,+2,-1],
|
||||
[+1,+1,-1 , +1,+1,+2],
|
||||
[+1,-1,+1 , -1,+1,+2],
|
||||
[-1,+1,+1 , +1,-1,+2],
|
||||
[+1,+1,+1 , +1,+1,-2],
|
||||
[+1,+1,-1 , +1,+2,+3],
|
||||
[+1,-1,+1 , -1,+2,+3],
|
||||
[-1,+1,+1 , +1,-2,+3],
|
||||
[+1,+1,+1 , +1,+2,-3],
|
||||
[+1,-1,+1 , +1,+3,+2],
|
||||
[+1,+1,-1 , -1,+3,+2],
|
||||
[+1,+1,+1 , +1,-3,+2],
|
||||
[-1,+1,+1 , +1,+3,-2],
|
||||
[+1,+1,-1 , +2,+1,+3],
|
||||
[+1,-1,+1 , -2,+1,+3],
|
||||
[-1,+1,+1 , +2,-1,+3],
|
||||
[+1,+1,+1 , +2,+1,-3],
|
||||
[+1,-1,+1 , +2,+3,+1],
|
||||
[+1,+1,-1 , -2,+3,+1],
|
||||
[+1,+1,+1 , +2,-3,+1],
|
||||
[-1,+1,+1 , +2,+3,-1],
|
||||
[-1,+1,+1 , +3,+1,+2],
|
||||
[+1,+1,+1 , -3,+1,+2],
|
||||
[+1,+1,-1 , +3,-1,+2],
|
||||
[+1,-1,+1 , +3,+1,-2],
|
||||
[-1,+1,+1 , +3,+2,+1],
|
||||
[+1,+1,+1 , -3,+2,+1],
|
||||
[+1,+1,-1 , +3,-2,+1],
|
||||
[+1,-1,+1 , +3,+2,-1],
|
||||
],'d'),
|
||||
'twin' : _np.array([
|
||||
[-1, 1, 1, 2, 1, 1],
|
||||
[ 1, 1, 1, -2, 1, 1],
|
||||
[ 1, 1,-1, 2,-1, 1],
|
||||
[ 1,-1, 1, 2, 1,-1],
|
||||
[ 1,-1, 1, 1, 2, 1],
|
||||
[ 1, 1,-1, -1, 2, 1],
|
||||
[ 1, 1, 1, 1,-2, 1],
|
||||
[-1, 1, 1, 1, 2,-1],
|
||||
[ 1, 1,-1, 1, 1, 2],
|
||||
[ 1,-1, 1, -1, 1, 2],
|
||||
[-1, 1, 1, 1,-1, 2],
|
||||
[ 1, 1, 1, 1, 1,-2],
|
||||
],dtype=float),
|
||||
},
|
||||
'hP': {
|
||||
'slip' : _np.array([
|
||||
[+2,-1,-1,+0 , +0,+0,+0,+1],
|
||||
[-1,+2,-1,+0 , +0,+0,+0,+1],
|
||||
[-1,-1,+2,+0 , +0,+0,+0,+1],
|
||||
[+2,-1,-1,+0 , +0,+1,-1,+0],
|
||||
[-1,+2,-1,+0 , -1,+0,+1,+0],
|
||||
[-1,-1,+2,+0 , +1,-1,+0,+0],
|
||||
[-1,+1,+0,+0 , +1,+1,-2,+0],
|
||||
[+0,-1,+1,+0 , -2,+1,+1,+0],
|
||||
[+1,+0,-1,+0 , +1,-2,+1,+0],
|
||||
[-1,+2,-1,+0 , +1,+0,-1,+1],
|
||||
[-2,+1,+1,+0 , +0,+1,-1,+1],
|
||||
[-1,-1,+2,+0 , -1,+1,+0,+1],
|
||||
[+1,-2,+1,+0 , -1,+0,+1,+1],
|
||||
[+2,-1,-1,+0 , +0,-1,+1,+1],
|
||||
[+1,+1,-2,+0 , +1,-1,+0,+1],
|
||||
[-2,+1,+1,+3 , +1,+0,-1,+1],
|
||||
[-1,-1,+2,+3 , +1,+0,-1,+1],
|
||||
[-1,-1,+2,+3 , +0,+1,-1,+1],
|
||||
[+1,-2,+1,+3 , +0,+1,-1,+1],
|
||||
[+1,-2,+1,+3 , -1,+1,+0,+1],
|
||||
[+2,-1,-1,+3 , -1,+1,+0,+1],
|
||||
[+2,-1,-1,+3 , -1,+0,+1,+1],
|
||||
[+1,+1,-2,+3 , -1,+0,+1,+1],
|
||||
[+1,+1,-2,+3 , +0,-1,+1,+1],
|
||||
[-1,+2,-1,+3 , +0,-1,+1,+1],
|
||||
[-1,+2,-1,+3 , +1,-1,+0,+1],
|
||||
[-2,+1,+1,+3 , +1,-1,+0,+1],
|
||||
[-1,-1,+2,+3 , +1,+1,-2,+2],
|
||||
[+1,-2,+1,+3 , -1,+2,-1,+2],
|
||||
[+2,-1,-1,+3 , -2,+1,+1,+2],
|
||||
[+1,+1,-2,+3 , -1,-1,+2,+2],
|
||||
[-1,+2,-1,+3 , +1,-2,+1,+2],
|
||||
[-2,+1,+1,+3 , +2,-1,-1,+2],
|
||||
],'d'),
|
||||
'twin' : _np.array([
|
||||
[-1, 0, 1, 1, 1, 0, -1, 2], # shear = (3-(c/a)^2)/(sqrt(3) c/a) <-10.1>{10.2}
|
||||
[ 0, -1, 1, 1, 0, 1, -1, 2],
|
||||
[ 1, -1, 0, 1, -1, 1, 0, 2],
|
||||
[ 1, 0, -1, 1, -1, 0, 1, 2],
|
||||
[ 0, 1, -1, 1, 0, -1, 1, 2],
|
||||
[-1, 1, 0, 1, 1, -1, 0, 2],
|
||||
[-1, -1, 2, 6, 1, 1, -2, 1], # shear = 1/(c/a) <11.6>{-1-1.1}
|
||||
[ 1, -2, 1, 6, -1, 2, -1, 1],
|
||||
[ 2, -1, -1, 6, -2, 1, 1, 1],
|
||||
[ 1, 1, -2, 6, -1, -1, 2, 1],
|
||||
[-1, 2, -1, 6, 1, -2, 1, 1],
|
||||
[-2, 1, 1, 6, 2, -1, -1, 1],
|
||||
[ 1, 0, -1, -2, 1, 0, -1, 1], # shear = (4(c/a)^2-9)/(4 sqrt(3) c/a) <10.-2>{10.1}
|
||||
[ 0, 1, -1, -2, 0, 1, -1, 1],
|
||||
[-1, 1, 0, -2, -1, 1, 0, 1],
|
||||
[-1, 0, 1, -2, -1, 0, 1, 1],
|
||||
[ 0, -1, 1, -2, 0, -1, 1, 1],
|
||||
[ 1, -1, 0, -2, 1, -1, 0, 1],
|
||||
[ 1, 1, -2, -3, 1, 1, -2, 2], # shear = 2((c/a)^2-2)/(3 c/a) <11.-3>{11.2}
|
||||
[-1, 2, -1, -3, -1, 2, -1, 2],
|
||||
[-2, 1, 1, -3, -2, 1, 1, 2],
|
||||
[-1, -1, 2, -3, -1, -1, 2, 2],
|
||||
[ 1, -2, 1, -3, 1, -2, 1, 2],
|
||||
[ 2, -1, -1, -3, 2, -1, -1, 2],
|
||||
],dtype=float),
|
||||
},
|
||||
}
|
||||
|
||||
# Kurdjumov--Sachs orientation relationship for fcc <-> bcc transformation
|
||||
# from S. Morito et al., Journal of Alloys and Compounds 577:s587-s592, 2013
|
||||
# also see K. Kitahara et al., Acta Materialia 54:1279-1288, 2006
|
||||
|
||||
relations = {
|
||||
'KS': {
|
||||
'cF' : _np.array([
|
||||
[[ -1, 0, 1],[ 1, 1, 1]],
|
||||
[[ -1, 0, 1],[ 1, 1, 1]],
|
||||
[[ 0, 1, -1],[ 1, 1, 1]],
|
||||
[[ 0, 1, -1],[ 1, 1, 1]],
|
||||
[[ 1, -1, 0],[ 1, 1, 1]],
|
||||
[[ 1, -1, 0],[ 1, 1, 1]],
|
||||
[[ 1, 0, -1],[ 1, -1, 1]],
|
||||
[[ 1, 0, -1],[ 1, -1, 1]],
|
||||
[[ -1, -1, 0],[ 1, -1, 1]],
|
||||
[[ -1, -1, 0],[ 1, -1, 1]],
|
||||
[[ 0, 1, 1],[ 1, -1, 1]],
|
||||
[[ 0, 1, 1],[ 1, -1, 1]],
|
||||
[[ 0, -1, 1],[ -1, 1, 1]],
|
||||
[[ 0, -1, 1],[ -1, 1, 1]],
|
||||
[[ -1, 0, -1],[ -1, 1, 1]],
|
||||
[[ -1, 0, -1],[ -1, 1, 1]],
|
||||
[[ 1, 1, 0],[ -1, 1, 1]],
|
||||
[[ 1, 1, 0],[ -1, 1, 1]],
|
||||
[[ -1, 1, 0],[ 1, 1, -1]],
|
||||
[[ -1, 1, 0],[ 1, 1, -1]],
|
||||
[[ 0, -1, -1],[ 1, 1, -1]],
|
||||
[[ 0, -1, -1],[ 1, 1, -1]],
|
||||
[[ 1, 0, 1],[ 1, 1, -1]],
|
||||
[[ 1, 0, 1],[ 1, 1, -1]],
|
||||
],dtype=float),
|
||||
'cI' : _np.array([
|
||||
[[ -1, -1, 1],[ 0, 1, 1]],
|
||||
[[ -1, 1, -1],[ 0, 1, 1]],
|
||||
[[ -1, -1, 1],[ 0, 1, 1]],
|
||||
[[ -1, 1, -1],[ 0, 1, 1]],
|
||||
[[ -1, -1, 1],[ 0, 1, 1]],
|
||||
[[ -1, 1, -1],[ 0, 1, 1]],
|
||||
[[ -1, -1, 1],[ 0, 1, 1]],
|
||||
[[ -1, 1, -1],[ 0, 1, 1]],
|
||||
[[ -1, -1, 1],[ 0, 1, 1]],
|
||||
[[ -1, 1, -1],[ 0, 1, 1]],
|
||||
[[ -1, -1, 1],[ 0, 1, 1]],
|
||||
[[ -1, 1, -1],[ 0, 1, 1]],
|
||||
[[ -1, -1, 1],[ 0, 1, 1]],
|
||||
[[ -1, 1, -1],[ 0, 1, 1]],
|
||||
[[ -1, -1, 1],[ 0, 1, 1]],
|
||||
[[ -1, 1, -1],[ 0, 1, 1]],
|
||||
[[ -1, -1, 1],[ 0, 1, 1]],
|
||||
[[ -1, 1, -1],[ 0, 1, 1]],
|
||||
[[ -1, -1, 1],[ 0, 1, 1]],
|
||||
[[ -1, 1, -1],[ 0, 1, 1]],
|
||||
[[ -1, -1, 1],[ 0, 1, 1]],
|
||||
[[ -1, 1, -1],[ 0, 1, 1]],
|
||||
[[ -1, -1, 1],[ 0, 1, 1]],
|
||||
[[ -1, 1, -1],[ 0, 1, 1]],
|
||||
],dtype=float),
|
||||
},
|
||||
'GT': {
|
||||
'cF' : _np.array([
|
||||
[[ -5,-12, 17],[ 1, 1, 1]],
|
||||
[[ 17, -5,-12],[ 1, 1, 1]],
|
||||
[[-12, 17, -5],[ 1, 1, 1]],
|
||||
[[ 5, 12, 17],[ -1, -1, 1]],
|
||||
[[-17, 5,-12],[ -1, -1, 1]],
|
||||
[[ 12,-17, -5],[ -1, -1, 1]],
|
||||
[[ -5, 12,-17],[ -1, 1, 1]],
|
||||
[[ 17, 5, 12],[ -1, 1, 1]],
|
||||
[[-12,-17, 5],[ -1, 1, 1]],
|
||||
[[ 5,-12,-17],[ 1, -1, 1]],
|
||||
[[-17, -5, 12],[ 1, -1, 1]],
|
||||
[[ 12, 17, 5],[ 1, -1, 1]],
|
||||
[[ -5, 17,-12],[ 1, 1, 1]],
|
||||
[[-12, -5, 17],[ 1, 1, 1]],
|
||||
[[ 17,-12, -5],[ 1, 1, 1]],
|
||||
[[ 5,-17,-12],[ -1, -1, 1]],
|
||||
[[ 12, 5, 17],[ -1, -1, 1]],
|
||||
[[-17, 12, -5],[ -1, -1, 1]],
|
||||
[[ -5,-17, 12],[ -1, 1, 1]],
|
||||
[[-12, 5,-17],[ -1, 1, 1]],
|
||||
[[ 17, 12, 5],[ -1, 1, 1]],
|
||||
[[ 5, 17, 12],[ 1, -1, 1]],
|
||||
[[ 12, -5,-17],[ 1, -1, 1]],
|
||||
[[-17,-12, 5],[ 1, -1, 1]],
|
||||
],dtype=float),
|
||||
'cI' : _np.array([
|
||||
[[-17, -7, 17],[ 1, 0, 1]],
|
||||
[[ 17,-17, -7],[ 1, 1, 0]],
|
||||
[[ -7, 17,-17],[ 0, 1, 1]],
|
||||
[[ 17, 7, 17],[ -1, 0, 1]],
|
||||
[[-17, 17, -7],[ -1, -1, 0]],
|
||||
[[ 7,-17,-17],[ 0, -1, 1]],
|
||||
[[-17, 7,-17],[ -1, 0, 1]],
|
||||
[[ 17, 17, 7],[ -1, 1, 0]],
|
||||
[[ -7,-17, 17],[ 0, 1, 1]],
|
||||
[[ 17, -7,-17],[ 1, 0, 1]],
|
||||
[[-17,-17, 7],[ 1, -1, 0]],
|
||||
[[ 7, 17, 17],[ 0, -1, 1]],
|
||||
[[-17, 17, -7],[ 1, 1, 0]],
|
||||
[[ -7,-17, 17],[ 0, 1, 1]],
|
||||
[[ 17, -7,-17],[ 1, 0, 1]],
|
||||
[[ 17,-17, -7],[ -1, -1, 0]],
|
||||
[[ 7, 17, 17],[ 0, -1, 1]],
|
||||
[[-17, 7,-17],[ -1, 0, 1]],
|
||||
[[-17,-17, 7],[ -1, 1, 0]],
|
||||
[[ -7, 17,-17],[ 0, 1, 1]],
|
||||
[[ 17, 7, 17],[ -1, 0, 1]],
|
||||
[[ 17, 17, 7],[ 1, -1, 0]],
|
||||
[[ 7,-17,-17],[ 0, -1, 1]],
|
||||
[[-17, -7, 17],[ 1, 0, 1]],
|
||||
],dtype=float),
|
||||
},
|
||||
'GT_prime': {
|
||||
'cF' : _np.array([
|
||||
[[ 0, 1, -1],[ 7, 17, 17]],
|
||||
[[ -1, 0, 1],[ 17, 7, 17]],
|
||||
[[ 1, -1, 0],[ 17, 17, 7]],
|
||||
[[ 0, -1, -1],[ -7,-17, 17]],
|
||||
[[ 1, 0, 1],[-17, -7, 17]],
|
||||
[[ 1, -1, 0],[-17,-17, 7]],
|
||||
[[ 0, 1, -1],[ 7,-17,-17]],
|
||||
[[ 1, 0, 1],[ 17, -7,-17]],
|
||||
[[ -1, -1, 0],[ 17,-17, -7]],
|
||||
[[ 0, -1, -1],[ -7, 17,-17]],
|
||||
[[ -1, 0, 1],[-17, 7,-17]],
|
||||
[[ -1, -1, 0],[-17, 17, -7]],
|
||||
[[ 0, -1, 1],[ 7, 17, 17]],
|
||||
[[ 1, 0, -1],[ 17, 7, 17]],
|
||||
[[ -1, 1, 0],[ 17, 17, 7]],
|
||||
[[ 0, 1, 1],[ -7,-17, 17]],
|
||||
[[ -1, 0, -1],[-17, -7, 17]],
|
||||
[[ -1, 1, 0],[-17,-17, 7]],
|
||||
[[ 0, -1, 1],[ 7,-17,-17]],
|
||||
[[ -1, 0, -1],[ 17, -7,-17]],
|
||||
[[ 1, 1, 0],[ 17,-17, -7]],
|
||||
[[ 0, 1, 1],[ -7, 17,-17]],
|
||||
[[ 1, 0, -1],[-17, 7,-17]],
|
||||
[[ 1, 1, 0],[-17, 17, -7]],
|
||||
],dtype=float),
|
||||
'cI' : _np.array([
|
||||
[[ 1, 1, -1],[ 12, 5, 17]],
|
||||
[[ -1, 1, 1],[ 17, 12, 5]],
|
||||
[[ 1, -1, 1],[ 5, 17, 12]],
|
||||
[[ -1, -1, -1],[-12, -5, 17]],
|
||||
[[ 1, -1, 1],[-17,-12, 5]],
|
||||
[[ 1, -1, -1],[ -5,-17, 12]],
|
||||
[[ -1, 1, -1],[ 12, -5,-17]],
|
||||
[[ 1, 1, 1],[ 17,-12, -5]],
|
||||
[[ -1, -1, 1],[ 5,-17,-12]],
|
||||
[[ 1, -1, -1],[-12, 5,-17]],
|
||||
[[ -1, -1, 1],[-17, 12, -5]],
|
||||
[[ -1, -1, -1],[ -5, 17,-12]],
|
||||
[[ 1, -1, 1],[ 12, 17, 5]],
|
||||
[[ 1, 1, -1],[ 5, 12, 17]],
|
||||
[[ -1, 1, 1],[ 17, 5, 12]],
|
||||
[[ -1, 1, 1],[-12,-17, 5]],
|
||||
[[ -1, -1, -1],[ -5,-12, 17]],
|
||||
[[ -1, 1, -1],[-17, -5, 12]],
|
||||
[[ -1, -1, 1],[ 12,-17, -5]],
|
||||
[[ -1, 1, -1],[ 5,-12,-17]],
|
||||
[[ 1, 1, 1],[ 17, -5,-12]],
|
||||
[[ 1, 1, 1],[-12, 17, -5]],
|
||||
[[ 1, -1, -1],[ -5, 12,-17]],
|
||||
[[ 1, 1, -1],[-17, 5,-12]],
|
||||
],dtype=float),
|
||||
},
|
||||
'NW': {
|
||||
'cF' : _np.array([
|
||||
[[ 2, -1, -1],[ 1, 1, 1]],
|
||||
[[ -1, 2, -1],[ 1, 1, 1]],
|
||||
[[ -1, -1, 2],[ 1, 1, 1]],
|
||||
[[ -2, -1, -1],[ -1, 1, 1]],
|
||||
[[ 1, 2, -1],[ -1, 1, 1]],
|
||||
[[ 1, -1, 2],[ -1, 1, 1]],
|
||||
[[ 2, 1, -1],[ 1, -1, 1]],
|
||||
[[ -1, -2, -1],[ 1, -1, 1]],
|
||||
[[ -1, 1, 2],[ 1, -1, 1]],
|
||||
[[ 2, -1, 1],[ -1, -1, 1]],
|
||||
[[ -1, 2, 1],[ -1, -1, 1]],
|
||||
[[ -1, -1, -2],[ -1, -1, 1]],
|
||||
],dtype=float),
|
||||
'cI' : _np.array([
|
||||
[[ 0, -1, 1],[ 0, 1, 1]],
|
||||
[[ 0, -1, 1],[ 0, 1, 1]],
|
||||
[[ 0, -1, 1],[ 0, 1, 1]],
|
||||
[[ 0, -1, 1],[ 0, 1, 1]],
|
||||
[[ 0, -1, 1],[ 0, 1, 1]],
|
||||
[[ 0, -1, 1],[ 0, 1, 1]],
|
||||
[[ 0, -1, 1],[ 0, 1, 1]],
|
||||
[[ 0, -1, 1],[ 0, 1, 1]],
|
||||
[[ 0, -1, 1],[ 0, 1, 1]],
|
||||
[[ 0, -1, 1],[ 0, 1, 1]],
|
||||
[[ 0, -1, 1],[ 0, 1, 1]],
|
||||
[[ 0, -1, 1],[ 0, 1, 1]],
|
||||
],dtype=float),
|
||||
},
|
||||
'Pitsch': {
|
||||
'cF' : _np.array([
|
||||
[[ 1, 0, 1],[ 0, 1, 0]],
|
||||
[[ 1, 1, 0],[ 0, 0, 1]],
|
||||
[[ 0, 1, 1],[ 1, 0, 0]],
|
||||
[[ 0, 1, -1],[ 1, 0, 0]],
|
||||
[[ -1, 0, 1],[ 0, 1, 0]],
|
||||
[[ 1, -1, 0],[ 0, 0, 1]],
|
||||
[[ 1, 0, -1],[ 0, 1, 0]],
|
||||
[[ -1, 1, 0],[ 0, 0, 1]],
|
||||
[[ 0, -1, 1],[ 1, 0, 0]],
|
||||
[[ 0, 1, 1],[ 1, 0, 0]],
|
||||
[[ 1, 0, 1],[ 0, 1, 0]],
|
||||
[[ 1, 1, 0],[ 0, 0, 1]],
|
||||
],dtype=float),
|
||||
'cI' : _np.array([
|
||||
[[ 1, -1, 1],[ -1, 0, 1]],
|
||||
[[ 1, 1, -1],[ 1, -1, 0]],
|
||||
[[ -1, 1, 1],[ 0, 1, -1]],
|
||||
[[ -1, 1, -1],[ 0, -1, -1]],
|
||||
[[ -1, -1, 1],[ -1, 0, -1]],
|
||||
[[ 1, -1, -1],[ -1, -1, 0]],
|
||||
[[ 1, -1, -1],[ -1, 0, -1]],
|
||||
[[ -1, 1, -1],[ -1, -1, 0]],
|
||||
[[ -1, -1, 1],[ 0, -1, -1]],
|
||||
[[ -1, 1, 1],[ 0, -1, 1]],
|
||||
[[ 1, -1, 1],[ 1, 0, -1]],
|
||||
[[ 1, 1, -1],[ -1, 1, 0]],
|
||||
],dtype=float),
|
||||
},
|
||||
'Bain': {
|
||||
'cF' : _np.array([
|
||||
[[ 0, 1, 0],[ 1, 0, 0]],
|
||||
[[ 0, 0, 1],[ 0, 1, 0]],
|
||||
[[ 1, 0, 0],[ 0, 0, 1]],
|
||||
],dtype=float),
|
||||
'cI' : _np.array([
|
||||
[[ 0, 1, 1],[ 1, 0, 0]],
|
||||
[[ 1, 0, 1],[ 0, 1, 0]],
|
||||
[[ 1, 1, 0],[ 0, 0, 1]],
|
||||
],dtype=float),
|
||||
},
|
||||
'Burgers' : {
|
||||
'cI' : _np.array([
|
||||
[[ -1, 1, 1],[ 1, 1, 0]],
|
||||
[[ -1, 1, -1],[ 1, 1, 0]],
|
||||
[[ 1, 1, 1],[ 1, -1, 0]],
|
||||
[[ 1, 1, -1],[ 1, -1, 0]],
|
||||
|
||||
[[ 1, 1, -1],[ 1, 0, 1]],
|
||||
[[ -1, 1, 1],[ 1, 0, 1]],
|
||||
[[ 1, 1, 1],[ -1, 0, 1]],
|
||||
[[ 1, -1, 1],[ -1, 0, 1]],
|
||||
|
||||
[[ -1, 1, -1],[ 0, 1, 1]],
|
||||
[[ 1, 1, -1],[ 0, 1, 1]],
|
||||
[[ -1, 1, 1],[ 0, -1, 1]],
|
||||
[[ 1, 1, 1],[ 0, -1, 1]],
|
||||
],dtype=float),
|
||||
'hP' : _np.array([
|
||||
[[ -1, 2, -1, 0],[ 0, 0, 0, 1]],
|
||||
[[ -1, -1, 2, 0],[ 0, 0, 0, 1]],
|
||||
[[ -1, 2, -1, 0],[ 0, 0, 0, 1]],
|
||||
[[ -1, -1, 2, 0],[ 0, 0, 0, 1]],
|
||||
|
||||
[[ -1, 2, -1, 0],[ 0, 0, 0, 1]],
|
||||
[[ -1, -1, 2, 0],[ 0, 0, 0, 1]],
|
||||
[[ -1, 2, -1, 0],[ 0, 0, 0, 1]],
|
||||
[[ -1, -1, 2, 0],[ 0, 0, 0, 1]],
|
||||
|
||||
[[ -1, 2, -1, 0],[ 0, 0, 0, 1]],
|
||||
[[ -1, -1, 2, 0],[ 0, 0, 0, 1]],
|
||||
[[ -1, 2, -1, 0],[ 0, 0, 0, 1]],
|
||||
[[ -1, -1, 2, 0],[ 0, 0, 0, 1]],
|
||||
],dtype=float),
|
||||
},
|
||||
}
|
|
@ -257,18 +257,18 @@ def _polar_decomposition(T,requested):
|
|||
u, _, vh = _np.linalg.svd(T)
|
||||
R = _np.einsum('...ij,...jk',u,vh)
|
||||
|
||||
output = []
|
||||
output = ()
|
||||
if 'R' in requested:
|
||||
output.append(R)
|
||||
output+=(R,)
|
||||
if 'V' in requested:
|
||||
output.append(_np.einsum('...ij,...kj',T,R))
|
||||
output+=(_np.einsum('...ij,...kj',T,R),)
|
||||
if 'U' in requested:
|
||||
output.append(_np.einsum('...ji,...jk',R,T))
|
||||
output+=(_np.einsum('...ji,...jk',R,T),)
|
||||
|
||||
if len(output) == 0:
|
||||
raise ValueError('output needs to be out of V, R, U')
|
||||
|
||||
return tuple(output)
|
||||
return output
|
||||
|
||||
|
||||
def _equivalent_Mises(T_sym,s):
|
||||
|
|
|
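The change above only swaps list accumulation for direct tuple construction; the decomposition itself is unchanged. For orientation, a numpy sketch of what the three requested factors satisfy (T = V·R = R·U, with R the orthogonal factor from the SVD, a proper rotation when det(T) > 0):

    import numpy as np

    T = np.eye(3) + 0.1*np.random.rand(3,3)   # some deformation-like tensor
    u,_,vh = np.linalg.svd(T)
    R = u @ vh                                # orthogonal factor
    V = T @ R.T                               # left stretch  (einsum '...ij,...kj' above)
    U = R.T @ T                               # right stretch (einsum '...ji,...jk' above)
    assert np.allclose(V @ R, T) and np.allclose(R @ U, T)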
@ -8,7 +8,7 @@ _msc_root = '/opt/msc'
|
|||
_damask_root = str(Path(__file__).parents[3])
|
||||
|
||||
class Marc:
|
||||
"""Wrapper to run DAMASK with MSCMarc."""
|
||||
"""Wrapper to run DAMASK with MSC.Marc."""
|
||||
|
||||
def __init__(self,msc_version=_msc_version,msc_root=_msc_root,damask_root=_damask_root):
|
||||
"""
|
||||
|
@ -47,20 +47,33 @@ class Marc:
|
|||
def submit_job(self, model, job,
|
||||
compile = False,
|
||||
optimization = ''):
|
||||
"""
|
||||
Assemble command line arguments and call Marc executable.
|
||||
|
||||
usersub = self.damask_root/'src/DAMASK_Marc'
|
||||
usersub = usersub.parent/(usersub.name + ('.f90' if compile else '.marc'))
|
||||
Parameters
|
||||
----------
|
||||
model : str
|
||||
Name of model.
|
||||
job : str
|
||||
Name of job.
|
||||
compile : bool, optional
|
||||
Compile DAMASK_Marc user subroutine (and save for future use).
|
||||
Defaults to False.
|
||||
optimization : str, optional
|
||||
Optimization level '' (-O0), 'l' (-O1), or 'h' (-O3).
|
||||
Defaults to ''.
|
||||
|
||||
"""
|
||||
usersub = (self.damask_root/'src/DAMASK_Marc').with_suffix('.f90' if compile else '.marc')
|
||||
if not usersub.is_file():
|
||||
raise FileNotFoundError(f'subroutine ({"source" if compile else "binary"}) "{usersub}" not found')
|
||||
|
||||
# Define options [see Marc Installation and Operation Guide, pp 23]
|
||||
script = f'run_damask_{optimization}mp'
|
||||
|
||||
cmd = str(self.tools_path/script) + \
|
||||
' -jid ' + model+'_'+job + \
|
||||
' -nprocd 1 -autorst 0 -ci n -cr n -dcoup 0 -b no -v no'
|
||||
cmd += ' -u ' + str(usersub) + ' -save y' if compile else \
|
||||
' -prog ' + str(usersub.with_suffix(''))
|
||||
cmd = f'{self.tools_path/script} -jid {model}_{job} -nprocd 1 -autorst 0 -ci n -cr n -dcoup 0 -b no -v no ' \
|
||||
+ (f'-u {usersub} -save y' if compile else f'-prog {usersub.with_suffix("")}')
|
||||
|
||||
print(cmd)
|
||||
|
||||
ret = subprocess.run(shlex.split(cmd),capture_output=True)
|
||||
|
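A hedged usage sketch of the documented call, assuming the wrapper is exposed as damask.solver.Marc and that the Marc input deck <model>_<job> has been prepared beforehand (the model and job names here are placeholders):

    import damask

    solver = damask.solver.Marc()                      # default msc_version/msc_root/damask_root
    solver.submit_job(model='my_model',job='my_job',   # runs my_model_my_job via run_damask_mp
                      compile=False)                   # use the prebuilt DAMASK_Marc.marc subroutine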
@ -75,4 +88,3 @@ class Marc:
|
|||
print(ret.stderr.decode())
|
||||
print(ret.stdout.decode())
|
||||
raise RuntimeError('Marc simulation failed (unknown return value)')
|
||||
|
||||
|
|
|
@ -17,8 +17,8 @@ from . import version
|
|||
# limit visibility
|
||||
__all__=[
|
||||
'srepr',
|
||||
'emph','deemph','warn','strikeout',
|
||||
'execute',
|
||||
'emph', 'deemph', 'warn', 'strikeout',
|
||||
'run',
|
||||
'natural_sort',
|
||||
'show_progress',
|
||||
'scale_to_coprime',
|
||||
|
@ -27,6 +27,7 @@ __all__=[
|
|||
'execution_stamp',
|
||||
'shapeshifter', 'shapeblender',
|
||||
'extend_docstring', 'extended_docstring',
|
||||
'Bravais_to_Miller', 'Miller_to_Bravais',
|
||||
'DREAM3D_base_group', 'DREAM3D_cell_data_group',
|
||||
'dict_prune', 'dict_flatten'
|
||||
]
|
||||
|
@ -143,9 +144,9 @@ def strikeout(what):
|
|||
return _colors['crossout']+srepr(what)+_colors['end_color']
|
||||
|
||||
|
||||
def execute(cmd,wd='./',env=None):
|
||||
def run(cmd,wd='./',env=None,timeout=None):
|
||||
"""
|
||||
Execute command.
|
||||
Run a command.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
|
@ -155,20 +156,23 @@ def execute(cmd,wd='./',env=None):
|
|||
Working directory of process. Defaults to ./ .
|
||||
env : dict, optional
|
||||
Environment for execution.
|
||||
timeout : integer, optional
|
||||
Timeout in seconds.
|
||||
|
||||
Returns
|
||||
-------
|
||||
stdout, stderr : str
|
||||
stdout, stderr : (str, str)
|
||||
Output of the executed command.
|
||||
|
||||
"""
|
||||
print(f"executing '{cmd}' in '{wd}'")
|
||||
print(f"running '{cmd}' in '{wd}'")
|
||||
process = subprocess.run(shlex.split(cmd),
|
||||
stdout = subprocess.PIPE,
|
||||
stderr = subprocess.PIPE,
|
||||
env = os.environ if env is None else env,
|
||||
cwd = wd,
|
||||
encoding = 'utf-8')
|
||||
encoding = 'utf-8',
|
||||
timeout = timeout)
|
||||
|
||||
if process.returncode != 0:
|
||||
print(process.stdout)
|
||||
|
@ -178,6 +182,9 @@ def execute(cmd,wd='./',env=None):
|
|||
return process.stdout, process.stderr
|
||||
|
||||
|
||||
execute = run
|
||||
|
||||
|
||||
def natural_sort(key):
|
||||
"""
|
||||
Natural sort.
|
||||
|
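A short usage sketch of the renamed helper; the old name remains available through the execute = run alias added above, and a non-zero return code causes the command's output to be reported:

    import damask

    out,err = damask.util.run('echo hello',wd='.',timeout=10)   # -> ('hello\n', '')
    out,err = damask.util.execute('echo hello')                 # same function via the backwards-compatible alias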
@ -286,6 +293,8 @@ def project_stereographic(vector,direction='z',normalize=True,keepdims=False):
|
|||
|
||||
Examples
|
||||
--------
|
||||
>>> import damask
|
||||
>>> import numpy as np
|
||||
>>> project_stereographic(np.ones(3))
|
||||
[0.3660254, 0.3660254]
|
||||
>>> project_stereographic(np.ones(3),direction='x',normalize=False,keepdims=True)
|
||||
|
@ -338,7 +347,7 @@ def hybrid_IA(dist,N,rng_seed=None):
|
|||
|
||||
def shapeshifter(fro,to,mode='left',keep_ones=False):
|
||||
"""
|
||||
Return a tuple that reshapes 'fro' to become broadcastable to 'to'.
|
||||
Return dimensions that reshape 'fro' to become broadcastable to 'to'.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
|
@ -355,7 +364,25 @@ def shapeshifter(fro,to,mode='left',keep_ones=False):
|
|||
Treat '1' in fro as literal value instead of dimensional placeholder.
|
||||
Defaults to False.
|
||||
|
||||
Returns
|
||||
-------
|
||||
new_dims : tuple
|
||||
Dimensions for reshape.
|
||||
|
||||
Example
|
||||
-------
|
||||
>>> import numpy as np
|
||||
>>> from damask import util
|
||||
>>> a = np.ones((3,4,2))
|
||||
>>> b = np.ones(4)
|
||||
>>> b_extended = b.reshape(util.shapeshifter(b.shape,a.shape))
|
||||
>>> (a * np.broadcast_to(b_extended,a.shape)).shape
|
||||
(3,4,2)
|
||||
|
||||
|
||||
"""
|
||||
if not len(fro) and not len(to): return ()
|
||||
|
||||
beg = dict(left ='(^.*\\b)',
|
||||
right='(^.*?\\b)')
|
||||
sep = dict(left ='(.*\\b)',
|
||||
|
@ -499,6 +526,62 @@ def DREAM3D_cell_data_group(fname):
|
|||
return cell_data_group
|
||||
|
||||
|
||||
def Bravais_to_Miller(*,uvtw=None,hkil=None):
|
||||
"""
|
||||
Transform 4 Miller–Bravais indices to 3 Miller indices of crystal direction [uvw] or plane normal (hkl).
|
||||
|
||||
Parameters
|
||||
----------
|
||||
uvtw|hkil : numpy.ndarray of shape (...,4)
|
||||
Miller–Bravais indices of crystallographic direction [uvtw] or plane normal (hkil).
|
||||
|
||||
Returns
|
||||
-------
|
||||
uvw|hkl : numpy.ndarray of shape (...,3)
|
||||
Miller indices of [uvw] direction or (hkl) plane normal.
|
||||
|
||||
"""
|
||||
if (uvtw is not None) ^ (hkil is None):
|
||||
raise KeyError('Specify either "uvtw" or "hkil"')
|
||||
axis,basis = (np.array(uvtw),np.array([[1,0,-1,0],
|
||||
[0,1,-1,0],
|
||||
[0,0, 0,1]])) \
|
||||
if hkil is None else \
|
||||
(np.array(hkil),np.array([[1,0,0,0],
|
||||
[0,1,0,0],
|
||||
[0,0,0,1]]))
|
||||
return np.einsum('il,...l',basis,axis)
|
||||
|
||||
|
||||
def Miller_to_Bravais(*,uvw=None,hkl=None):
|
||||
"""
|
||||
Transform 3 Miller indices to 4 Miller–Bravais indices of crystal direction [uvtw] or plane normal (hkil).
|
||||
|
||||
Parameters
|
||||
----------
|
||||
uvw|hkl : numpy.ndarray of shape (...,3)
|
||||
Miller indices of crystallographic direction [uvw] or plane normal (hkl).
|
||||
|
||||
Returns
|
||||
-------
|
||||
uvtw|hkil : numpy.ndarray of shape (...,4)
|
||||
Miller–Bravais indices of [uvtw] direction or (hkil) plane normal.
|
||||
|
||||
"""
|
||||
if (uvw is not None) ^ (hkl is None):
|
||||
raise KeyError('Specify either "uvw" or "hkl"')
|
||||
axis,basis = (np.array(uvw),np.array([[ 2,-1, 0],
|
||||
[-1, 2, 0],
|
||||
[-1,-1, 0],
|
||||
[ 0, 0, 3]])/3) \
|
||||
if hkl is None else \
|
||||
(np.array(hkl),np.array([[ 1, 0, 0],
|
||||
[ 0, 1, 0],
|
||||
[-1,-1, 0],
|
||||
[ 0, 0, 1]]))
|
||||
return np.einsum('il,...l',basis,axis)
|
||||
|
||||
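Both helpers are exported through __all__ above, so a quick sketch of the index conversion (the values follow directly from the two basis matrices):

    import numpy as np
    from damask import util

    util.Bravais_to_Miller(hkil=np.array([1,0,-1,0]))   # -> [1, 0, 0]              (the redundant i = -(h+k) is dropped)
    util.Miller_to_Bravais(uvw =np.array([1,0,0]))      # -> [2/3, -1/3, -1/3, 0],  i.e. proportional to [2 -1 -1 0]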
|
||||
def dict_prune(d):
|
||||
"""
|
||||
Recursively remove empty dictionaries.
|
||||
|
|
|
@ -16,14 +16,15 @@ setuptools.setup(
|
|||
url='https://damask.mpie.de',
|
||||
packages=setuptools.find_packages(),
|
||||
include_package_data=True,
|
||||
python_requires = '>=3.6',
|
||||
python_requires = '>=3.7',
|
||||
install_requires = [
|
||||
'pandas>=0.24', # requires numpy
|
||||
'numpy>=1.17', # needed for default_rng
|
||||
'scipy>=1.2',
|
||||
'h5py>=2.9', # requires numpy
|
||||
'vtk>=8.1',
|
||||
'matplotlib>=3.0', # requires numpy, pillow
|
||||
'pyaml>=3.12'
|
||||
'pyyaml>=3.12'
|
||||
],
|
||||
classifiers = [
|
||||
'Intended Audience :: Science/Research',
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
<ImageData WholeExtent="0 5 0 6 0 7" Origin="0 0 0" Spacing="0.2 0.333333333333 0.428571428571" Direction="1 0 0 0 1 0 0 0 1">
|
||||
<FieldData>
|
||||
<Array type="String" Name="comments" NumberOfTuples="1" format="binary">
|
||||
AQAAAACAAABPAAAAVgAAAA==eF4FwW0KgCAMANCO4s8iJnNLIS/Qv64Qo9kHGYlB5+89lVvey071VJsf0WVOJVXzsUWLILkcwkAUYCfdfJCwDqYlJAcYwHlDHB1FHnskxK75AdGCFTk=
|
||||
AQAAAACAAABPAAAAVQAAAA==eF4FwVEKgCAQBcCO4mcRK291heoC/XWF2FAqMhKDzt9M1Fvfy871jDY/GtcllVTN5y0sSHM5VEhYaMcQePMMNq2DY8JI7A2HCW4S6eGArvkBvNAUlg==
|
||||
</Array>
|
||||
</FieldData>
|
||||
<Piece Extent="0 5 0 6 0 7">
|
||||
|
|
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
|
@ -5,12 +5,12 @@ solver:
|
|||
loadstep:
|
||||
- boundary_conditions:
|
||||
mechanical:
|
||||
dot_F: [x, 0, 0,
|
||||
0, -1.0e-3, 0,
|
||||
0, 0, x]
|
||||
P: [0, x, x,
|
||||
x, x, x,
|
||||
x, x, 0]
|
||||
dot_F: [[x, 0, 0],
|
||||
[0, -1.0e-3, 0],
|
||||
[0, 0, x]]
|
||||
P: [[0, x, x],
|
||||
[x, x, x],
|
||||
[x, x, 0]]
|
||||
discretization:
|
||||
t: 5
|
||||
N: 10
|
||||
|
|
|
@ -1 +1 @@
|
|||
d5db0be324a959f00245e42704ea2d6f
|
||||
a40baead936c79dd4f86f84ad858b9fa
|
|
@ -1 +1 @@
|
|||
fe9ddaf54ac1fb785094251d29fcdc9c
|
||||
6fb37bd65934de859dd6b6e0191e7d64
|
|
@ -1 +1 @@
|
|||
9c8ac0bb1eb4a7b9ebc3e5fd5840b0a4
|
||||
61953c35f61f3234b98d78a912e7dc83
|
|
@ -0,0 +1 @@
|
|||
bb783bb80ff04dd435e814f4b82a3234
|
|
@ -1 +1 @@
|
|||
1b9ebd17c5257e2edba48d006f25d4e6
|
||||
4f85d2613aa70622a2d5f49dc8bf2eb2
|
|
@ -1 +1 @@
|
|||
8967bb1a6c329a072baaa83da534ae56
|
||||
e1ca5306082fc3ab411f5ddab1a2e370
|
|
@ -1 +1 @@
|
|||
752e8b6186ad2b6b1b5c781940669cb1
|
||||
1641c3b3641e942ffc325d471bdfaf00
|
|
@ -0,0 +1 @@
|
|||
ba97286c5d95bf817143f7bb9cf58421
|
|
@ -5,12 +5,12 @@ solver:
loadstep:
  - boundary_conditions:
      mechanical:
        dot_F: [x, 0, 0,
                0, 1.0e-3, 0,
                0, 0, x]
        P: [0, x, x,
            x, x, x,
            x, x, 0]
        dot_F: [[x, 0, 0],
                [0, 1.0e-3, 0],
                [0, 0, x]]
        P: [[0, x, x],
            [x, x, x],
            [x, x, 0]]
    discretization:
      t: 20
      N: 40
@ -0,0 +1,81 @@
import pytest
import numpy as np

import damask
from damask import Crystal

class TestCrystal:

    @pytest.mark.parametrize('lattice,family',[('aP','cubic'),('xI','cubic')])
    def test_invalid_init(self,lattice,family):
        with pytest.raises(KeyError):
            Crystal(family=family,lattice=lattice)

    def test_eq(self):
        family = np.random.choice(list(damask._crystal.lattice_symmetries.values()))
        assert Crystal(family=family) == Crystal(family=family)

    def test_double_to_lattice(self):
        c = Crystal(lattice='cF')
        with pytest.raises(KeyError):
            c.to_lattice(direction=np.ones(3),plane=np.ones(3))

    def test_double_to_frame(self):
        c = Crystal(lattice='cF')
        with pytest.raises(KeyError):
            c.to_frame(uvw=np.ones(3),hkl=np.ones(3))
@pytest.mark.parametrize('lattice,a,b,c,alpha,beta,gamma',
|
||||
[
|
||||
('aP',0.5,2.0,3.0,0.8,0.5,1.2),
|
||||
('mP',1.0,2.0,3.0,np.pi/2,0.5,np.pi/2),
|
||||
('oI',0.5,1.5,3.0,np.pi/2,np.pi/2,np.pi/2),
|
||||
('tP',0.5,0.5,3.0,np.pi/2,np.pi/2,np.pi/2),
|
||||
('hP',1.0,None,1.6,np.pi/2,np.pi/2,2*np.pi/3),
|
||||
('cF',1.0,1.0,None,np.pi/2,np.pi/2,np.pi/2),
|
||||
])
|
||||
def test_bases_contraction(self,lattice,a,b,c,alpha,beta,gamma):
|
||||
c = Crystal(lattice=lattice,
|
||||
a=a,b=b,c=c,
|
||||
alpha=alpha,beta=beta,gamma=gamma)
|
||||
assert np.allclose(np.eye(3),np.einsum('ik,jk',c.basis_real,c.basis_reciprocal))
|
||||
|
||||
|
||||
@pytest.mark.parametrize('keyFrame,keyLattice',[('uvw','direction'),('hkl','plane'),])
|
||||
@pytest.mark.parametrize('vector',np.array([
|
||||
[1.,1.,1.],
|
||||
[-2.,3.,0.5],
|
||||
[0.,0.,1.],
|
||||
[1.,1.,1.],
|
||||
[2.,2.,2.],
|
||||
[0.,1.,1.],
|
||||
]))
|
||||
@pytest.mark.parametrize('lattice,a,b,c,alpha,beta,gamma',
|
||||
[
|
||||
('aP',0.5,2.0,3.0,0.8,0.5,1.2),
|
||||
('mP',1.0,2.0,3.0,np.pi/2,0.5,np.pi/2),
|
||||
('oI',0.5,1.5,3.0,np.pi/2,np.pi/2,np.pi/2),
|
||||
('tP',0.5,0.5,3.0,np.pi/2,np.pi/2,np.pi/2),
|
||||
('hP',1.0,1.0,1.6,np.pi/2,np.pi/2,2*np.pi/3),
|
||||
('cF',1.0,1.0,1.0,np.pi/2,np.pi/2,np.pi/2),
|
||||
])
|
||||
def test_to_frame_to_lattice(self,lattice,a,b,c,alpha,beta,gamma,vector,keyFrame,keyLattice):
|
||||
c = Crystal(lattice=lattice,
|
||||
a=a,b=b,c=c,
|
||||
alpha=alpha,beta=beta,gamma=gamma)
|
||||
assert np.allclose(vector,
|
||||
c.to_frame(**{keyFrame:c.to_lattice(**{keyLattice:vector})}))
|
||||
|
||||
@pytest.mark.parametrize('lattice,a,b,c,alpha,beta,gamma,points',
|
||||
[
|
||||
('aP',0.5,2.0,3.0,0.8,0.5,1.2,[[0,0,0]]),
|
||||
('mS',1.0,2.0,3.0,np.pi/2,0.5,np.pi/2,[[0,0,0],[0.5,0.5,0.0]]),
|
||||
('oI',0.5,1.5,3.0,np.pi/2,np.pi/2,np.pi/2,[[0,0,0],[0.5,0.5,0.5]]),
|
||||
('hP',1.0,1.0,1.6,np.pi/2,np.pi/2,2*np.pi/3,[[0,0,0],[2./3.,1./3.,0.5]]),
|
||||
('cF',1.0,1.0,1.0,np.pi/2,np.pi/2,np.pi/2,[[0,0,0],[0.0,0.5,0.5],[0.5,0.0,0.5],[0.5,0.5,0.0]]),
|
||||
])
|
||||
def test_lattice_points(self,lattice,a,b,c,alpha,beta,gamma,points):
|
||||
c = Crystal(lattice=lattice,
|
||||
a=a,b=b,c=c,
|
||||
alpha=alpha,beta=beta,gamma=gamma)
|
||||
assert np.allclose(points,c.lattice_points)
|
|
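A short usage sketch of the Crystal API exercised by the new test file above, mirroring the frame/lattice round trip of test_to_frame_to_lattice; the lattice parameters are taken from the test table and are illustrative only:

import numpy as np
from damask import Crystal

c = Crystal(lattice='cF', a=1.0, b=1.0,
            alpha=np.pi/2, beta=np.pi/2, gamma=np.pi/2)   # parameters as in the test table
v = np.array([1., 1., 1.])
# frame -> lattice -> frame round trip, as asserted in test_to_frame_to_lattice
assert np.allclose(v, c.to_frame(uvw=c.to_lattice(direction=v)))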
@ -1,5 +1,6 @@
|
|||
import pytest
|
||||
import numpy as np
|
||||
import vtk
|
||||
|
||||
from damask import VTK
|
||||
from damask import Grid
|
||||
|
@ -410,6 +411,7 @@ class TestGrid:
|
|||
|
||||
@pytest.mark.parametrize('periodic',[True,False])
|
||||
@pytest.mark.parametrize('direction',['x','y','z',['x','y'],'zy','xz',['x','y','z']])
|
||||
@pytest.mark.xfail(int(vtk.vtkVersion.GetVTKVersion().split('.')[0])<8, reason='missing METADATA')
|
||||
def test_get_grain_boundaries(self,update,ref_path,periodic,direction):
|
||||
grid = Grid.load(ref_path/'get_grain_boundaries_8g12x15x20.vti')
|
||||
current = grid.get_grain_boundaries(periodic,direction)
|
||||
|
|
|
@ -7,10 +7,9 @@ from damask import Orientation
|
|||
from damask import Table
|
||||
from damask import util
|
||||
from damask import grid_filters
|
||||
from damask import lattice
|
||||
from damask import _orientation
|
||||
from damask import _crystal
|
||||
|
||||
crystal_families = set(_orientation.lattice_symmetries.values())
|
||||
crystal_families = set(_crystal.lattice_symmetries.values())
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
|
@ -25,38 +24,42 @@ def set_of_rodrigues(set_of_quaternions):
|
|||
|
||||
class TestOrientation:
|
||||
|
||||
@pytest.mark.parametrize('lattice',crystal_families)
|
||||
@pytest.mark.parametrize('family',crystal_families)
|
||||
@pytest.mark.parametrize('shape',[None,5,(4,6)])
|
||||
def test_equal(self,lattice,shape):
|
||||
def test_equal(self,family,shape):
|
||||
R = Rotation.from_random(shape)
|
||||
assert Orientation(R,lattice) == Orientation(R,lattice) if shape is None else \
|
||||
(Orientation(R,lattice) == Orientation(R,lattice)).all()
|
||||
assert Orientation(R,family=family) == Orientation(R,family=family) if shape is None else \
|
||||
(Orientation(R,family=family) == Orientation(R,family=family)).all()
|
||||
|
||||
|
||||
@pytest.mark.parametrize('lattice',crystal_families)
|
||||
@pytest.mark.parametrize('family',crystal_families)
|
||||
@pytest.mark.parametrize('shape',[None,5,(4,6)])
|
||||
def test_unequal(self,lattice,shape):
|
||||
def test_unequal(self,family,shape):
|
||||
R = Rotation.from_random(shape)
|
||||
assert not ( Orientation(R,lattice) != Orientation(R,lattice) if shape is None else \
|
||||
(Orientation(R,lattice) != Orientation(R,lattice)).any())
|
||||
assert not ( Orientation(R,family=family) != Orientation(R,family=family) if shape is None else \
|
||||
(Orientation(R,family=family) != Orientation(R,family=family)).any())
|
||||
|
||||
@pytest.mark.parametrize('lattice',crystal_families)
|
||||
@pytest.mark.parametrize('family',crystal_families)
|
||||
@pytest.mark.parametrize('shape',[None,5,(4,6)])
|
||||
def test_close(self,lattice,shape):
|
||||
R = Orientation.from_random(lattice=lattice,shape=shape)
|
||||
def test_close(self,family,shape):
|
||||
R = Orientation.from_random(family=family,shape=shape)
|
||||
assert R.isclose(R.reduced).all() and R.allclose(R.reduced)
|
||||
|
||||
@pytest.mark.parametrize('a,b',[
|
||||
(dict(rotation=[1,0,0,0],lattice='triclinic'),
|
||||
dict(rotation=[0.5,0.5,0.5,0.5],lattice='triclinic')),
|
||||
(dict(rotation=[1,0,0,0],family='triclinic'),
|
||||
dict(rotation=[0.5,0.5,0.5,0.5],family='triclinic')),
|
||||
|
||||
(dict(rotation=[1,0,0,0],lattice='cubic'),
|
||||
dict(rotation=[1,0,0,0],lattice='hexagonal')),
|
||||
(dict(rotation=[1,0,0,0],family='cubic'),
|
||||
dict(rotation=[1,0,0,0],family='hexagonal')),
|
||||
])
|
||||
def test_unequal_family(self,a,b):
|
||||
assert Orientation(**a) != Orientation(**b)
|
||||
|
||||
@pytest.mark.parametrize('a,b',[
|
||||
(dict(rotation=[1,0,0,0],lattice='cF',a=1),
|
||||
dict(rotation=[1,0,0,0],lattice='cF',a=2)),
|
||||
])
|
||||
def test_nonequal(self,a,b):
|
||||
def test_unequal_lattice(self,a,b):
|
||||
assert Orientation(**a) != Orientation(**b)
|
||||
|
||||
@pytest.mark.parametrize('kwargs',[
|
||||
|
@ -72,7 +75,17 @@ class TestOrientation:
    ])
    def test_invalid_init(self,kwargs):
        with pytest.raises(ValueError):
            Orientation(**kwargs).parameters # noqa
            Orientation(**kwargs)

    @pytest.mark.parametrize('invalid_family',[None,'fcc','bcc','hello'])
    def test_invalid_family_init(self,invalid_family):
        with pytest.raises(KeyError):
            Orientation(family=invalid_family)

    @pytest.mark.parametrize('invalid_lattice',[None,'fcc','bcc','hello'])
    def test_invalid_lattice_init(self,invalid_lattice):
        with pytest.raises(KeyError):
            Orientation(lattice=invalid_lattice)

    @pytest.mark.parametrize('kwargs',[
        dict(lattice='aP',a=1.0,b=1.1,c=1.2,alpha=np.pi/4,beta=np.pi/3,gamma=np.pi/2),
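These hunks replace the former catch-all lattice keyword with an explicit family keyword for crystal families, while lattice is reserved for Bravais lattices with cell parameters. A brief sketch of the two call styles exercised by the tests; the lattice parameter value is illustrative:

from damask import Orientation

o_family  = Orientation(family='cubic')        # symmetry only, no cell parameters
o_lattice = Orientation(lattice='cF', a=1.0)   # Bravais lattice with lattice parameter

# invalid names raise, as covered by test_invalid_family_init / test_invalid_lattice_init:
# Orientation(family='fcc')   -> KeyError
# Orientation(lattice='fcc')  -> KeyError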
@ -100,47 +113,47 @@ class TestOrientation:
|
|||
assert o != p
|
||||
|
||||
def test_from_quaternion(self):
|
||||
assert np.all(Orientation.from_quaternion(q=np.array([1,0,0,0]),lattice='triclinic').as_matrix()
|
||||
assert np.all(Orientation.from_quaternion(q=np.array([1,0,0,0]),family='triclinic').as_matrix()
|
||||
== np.eye(3))
|
||||
|
||||
def test_from_Euler_angles(self):
|
||||
assert np.all(Orientation.from_Euler_angles(phi=np.zeros(3),lattice='triclinic').as_matrix()
|
||||
assert np.all(Orientation.from_Euler_angles(phi=np.zeros(3),family='triclinic').as_matrix()
|
||||
== np.eye(3))
|
||||
|
||||
def test_from_axis_angle(self):
|
||||
assert np.all(Orientation.from_axis_angle(axis_angle=[1,0,0,0],lattice='triclinic').as_matrix()
|
||||
assert np.all(Orientation.from_axis_angle(axis_angle=[1,0,0,0],family='triclinic').as_matrix()
|
||||
== np.eye(3))
|
||||
|
||||
def test_from_basis(self):
|
||||
assert np.all(Orientation.from_basis(basis=np.eye(3),lattice='triclinic').as_matrix()
|
||||
assert np.all(Orientation.from_basis(basis=np.eye(3),family='triclinic').as_matrix()
|
||||
== np.eye(3))
|
||||
|
||||
def test_from_matrix(self):
|
||||
assert np.all(Orientation.from_matrix(R=np.eye(3),lattice='triclinic').as_matrix()
|
||||
assert np.all(Orientation.from_matrix(R=np.eye(3),family='triclinic').as_matrix()
|
||||
== np.eye(3))
|
||||
|
||||
def test_from_Rodrigues_vector(self):
|
||||
assert np.all(Orientation.from_Rodrigues_vector(rho=np.array([0,0,1,0]),lattice='triclinic').as_matrix()
|
||||
assert np.all(Orientation.from_Rodrigues_vector(rho=np.array([0,0,1,0]),family='triclinic').as_matrix()
|
||||
== np.eye(3))
|
||||
|
||||
def test_from_homochoric(self):
|
||||
assert np.all(Orientation.from_homochoric(h=np.zeros(3),lattice='triclinic').as_matrix()
|
||||
assert np.all(Orientation.from_homochoric(h=np.zeros(3),family='triclinic').as_matrix()
|
||||
== np.eye(3))
|
||||
|
||||
def test_from_cubochoric(self):
|
||||
assert np.all(Orientation.from_cubochoric(x=np.zeros(3),lattice='triclinic').as_matrix()
|
||||
assert np.all(Orientation.from_cubochoric(x=np.zeros(3),family='triclinic').as_matrix()
|
||||
== np.eye(3))
|
||||
|
||||
def test_from_spherical_component(self):
|
||||
assert np.all(Orientation.from_spherical_component(center=Rotation(),
|
||||
sigma=0.0,N=1,lattice='triclinic').as_matrix()
|
||||
sigma=0.0,N=1,family='triclinic').as_matrix()
|
||||
== np.eye(3))
|
||||
|
||||
def test_from_fiber_component(self):
|
||||
r = Rotation.from_fiber_component(alpha=np.zeros(2),beta=np.zeros(2),
|
||||
sigma=0.0,N=1,rng_seed=0)
|
||||
assert np.all(Orientation.from_fiber_component(alpha=np.zeros(2),beta=np.zeros(2),
|
||||
sigma=0.0,N=1,rng_seed=0,lattice='triclinic').quaternion
|
||||
sigma=0.0,N=1,rng_seed=0,family='triclinic').quaternion
|
||||
== r.quaternion)
|
||||
|
||||
@pytest.mark.parametrize('kwargs',[
|
||||
|
@ -185,26 +198,35 @@ class TestOrientation:
|
|||
with pytest.raises(ValueError):
|
||||
Orientation(lattice='aP',a=1,b=2,c=3,alpha=45,beta=45,gamma=90.0001,degrees=True) # noqa
|
||||
|
||||
@pytest.mark.parametrize('lattice',crystal_families)
|
||||
@pytest.mark.parametrize('family',crystal_families)
|
||||
@pytest.mark.parametrize('angle',[10,20,30,40])
|
||||
def test_average(self,angle,lattice):
|
||||
o = Orientation.from_axis_angle(lattice=lattice,axis_angle=[[0,0,1,10],[0,0,1,angle]],degrees=True)
|
||||
def test_average(self,angle,family):
|
||||
o = Orientation.from_axis_angle(family=family,axis_angle=[[0,0,1,10],[0,0,1,angle]],degrees=True)
|
||||
avg_angle = o.average().as_axis_angle(degrees=True,pair=True)[1]
|
||||
assert np.isclose(avg_angle,10+(angle-10)/2.)
|
||||
|
||||
@pytest.mark.parametrize('lattice',crystal_families)
|
||||
def test_reduced_equivalent(self,lattice):
|
||||
i = Orientation(lattice=lattice)
|
||||
o = Orientation.from_random(lattice=lattice)
|
||||
@pytest.mark.parametrize('family',crystal_families)
|
||||
def test_reduced_equivalent(self,family):
|
||||
i = Orientation(family=family)
|
||||
o = Orientation.from_random(family=family)
|
||||
eq = o.equivalent
|
||||
FZ = np.argmin(abs(eq.misorientation(i.broadcast_to(len(eq))).as_axis_angle(pair=True)[1]))
|
||||
assert o.reduced == eq[FZ]
|
||||
|
||||
@pytest.mark.parametrize('lattice',crystal_families)
|
||||
@pytest.mark.parametrize('family',crystal_families)
|
||||
def test_reduced_corner_cases(self,family):
|
||||
# test whether there is always a sym-eq rotation that falls into the FZ
|
||||
N = np.random.randint(10,40)
|
||||
size = np.ones(3)*np.pi**(2./3.)
|
||||
grid = grid_filters.coordinates0_node([N+1,N+1,N+1],size,-size*.5)
|
||||
evenly_distributed = Orientation.from_cubochoric(x=grid[:-2,:-2,:-2],family=family)
|
||||
assert evenly_distributed.shape == evenly_distributed.reduced.shape
|
||||
|
||||
@pytest.mark.parametrize('family',crystal_families)
|
||||
@pytest.mark.parametrize('N',[1,8,32])
|
||||
def test_disorientation(self,lattice,N):
|
||||
o = Orientation.from_random(lattice=lattice,shape=N)
|
||||
p = Orientation.from_random(lattice=lattice,shape=N)
|
||||
def test_disorientation(self,family,N):
|
||||
o = Orientation.from_random(family=family,shape=N)
|
||||
p = Orientation.from_random(family=family,shape=N)
|
||||
|
||||
d,ops = o.disorientation(p,return_operators=True)
|
||||
|
||||
|
@ -218,156 +240,30 @@ class TestOrientation:
|
|||
.misorientation(p[n].equivalent[ops[n][1]])
|
||||
.as_quaternion())
|
||||
|
||||
@pytest.mark.parametrize('lattice',crystal_families)
|
||||
@pytest.mark.parametrize('a,b',[
|
||||
((2,3,2),(2,3,2)),
|
||||
((2,2),(4,4)),
|
||||
((3,1),(1,3)),
|
||||
(None,None),
|
||||
])
|
||||
def test_disorientation_blending(self,lattice,a,b):
|
||||
o = Orientation.from_random(lattice=lattice,shape=a)
|
||||
p = Orientation.from_random(lattice=lattice,shape=b)
|
||||
blend = util.shapeblender(o.shape,p.shape)
|
||||
for loc in np.random.randint(0,blend,(10,len(blend))):
|
||||
assert o[tuple(loc[:len(o.shape)])].disorientation(p[tuple(loc[-len(p.shape):])]) \
|
||||
.isclose(o.disorientation(p)[tuple(loc)])
|
||||
|
||||
@pytest.mark.parametrize('lattice',crystal_families)
|
||||
def test_disorientation360(self,lattice):
|
||||
o_1 = Orientation(Rotation(),lattice)
|
||||
o_2 = Orientation.from_Euler_angles(lattice=lattice,phi=[360,0,0],degrees=True)
|
||||
@pytest.mark.parametrize('family',crystal_families)
|
||||
def test_disorientation360(self,family):
|
||||
o_1 = Orientation(Rotation(),family=family)
|
||||
o_2 = Orientation.from_Euler_angles(family=family,phi=[360,0,0],degrees=True)
|
||||
assert np.allclose((o_1.disorientation(o_2)).as_matrix(),np.eye(3))
|
||||
|
||||
@pytest.mark.parametrize('lattice',crystal_families)
|
||||
@pytest.mark.parametrize('shape',[(1),(2,3),(4,3,2)])
|
||||
def test_reduced_vectorization(self,lattice,shape):
|
||||
o = Orientation.from_random(lattice=lattice,shape=shape)
|
||||
for r, theO in zip(o.reduced.flatten(),o.flatten()):
|
||||
assert r == theO.reduced
|
||||
|
||||
@pytest.mark.parametrize('lattice',crystal_families)
|
||||
def test_reduced_corner_cases(self,lattice):
|
||||
# test whether there is always a sym-eq rotation that falls into the FZ
|
||||
N = np.random.randint(10,40)
|
||||
size = np.ones(3)*np.pi**(2./3.)
|
||||
grid = grid_filters.coordinates0_node([N+1,N+1,N+1],size,-size*.5)
|
||||
evenly_distributed = Orientation.from_cubochoric(x=grid[:-2,:-2,:-2],lattice=lattice)
|
||||
assert evenly_distributed.shape == evenly_distributed.reduced.shape
|
||||
|
||||
|
||||
@pytest.mark.parametrize('lattice',crystal_families)
|
||||
@pytest.mark.parametrize('shape',[(1),(2,3),(4,3,2)])
|
||||
@pytest.mark.parametrize('vector',np.array([[1,0,0],[1,2,3],[-1,1,-1]]))
|
||||
@pytest.mark.parametrize('proper',[True,False])
|
||||
def test_to_SST_vectorization(self,lattice,shape,vector,proper):
|
||||
o = Orientation.from_random(lattice=lattice,shape=shape)
|
||||
for r, theO in zip(o.to_SST(vector=vector,proper=proper).reshape((-1,3)),o.flatten()):
|
||||
assert np.allclose(r,theO.to_SST(vector=vector,proper=proper))
|
||||
|
||||
@pytest.mark.parametrize('lattice',crystal_families)
|
||||
@pytest.mark.parametrize('shape',[(1),(2,3),(4,3,2)])
|
||||
@pytest.mark.parametrize('vector',np.array([[1,0,0],[1,2,3],[-1,1,-1]]))
|
||||
@pytest.mark.parametrize('proper',[True,False])
|
||||
@pytest.mark.parametrize('in_SST',[True,False])
|
||||
def test_IPF_color_vectorization(self,lattice,shape,vector,proper,in_SST):
|
||||
o = Orientation.from_random(lattice=lattice,shape=shape)
|
||||
for r, theO in zip(o.IPF_color(vector,in_SST=in_SST,proper=proper).reshape((-1,3)),o.flatten()):
|
||||
assert np.allclose(r,theO.IPF_color(vector,in_SST=in_SST,proper=proper))
|
||||
|
||||
@pytest.mark.parametrize('lattice',crystal_families)
|
||||
@pytest.mark.parametrize('a,b',[
|
||||
((2,3,2),(2,3,2)),
|
||||
((2,2),(4,4)),
|
||||
((3,1),(1,3)),
|
||||
(None,(3,)),
|
||||
])
|
||||
def test_to_SST_blending(self,lattice,a,b):
|
||||
o = Orientation.from_random(lattice=lattice,shape=a)
|
||||
v = np.random.random(b+(3,))
|
||||
blend = util.shapeblender(o.shape,b)
|
||||
for loc in np.random.randint(0,blend,(10,len(blend))):
|
||||
print(f'{a}/{b} @ {loc}')
|
||||
print(o[tuple(loc[:len(o.shape)])].to_SST(v[tuple(loc[-len(b):])]))
|
||||
print(o.to_SST(v)[tuple(loc)])
|
||||
assert np.allclose(o[tuple(loc[:len(o.shape)])].to_SST(v[tuple(loc[-len(b):])]),
|
||||
o.to_SST(v)[tuple(loc)])
|
||||
|
||||
@pytest.mark.parametrize('color',[{'label':'red', 'RGB':[1,0,0],'direction':[0,0,1]},
|
||||
{'label':'green','RGB':[0,1,0],'direction':[0,1,1]},
|
||||
{'label':'blue', 'RGB':[0,0,1],'direction':[1,1,1]}])
|
||||
@pytest.mark.parametrize('proper',[True,False])
|
||||
def test_IPF_cubic(self,color,proper):
|
||||
cube = Orientation(lattice='cubic')
|
||||
cube = Orientation(family='cubic')
|
||||
for direction in set(permutations(np.array(color['direction']))):
|
||||
assert np.allclose(np.array(color['RGB']),
|
||||
cube.IPF_color(vector=np.array(direction),proper=proper))
|
||||
|
||||
@pytest.mark.parametrize('lattice',crystal_families)
|
||||
@pytest.mark.parametrize('family',crystal_families)
|
||||
@pytest.mark.parametrize('proper',[True,False])
|
||||
def test_IPF_equivalent(self,set_of_quaternions,lattice,proper):
|
||||
def test_IPF_equivalent(self,set_of_quaternions,family,proper):
|
||||
direction = np.random.random(3)*2.0-1.0
|
||||
o = Orientation(rotation=set_of_quaternions,lattice=lattice).equivalent
|
||||
o = Orientation(rotation=set_of_quaternions,family=family).equivalent
|
||||
color = o.IPF_color(vector=direction,proper=proper)
|
||||
assert np.allclose(np.broadcast_to(color[0,...],color.shape),color)
|
||||
|
||||
@pytest.mark.parametrize('lattice',crystal_families)
|
||||
def test_in_FZ_vectorization(self,set_of_rodrigues,lattice):
|
||||
result = Orientation.from_Rodrigues_vector(rho=set_of_rodrigues.reshape((-1,4,4)),lattice=lattice).in_FZ.reshape(-1)
|
||||
for r,rho in zip(result,set_of_rodrigues[:len(result)]):
|
||||
assert r == Orientation.from_Rodrigues_vector(rho=rho,lattice=lattice).in_FZ
|
||||
|
||||
@pytest.mark.parametrize('lattice',crystal_families)
|
||||
def test_in_disorientation_FZ_vectorization(self,set_of_rodrigues,lattice):
|
||||
result = Orientation.from_Rodrigues_vector(rho=set_of_rodrigues.reshape((-1,4,4)),
|
||||
lattice=lattice).in_disorientation_FZ.reshape(-1)
|
||||
for r,rho in zip(result,set_of_rodrigues[:len(result)]):
|
||||
assert r == Orientation.from_Rodrigues_vector(rho=rho,lattice=lattice).in_disorientation_FZ
|
||||
|
||||
@pytest.mark.parametrize('proper',[True,False])
|
||||
@pytest.mark.parametrize('lattice',crystal_families)
|
||||
def test_in_SST_vectorization(self,lattice,proper):
|
||||
vecs = np.random.rand(20,4,3)
|
||||
result = Orientation(lattice=lattice).in_SST(vecs,proper).flatten()
|
||||
for r,v in zip(result,vecs.reshape((-1,3))):
|
||||
assert np.all(r == Orientation(lattice=lattice).in_SST(v,proper))
|
||||
|
||||
@pytest.mark.parametrize('invalid_lattice',['fcc','bcc','hello'])
|
||||
def test_invalid_lattice_init(self,invalid_lattice):
|
||||
with pytest.raises(KeyError):
|
||||
Orientation(lattice=invalid_lattice) # noqa
|
||||
|
||||
@pytest.mark.parametrize('invalid_family',[None,'fcc','bcc','hello'])
|
||||
def test_invalid_symmetry_family(self,invalid_family):
|
||||
with pytest.raises(KeyError):
|
||||
o = Orientation(lattice='cubic')
|
||||
o.family = invalid_family
|
||||
o.symmetry_operations # noqa
|
||||
|
||||
def test_invalid_rot(self):
|
||||
with pytest.raises(TypeError):
|
||||
Orientation.from_random(lattice='cubic') * np.ones(3)
|
||||
|
||||
def test_missing_symmetry_immutable(self):
|
||||
with pytest.raises(KeyError):
|
||||
Orientation(lattice=None).immutable # noqa
|
||||
|
||||
def test_missing_symmetry_basis_real(self):
|
||||
with pytest.raises(KeyError):
|
||||
Orientation(lattice=None).basis_real # noqa
|
||||
|
||||
def test_missing_symmetry_basis_reciprocal(self):
|
||||
with pytest.raises(KeyError):
|
||||
Orientation(lattice=None).basis_reciprocal # noqa
|
||||
|
||||
def test_double_to_lattice(self):
|
||||
with pytest.raises(KeyError):
|
||||
Orientation().to_lattice(direction=np.ones(3),plane=np.ones(3)) # noqa
|
||||
|
||||
def test_double_to_frame(self):
|
||||
with pytest.raises(KeyError):
|
||||
Orientation().to_frame(uvw=np.ones(3),hkl=np.ones(3)) # noqa
|
||||
|
||||
@pytest.mark.parametrize('relation',[None,'Peter','Paul'])
|
||||
def test_unknown_relation(self,relation):
|
||||
with pytest.raises(KeyError):
|
||||
|
@ -388,29 +284,17 @@ class TestOrientation:
|
|||
a=a,b=b,c=c,
|
||||
alpha=alpha,beta=beta,gamma=gamma).related(relation) # noqa
|
||||
|
||||
@pytest.mark.parametrize('lattice',crystal_families)
|
||||
@pytest.mark.parametrize('family',crystal_families)
|
||||
@pytest.mark.parametrize('proper',[True,False])
|
||||
def test_in_SST(self,lattice,proper):
|
||||
assert Orientation(lattice=lattice).in_SST(np.zeros(3),proper)
|
||||
def test_in_SST(self,family,proper):
|
||||
assert Orientation(family=family).in_SST(np.zeros(3),proper)
|
||||
|
||||
@pytest.mark.parametrize('function',['in_SST','IPF_color'])
|
||||
def test_invalid_argument(self,function):
|
||||
o = Orientation(lattice='cubic') # noqa
|
||||
o = Orientation(family='cubic') # noqa
|
||||
with pytest.raises(ValueError):
|
||||
eval(f'o.{function}(np.ones(4))')
|
||||
|
||||
@pytest.mark.parametrize('model',lattice.relations)
|
||||
def test_relationship_definition(self,model):
|
||||
m,o = list(lattice.relations[model])
|
||||
assert lattice.relations[model][m].shape[:-1] == lattice.relations[model][o].shape[:-1]
|
||||
|
||||
@pytest.mark.parametrize('model',['Bain','KS','GT','GT_prime','NW','Pitsch'])
|
||||
@pytest.mark.parametrize('lattice',['cF','cI'])
|
||||
def test_relationship_vectorize(self,set_of_quaternions,lattice,model):
|
||||
r = Orientation(rotation=set_of_quaternions[:200].reshape((50,4,4)),lattice=lattice).related(model)
|
||||
for i in range(200):
|
||||
assert (r.reshape((-1,200))[:,i] == Orientation(set_of_quaternions[i],lattice).related(model)).all()
|
||||
|
||||
@pytest.mark.parametrize('model',['Bain','KS','GT','GT_prime','NW','Pitsch'])
|
||||
@pytest.mark.parametrize('lattice',['cF','cI'])
|
||||
def test_relationship_forward_backward(self,model,lattice):
|
||||
|
@ -444,30 +328,6 @@ class TestOrientation:
|
|||
)
|
||||
assert np.allclose(o.to_frame(uvw=np.eye(3)),basis), 'Lattice basis disagrees with initialization'
|
||||
|
||||
@pytest.mark.parametrize('lattice,a,b,c,alpha,beta,gamma',
|
||||
[
|
||||
('aP',0.5,2.0,3.0,0.8,0.5,1.2),
|
||||
('mP',1.0,2.0,3.0,np.pi/2,0.5,np.pi/2),
|
||||
('oI',0.5,1.5,3.0,np.pi/2,np.pi/2,np.pi/2),
|
||||
('tP',0.5,0.5,3.0,np.pi/2,np.pi/2,np.pi/2),
|
||||
('hP',1.0,None,1.6,np.pi/2,np.pi/2,2*np.pi/3),
|
||||
('cF',1.0,1.0,None,np.pi/2,np.pi/2,np.pi/2),
|
||||
])
|
||||
def test_bases_contraction(self,lattice,a,b,c,alpha,beta,gamma):
|
||||
L = Orientation(lattice=lattice,
|
||||
a=a,b=b,c=c,
|
||||
alpha=alpha,beta=beta,gamma=gamma)
|
||||
assert np.allclose(np.eye(3),np.einsum('ik,jk',L.basis_real,L.basis_reciprocal))
|
||||
|
||||
@pytest.mark.parametrize('keyFrame,keyLattice',[('uvw','direction'),('hkl','plane'),])
|
||||
@pytest.mark.parametrize('vector',np.array([
|
||||
[1.,1.,1.],
|
||||
[-2.,3.,0.5],
|
||||
[0.,0.,1.],
|
||||
[1.,1.,1.],
|
||||
[2.,2.,2.],
|
||||
[0.,1.,1.],
|
||||
]))
|
||||
@pytest.mark.parametrize('lattice,a,b,c,alpha,beta,gamma',
|
||||
[
|
||||
('aP',0.5,2.0,3.0,0.8,0.5,1.2),
|
||||
|
@ -477,23 +337,6 @@ class TestOrientation:
|
|||
('hP',1.0,1.0,1.6,np.pi/2,np.pi/2,2*np.pi/3),
|
||||
('cF',1.0,1.0,1.0,np.pi/2,np.pi/2,np.pi/2),
|
||||
])
|
||||
def test_to_frame_to_lattice(self,lattice,a,b,c,alpha,beta,gamma,vector,keyFrame,keyLattice):
|
||||
L = Orientation(lattice=lattice,
|
||||
a=a,b=b,c=c,
|
||||
alpha=alpha,beta=beta,gamma=gamma)
|
||||
assert np.allclose(vector,
|
||||
L.to_frame(**{keyFrame:L.to_lattice(**{keyLattice:vector})}))
|
||||
|
||||
@pytest.mark.parametrize('lattice,a,b,c,alpha,beta,gamma',
|
||||
[
|
||||
('aP',0.5,2.0,3.0,0.8,0.5,1.2),
|
||||
('mP',1.0,2.0,3.0,np.pi/2,0.5,np.pi/2),
|
||||
('oI',0.5,1.5,3.0,np.pi/2,np.pi/2,np.pi/2),
|
||||
('tP',0.5,0.5,3.0,np.pi/2,np.pi/2,np.pi/2),
|
||||
('hP',1.0,1.0,1.6,np.pi/2,np.pi/2,2*np.pi/3),
|
||||
('cF',1.0,1.0,1.0,np.pi/2,np.pi/2,np.pi/2),
|
||||
])
|
||||
|
||||
@pytest.mark.parametrize('kw',['uvw','hkl'])
|
||||
@pytest.mark.parametrize('with_symmetry',[False,True])
|
||||
@pytest.mark.parametrize('shape',[None,1,(12,24)])
|
||||
|
@ -508,15 +351,157 @@ class TestOrientation:
                        a=a,b=b,c=c,
                        alpha=alpha,beta=beta,gamma=gamma)
        assert o.to_pole(**{kw:vector,'with_symmetry':with_symmetry}).shape \
            == o.shape + (o.symmetry_operations.shape if with_symmetry else ()) + vector.shape
            == o.shape + vector.shape[:-1] + (o.symmetry_operations.shape if with_symmetry else ()) + vector.shape[-1:]
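The corrected assertion above places the broadcast vector shape before the symmetry operations. A small worked example of the shape rule, assuming the formula asserted in the test (values, including the cubic count of 24 symmetry operations, are illustrative):

import numpy as np
from damask import Orientation

o = Orientation.from_random(shape=(4,6), lattice='cF', a=1.0)   # cubic: 24 symmetry operations
vector = np.random.random((12,24,3))

assert o.to_pole(uvw=vector, with_symmetry=True ).shape == (4,6) + (12,24) + (24,) + (3,)
assert o.to_pole(uvw=vector, with_symmetry=False).shape == (4,6) + (12,24) + (3,)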
    @pytest.mark.parametrize('lattice',['hP','cI','cF'])
    @pytest.mark.parametrize('mode',['slip','twin'])
    def test_Schmid(self,update,ref_path,lattice,mode):
        L = Orientation(lattice=lattice)
    @pytest.mark.parametrize('lattice',['hP','cI','cF']) #tI not included yet
    def test_Schmid(self,update,ref_path,lattice):
        O = Orientation(lattice=lattice) # noqa
        for mode in ['slip','twin']:
            reference = ref_path/f'{lattice}_{mode}.txt'
            P = L.Schmid(mode)
            P = O.Schmid(N_slip='*') if mode == 'slip' else O.Schmid(N_twin='*')
            if update:
                table = Table(P.reshape(-1,9),{'Schmid':(3,3,)})
                table.save(reference)
            assert np.allclose(P,Table.load(reference).get('Schmid'))
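A short usage sketch of the reworked Schmid interface seen in the test above, where N_slip / N_twin select the systems and '*' requests all of them; this is an illustration, not part of the test suite:

import numpy as np
from damask import Orientation

O = Orientation(lattice='cF')          # fcc, identity rotation
P_slip = O.Schmid(N_slip='*')          # Schmid matrices of all slip systems
P_twin = O.Schmid(N_twin='*')          # Schmid matrices of all twin systems

# each Schmid matrix is the dyad of a unit shear direction and its plane normal,
# which are orthogonal, so the matrices are traceless
assert np.allclose(np.trace(P_slip, axis1=-2, axis2=-1), 0.0)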
### vectorization tests ###
|
||||
|
||||
@pytest.mark.parametrize('lattice',['hP','cI','cF']) # tI not included yet
|
||||
def test_Schmid_vectorization(self,lattice):
|
||||
O = Orientation.from_random(shape=4,lattice=lattice) # noqa
|
||||
for mode in ['slip','twin']:
|
||||
Ps = O.Schmid(N_slip='*') if mode == 'slip' else O.Schmid(N_twin='*')
|
||||
for i in range(4):
|
||||
P = O[i].Schmid(N_slip='*') if mode == 'slip' else O[i].Schmid(N_twin='*')
|
||||
assert np.allclose(P,Ps[:,i])
|
||||
|
||||
@pytest.mark.parametrize('family',crystal_families)
|
||||
@pytest.mark.parametrize('shape',[(1),(2,3),(4,3,2)])
|
||||
def test_reduced_vectorization(self,family,shape):
|
||||
o = Orientation.from_random(family=family,shape=shape)
|
||||
for r, theO in zip(o.reduced.flatten(),o.flatten()):
|
||||
assert r == theO.reduced
|
||||
|
||||
|
||||
@pytest.mark.parametrize('family',crystal_families)
|
||||
@pytest.mark.parametrize('shape',[(1),(2,3),(4,3,2)])
|
||||
@pytest.mark.parametrize('vector',np.array([[1,0,0],[1,2,3],[-1,1,-1]]))
|
||||
@pytest.mark.parametrize('proper',[True,False])
|
||||
def test_to_SST_vectorization(self,family,shape,vector,proper):
|
||||
o = Orientation.from_random(family=family,shape=shape)
|
||||
for r, theO in zip(o.to_SST(vector=vector,proper=proper).reshape((-1,3)),o.flatten()):
|
||||
assert np.allclose(r,theO.to_SST(vector=vector,proper=proper))
|
||||
|
||||
@pytest.mark.parametrize('proper',[True,False])
|
||||
@pytest.mark.parametrize('family',crystal_families)
|
||||
def test_in_SST_vectorization(self,family,proper):
|
||||
vecs = np.random.rand(20,4,3)
|
||||
result = Orientation(family=family).in_SST(vecs,proper).flatten()
|
||||
for r,v in zip(result,vecs.reshape((-1,3))):
|
||||
assert np.all(r == Orientation(family=family).in_SST(v,proper))
|
||||
|
||||
@pytest.mark.parametrize('family',crystal_families)
|
||||
@pytest.mark.parametrize('shape',[(1),(2,3),(4,3,2)])
|
||||
@pytest.mark.parametrize('vector',np.array([[1,0,0],[1,2,3],[-1,1,-1]]))
|
||||
@pytest.mark.parametrize('proper',[True,False])
|
||||
@pytest.mark.parametrize('in_SST',[True,False])
|
||||
def test_IPF_color_vectorization(self,family,shape,vector,proper,in_SST):
|
||||
o = Orientation.from_random(family=family,shape=shape)
|
||||
for r, theO in zip(o.IPF_color(vector,in_SST=in_SST,proper=proper).reshape((-1,3)),o.flatten()):
|
||||
assert np.allclose(r,theO.IPF_color(vector,in_SST=in_SST,proper=proper))
|
||||
|
||||
@pytest.mark.parametrize('family',crystal_families)
|
||||
def test_in_FZ_vectorization(self,set_of_rodrigues,family):
|
||||
result = Orientation.from_Rodrigues_vector(rho=set_of_rodrigues.reshape((-1,4,4)),family=family).in_FZ.reshape(-1)
|
||||
for r,rho in zip(result,set_of_rodrigues[:len(result)]):
|
||||
assert r == Orientation.from_Rodrigues_vector(rho=rho,family=family).in_FZ
|
||||
|
||||
@pytest.mark.parametrize('family',crystal_families)
|
||||
def test_in_disorientation_FZ_vectorization(self,set_of_rodrigues,family):
|
||||
result = Orientation.from_Rodrigues_vector(rho=set_of_rodrigues.reshape((-1,4,4)),
|
||||
family=family).in_disorientation_FZ.reshape(-1)
|
||||
for r,rho in zip(result,set_of_rodrigues[:len(result)]):
|
||||
assert r == Orientation.from_Rodrigues_vector(rho=rho,family=family).in_disorientation_FZ
|
||||
|
||||
@pytest.mark.parametrize('model',['Bain','KS','GT','GT_prime','NW','Pitsch'])
|
||||
@pytest.mark.parametrize('lattice',['cF','cI'])
|
||||
def test_relationship_vectorization(self,set_of_quaternions,lattice,model):
|
||||
r = Orientation(rotation=set_of_quaternions[:200].reshape((50,4,4)),lattice=lattice).related(model)
|
||||
for i in range(200):
|
||||
assert (r.reshape((-1,200))[:,i] == Orientation(set_of_quaternions[i],lattice=lattice).related(model)).all()
|
||||
|
||||
### blending tests ###
|
||||
|
||||
@pytest.mark.parametrize('family',crystal_families)
|
||||
@pytest.mark.parametrize('left,right',[
|
||||
((2,3,2),(2,3,2)),
|
||||
((2,2),(4,4)),
|
||||
((3,1),(1,3)),
|
||||
(None,None),
|
||||
])
|
||||
def test_disorientation_blending(self,family,left,right):
|
||||
o = Orientation.from_random(family=family,shape=left)
|
||||
p = Orientation.from_random(family=family,shape=right)
|
||||
blend = util.shapeblender(o.shape,p.shape)
|
||||
for loc in np.random.randint(0,blend,(10,len(blend))):
|
||||
# print(f'{a}/{b} @ {loc}')
|
||||
# print(o[tuple(loc[:len(o.shape)])].disorientation(p[tuple(loc[-len(p.shape):])]))
|
||||
# print(o.disorientation(p)[tuple(loc)])
|
||||
assert o[tuple(loc[:len(o.shape)])].disorientation(p[tuple(loc[-len(p.shape):])]) \
|
||||
.isclose(o.disorientation(p)[tuple(loc)])
|
||||
|
||||
@pytest.mark.parametrize('family',crystal_families)
|
||||
@pytest.mark.parametrize('left,right',[
|
||||
((2,3,2),(2,3,2)),
|
||||
((2,2),(4,4)),
|
||||
((3,1),(1,3)),
|
||||
(None,(3,)),
|
||||
])
|
||||
def test_IPF_color_blending(self,family,left,right):
|
||||
o = Orientation.from_random(family=family,shape=left)
|
||||
v = np.random.random(right+(3,))
|
||||
blend = util.shapeblender(o.shape,v.shape[:-1])
|
||||
for loc in np.random.randint(0,blend,(10,len(blend))):
|
||||
assert np.allclose(o[tuple(loc[:len(o.shape)])].IPF_color(v[tuple(loc[-len(v.shape[:-1]):])]),
|
||||
o.IPF_color(v)[tuple(loc)])
|
||||
|
||||
@pytest.mark.parametrize('family',crystal_families)
|
||||
@pytest.mark.parametrize('left,right',[
|
||||
((2,3,2),(2,3,2)),
|
||||
((2,2),(4,4)),
|
||||
((3,1),(1,3)),
|
||||
(None,(3,)),
|
||||
])
|
||||
def test_to_SST_blending(self,family,left,right):
|
||||
o = Orientation.from_random(family=family,shape=left)
|
||||
v = np.random.random(right+(3,))
|
||||
blend = util.shapeblender(o.shape,v.shape[:-1])
|
||||
for loc in np.random.randint(0,blend,(10,len(blend))):
|
||||
assert np.allclose(o[tuple(loc[:len(o.shape)])].to_SST(v[tuple(loc[-len(v.shape[:-1]):])]),
|
||||
o.to_SST(v)[tuple(loc)])
|
||||
|
||||
@pytest.mark.parametrize('lattice,a,b,c,alpha,beta,gamma',
|
||||
[
|
||||
('aP',0.5,2.0,3.0,0.8,0.5,1.2),
|
||||
('mP',1.0,2.0,3.0,np.pi/2,0.5,np.pi/2),
|
||||
('oI',0.5,1.5,3.0,np.pi/2,np.pi/2,np.pi/2),
|
||||
('tP',0.5,0.5,3.0,np.pi/2,np.pi/2,np.pi/2),
|
||||
('hP',1.0,1.0,1.6,np.pi/2,np.pi/2,2*np.pi/3),
|
||||
('cF',1.0,1.0,1.0,np.pi/2,np.pi/2,np.pi/2),
|
||||
])
|
||||
@pytest.mark.parametrize('left,right',[
|
||||
((2,3,2),(2,3,2)),
|
||||
((2,2),(4,4)),
|
||||
((3,1),(1,3)),
|
||||
(None,(3,)),
|
||||
])
|
||||
def test_to_pole_blending(self,lattice,a,b,c,alpha,beta,gamma,left,right):
|
||||
o = Orientation.from_random(shape=left,
|
||||
lattice=lattice,
|
||||
a=a,b=b,c=c,
|
||||
alpha=alpha,beta=beta,gamma=gamma)
|
||||
v = np.random.random(right+(3,))
|
||||
blend = util.shapeblender(o.shape,v.shape[:-1])
|
||||
for loc in np.random.randint(0,blend,(10,len(blend))):
|
||||
assert np.allclose(o[tuple(loc[:len(o.shape)])].to_pole(uvw=v[tuple(loc[-len(v.shape[:-1]):])]),
|
||||
o.to_pole(uvw=v)[tuple(loc)])
|
||||
|
|
|
@ -12,12 +12,13 @@ import vtk
|
|||
import numpy as np
|
||||
|
||||
from damask import Result
|
||||
from damask import Rotation
|
||||
from damask import Orientation
|
||||
from damask import VTK
|
||||
from damask import tensor
|
||||
from damask import mechanics
|
||||
from damask import grid_filters
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def default(tmp_path,ref_path):
|
||||
"""Small Result file in temp location for modification."""
|
||||
|
@ -107,7 +108,8 @@ class TestResult:
|
|||
in_file = default.place('|F_e|')
|
||||
assert np.allclose(in_memory,in_file)
|
||||
|
||||
@pytest.mark.parametrize('mode',['direct','function'])
|
||||
@pytest.mark.parametrize('mode',
|
||||
['direct',pytest.param('function',marks=pytest.mark.xfail(sys.platform=='darwin',reason='n/a'))])
|
||||
def test_add_calculation(self,default,tmp_path,mode):
|
||||
|
||||
if mode == 'direct':
|
||||
|
@ -124,6 +126,10 @@ class TestResult:
|
|||
in_file = default.place('x')
|
||||
assert np.allclose(in_memory,in_file)
|
||||
|
||||
def test_add_calculation_invalid(self,default):
|
||||
default.add_calculation('np.linalg.norm(#F#,axis=0)','wrong_dim')
|
||||
assert default.get('wrong_dim') is None
|
||||
|
||||
def test_add_stress_Cauchy(self,default):
|
||||
default.add_stress_Cauchy('P','F')
|
||||
in_memory = mechanics.stress_Cauchy(default.place('P'), default.place('F'))
|
||||
|
@ -220,17 +226,16 @@ class TestResult:
        in_file = default.place('S')
        assert np.allclose(in_memory,in_file)

    @pytest.mark.skip(reason='requires rework of lattice.f90')
    @pytest.mark.parametrize('polar',[True,False])
    def test_add_pole(self,default,polar):
        pole = np.array([1.,0.,0.])
        default.add_pole('O',pole,polar)
        rot = Rotation(default.place('O'))
        rotated_pole = rot * np.broadcast_to(pole,rot.shape+(3,))
        xy = rotated_pole[:,0:2]/(1.+abs(pole[2]))
        in_memory = xy if not polar else \
                    np.block([np.sqrt(xy[:,0:1]*xy[:,0:1]+xy[:,1:2]*xy[:,1:2]),np.arctan2(xy[:,1:2],xy[:,0:1])])
        in_file = default.place('p^{}_[1 0 0)'.format(u'rφ' if polar else 'xy'))
    @pytest.mark.parametrize('options',[{'uvw':[1,0,0],'with_symmetry':False},
                                        {'hkl':[0,1,1],'with_symmetry':True}])
    def test_add_pole(self,default,options):
        default.add_pole(**options)
        rot = default.place('O')
        in_memory = Orientation(rot,lattice=rot.dtype.metadata['lattice']).to_pole(**options)
        brackets = ['[[]','[]]'] if 'uvw' in options.keys() else ['(',')'] # escape fnmatch
        label = '{}{} {} {}{}'.format(brackets[0],*(list(options.values())[0]),brackets[1])
        in_file = default.place(f'p^{label}')
        print(in_file - in_memory)
        assert np.allclose(in_memory,in_file)
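The "escape fnmatch" comment above refers to the literal brackets in dataset labels such as p^[1 0 0]: the labels are apparently matched with fnmatch-style patterns, where '[' opens a character class, so a literal '[' is written as '[[]' and a literal ']' as '[]]'. A minimal sketch of the escaping, using only the standard library:

from fnmatch import fnmatch

label   = 'p^[1 0 0]'        # dataset name containing literal brackets
pattern = 'p^[[]1 0 0[]]'    # '[[]' matches '[', '[]]' matches ']'

assert fnmatch(label, pattern)
assert not fnmatch(label, 'p^[1 0 0]')   # unescaped: '[1 0 0]' is read as a character class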
def test_add_rotation(self,default):
|
||||
|
@ -266,10 +271,15 @@ class TestResult:
|
|||
in_file = default.place('V(F)')
|
||||
assert np.allclose(in_memory,in_file)
|
||||
|
||||
def test_add_invalid(self,default):
|
||||
def test_add_invalid_dataset(self,default):
|
||||
with pytest.raises(TypeError):
|
||||
default.add_calculation('#invalid#*2')
|
||||
|
||||
def test_add_generic_grid_invalid(self,ref_path):
|
||||
result = Result(ref_path/'4grains2x4x3_compressionY.hdf5')
|
||||
with pytest.raises(NotImplementedError):
|
||||
result.add_curl('F')
|
||||
|
||||
|
||||
@pytest.mark.parametrize('shape',['vector','tensor'])
|
||||
def test_add_curl(self,default,shape):
|
||||
|
@ -362,25 +372,20 @@ class TestResult:
|
|||
b = default.coordinates0_node.reshape(tuple(default.cells+1)+(3,),order='F')
|
||||
assert np.allclose(a,b)
|
||||
|
||||
# need to wait for writing in parallel, output order might change if selecting more than one
|
||||
@pytest.mark.parametrize('output',['F','*',['P']],ids=range(3))
|
||||
@pytest.mark.parametrize('output',['F','*',['P'],['P','F']],ids=range(4))
|
||||
@pytest.mark.parametrize('fname',['12grains6x7x8_tensionY.hdf5'],ids=range(1))
|
||||
@pytest.mark.parametrize('inc',[4,0],ids=range(2))
|
||||
@pytest.mark.xfail(int(vtk.vtkVersion.GetVTKVersion().split('.')[0])<9, reason='missing "Direction" attribute')
|
||||
def test_vtk(self,request,tmp_path,ref_path,update,patch_execution_stamp,patch_datetime_now,output,fname,inc):
|
||||
result = Result(ref_path/fname).view('increments',inc)
|
||||
os.chdir(tmp_path)
|
||||
result.export_VTK(output)
|
||||
result.export_VTK(output,parallel=False)
|
||||
fname = fname.split('.')[0]+f'_inc{(inc if type(inc) == int else inc[0]):0>2}.vti'
|
||||
last = ''
|
||||
for i in range(10):
|
||||
if os.path.isfile(tmp_path/fname):
|
||||
v = VTK.load(tmp_path/fname)
|
||||
v.set_comments('n/a')
|
||||
v.save(tmp_path/fname,parallel=False)
|
||||
with open(fname) as f:
|
||||
cur = hashlib.md5(f.read().encode()).hexdigest()
|
||||
if cur == last:
|
||||
break
|
||||
else:
|
||||
last = cur
|
||||
time.sleep(.5)
|
||||
if update:
|
||||
with open((ref_path/'export_VTK'/request.node.name).with_suffix('.md5'),'w') as f:
|
||||
f.write(cur)
|
||||
|
@ -405,6 +410,11 @@ class TestResult:
|
|||
os.chdir(tmp_path)
|
||||
single_phase.export_VTK(mode=mode)
|
||||
|
||||
def test_vtk_invalid_mode(self,single_phase):
|
||||
with pytest.raises(ValueError):
|
||||
single_phase.export_VTK(mode='invalid')
|
||||
|
||||
|
||||
def test_XDMF_datatypes(self,tmp_path,single_phase,update,ref_path):
|
||||
for shape in [('scalar',()),('vector',(3,)),('tensor',(3,3)),('matrix',(12,))]:
|
||||
for dtype in ['f4','f8','i1','i2','i4','i8','u1','u2','u4','u8']:
|
||||
|
@ -417,7 +427,8 @@ class TestResult:
|
|||
|
||||
assert sorted(open(tmp_path/fname).read()) == sorted(open(ref_path/fname).read()) # XML is not ordered
|
||||
|
||||
@pytest.mark.skipif(not hasattr(vtk,'vtkXdmfReader'),reason='https://discourse.vtk.org/t/2450')
|
||||
@pytest.mark.skipif(not (hasattr(vtk,'vtkXdmfReader') and hasattr(vtk.vtkXdmfReader(),'GetOutput')),
|
||||
reason='https://discourse.vtk.org/t/2450')
|
||||
def test_XDMF_shape(self,tmp_path,single_phase):
|
||||
os.chdir(tmp_path)
|
||||
|
||||
|
@ -429,19 +440,14 @@ class TestResult:
|
|||
dim_xdmf = reader_xdmf.GetOutput().GetDimensions()
|
||||
bounds_xdmf = reader_xdmf.GetOutput().GetBounds()
|
||||
|
||||
single_phase.view('increments',0).export_VTK()
|
||||
single_phase.view('increments',0).export_VTK(parallel=False)
|
||||
fname = os.path.splitext(os.path.basename(single_phase.fname))[0]+'_inc00.vti'
|
||||
for i in range(10): # waiting for parallel IO
|
||||
reader_vti = vtk.vtkXMLImageDataReader()
|
||||
reader_vti.SetFileName(fname)
|
||||
reader_vti.Update()
|
||||
dim_vti = reader_vti.GetOutput().GetDimensions()
|
||||
bounds_vti = reader_vti.GetOutput().GetBounds()
|
||||
if dim_vti == dim_xdmf and bounds_vti == bounds_xdmf:
|
||||
return
|
||||
time.sleep(.5)
|
||||
|
||||
assert False
|
||||
assert dim_vti == dim_xdmf and bounds_vti == bounds_xdmf
|
||||
|
||||
def test_XDMF_invalid(self,default):
|
||||
with pytest.raises(TypeError):
|
||||
|
@ -496,3 +502,14 @@ class TestResult:
|
|||
with bz2.BZ2File((ref_path/'place'/fname).with_suffix('.pbz2')) as f:
|
||||
ref = pickle.load(f)
|
||||
assert cur is None if ref is None else dict_equal(cur,ref)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('fname',['4grains2x4x3_compressionY.hdf5',
|
||||
'6grains6x7x8_single_phase_tensionY.hdf5'])
|
||||
@pytest.mark.parametrize('output',['material.yaml','*'])
|
||||
@pytest.mark.parametrize('overwrite',[True,False])
|
||||
def test_export_setup(self,ref_path,tmp_path,fname,output,overwrite):
|
||||
os.chdir(tmp_path)
|
||||
r = Result(ref_path/fname)
|
||||
r.export_setup(output,overwrite)
|
||||
r.export_setup(output,overwrite)
|
||||
|
|
|
@ -5,6 +5,7 @@ import time
|
|||
import pytest
|
||||
import numpy as np
|
||||
import numpy.ma as ma
|
||||
import vtk
|
||||
|
||||
from damask import VTK
|
||||
from damask import grid_filters
|
||||
|
@ -162,6 +163,7 @@ class TestVTK:
|
|||
new = VTK.load(tmp_path/'with_comments.vtr')
|
||||
assert new.get_comments() == ['this is a comment']
|
||||
|
||||
@pytest.mark.xfail(int(vtk.vtkVersion.GetVTKVersion().split('.')[0])<8, reason='missing METADATA')
|
||||
def test_compare_reference_polyData(self,update,ref_path,tmp_path):
|
||||
points=np.dstack((np.linspace(0.,1.,10),np.linspace(0.,2.,10),np.linspace(-1.,1.,10))).squeeze()
|
||||
polyData = VTK.from_poly_data(points)
|
||||
|
@ -173,14 +175,15 @@ class TestVTK:
|
|||
assert polyData.__repr__() == reference.__repr__() and \
|
||||
np.allclose(polyData.get('coordinates'),points)
|
||||
|
||||
@pytest.mark.xfail(int(vtk.vtkVersion.GetVTKVersion().split('.')[0])<8, reason='missing METADATA')
|
||||
def test_compare_reference_rectilinearGrid(self,update,ref_path,tmp_path):
|
||||
cells = np.array([5,6,7],int)
|
||||
size = np.array([.6,1.,.5])
|
||||
rectilinearGrid = VTK.from_rectilinear_grid(cells,size)
|
||||
c = grid_filters.coordinates0_point(cells,size).reshape(-1,3,order='F')
|
||||
n = grid_filters.coordinates0_node(cells,size).reshape(-1,3,order='F')
|
||||
rectilinearGrid.add(c,'cell')
|
||||
rectilinearGrid.add(n,'node')
|
||||
rectilinearGrid.add(np.ascontiguousarray(c),'cell')
|
||||
rectilinearGrid.add(np.ascontiguousarray(n),'node')
|
||||
if update:
|
||||
rectilinearGrid.save(ref_path/'rectilinearGrid')
|
||||
else:
|
||||
|
|
|
@ -1,34 +0,0 @@
|
|||
import pytest
|
||||
import numpy as np
|
||||
|
||||
from damask import lattice
|
||||
|
||||
class TestLattice:
|
||||
|
||||
def test_double_Bravais_to_Miller(self):
|
||||
with pytest.raises(KeyError):
|
||||
lattice.Bravais_to_Miller(uvtw=np.ones(4),hkil=np.ones(4)) # noqa
|
||||
|
||||
def test_double_Miller_to_Bravais(self):
|
||||
with pytest.raises(KeyError):
|
||||
lattice.Miller_to_Bravais(uvw=np.ones(4),hkl=np.ones(4)) # noqa
|
||||
|
||||
@pytest.mark.parametrize('vector',np.array([
|
||||
[1,0,0],
|
||||
[1,1,0],
|
||||
[1,1,1],
|
||||
[1,0,-2],
|
||||
]))
|
||||
@pytest.mark.parametrize('kw_Miller,kw_Bravais',[('uvw','uvtw'),('hkl','hkil')])
|
||||
def test_Miller_Bravais_Miller(self,vector,kw_Miller,kw_Bravais):
|
||||
assert np.all(vector == lattice.Bravais_to_Miller(**{kw_Bravais:lattice.Miller_to_Bravais(**{kw_Miller:vector})}))
|
||||
|
||||
@pytest.mark.parametrize('vector',np.array([
|
||||
[1,0,-1,2],
|
||||
[1,-1,0,3],
|
||||
[1,1,-2,-3],
|
||||
[0,0,0,1],
|
||||
]))
|
||||
@pytest.mark.parametrize('kw_Miller,kw_Bravais',[('uvw','uvtw'),('hkl','hkil')])
|
||||
def test_Bravais_Miller_Bravais(self,vector,kw_Miller,kw_Bravais):
|
||||
assert np.all(vector == lattice.Miller_to_Bravais(**{kw_Miller:lattice.Bravais_to_Miller(**{kw_Bravais:vector})}))
|
|
@ -19,9 +19,9 @@ class TestUtil:
|
|||
out,err = util.execute('sh -c "echo $test_for_execute"',env={'test_for_execute':'test'})
|
||||
assert out=='test\n' and err==''
|
||||
|
||||
def test_execute_invalid(self):
|
||||
def test_execute_runtime_error(self):
|
||||
with pytest.raises(RuntimeError):
|
||||
util.execute('/bin/false')
|
||||
util.execute('false')
|
||||
|
||||
@pytest.mark.parametrize('input,output',
|
||||
[
|
||||
|
@ -158,3 +158,33 @@ class TestUtil:
|
|||
({'A':{'B':{},'C':'D'}}, {'B':{},'C':'D'})])
|
||||
def test_flatten(self,full,reduced):
|
||||
assert util.dict_flatten(full) == reduced
|
||||
|
||||
|
||||
    def test_double_Bravais_to_Miller(self):
        with pytest.raises(KeyError):
            util.Bravais_to_Miller(uvtw=np.ones(4),hkil=np.ones(4))

    def test_double_Miller_to_Bravais(self):
        with pytest.raises(KeyError):
            util.Miller_to_Bravais(uvw=np.ones(4),hkl=np.ones(4))


    @pytest.mark.parametrize('vector',np.array([
                                                [1,0,0],
                                                [1,1,0],
                                                [1,1,1],
                                                [1,0,-2],
                                               ]))
    @pytest.mark.parametrize('kw_Miller,kw_Bravais',[('uvw','uvtw'),('hkl','hkil')])
    def test_Miller_Bravais_Miller(self,vector,kw_Miller,kw_Bravais):
        assert np.all(vector == util.Bravais_to_Miller(**{kw_Bravais:util.Miller_to_Bravais(**{kw_Miller:vector})}))

    @pytest.mark.parametrize('vector',np.array([
                                                [1,0,-1,2],
                                                [1,-1,0,3],
                                                [1,1,-2,-3],
                                                [0,0,0,1],
                                               ]))
    @pytest.mark.parametrize('kw_Miller,kw_Bravais',[('uvw','uvtw'),('hkl','hkil')])
    def test_Bravais_Miller_Bravais(self,vector,kw_Miller,kw_Bravais):
        assert np.all(vector == util.Miller_to_Bravais(**{kw_Miller:util.Bravais_to_Miller(**{kw_Bravais:vector})}))
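These round-trip tests were moved from the removed lattice test module to util. A quick usage example of the two helpers, using vectors from the parameter lists above:

import numpy as np
from damask import util

uvw  = np.array([1, 0, -2])
uvtw = util.Miller_to_Bravais(uvw=uvw)                    # four-index [uvtw] direction
assert np.all(uvw == util.Bravais_to_Miller(uvtw=uvtw))   # round trip as in test_Miller_Bravais_Miller

hkil = np.array([1, 0, -1, 2])
hkl  = util.Bravais_to_Miller(hkil=hkil)                  # three-index (hkl) plane normal
assert np.all(hkil == util.Miller_to_Bravais(hkl=hkl))    # round trip as in test_Bravais_Miller_Bravais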
|
@ -1,10 +1,10 @@
|
|||
# special flags for some files
|
||||
if (CMAKE_Fortran_COMPILER_ID STREQUAL "GNU")
|
||||
if(CMAKE_Fortran_COMPILER_ID STREQUAL "GNU")
|
||||
# long lines for interaction matrix
|
||||
SET_SOURCE_FILES_PROPERTIES("lattice.f90" PROPERTIES COMPILE_FLAGS "-ffree-line-length-240")
|
||||
endif()
|
||||
|
||||
file(GLOB damask-sources *.f90 *.c)
|
||||
file(GLOB damask-sources CONFIGURE_DEPENDS *.f90 *.c)
|
||||
|
||||
# probably we should have a subfolder for MSC.Marc
|
||||
list(FILTER damask-sources EXCLUDE REGEX ".*CPFEM.f90")
|
||||
|
@ -12,25 +12,24 @@ list(FILTER damask-sources EXCLUDE REGEX ".*DAMASK_Marc.*.f90")
|
|||
list(FILTER damask-sources EXCLUDE REGEX ".*commercialFEM_fileList.*.f90")
|
||||
|
||||
|
||||
if (PROJECT_NAME STREQUAL "damask-grid")
|
||||
if(PROJECT_NAME STREQUAL "damask-grid")
|
||||
set(executable-name "DAMASK_grid")
|
||||
file(GLOB solver-sources CONFIGURE_DEPENDS grid/*.f90)
|
||||
elseif(PROJECT_NAME STREQUAL "damask-mesh")
|
||||
set(executable-name "DAMASK_mesh")
|
||||
file(GLOB solver-sources CONFIGURE_DEPENDS mesh/*.f90)
|
||||
endif()
|
||||
|
||||
file(GLOB grid-sources grid/*.f90)
|
||||
if(NOT CMAKE_BUILD_TYPE STREQUAL "SYNTAXONLY")
|
||||
add_executable(${executable-name} ${damask-sources} ${solver-sources})
|
||||
install(TARGETS ${executable-name} RUNTIME DESTINATION bin)
|
||||
else()
|
||||
add_library(${executable-name} OBJECT ${damask-sources} ${solver-sources})
|
||||
exec_program(mktemp OUTPUT_VARIABLE nothing)
|
||||
exec_program(mktemp ARGS -d OUTPUT_VARIABLE black_hole)
|
||||
install(PROGRAMS ${nothing} DESTINATION ${black_hole})
|
||||
endif()
|
||||
|
||||
if (NOT CMAKE_BUILD_TYPE STREQUAL "SYNTAXONLY")
|
||||
add_executable(DAMASK_grid ${damask-sources} ${grid-sources})
|
||||
install (TARGETS DAMASK_grid RUNTIME DESTINATION bin)
|
||||
else ()
|
||||
add_library(DAMASK_grid OBJECT ${damask-sources} ${grid-sources})
|
||||
exec_program (mktemp OUTPUT_VARIABLE nothing)
|
||||
exec_program (mktemp ARGS -d OUTPUT_VARIABLE black_hole)
|
||||
install (PROGRAMS ${nothing} DESTINATION ${black_hole})
|
||||
endif ()
|
||||
|
||||
elseif (PROJECT_NAME STREQUAL "damask-mesh")
|
||||
|
||||
file(GLOB mesh-sources mesh/*.f90)
|
||||
|
||||
add_executable(DAMASK_mesh ${damask-sources} ${mesh-sources})
|
||||
install (TARGETS DAMASK_mesh RUNTIME DESTINATION bin)
|
||||
|
||||
endif ()
|
||||
string(REPLACE ";" "\n" sources "${damask-sources};${solver-sources}")
|
||||
message(${CMAKE_BINARY_DIR})
|
||||
file(WRITE ${CMAKE_BINARY_DIR}/sources.txt ${sources})
|
||||
|
|
|
@ -11,7 +11,7 @@
|
|||
!--------------------------------------------------------------------------------------------------
|
||||
#define PETSC_MAJOR 3
|
||||
#define PETSC_MINOR_MIN 12
|
||||
#define PETSC_MINOR_MAX 15
|
||||
#define PETSC_MINOR_MAX 16
|
||||
|
||||
module DAMASK_interface
|
||||
use, intrinsic :: ISO_fortran_env
|
||||
|
@ -102,6 +102,7 @@ subroutine DAMASK_interface_init
|
|||
print'(/,a)', ' Version: '//DAMASKVERSION
|
||||
|
||||
print'(/,a)', ' Compiled with: '//compiler_version()
|
||||
print'(a)', ' Compiled on: '//CMAKE_SYSTEM
|
||||
print'(a)', ' Compiler options: '//compiler_options()
|
||||
|
||||
! https://github.com/jeffhammond/HPCInfo/blob/master/docs/Preprocessor-Macros.md
|
||||
|
@ -189,10 +190,10 @@ subroutine DAMASK_interface_init
|
|||
if (len_trim(workingDirArg) > 0) &
|
||||
print'(a)', ' Working dir argument: '//trim(workingDirArg)
|
||||
print'(a)', ' Geometry argument: '//trim(geometryArg)
|
||||
print'(a)', ' Loadcase argument: '//trim(loadcaseArg)
|
||||
print'(a)', ' Load case argument: '//trim(loadcaseArg)
|
||||
print'(/,a)', ' Working directory: '//getCWD()
|
||||
print'(a)', ' Geometry file: '//interface_geomFile
|
||||
print'(a)', ' Loadcase file: '//interface_loadFile
|
||||
print'(a)', ' Load case file: '//interface_loadFile
|
||||
print'(a)', ' Solver job name: '//getSolverJobName()
|
||||
if (interface_restartInc > 0) &
|
||||
print'(a,i6.6)', ' Restart from increment: ', interface_restartInc
|
||||
|
|
|
@ -77,10 +77,12 @@ module HDF5_utilities
|
|||
end interface HDF5_addAttribute
|
||||
|
||||
#ifdef PETSC
|
||||
logical, parameter, private :: parallel_default = .true.
|
||||
logical, parameter :: parallel_default = .true.
|
||||
#else
|
||||
logical, parameter, private :: parallel_default = .false.
|
||||
logical, parameter :: parallel_default = .false.
|
||||
#endif
|
||||
logical :: compression_possible
|
||||
|
||||
public :: &
|
||||
HDF5_utilities_init, &
|
||||
HDF5_read, &
|
||||
|
@ -103,31 +105,38 @@ contains
|
|||
!--------------------------------------------------------------------------------------------------
|
||||
subroutine HDF5_utilities_init
|
||||
|
||||
integer :: hdferr
|
||||
integer :: hdferr, HDF5_major, HDF5_minor, HDF5_release, deflate_info
|
||||
integer(SIZE_T) :: typeSize
|
||||
|
||||
|
||||
print'(/,a)', ' <<<+- HDF5_Utilities init -+>>>'
|
||||
|
||||
!--------------------------------------------------------------------------------------------------
|
||||
!initialize HDF5 library and check if integer and float type size match
|
||||
|
||||
call h5open_f(hdferr)
|
||||
if(hdferr < 0) error stop 'HDF5 error'
|
||||
if (hdferr < 0) error stop 'HDF5 error'
|
||||
|
||||
call h5tget_size_f(H5T_NATIVE_INTEGER,typeSize, hdferr)
|
||||
if(hdferr < 0) error stop 'HDF5 error'
|
||||
if (hdferr < 0) error stop 'HDF5 error'
|
||||
if (int(bit_size(0),SIZE_T)/=typeSize*8) &
|
||||
error stop 'Default integer size does not match H5T_NATIVE_INTEGER'
|
||||
|
||||
call h5tget_size_f(H5T_NATIVE_DOUBLE,typeSize, hdferr)
|
||||
if(hdferr < 0) error stop 'HDF5 error'
|
||||
if (hdferr < 0) error stop 'HDF5 error'
|
||||
  if (int(storage_size(0.0_pReal),SIZE_T)/=typeSize*8) &
    error stop 'pReal does not match H5T_NATIVE_DOUBLE'

  call H5get_libversion_f(HDF5_major,HDF5_minor,HDF5_release,hdferr)
  if (hdferr < 0) error stop 'HDF5 error'
  call H5Zget_filter_info_f(H5Z_FILTER_DEFLATE_F,deflate_info,hdferr)
  if (hdferr < 0) error stop 'HDF5 error'
  compression_possible = (HDF5_major == 1 .and. HDF5_minor >= 12) .and. & ! https://forum.hdfgroup.org/t/6186
                         ior(H5Z_FILTER_ENCODE_ENABLED_F,deflate_info) > 0

end subroutine HDF5_utilities_init
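The new initialization code above checks the HDF5 library version and whether the deflate (zlib) filter is usable before enabling compression. For orientation only, a roughly equivalent check written in Python with h5py, not part of DAMASK and simplified with respect to the encode-enabled flag:

import h5py

major, minor, release = h5py.h5.get_libversion()              # HDF5 library version
deflate_ok = h5py.h5z.filter_avail(h5py.h5z.FILTER_DEFLATE)   # is the deflate filter available?
compression_possible = (major == 1 and minor >= 12) and deflate_ok

print(f'HDF5 {major}.{minor}.{release}, compression possible: {compression_possible}')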
|
||||
|
||||
!--------------------------------------------------------------------------------------------------
|
||||
!> @brief open and initializes HDF5 output file
|
||||
!> @brief Open and initialize HDF5 file.
|
||||
!--------------------------------------------------------------------------------------------------
|
||||
integer(HID_T) function HDF5_openFile(fileName,mode,parallel)
|
||||
|
||||
|
@ -314,12 +323,12 @@ subroutine HDF5_addAttribute_str(loc_id,attrLabel,attrValue,path)
|
|||
character(len=*), intent(in) :: attrLabel, attrValue
|
||||
character(len=*), intent(in), optional :: path
|
||||
|
||||
integer(HID_T) :: attr_id, space_id, type_id
|
||||
integer(HID_T) :: attr_id, space_id
|
||||
logical :: attrExists
|
||||
integer :: hdferr
|
||||
character(len=:), allocatable :: p
|
||||
character(len=:,kind=C_CHAR), allocatable,target :: attrValue_
|
||||
type(c_ptr), target, dimension(1) :: ptr
|
||||
character(len=len_trim(attrValue)+1,kind=C_CHAR), dimension(1), target :: attrValue_
|
||||
type(C_PTR), target, dimension(1) :: ptr
|
||||
|
||||
|
||||
if (present(path)) then
|
||||
|
@ -328,13 +337,11 @@ subroutine HDF5_addAttribute_str(loc_id,attrLabel,attrValue,path)
|
|||
p = '.'
|
||||
endif
|
||||
|
||||
attrValue_ = trim(attrValue)//C_NULL_CHAR
|
||||
ptr(1) = c_loc(attrValue_)
|
||||
attrValue_(1) = trim(attrValue)//C_NULL_CHAR
|
||||
ptr(1) = c_loc(attrValue_(1))
|
||||
|
||||
call h5screate_f(H5S_SCALAR_F,space_id,hdferr)
|
||||
if(hdferr < 0) error stop 'HDF5 error'
|
||||
call h5tcopy_f(H5T_STRING, type_id, hdferr)
|
||||
if(hdferr < 0) error stop 'HDF5 error'
|
||||
|
||||
call h5aexists_by_name_f(loc_id,trim(p),attrLabel,attrExists,hdferr)
|
||||
if(hdferr < 0) error stop 'HDF5 error'
|
||||
|
@ -342,15 +349,14 @@ subroutine HDF5_addAttribute_str(loc_id,attrLabel,attrValue,path)
|
|||
call h5adelete_by_name_f(loc_id, trim(p), attrLabel, hdferr)
|
||||
if(hdferr < 0) error stop 'HDF5 error'
|
||||
endif
|
||||
call h5acreate_by_name_f(loc_id,trim(p),trim(attrLabel),type_id,space_id,attr_id,hdferr)
|
||||
|
||||
call h5acreate_by_name_f(loc_id,trim(p),trim(attrLabel),H5T_STRING,space_id,attr_id,hdferr)
|
||||
if(hdferr < 0) error stop 'HDF5 error'
|
||||
call h5awrite_f(attr_id, type_id, c_loc(ptr(1)), hdferr)
|
||||
call h5awrite_f(attr_id, H5T_STRING, c_loc(ptr), hdferr) ! ptr instead of c_loc(ptr) works on gfortran, not on ifort
|
||||
if(hdferr < 0) error stop 'HDF5 error'
|
||||
|
||||
call h5aclose_f(attr_id,hdferr)
|
||||
if(hdferr < 0) error stop 'HDF5 error'
|
||||
call h5tclose_f(type_id,hdferr)
|
||||
if(hdferr < 0) error stop 'HDF5 error'
|
||||
call h5sclose_f(space_id,hdferr)
|
||||
if(hdferr < 0) error stop 'HDF5 error'
|
||||
|
||||
|
@ -388,6 +394,7 @@ subroutine HDF5_addAttribute_int(loc_id,attrLabel,attrValue,path)
|
|||
call h5adelete_by_name_f(loc_id, trim(p), attrLabel, hdferr)
|
||||
if(hdferr < 0) error stop 'HDF5 error'
|
||||
endif
|
||||
|
||||
call h5acreate_by_name_f(loc_id,trim(p),trim(attrLabel),H5T_NATIVE_INTEGER,space_id,attr_id,hdferr)
|
||||
if(hdferr < 0) error stop 'HDF5 error'
|
||||
call h5awrite_f(attr_id, H5T_NATIVE_INTEGER, attrValue, int([1],HSIZE_T), hdferr)
|
||||
|
@ -432,6 +439,7 @@ subroutine HDF5_addAttribute_real(loc_id,attrLabel,attrValue,path)
|
|||
call h5adelete_by_name_f(loc_id, trim(p), attrLabel, hdferr)
|
||||
if(hdferr < 0) error stop 'HDF5 error'
|
||||
endif
|
||||
|
||||
call h5acreate_by_name_f(loc_id,trim(p),trim(attrLabel),H5T_NATIVE_DOUBLE,space_id,attr_id,hdferr)
|
||||
if(hdferr < 0) error stop 'HDF5 error'
|
||||
call h5awrite_f(attr_id, H5T_NATIVE_DOUBLE, attrValue, int([1],HSIZE_T), hdferr)
|
||||
|
@ -455,12 +463,12 @@ subroutine HDF5_addAttribute_str_array(loc_id,attrLabel,attrValue,path)
|
|||
character(len=*), intent(in), dimension(:) :: attrValue
|
||||
character(len=*), intent(in), optional :: path
|
||||
|
||||
integer(HID_T) :: attr_id, space_id, filetype_id, memtype_id
|
||||
integer :: hdferr
|
||||
integer(HID_T) :: attr_id, space_id
|
||||
logical :: attrExists
|
||||
integer :: hdferr,i
|
||||
character(len=:), allocatable :: p
|
||||
type(C_PTR) :: f_ptr
|
||||
character(len=:), allocatable, dimension(:), target :: attrValue_
|
||||
character(len=len(attrValue)+1,kind=C_CHAR), dimension(size(attrValue)), target :: attrValue_
|
||||
type(C_PTR), target, dimension(size(attrValue)) :: ptr
|
||||
|
||||
|
||||
if (present(path)) then
|
||||
|
@ -469,34 +477,26 @@ subroutine HDF5_addAttribute_str_array(loc_id,attrLabel,attrValue,path)
|
|||
p = '.'
|
||||
endif
attrValue_ = attrValue
|
||||
do i=1,size(attrValue)
|
||||
attrValue_(i) = attrValue(i)//C_NULL_CHAR
|
||||
ptr(i) = c_loc(attrValue_(i))
|
||||
enddo
|
||||
|
||||
call h5tcopy_f(H5T_C_S1,filetype_id,hdferr)
|
||||
if(hdferr < 0) error stop 'HDF5 error'
|
||||
call h5tset_size_f(filetype_id, int(len(attrValue_)+1,C_SIZE_T),hdferr)
|
||||
if(hdferr < 0) error stop 'HDF5 error'
|
||||
call h5tcopy_f(H5T_FORTRAN_S1, memtype_id, hdferr)
|
||||
if(hdferr < 0) error stop 'HDF5 error'
|
||||
call h5tset_size_f(memtype_id, int(len(attrValue_),C_SIZE_T), hdferr)
|
||||
if(hdferr < 0) error stop 'HDF5 error'
|
||||
call h5screate_simple_f(1,shape(attrValue_,kind=HSIZE_T),space_id, hdferr)
|
||||
call h5screate_simple_f(1,shape(attrValue_,kind=HSIZE_T),space_id,hdferr,shape(attrValue_,kind=HSIZE_T))
|
||||
if(hdferr < 0) error stop 'HDF5 error'
|
||||
|
||||
call h5aexists_by_name_f(loc_id,trim(p),attrLabel,attrExists,hdferr)
|
||||
if(hdferr < 0) error stop 'HDF5 error'
|
||||
if (attrExists) then
|
||||
call h5adelete_by_name_f(loc_id, trim(p), attrLabel, hdferr)
|
||||
if(hdferr < 0) error stop 'HDF5 error'
|
||||
endif
|
||||
call h5acreate_by_name_f(loc_id,trim(p),trim(attrLabel),filetype_id,space_id,attr_id,hdferr)
|
||||
|
||||
call h5acreate_by_name_f(loc_id,trim(p),trim(attrLabel),H5T_STRING,space_id,attr_id,hdferr)
|
||||
if(hdferr < 0) error stop 'HDF5 error'
|
||||
f_ptr = c_loc(attrValue_)
|
||||
call h5awrite_f(attr_id, memtype_id, f_ptr, hdferr)
|
||||
call h5awrite_f(attr_id, H5T_STRING, c_loc(ptr), hdferr) ! ptr instead of c_loc(ptr) works on gfortran, not on ifort
|
||||
if(hdferr < 0) error stop 'HDF5 error'
|
||||
|
||||
call h5tclose_f(memtype_id,hdferr)
|
||||
if(hdferr < 0) error stop 'HDF5 error'
|
||||
call h5tclose_f(filetype_id,hdferr)
|
||||
if(hdferr < 0) error stop 'HDF5 error'
|
||||
call h5aclose_f(attr_id,hdferr)
|
||||
if(hdferr < 0) error stop 'HDF5 error'
|
||||
call h5sclose_f(space_id,hdferr)
@ -539,6 +539,7 @@ subroutine HDF5_addAttribute_int_array(loc_id,attrLabel,attrValue,path)
|
|||
call h5adelete_by_name_f(loc_id, trim(p), attrLabel, hdferr)
|
||||
if(hdferr < 0) error stop 'HDF5 error'
|
||||
endif
|
||||
|
||||
call h5acreate_by_name_f(loc_id,trim(p),trim(attrLabel),H5T_NATIVE_INTEGER,space_id,attr_id,hdferr)
|
||||
if(hdferr < 0) error stop 'HDF5 error'
|
||||
call h5awrite_f(attr_id, H5T_NATIVE_INTEGER, attrValue, array_size, hdferr)
@ -586,6 +587,7 @@ subroutine HDF5_addAttribute_real_array(loc_id,attrLabel,attrValue,path)
|
|||
call h5adelete_by_name_f(loc_id, trim(p), attrLabel, hdferr)
|
||||
if(hdferr < 0) error stop 'HDF5 error'
|
||||
endif
|
||||
|
||||
call h5acreate_by_name_f(loc_id,trim(p),trim(attrLabel),H5T_NATIVE_DOUBLE,space_id,attr_id,hdferr)
|
||||
if(hdferr < 0) error stop 'HDF5 error'
|
||||
call h5awrite_f(attr_id, H5T_NATIVE_DOUBLE, attrValue, array_size, hdferr)
@ -1483,32 +1485,50 @@ subroutine HDF5_write_str(dataset,loc_id,datasetName)
|
|||
integer(HID_T), intent(in) :: loc_id
|
||||
character(len=*), intent(in) :: datasetName !< name of the dataset in the file
|
||||
|
||||
integer(HID_T) :: filetype_id, space_id, dataset_id
|
||||
integer(HID_T) :: filetype_id, memtype_id, space_id, dataset_id, dcpl
|
||||
integer :: hdferr
|
||||
character(len=len_trim(dataset)+1,kind=C_CHAR), dimension(1), target :: dataset_
|
||||
type(C_PTR), target, dimension(1) :: ptr
|
||||
character(len=len_trim(dataset),kind=C_CHAR), target :: dataset_
|
||||
|
||||
|
||||
dataset_(1) = trim(dataset)//C_NULL_CHAR
|
||||
ptr(1) = c_loc(dataset_(1))
|
||||
dataset_ = trim(dataset)
|
||||
|
||||
call h5tcopy_f(H5T_STRING, filetype_id, hdferr)
|
||||
call h5tcopy_f(H5T_C_S1, filetype_id, hdferr)
|
||||
if(hdferr < 0) error stop 'HDF5 error'
|
||||
call h5tset_size_f(filetype_id, int(len(dataset_),HSIZE_T), hdferr)
|
||||
call h5tset_size_f(filetype_id, int(len(dataset_)+1,HSIZE_T), hdferr) ! +1 for NULL
|
||||
if(hdferr < 0) error stop 'HDF5 error'
|
||||
|
||||
call h5screate_f(H5S_SCALAR_F, space_id, hdferr)
|
||||
call H5Tcopy_f(H5T_FORTRAN_S1, memtype_id, hdferr)
|
||||
if(hdferr < 0) error stop 'HDF5 error'
|
||||
call h5dcreate_f(loc_id, datasetName, H5T_STRING, space_id, dataset_id, hdferr)
|
||||
call H5Tset_size_f(memtype_id, int(len(dataset_),HSIZE_T), hdferr)
|
||||
if(hdferr < 0) error stop 'HDF5 error'
|
||||
|
||||
call h5dwrite_f(dataset_id, H5T_STRING, c_loc(ptr), hdferr)
|
||||
call h5pcreate_f(H5P_DATASET_CREATE_F, dcpl, hdferr)
|
||||
if (hdferr < 0) error stop 'HDF5 error'
|
||||
call h5pset_chunk_f(dcpl, 1, [1_HSIZE_T], hdferr)
|
||||
if (hdferr < 0) error stop 'HDF5 error'
|
||||
call h5pset_shuffle_f(dcpl, hdferr)
|
||||
if (hdferr < 0) error stop 'HDF5 error'
|
||||
call h5pset_Fletcher32_f(dcpl,hdferr)
|
||||
if (hdferr < 0) error stop 'HDF5 error'
|
||||
if (compression_possible .and. len(dataset) > 1024*256) call h5pset_deflate_f(dcpl, 6, hdferr)
|
||||
if (hdferr < 0) error stop 'HDF5 error'
|
||||
|
||||
call h5screate_simple_f(1, [1_HSIZE_T], space_id, hdferr)
|
||||
if(hdferr < 0) error stop 'HDF5 error'
|
||||
CALL h5dcreate_f(loc_id, datasetName, filetype_id, space_id, dataset_id, hdferr, dcpl)
|
||||
if(hdferr < 0) error stop 'HDF5 error'
|
||||
|
||||
call h5dwrite_f(dataset_id, memtype_id, c_loc(dataset_(1:1)), hdferr)
|
||||
if(hdferr < 0) error stop 'HDF5 error'
|
||||
|
||||
call h5pclose_f(dcpl, hdferr)
|
||||
if(hdferr < 0) error stop 'HDF5 error'
|
||||
call h5dclose_f(dataset_id, hdferr)
|
||||
if(hdferr < 0) error stop 'HDF5 error'
|
||||
call h5sclose_f(space_id, hdferr)
|
||||
if(hdferr < 0) error stop 'HDF5 error'
|
||||
call h5tclose_f(memtype_id, hdferr)
|
||||
if(hdferr < 0) error stop 'HDF5 error'
|
||||
call h5tclose_f(filetype_id, hdferr)
|
||||
if(hdferr < 0) error stop 'HDF5 error'
|
@ -1916,9 +1936,10 @@ subroutine initialize_write(dset_id, filespace_id, memspace_id, plist_id, &
|
|||
|
||||
integer, dimension(worldsize) :: writeSize !< contribution of all processes
|
||||
integer(HID_T) :: dcpl
|
||||
integer :: ierr, hdferr, HDF5_major, HDF5_minor, HDF5_release
|
||||
integer :: ierr, hdferr
|
||||
integer(HSIZE_T), parameter :: chunkSize = 1024_HSIZE_T**2/8_HSIZE_T
|
||||
|
||||
|
||||
!-------------------------------------------------------------------------------------------------
|
||||
! creating a property list for transfer properties (is collective when writing in parallel)
|
||||
call h5pcreate_f(H5P_DATASET_XFER_F, plist_id, hdferr)
|
||||
|
@ -1945,22 +1966,24 @@ subroutine initialize_write(dset_id, filespace_id, memspace_id, plist_id, &
|
|||
totalShape = [myShape(1:ubound(myShape,1)-1),int(sum(writeSize),HSIZE_T)]
|
||||
|
||||
!--------------------------------------------------------------------------------------------------
|
||||
! compress (and chunk) larger datasets
|
||||
! chunk dataset, enable compression for larger datasets
|
||||
call h5pcreate_f(H5P_DATASET_CREATE_F, dcpl, hdferr)
|
||||
if(hdferr < 0) error stop 'HDF5 error'
|
||||
if(product(totalShape) >= chunkSize*2_HSIZE_T) then
|
||||
call H5get_libversion_f(HDF5_major,HDF5_minor,HDF5_release,hdferr)
|
||||
if (hdferr < 0) error stop 'HDF5 error'
|
||||
if (HDF5_major == 1 .and. HDF5_minor >= 12) then ! https://forum.hdfgroup.org/t/6186
|
||||
call h5pset_chunk_f(dcpl, size(totalShape), getChunks(totalShape,chunkSize), hdferr)
|
||||
if (hdferr < 0) error stop 'HDF5 error'
|
||||
|
||||
if (product(totalShape) > 0) then
|
||||
call h5pset_shuffle_f(dcpl, hdferr)
|
||||
if (hdferr < 0) error stop 'HDF5 error'
|
||||
call h5pset_deflate_f(dcpl, 6, hdferr)
|
||||
if (hdferr < 0) error stop 'HDF5 error'
|
||||
call h5pset_Fletcher32_f(dcpl,hdferr)
|
||||
if (hdferr < 0) error stop 'HDF5 error'
|
||||
|
||||
if (product(totalShape) >= chunkSize*2_HSIZE_T) then
|
||||
call h5pset_chunk_f(dcpl, size(totalShape), getChunks(totalShape,chunkSize), hdferr)
|
||||
if (hdferr < 0) error stop 'HDF5 error'
|
||||
if (compression_possible) call h5pset_deflate_f(dcpl, 6, hdferr)
|
||||
else
|
||||
call h5pset_chunk_f(dcpl, size(totalShape), totalShape, hdferr)
|
||||
endif
|
||||
if (hdferr < 0) error stop 'HDF5 error'
|
||||
endif
|
||||
|
||||
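Reading aid (not part of the commit): in the rewritten block above, chunking together with the shuffle and Fletcher32 checksum filters is applied to every non-empty dataset rather than only to large ones, and the run-time HDF5-version gate (the forum.hdfgroup.org/t/6186 workaround) is removed. Datasets smaller than twice the chunk size are chunked with their full shape, since HDF5 can only attach filters to chunked storage; larger ones are chunked via getChunks and additionally gzip-compressed (level 6) when compression_possible is set.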
!--------------------------------------------------------------------------------------------------
|
||||
|
|
|
@ -16,7 +16,8 @@ module IO
|
|||
private
|
||||
|
||||
character(len=*), parameter, public :: &
|
||||
IO_WHITESPACE = achar(44)//achar(32)//achar(9)//achar(10)//achar(13) !< whitespace characters
|
||||
IO_WHITESPACE = achar(44)//achar(32)//achar(9)//achar(10)//achar(13), & !< whitespace characters
|
||||
IO_QUOTES = "'"//'"'
|
||||
character, parameter, public :: &
|
||||
IO_EOL = new_line('DAMASK'), & !< end of line character
|
||||
IO_COMMENT = '#'
|
||||
|
@ -495,13 +496,13 @@ subroutine IO_error(error_ID,el,ip,g,instance,ext_msg)
|
|||
msg = '--- expected after YAML file header'
|
||||
case (709)
|
||||
msg = 'Length mismatch'
|
||||
case (710)
|
||||
msg = 'Closing quotation mark missing in string'
|
||||
|
||||
!-------------------------------------------------------------------------------------------------
|
||||
! errors related to the grid solver
|
||||
case (831)
|
||||
msg = 'mask consistency violated in grid load case'
|
||||
case (832)
|
||||
msg = 'ill-defined L (line partly defined) in grid load case'
|
||||
case (833)
|
||||
msg = 'non-positive ratio for geometric progression'
|
||||
case (834)
|
||||
|
|
|
@ -71,8 +71,8 @@ recursive function parse_flow(YAML_flow) result(node)
|
|||
s = e
|
||||
d = s + scan(flow_string(s+1:),':')
|
||||
e = d + find_end(flow_string(d+1:),'}')
|
||||
|
||||
key = trim(adjustl(flow_string(s+1:d-1)))
|
||||
if(quotedString(key)) key = key(2:len(key)-1)
|
||||
myVal => parse_flow(flow_string(d+1:e-1)) ! parse items (recursively)
|
||||
|
||||
select type (node)
|
||||
|
@ -97,7 +97,11 @@ recursive function parse_flow(YAML_flow) result(node)
|
|||
allocate(tScalar::node)
|
||||
select type (node)
|
||||
class is (tScalar)
|
||||
if(quotedString(flow_string)) then
|
||||
node = trim(adjustl(flow_string(2:len(flow_string)-1)))
|
||||
else
|
||||
node = trim(adjustl(flow_string))
|
||||
endif
|
||||
end select
|
||||
endif
|
||||
|
||||
|
@ -119,18 +123,38 @@ integer function find_end(str,e_char)
|
|||
|
||||
N_sq = 0
|
||||
N_cu = 0
|
||||
do i = 1, len_trim(str)
|
||||
i = 1
|
||||
do while(i<=len_trim(str))
|
||||
if (scan(str(i:i),IO_QUOTES) == 1) i = i + scan(str(i+1:),str(i:i))
|
||||
if (N_sq==0 .and. N_cu==0 .and. scan(str(i:i),e_char//',') == 1) exit
|
||||
N_sq = N_sq + merge(1,0,str(i:i) == '[')
|
||||
N_cu = N_cu + merge(1,0,str(i:i) == '{')
|
||||
N_sq = N_sq - merge(1,0,str(i:i) == ']')
|
||||
N_cu = N_cu - merge(1,0,str(i:i) == '}')
|
||||
i = i + 1
|
||||
enddo
|
||||
find_end = i
end function find_end
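Reading aid (not part of the commit): the rewritten scan walks the string once, jumps over quoted substrings in a single step, and tracks the nesting depth of '[' and '{'; it stops at the first e_char or ',' found at depth zero. For example, find_end('{a: b}, c}', '}') returns 7, the position of the comma that ends the first flow item, whereas a ',' inside the braces or inside quotes is passed over.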
|
||||
|
||||
|
||||
!--------------------------------------------------------------------------------------------------
|
||||
! @brief check whether a string is enclosed with single or double quotes
|
||||
!--------------------------------------------------------------------------------------------------
|
||||
logical function quotedString(line)
|
||||
|
||||
character(len=*), intent(in) :: line
|
||||
|
||||
quotedString = .false.
|
||||
|
||||
if (scan(line(:1),IO_QUOTES) == 1) then
|
||||
quotedString = .true.
|
||||
if(line(len(line):len(line)) /= line(:1)) call IO_error(710,ext_msg=line)
|
||||
endif
|
||||
|
||||
end function quotedString
!--------------------------------------------------------------------------------------------------
|
||||
! @brief Returns Indentation.
|
||||
! @details It determines the indentation level for a given block/line.
|
||||
|
@ -333,6 +357,37 @@ subroutine remove_line_break(blck,s_blck,e_char,flow_line)
|
|||
end subroutine remove_line_break
|
||||
|
||||
|
||||
!--------------------------------------------------------------------------------------------------
|
||||
!> @brief return the scalar list item without line break
|
||||
!--------------------------------------------------------------------------------------------------
|
||||
subroutine list_item_inline(blck,s_blck,inline,offset)
|
||||
|
||||
character(len=*), intent(in) :: blck !< YAML in mixed style
|
||||
integer, intent(inout) :: s_blck
|
||||
character(len=:), allocatable, intent(out) :: inline
|
||||
integer, intent(inout) :: offset
|
||||
|
||||
character(len=:), allocatable :: line
|
||||
integer :: indent,indent_next
|
||||
|
||||
indent = indentDepth(blck(s_blck:),offset)
|
||||
line = IO_rmComment(blck(s_blck:s_blck + index(blck(s_blck:),IO_EOL) - 2))
|
||||
inline = line(indent-offset+3:)
|
||||
s_blck = s_blck + index(blck(s_blck:),IO_EOL)
|
||||
|
||||
indent_next = indentDepth(blck(s_blck:))
|
||||
|
||||
do while(indent_next > indent)
|
||||
inline = inline//' '//trim(adjustl(IO_rmComment(blck(s_blck:s_blck + index(blck(s_blck:),IO_EOL) - 2))))
|
||||
s_blck = s_blck + index(blck(s_blck:),IO_EOL)
|
||||
indent_next = indentDepth(blck(s_blck:))
|
||||
enddo
|
||||
|
||||
if(scan(inline,",") > 0) inline = '"'//inline//'"'
|
||||
|
||||
end subroutine list_item_inline
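Reading aid (not part of the commit): list_item_inline gathers a scalar list item that continues on more deeply indented lines into a single string and, if that string contains a comma, wraps it in double quotes so the flow representation stays parseable. This is what the adjusted selfTest below exercises: the block entry '- c:d' with the continuation line 'e.f,' becomes the flow scalar "c:d e.f,".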
|
||||
|
||||
|
||||
!--------------------------------------------------------------------------------------------------
|
||||
! @brief reads a line of YAML block which is already in flow style
|
||||
! @details Dicts should be enclosed within '{}' to be consistent with the DAMASK YAML parser
|
||||
|
@ -463,7 +518,7 @@ recursive subroutine lst(blck,flow,s_blck,s_flow,offset)
|
|||
integer, intent(inout) :: s_blck, & !< start position in blck
|
||||
s_flow, & !< start position in flow
|
||||
offset !< stores leading '- ' in nested lists
|
||||
character(len=:), allocatable :: line,flow_line
|
||||
character(len=:), allocatable :: line,flow_line,inline
|
||||
integer :: e_blck,indent
|
||||
|
||||
indent = indentDepth(blck(s_blck:),offset)
|
||||
|
@ -509,9 +564,9 @@ recursive subroutine lst(blck,flow,s_blck,s_flow,offset)
|
|||
else ! list item in the same line
|
||||
line = line(indentDepth(line)+3:)
|
||||
if(isScalar(line)) then
|
||||
call line_toFlow(flow,s_flow,line)
|
||||
s_blck = e_blck +2
|
||||
call list_item_inline(blck,s_blck,inline,offset)
|
||||
offset = 0
|
||||
call line_toFlow(flow,s_flow,inline)
|
||||
elseif(isFlow(line)) then
|
||||
s_blck = s_blck + index(blck(s_blck:),'-')
|
||||
if(isFlowList(line)) then
|
||||
|
@ -723,6 +778,8 @@ subroutine selfTest
|
|||
if (indentDepth('a') /= 0) error stop 'indentDepth'
|
||||
if (indentDepth('x ') /= 0) error stop 'indentDepth'
|
||||
|
||||
if (.not. quotedString("'a'")) error stop 'quotedString'
|
||||
|
||||
if ( isFlow(' a')) error stop 'isFlow'
|
||||
if (.not. isFlow('{')) error stop 'isFlow'
|
||||
if (.not. isFlow(' [')) error stop 'isFlow'
|
||||
|
@ -809,14 +866,14 @@ subroutine selfTest
|
|||
|
||||
multi_line_flow1: block
|
||||
character(len=*), parameter :: flow_multi = &
|
||||
"%YAML 1.1"//IO_EOL//&
|
||||
"---"//IO_EOL//&
|
||||
"a: [b,"//IO_EOL//&
|
||||
"c: "//IO_EOL//&
|
||||
"d, e]"//IO_EOL
|
||||
'%YAML 1.1'//IO_EOL//&
|
||||
'---'//IO_EOL//&
|
||||
'a: ["b",'//IO_EOL//&
|
||||
'c: '//IO_EOL//&
|
||||
'"d", "e"]'//IO_EOL
|
||||
|
||||
character(len=*), parameter :: flow = &
|
||||
"{a: [b, {c: d}, e]}"
|
||||
'{a: ["b", {c: "d"}, "e"]}'
|
||||
|
||||
if( .not. to_flow(flow_multi) == flow) error stop 'to_flow'
|
||||
end block multi_line_flow1
|
||||
|
@ -848,14 +905,15 @@ subroutine selfTest
|
|||
" "//IO_EOL//&
|
||||
" "//IO_EOL//&
|
||||
" param_1: [a: b, c, {d: {e: [f: g, h]}}]"//IO_EOL//&
|
||||
" - c: d"//IO_EOL//&
|
||||
" - c:d"//IO_EOL//&
|
||||
" e.f,"//IO_EOL//&
|
||||
" bb:"//IO_EOL//&
|
||||
" "//IO_EOL//&
|
||||
" - "//IO_EOL//&
|
||||
" {param_1: [{a: b}, c, {d: {e: [{f: g}, h]}}]}"//IO_EOL//&
|
||||
"..."//IO_EOL
|
||||
character(len=*), parameter :: mixed_flow = &
|
||||
"{aa: [{param_1: [{a: b}, c, {d: {e: [{f: g}, h]}}]}, {c: d}], bb: [{param_1: [{a: b}, c, {d: {e: [{f: g}, h]}}]}]}"
|
||||
'{aa: [{param_1: [{a: b}, c, {d: {e: [{f: g}, h]}}]}, "c:d e.f,"], bb: [{param_1: [{a: b}, c, {d: {e: [{f: g}, h]}}]}]}'
|
||||
|
||||
if(.not. to_flow(block_flow) == mixed_flow) error stop 'to_flow'
|
||||
end block basic_mixed
|
||||
|
|
|
@ -83,10 +83,12 @@ subroutine parse_numerics()
|
|||
if (worldrank == 0) then
|
||||
print*, 'reading numerics.yaml'; flush(IO_STDOUT)
|
||||
fileContent = IO_read('numerics.yaml')
|
||||
if (len(fileContent) > 0) then
|
||||
call results_openJobFile(parallel=.false.)
|
||||
call results_writeDataset_str(fileContent,'setup','numerics.yaml','numerics configuration')
|
||||
call results_closeJobFile
|
||||
endif
|
||||
endif
|
||||
call parallelization_bcast_str(fileContent)
|
||||
|
||||
config_numerics => YAML_parse_str(fileContent)
|
||||
|
@ -113,10 +115,12 @@ subroutine parse_debug()
|
|||
if (worldrank == 0) then
|
||||
print*, 'reading debug.yaml'; flush(IO_STDOUT)
|
||||
fileContent = IO_read('debug.yaml')
|
||||
if (len(fileContent) > 0) then
|
||||
call results_openJobFile(parallel=.false.)
|
||||
call results_writeDataset_str(fileContent,'setup','debug.yaml','debug configuration')
|
||||
call results_closeJobFile
|
||||
endif
|
||||
endif
|
||||
call parallelization_bcast_str(fileContent)
|
||||
|
||||
config_debug => YAML_parse_str(fileContent)
|
||||
|
|
|
@ -53,12 +53,11 @@ program DAMASK_grid
|
|||
integer, parameter :: &
|
||||
subStepFactor = 2 !< for each substep, divide the last time increment by 2.0
|
||||
real(pReal) :: &
|
||||
T_0 = 300.0_pReal, &
|
||||
time = 0.0_pReal, & !< elapsed time
|
||||
time0 = 0.0_pReal, & !< begin of interval
|
||||
timeinc = 1.0_pReal, & !< current time interval
|
||||
timeIncOld = 0.0_pReal, & !< previous time interval
|
||||
remainingLoadCaseTime = 0.0_pReal !< remaining time of current load case
|
||||
t = 0.0_pReal, & !< elapsed time
|
||||
t_0 = 0.0_pReal, & !< begin of interval
|
||||
Delta_t = 1.0_pReal, & !< current time interval
|
||||
Delta_t_prev = 0.0_pReal, & !< previous time interval
|
||||
t_remaining = 0.0_pReal !< remaining time of current load case
|
||||
logical :: &
|
||||
guess, & !< guess along former trajectory
|
||||
stagIterate, &
|
||||
|
@ -230,12 +229,6 @@ program DAMASK_grid
|
|||
reportAndCheck: if (worldrank == 0) then
|
||||
print'(/,a,i0)', ' load case: ', l
|
||||
print*, ' estimate_rate:', loadCases(l)%estimate_rate
|
||||
if (loadCases(l)%deformation%myType == 'L') then
|
||||
do j = 1, 3
|
||||
if (any(loadCases(l)%deformation%mask(j,1:3) .eqv. .true.) .and. &
|
||||
any(loadCases(l)%deformation%mask(j,1:3) .eqv. .false.)) errorID = 832 ! each row should be either fully or not at all defined
|
||||
enddo
|
||||
endif
|
||||
if (loadCases(l)%deformation%myType == 'F') then
|
||||
print*, ' F:'
|
||||
else
|
||||
|
@ -243,14 +236,14 @@ program DAMASK_grid
|
|||
endif
|
||||
do i = 1, 3; do j = 1, 3
|
||||
if (loadCases(l)%deformation%mask(i,j)) then
|
||||
write(IO_STDOUT,'(2x,f12.7)',advance='no') loadCases(l)%deformation%values(i,j)
|
||||
else
|
||||
write(IO_STDOUT,'(2x,12a)',advance='no') ' x '
|
||||
else
|
||||
write(IO_STDOUT,'(2x,f12.7)',advance='no') loadCases(l)%deformation%values(i,j)
|
||||
endif
|
||||
enddo; write(IO_STDOUT,'(/)',advance='no')
|
||||
enddo
|
||||
if (any(loadCases(l)%stress%mask .eqv. loadCases(l)%deformation%mask)) errorID = 831
|
||||
if (any(loadCases(l)%stress%mask .and. transpose(loadCases(l)%stress%mask) .and. (math_I3<1))) &
|
||||
if (any(.not.(loadCases(l)%stress%mask .or. transpose(loadCases(l)%stress%mask)) .and. (math_I3<1))) &
|
||||
errorID = 838 ! no rotation is allowed by stress BC
|
||||
|
||||
if (loadCases(l)%stress%myType == 'P') print*, ' P / MPa:'
|
||||
|
@ -259,9 +252,9 @@ program DAMASK_grid
|
|||
if (loadCases(l)%stress%myType /= '') then
|
||||
do i = 1, 3; do j = 1, 3
|
||||
if (loadCases(l)%stress%mask(i,j)) then
|
||||
write(IO_STDOUT,'(2x,f12.4)',advance='no') loadCases(l)%stress%values(i,j)*1e-6_pReal
|
||||
else
|
||||
write(IO_STDOUT,'(2x,12a)',advance='no') ' x '
|
||||
else
|
||||
write(IO_STDOUT,'(2x,f12.4)',advance='no') loadCases(l)%stress%values(i,j)*1e-6_pReal
|
||||
endif
|
||||
enddo; write(IO_STDOUT,'(/)',advance='no')
|
||||
enddo
|
||||
|
@ -303,7 +296,7 @@ program DAMASK_grid
|
|||
case(FIELD_THERMAL_ID)
|
||||
initial_conditions => config_load%get('initial_conditions',defaultVal=emptyDict)
|
||||
thermal => initial_conditions%get('thermal',defaultVal=emptyDict)
|
||||
call grid_thermal_spectral_init(thermal%get_asFloat('T',defaultVal = T_0))
|
||||
call grid_thermal_spectral_init(thermal%get_asFloat('T'))
|
||||
|
||||
case(FIELD_DAMAGE_ID)
|
||||
call grid_damage_spectral_init
|
||||
|
@ -330,7 +323,7 @@ program DAMASK_grid
|
|||
endif writeUndeformed
|
||||
|
||||
loadCaseLooping: do l = 1, size(loadCases)
|
||||
time0 = time ! load case start time
|
||||
t_0 = t ! load case start time
|
||||
guess = loadCases(l)%estimate_rate ! change of load case? homogeneous guess for the first inc
|
||||
|
||||
incLooping: do inc = 1, loadCases(l)%N
|
||||
|
@ -338,31 +331,31 @@ program DAMASK_grid
|
|||
|
||||
!--------------------------------------------------------------------------------------------------
|
||||
! forwarding time
|
||||
timeIncOld = timeinc ! last timeinc that brought former inc to an end
|
||||
Delta_t_prev = Delta_t                                                                        ! last time interval that brought former inc to an end
|
||||
if (dEq(loadCases(l)%r,1.0_pReal,1.e-9_pReal)) then ! linear scale
|
||||
timeinc = loadCases(l)%t/real(loadCases(l)%N,pReal)
|
||||
Delta_t = loadCases(l)%t/real(loadCases(l)%N,pReal)
|
||||
else
|
||||
timeinc = loadCases(l)%t * (loadCases(l)%r**(inc-1)-loadCases(l)%r**inc) &
|
||||
Delta_t = loadCases(l)%t * (loadCases(l)%r**(inc-1)-loadCases(l)%r**inc) &
|
||||
/ (1.0_pReal-loadCases(l)%r**loadCases(l)%N)
|
||||
endif
|
||||
timeinc = timeinc * real(subStepFactor,pReal)**real(-cutBackLevel,pReal) ! depending on cut back level, decrease time step
|
||||
Delta_t = Delta_t * real(subStepFactor,pReal)**real(-cutBackLevel,pReal) ! depending on cut back level, decrease time step
|
||||
|
||||
skipping: if (totalIncsCounter <= interface_restartInc) then ! not yet at restart inc?
|
||||
time = time + timeinc ! just advance time, skip already performed calculation
|
||||
t = t + Delta_t ! just advance time, skip already performed calculation
|
||||
guess = .true.                                                                              ! QUESTION: why forced guessing instead of inheriting loadcase preference?
|
||||
else skipping
|
||||
stepFraction = 0 ! fraction scaled by stepFactor**cutLevel
|
||||
|
||||
subStepLooping: do while (stepFraction < subStepFactor**cutBackLevel)
|
||||
remainingLoadCaseTime = loadCases(l)%t+time0 - time
|
||||
time = time + timeinc ! forward target time
|
||||
t_remaining = loadCases(l)%t + t_0 - t
|
||||
t = t + Delta_t ! forward target time
|
||||
stepFraction = stepFraction + 1 ! count step
|
||||
|
||||
!--------------------------------------------------------------------------------------------------
|
||||
! report begin of new step
|
||||
print'(/,a)', ' ###########################################################################'
|
||||
print'(1x,a,es12.5,6(a,i0))', &
|
||||
'Time', time, &
|
||||
'Time', t, &
|
||||
's: Increment ', inc,'/',loadCases(l)%N,&
|
||||
'-', stepFraction,'/',subStepFactor**cutBackLevel,&
|
||||
' of load case ', l,'/',size(loadCases)
|
||||
|
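For reference (not part of the commit): with scaling ratio r /= 1 the increments above form a geometric progression, Delta_t(inc) = t*(r^(inc-1) - r^inc)/(1 - r^N) = t*r^(inc-1)*(1 - r)/(1 - r^N), so consecutive increments differ by the factor r and their sum over inc = 1..N telescopes to exactly the load-case time t. For t = 10, N = 3, r = 2 this gives 10/7, 20/7 and 40/7, which add up to 10.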
@ -377,7 +370,7 @@ program DAMASK_grid
|
|||
select case(ID(field))
|
||||
case(FIELD_MECH_ID)
|
||||
call mechanical_forward (&
|
||||
cutBack,guess,timeinc,timeIncOld,remainingLoadCaseTime, &
|
||||
cutBack,guess,Delta_t,Delta_t_prev,t_remaining, &
|
||||
deformation_BC = loadCases(l)%deformation, &
|
||||
stress_BC = loadCases(l)%stress, &
|
||||
rotation_BC = loadCases(l)%rot)
|
||||
|
@ -398,9 +391,9 @@ program DAMASK_grid
|
|||
case(FIELD_MECH_ID)
|
||||
solres(field) = mechanical_solution(incInfo)
|
||||
case(FIELD_THERMAL_ID)
|
||||
solres(field) = grid_thermal_spectral_solution(timeinc)
|
||||
solres(field) = grid_thermal_spectral_solution(Delta_t)
|
||||
case(FIELD_DAMAGE_ID)
|
||||
solres(field) = grid_damage_spectral_solution(timeinc)
|
||||
solres(field) = grid_damage_spectral_solution(Delta_t)
|
||||
end select
|
||||
|
||||
if (.not. solres(field)%converged) exit ! no solution found
|
||||
|
@ -418,11 +411,11 @@ program DAMASK_grid
|
|||
if ( (all(solres(:)%converged .and. solres(:)%stagConverged)) & ! converged
|
||||
.and. .not. solres(1)%termIll) then ! and acceptable solution found
|
||||
call mechanical_updateCoords
|
||||
timeIncOld = timeinc
|
||||
Delta_t_prev = Delta_t
|
||||
cutBack = .false.
|
||||
guess = .true. ! start guessing after first converged (sub)inc
|
||||
if (worldrank == 0) then
|
||||
write(statUnit,*) totalIncsCounter, time, cutBackLevel, &
|
||||
write(statUnit,*) totalIncsCounter, t, cutBackLevel, &
|
||||
solres(1)%converged, solres(1)%iterationsNeeded
|
||||
flush(statUnit)
|
||||
endif
|
||||
|
@ -430,8 +423,8 @@ program DAMASK_grid
|
|||
cutBack = .true.
|
||||
stepFraction = (stepFraction - 1) * subStepFactor ! adjust to new denominator
|
||||
cutBackLevel = cutBackLevel + 1
|
||||
time = time - timeinc ! rewind time
|
||||
timeinc = timeinc/real(subStepFactor,pReal) ! cut timestep
|
||||
t = t - Delta_t
|
||||
Delta_t = Delta_t/real(subStepFactor,pReal) ! cut timestep
|
||||
print'(/,a)', ' cutting back '
|
||||
else ! no more options to continue
|
||||
if (worldrank == 0) close(statUnit)
|
||||
|
@ -453,7 +446,7 @@ program DAMASK_grid
|
|||
if (mod(inc,loadCases(l)%f_out) == 0 .or. signal) then
|
||||
print'(1/,a)', ' ... writing results to file ......................................'
|
||||
flush(IO_STDOUT)
|
||||
call CPFEM_results(totalIncsCounter,time)
|
||||
call CPFEM_results(totalIncsCounter,t)
|
||||
endif
|
||||
if(signal) call interface_setSIGUSR1(.false.)
|
||||
call MPI_Allreduce(interface_SIGUSR2,signal,1,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,ierr)
|
||||
|
@ -497,8 +490,8 @@ subroutine getMaskedTensor(values,mask,tensor)
|
|||
do i = 1,3
|
||||
row => tensor%get(i)
|
||||
do j = 1,3
|
||||
mask(i,j) = row%get_asString(j) /= 'x'
|
||||
if (mask(i,j)) values(i,j) = row%get_asFloat(j)
|
||||
mask(i,j) = row%get_asString(j) == 'x'
|
||||
if (.not. mask(i,j)) values(i,j) = row%get_asFloat(j)
|
||||
enddo
|
||||
enddo
|
||||
|
||||
|
|
|
@ -266,7 +266,8 @@ subroutine readVTI(grid,geomSize,origin,material, &
|
|||
integer :: i
|
||||
|
||||
|
||||
if (getXMLValue(header,'Direction') /= '1 0 0 0 1 0 0 0 1') &
|
||||
temp = getXMLValue(header,'Direction')
|
||||
if (temp /= '1 0 0 0 1 0 0 0 1' .and. temp /= '') & ! https://discourse.vtk.org/t/vti-specification/6526
|
||||
call IO_error(error_ID = 844, ext_msg = 'coordinate order')
|
||||
|
||||
temp = getXMLValue(header,'WholeExtent')
|
||||
|
|
|
@ -160,10 +160,10 @@ end subroutine grid_damage_spectral_init
|
|||
!--------------------------------------------------------------------------------------------------
|
||||
!> @brief solution for the spectral damage scheme with internal iterations
|
||||
!--------------------------------------------------------------------------------------------------
|
||||
function grid_damage_spectral_solution(timeinc) result(solution)
|
||||
function grid_damage_spectral_solution(Delta_t) result(solution)
|
||||
|
||||
real(pReal), intent(in) :: &
|
||||
timeinc !< increment in time for current solution
|
||||
Delta_t !< increment in time for current solution
|
||||
integer :: i, j, k, ce
|
||||
type(tSolutionState) :: solution
|
||||
PetscInt :: devNull
|
||||
|
@ -176,7 +176,7 @@ function grid_damage_spectral_solution(timeinc) result(solution)
|
|||
|
||||
!--------------------------------------------------------------------------------------------------
|
||||
! set module-wide available data
|
||||
params%timeinc = timeinc
|
||||
params%Delta_t = Delta_t
|
||||
|
||||
call SNESSolve(damage_snes,PETSC_NULL_VEC,solution_vec,ierr); CHKERRQ(ierr)
|
||||
call SNESGetConvergedReason(damage_snes,reason,ierr); CHKERRQ(ierr)
|
||||
|
@ -284,7 +284,7 @@ subroutine formResidual(in,x_scal,f_scal,dummy,ierr)
|
|||
ce = 0
|
||||
do k = 1, grid3; do j = 1, grid(2); do i = 1,grid(1)
|
||||
ce = ce + 1
|
||||
scalarField_real(i,j,k) = params%timeinc*(scalarField_real(i,j,k) + homogenization_f_phi(phi_current(i,j,k),ce)) &
|
||||
scalarField_real(i,j,k) = params%Delta_t*(scalarField_real(i,j,k) + homogenization_f_phi(phi_current(i,j,k),ce)) &
|
||||
+ homogenization_mu_phi(ce)*(phi_lastInc(i,j,k) - phi_current(i,j,k)) &
|
||||
+ mu_ref*phi_current(i,j,k)
|
||||
enddo; enddo; enddo
|
||||
|
@ -292,7 +292,7 @@ subroutine formResidual(in,x_scal,f_scal,dummy,ierr)
|
|||
!--------------------------------------------------------------------------------------------------
|
||||
! convolution of damage field with green operator
|
||||
call utilities_FFTscalarForward
|
||||
call utilities_fourierGreenConvolution(K_ref, mu_ref, params%timeinc)
|
||||
call utilities_fourierGreenConvolution(K_ref, mu_ref, params%Delta_t)
|
||||
call utilities_FFTscalarBackward
|
||||
|
||||
where(scalarField_real(1:grid(1),1:grid(2),1:grid3) > phi_lastInc) &
|
||||
|
|
|
@ -324,7 +324,7 @@ function grid_mechanical_FEM_solution(incInfoIn) result(solution)
|
|||
solution%iterationsNeeded = totalIter
|
||||
solution%termIll = terminallyIll
|
||||
terminallyIll = .false.
|
||||
P_aim = merge(P_aim,P_av,params%stress_mask)
|
||||
P_aim = merge(P_av,P_aim,params%stress_mask)
|
||||
|
||||
end function grid_mechanical_FEM_solution
|
||||
|
||||
|
@ -363,20 +363,20 @@ subroutine grid_mechanical_FEM_forward(cutBack,guess,Delta_t,Delta_t_old,t_remai
|
|||
else
|
||||
C_volAvgLastInc = C_volAvg
|
||||
|
||||
F_aimDot = merge(merge((F_aim-F_aim_lastInc)/Delta_t_old,0.0_pReal,stress_BC%mask), 0.0_pReal, guess) ! estimate deformation rate for prescribed stress components
|
||||
F_aimDot = merge(merge(.0_pReal,(F_aim-F_aim_lastInc)/Delta_t_old,stress_BC%mask),.0_pReal,guess) ! estimate deformation rate for prescribed stress components
|
||||
F_aim_lastInc = F_aim
|
||||
|
||||
!-----------------------------------------------------------------------------------------------
|
||||
! calculate rate for aim
|
||||
if (deformation_BC%myType=='L') then ! calculate F_aimDot from given L and current F
|
||||
F_aimDot = F_aimDot &
|
||||
+ merge(matmul(deformation_BC%values, F_aim_lastInc),.0_pReal,deformation_BC%mask)
|
||||
+ matmul(merge(.0_pReal,deformation_BC%values,deformation_BC%mask),F_aim_lastInc)
|
||||
elseif (deformation_BC%myType=='dot_F') then ! F_aimDot is prescribed
|
||||
F_aimDot = F_aimDot &
|
||||
+ merge(deformation_BC%values,.0_pReal,deformation_BC%mask)
|
||||
+ merge(.0_pReal,deformation_BC%values,deformation_BC%mask)
|
||||
elseif (deformation_BC%myType=='F') then ! aim at end of load case is prescribed
|
||||
F_aimDot = F_aimDot &
|
||||
+ merge((deformation_BC%values - F_aim_lastInc)/t_remaining,.0_pReal,deformation_BC%mask)
|
||||
+ merge(.0_pReal,(deformation_BC%values - F_aim_lastInc)/t_remaining,deformation_BC%mask)
|
||||
endif
|
||||
|
||||
if (guess) then
|
||||
|
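A minimal, self-contained sketch (not part of the commit) of the mask convention used in the hunks above and below: after this change, mask is .true. where the load-case entry is 'x' (component not prescribed), so merge(0.0_pReal, rate, mask) keeps a value only for prescribed components. The program is hypothetical and only illustrates the merge call.

program mask_demo
  implicit none
  integer, parameter :: pReal = selected_real_kind(15)
  real(pReal), dimension(3,3) :: rate
  logical,     dimension(3,3) :: mask

  rate = 0.1_pReal
  mask = .true.                                      ! new default: everything 'x' (unconstrained)
  mask(1,1) = .false.                                ! component (1,1) is prescribed in the load case
  print '(9f6.2)', merge(0.0_pReal, rate, mask)      ! only the (1,1) entry keeps the estimated rate
end program mask_demo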
@ -397,9 +397,9 @@ subroutine grid_mechanical_FEM_forward(cutBack,guess,Delta_t,Delta_t_old,t_remai
|
|||
! update average and local deformation gradients
|
||||
F_aim = F_aim_lastInc + F_aimDot * Delta_t
|
||||
if (stress_BC%myType=='P') P_aim = P_aim &
|
||||
+ merge((stress_BC%values - P_aim)/t_remaining,0.0_pReal,stress_BC%mask)*Delta_t
|
||||
+ merge(.0_pReal,(stress_BC%values - P_aim)/t_remaining,stress_BC%mask)*Delta_t
|
||||
if (stress_BC%myType=='dot_P') P_aim = P_aim &
|
||||
+ merge(stress_BC%values,0.0_pReal,stress_BC%mask)*Delta_t
|
||||
+ merge(.0_pReal,stress_BC%values,stress_BC%mask)*Delta_t
|
||||
|
||||
call VecAXPY(solution_current,Delta_t,solution_rate,ierr); CHKERRQ(ierr)
|
||||
|
||||
|
@ -412,7 +412,7 @@ subroutine grid_mechanical_FEM_forward(cutBack,guess,Delta_t,Delta_t_old,t_remai
|
|||
! set module wide available data
|
||||
params%stress_mask = stress_BC%mask
|
||||
params%rotation_BC = rotation_BC
|
||||
params%timeinc = Delta_t
|
||||
params%Delta_t = Delta_t
|
||||
|
||||
end subroutine grid_mechanical_FEM_forward
|
||||
|
||||
|
@ -446,22 +446,26 @@ subroutine grid_mechanical_FEM_restartWrite
|
|||
|
||||
fileHandle = HDF5_openFile(getSolverJobName()//'_restart.hdf5','w')
|
||||
groupHandle = HDF5_addGroup(fileHandle,'solver')
|
||||
|
||||
call HDF5_write(P_aim,groupHandle,'P_aim',.false.)
|
||||
call HDF5_write(F_aim,groupHandle,'F_aim',.false.)
|
||||
call HDF5_write(F_aim_lastInc,groupHandle,'F_aim_lastInc',.false.)
|
||||
call HDF5_write(F_aimDot,groupHandle,'F_aimDot',.false.)
|
||||
call HDF5_write(F,groupHandle,'F')
|
||||
call HDF5_write(F_lastInc,groupHandle,'F_lastInc')
|
||||
call HDF5_write(u_current,groupHandle,'u')
|
||||
call HDF5_write(u_lastInc,groupHandle,'u_lastInc')
|
||||
|
||||
call HDF5_write(C_volAvg,groupHandle,'C_volAvg',.false.)
|
||||
call HDF5_write(C_volAvgLastInc,groupHandle,'C_volAvgLastInc',.false.)
|
||||
|
||||
call HDF5_closeGroup(groupHandle)
|
||||
call HDF5_closeFile(fileHandle)
|
||||
|
||||
if (worldrank == 0) then
|
||||
fileHandle = HDF5_openFile(getSolverJobName()//'_restart.hdf5','a',.false.)
|
||||
groupHandle = HDF5_openGroup(fileHandle,'solver')
|
||||
call HDF5_write(P_aim,groupHandle,'P_aim',.false.)
|
||||
call HDF5_write(F_aim,groupHandle,'F_aim',.false.)
|
||||
call HDF5_write(F_aim_lastInc,groupHandle,'F_aim_lastInc',.false.)
|
||||
call HDF5_write(F_aimDot,groupHandle,'F_aimDot',.false.)
|
||||
call HDF5_write(C_volAvg,groupHandle,'C_volAvg',.false.)
|
||||
call HDF5_write(C_volAvgLastInc,groupHandle,'C_volAvgLastInc',.false.)
|
||||
call HDF5_closeGroup(groupHandle)
|
||||
call HDF5_closeFile(fileHandle)
|
||||
endif
|
||||
|
||||
call DMDAVecRestoreArrayF90(mechanical_grid,solution_current,u_current,ierr)
|
||||
CHKERRQ(ierr)
|
||||
call DMDAVecRestoreArrayF90(mechanical_grid,solution_lastInc,u_lastInc,ierr)
|
||||
|
@ -568,13 +572,13 @@ subroutine formResidual(da_local,x_local, &
|
|||
! evaluate constitutive response
|
||||
call utilities_constitutiveResponse(P_current,&
|
||||
P_av,C_volAvg,devNull, &
|
||||
F,params%timeinc,params%rotation_BC)
|
||||
F,params%Delta_t,params%rotation_BC)
|
||||
call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,ierr)
|
||||
|
||||
!--------------------------------------------------------------------------------------------------
|
||||
! stress BC handling
|
||||
F_aim = F_aim - math_mul3333xx33(S, P_av - P_aim) ! S = 0.0 for no bc
|
||||
err_BC = maxval(abs(merge(P_av - P_aim,.0_pReal,params%stress_mask)))
|
||||
err_BC = maxval(abs(merge(.0_pReal,P_av - P_aim,params%stress_mask)))
|
||||
|
||||
!--------------------------------------------------------------------------------------------------
|
||||
! constructing residual
|
||||
|
|
|
@ -277,7 +277,7 @@ function grid_mechanical_spectral_basic_solution(incInfoIn) result(solution)
|
|||
solution%iterationsNeeded = totalIter
|
||||
solution%termIll = terminallyIll
|
||||
terminallyIll = .false.
|
||||
P_aim = merge(P_aim,P_av,params%stress_mask)
|
||||
P_aim = merge(P_av,P_aim,params%stress_mask)
|
||||
|
||||
end function grid_mechanical_spectral_basic_solution
|
||||
|
||||
|
@ -314,20 +314,20 @@ subroutine grid_mechanical_spectral_basic_forward(cutBack,guess,Delta_t,Delta_t_
|
|||
C_volAvgLastInc = C_volAvg
|
||||
C_minMaxAvgLastInc = C_minMaxAvg
|
||||
|
||||
F_aimDot = merge(merge((F_aim-F_aim_lastInc)/Delta_t_old,0.0_pReal,stress_BC%mask), 0.0_pReal, guess) ! estimate deformation rate for prescribed stress components
|
||||
F_aimDot = merge(merge(.0_pReal,(F_aim-F_aim_lastInc)/Delta_t_old,stress_BC%mask),.0_pReal,guess) ! estimate deformation rate for prescribed stress components
|
||||
F_aim_lastInc = F_aim
|
||||
|
||||
!-----------------------------------------------------------------------------------------------
|
||||
! calculate rate for aim
|
||||
if (deformation_BC%myType=='L') then ! calculate F_aimDot from given L and current F
|
||||
F_aimDot = F_aimDot &
|
||||
+ merge(matmul(deformation_BC%values, F_aim_lastInc),.0_pReal,deformation_BC%mask)
|
||||
+ matmul(merge(.0_pReal,deformation_BC%values,deformation_BC%mask),F_aim_lastInc)
|
||||
elseif (deformation_BC%myType=='dot_F') then ! F_aimDot is prescribed
|
||||
F_aimDot = F_aimDot &
|
||||
+ merge(deformation_BC%values,.0_pReal,deformation_BC%mask)
|
||||
+ merge(.0_pReal,deformation_BC%values,deformation_BC%mask)
|
||||
elseif (deformation_BC%myType=='F') then ! aim at end of load case is prescribed
|
||||
F_aimDot = F_aimDot &
|
||||
+ merge((deformation_BC%values - F_aim_lastInc)/t_remaining,.0_pReal,deformation_BC%mask)
|
||||
+ merge(.0_pReal,(deformation_BC%values - F_aim_lastInc)/t_remaining,deformation_BC%mask)
|
||||
endif
|
||||
|
||||
Fdot = utilities_calculateRate(guess, &
|
||||
|
@ -342,9 +342,9 @@ subroutine grid_mechanical_spectral_basic_forward(cutBack,guess,Delta_t,Delta_t_
|
|||
! update average and local deformation gradients
|
||||
F_aim = F_aim_lastInc + F_aimDot * Delta_t
|
||||
if (stress_BC%myType=='P') P_aim = P_aim &
|
||||
+ merge((stress_BC%values - P_aim)/t_remaining,0.0_pReal,stress_BC%mask)*Delta_t
|
||||
+ merge(.0_pReal,(stress_BC%values - P_aim)/t_remaining,stress_BC%mask)*Delta_t
|
||||
if (stress_BC%myType=='dot_P') P_aim = P_aim &
|
||||
+ merge(stress_BC%values,0.0_pReal,stress_BC%mask)*Delta_t
|
||||
+ merge(.0_pReal,stress_BC%values,stress_BC%mask)*Delta_t
|
||||
|
||||
F = reshape(utilities_forwardField(Delta_t,F_lastInc,Fdot, & ! estimate of F at end of time+Delta_t that matches rotated F_aim on average
|
||||
rotation_BC%rotate(F_aim,active=.true.)),[9,grid(1),grid(2),grid3])
|
||||
|
@ -354,7 +354,7 @@ subroutine grid_mechanical_spectral_basic_forward(cutBack,guess,Delta_t,Delta_t_
|
|||
! set module wide available data
|
||||
params%stress_mask = stress_BC%mask
|
||||
params%rotation_BC = rotation_BC
|
||||
params%timeinc = Delta_t
|
||||
params%Delta_t = Delta_t
|
||||
|
||||
end subroutine grid_mechanical_spectral_basic_forward
|
||||
|
||||
|
@ -389,20 +389,24 @@ subroutine grid_mechanical_spectral_basic_restartWrite
|
|||
|
||||
fileHandle = HDF5_openFile(getSolverJobName()//'_restart.hdf5','w')
|
||||
groupHandle = HDF5_addGroup(fileHandle,'solver')
|
||||
call HDF5_write(F,groupHandle,'F')
|
||||
call HDF5_write(F_lastInc,groupHandle,'F_lastInc')
|
||||
call HDF5_closeGroup(groupHandle)
|
||||
call HDF5_closeFile(fileHandle)
|
||||
|
||||
if (worldrank == 0) then
|
||||
fileHandle = HDF5_openFile(getSolverJobName()//'_restart.hdf5','a',.false.)
|
||||
groupHandle = HDF5_openGroup(fileHandle,'solver')
|
||||
call HDF5_write(P_aim,groupHandle,'P_aim',.false.)
|
||||
call HDF5_write(F_aim,groupHandle,'F_aim',.false.)
|
||||
call HDF5_write(F_aim_lastInc,groupHandle,'F_aim_lastInc',.false.)
|
||||
call HDF5_write(F_aimDot,groupHandle,'F_aimDot',.false.)
|
||||
call HDF5_write(F,groupHandle,'F')
|
||||
call HDF5_write(F_lastInc,groupHandle,'F_lastInc')
|
||||
|
||||
call HDF5_write(C_volAvg,groupHandle,'C_volAvg',.false.)
|
||||
call HDF5_write(C_volAvgLastInc,groupHandle,'C_volAvgLastInc',.false.)
|
||||
call HDF5_write(C_minMaxAvg,groupHandle,'C_minMaxAvg',.false.)
|
||||
|
||||
call HDF5_closeGroup(groupHandle)
|
||||
call HDF5_closeFile(fileHandle)
|
||||
endif
|
||||
|
||||
if (num%update_gamma) call utilities_saveReferenceStiffness
|
||||
|
||||
|
@ -492,14 +496,14 @@ subroutine formResidual(in, F, &
|
|||
! evaluate constitutive response
|
||||
call utilities_constitutiveResponse(residuum, & ! "residuum" gets field of first PK stress (to save memory)
|
||||
P_av,C_volAvg,C_minMaxAvg, &
|
||||
F,params%timeinc,params%rotation_BC)
|
||||
F,params%Delta_t,params%rotation_BC)
|
||||
call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,ierr)
|
||||
|
||||
!--------------------------------------------------------------------------------------------------
|
||||
! stress BC handling
|
||||
deltaF_aim = math_mul3333xx33(S, P_av - P_aim) ! S = 0.0 for no bc
|
||||
F_aim = F_aim - deltaF_aim
|
||||
err_BC = maxval(abs(merge(P_av - P_aim,.0_pReal,params%stress_mask)))
|
||||
err_BC = maxval(abs(merge(.0_pReal,P_av - P_aim,params%stress_mask)))
|
||||
|
||||
!--------------------------------------------------------------------------------------------------
|
||||
! updated deformation gradient using fix point algorithm of basic scheme
|
||||
|
|
|
@ -309,7 +309,7 @@ function grid_mechanical_spectral_polarisation_solution(incInfoIn) result(soluti
|
|||
solution%iterationsNeeded = totalIter
|
||||
solution%termIll = terminallyIll
|
||||
terminallyIll = .false.
|
||||
P_aim = merge(P_aim,P_av,params%stress_mask)
|
||||
P_aim = merge(P_av,P_aim,params%stress_mask)
|
||||
|
||||
end function grid_mechanical_spectral_polarisation_solution
|
||||
|
||||
|
@ -350,20 +350,20 @@ subroutine grid_mechanical_spectral_polarisation_forward(cutBack,guess,Delta_t,D
|
|||
C_volAvgLastInc = C_volAvg
|
||||
C_minMaxAvgLastInc = C_minMaxAvg
|
||||
|
||||
F_aimDot = merge(merge((F_aim-F_aim_lastInc)/Delta_t_old,0.0_pReal,stress_BC%mask), 0.0_pReal, guess) ! estimate deformation rate for prescribed stress components
|
||||
F_aimDot = merge(merge(.0_pReal,(F_aim-F_aim_lastInc)/Delta_t_old,stress_BC%mask),.0_pReal,guess) ! estimate deformation rate for prescribed stress components
|
||||
F_aim_lastInc = F_aim
|
||||
|
||||
!-----------------------------------------------------------------------------------------------
|
||||
! calculate rate for aim
|
||||
if (deformation_BC%myType=='L') then ! calculate F_aimDot from given L and current F
|
||||
F_aimDot = F_aimDot &
|
||||
+ merge(matmul(deformation_BC%values, F_aim_lastInc),.0_pReal,deformation_BC%mask)
|
||||
+ matmul(merge(.0_pReal,deformation_BC%values,deformation_BC%mask),F_aim_lastInc)
|
||||
elseif (deformation_BC%myType=='dot_F') then ! F_aimDot is prescribed
|
||||
F_aimDot = F_aimDot &
|
||||
+ merge(deformation_BC%values,.0_pReal,deformation_BC%mask)
|
||||
+ merge(.0_pReal,deformation_BC%values,deformation_BC%mask)
|
||||
elseif (deformation_BC%myType=='F') then ! aim at end of load case is prescribed
|
||||
F_aimDot = F_aimDot &
|
||||
+ merge((deformation_BC%values - F_aim_lastInc)/t_remaining,.0_pReal,deformation_BC%mask)
|
||||
+ merge(.0_pReal,(deformation_BC%values - F_aim_lastInc)/t_remaining,deformation_BC%mask)
|
||||
endif
|
||||
|
||||
Fdot = utilities_calculateRate(guess, &
|
||||
|
@ -382,9 +382,9 @@ subroutine grid_mechanical_spectral_polarisation_forward(cutBack,guess,Delta_t,D
|
|||
! update average and local deformation gradients
|
||||
F_aim = F_aim_lastInc + F_aimDot * Delta_t
|
||||
if(stress_BC%myType=='P') P_aim = P_aim &
|
||||
+ merge((stress_BC%values - P_aim)/t_remaining,0.0_pReal,stress_BC%mask)*Delta_t
|
||||
+ merge(.0_pReal,(stress_BC%values - P_aim)/t_remaining,stress_BC%mask)*Delta_t
|
||||
if(stress_BC%myType=='dot_P') P_aim = P_aim &
|
||||
+ merge(stress_BC%values,0.0_pReal,stress_BC%mask)*Delta_t
|
||||
+ merge(.0_pReal,stress_BC%values,stress_BC%mask)*Delta_t
|
||||
|
||||
F = reshape(utilities_forwardField(Delta_t,F_lastInc,Fdot, & ! estimate of F at end of time+Delta_t that matches rotated F_aim on average
|
||||
rotation_BC%rotate(F_aim,active=.true.)),&
|
||||
|
@ -408,7 +408,7 @@ subroutine grid_mechanical_spectral_polarisation_forward(cutBack,guess,Delta_t,D
|
|||
! set module wide available data
|
||||
params%stress_mask = stress_BC%mask
|
||||
params%rotation_BC = rotation_BC
|
||||
params%timeinc = Delta_t
|
||||
params%Delta_t = Delta_t
|
||||
|
||||
end subroutine grid_mechanical_spectral_polarisation_forward
|
||||
|
||||
|
@ -445,22 +445,26 @@ subroutine grid_mechanical_spectral_polarisation_restartWrite
|
|||
|
||||
fileHandle = HDF5_openFile(getSolverJobName()//'_restart.hdf5','w')
|
||||
groupHandle = HDF5_addGroup(fileHandle,'solver')
|
||||
|
||||
call HDF5_write(F_aim,groupHandle,'P_aim',.false.)
|
||||
call HDF5_write(F_aim,groupHandle,'F_aim',.false.)
|
||||
call HDF5_write(F_aim_lastInc,groupHandle,'F_aim_lastInc',.false.)
|
||||
call HDF5_write(F_aimDot,groupHandle,'F_aimDot',.false.)
|
||||
call HDF5_write(F,groupHandle,'F')
|
||||
call HDF5_write(F_lastInc,groupHandle,'F_lastInc')
|
||||
call HDF5_write(F_tau,groupHandle,'F_tau')
|
||||
call HDF5_write(F_tau_lastInc,groupHandle,'F_tau_lastInc')
|
||||
|
||||
call HDF5_write(C_volAvg,groupHandle,'C_volAvg',.false.)
|
||||
call HDF5_write(C_volAvgLastInc,groupHandle,'C_volAvgLastInc',.false.)
|
||||
|
||||
call HDF5_closeGroup(groupHandle)
|
||||
call HDF5_closeFile(fileHandle)
|
||||
|
||||
if (worldrank == 0) then
|
||||
fileHandle = HDF5_openFile(getSolverJobName()//'_restart.hdf5','a',.false.)
|
||||
groupHandle = HDF5_openGroup(fileHandle,'solver')
|
||||
call HDF5_write(F_aim,groupHandle,'P_aim',.false.)
|
||||
call HDF5_write(F_aim,groupHandle,'F_aim',.false.)
|
||||
call HDF5_write(F_aim_lastInc,groupHandle,'F_aim_lastInc',.false.)
|
||||
call HDF5_write(F_aimDot,groupHandle,'F_aimDot',.false.)
|
||||
call HDF5_write(C_volAvg,groupHandle,'C_volAvg',.false.)
|
||||
call HDF5_write(C_volAvgLastInc,groupHandle,'C_volAvgLastInc',.false.)
|
||||
call HDF5_closeGroup(groupHandle)
|
||||
call HDF5_closeFile(fileHandle)
|
||||
endif
|
||||
|
||||
if(num%update_gamma) call utilities_saveReferenceStiffness
|
||||
|
||||
call DMDAVecRestoreArrayF90(da,solution_vec,FandF_tau,ierr); CHKERRQ(ierr)
|
||||
|
@ -592,14 +596,14 @@ subroutine formResidual(in, FandF_tau, &
|
|||
! evaluate constitutive response
|
||||
call utilities_constitutiveResponse(residual_F, & ! "residuum" gets field of first PK stress (to save memory)
|
||||
P_av,C_volAvg,C_minMaxAvg, &
|
||||
F - residual_F_tau/num%beta,params%timeinc,params%rotation_BC)
|
||||
F - residual_F_tau/num%beta,params%Delta_t,params%rotation_BC)
|
||||
call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,ierr)
|
||||
|
||||
!--------------------------------------------------------------------------------------------------
|
||||
! stress BC handling
|
||||
F_aim = F_aim - math_mul3333xx33(S, P_av - P_aim) ! S = 0.0 for no bc
|
||||
err_BC = maxval(abs(merge(P_av-P_aim, &
|
||||
math_mul3333xx33(C_scale,F_aim-params%rotation_BC%rotate(F_av)),&
|
||||
err_BC = maxval(abs(merge(math_mul3333xx33(C_scale,F_aim-params%rotation_BC%rotate(F_av)), &
|
||||
P_av-P_aim, &
|
||||
params%stress_mask)))
|
||||
! calculate divergence
|
||||
tensorField_real = 0.0_pReal
|
||||
|
|
|
@ -155,10 +155,10 @@ end subroutine grid_thermal_spectral_init
|
|||
!--------------------------------------------------------------------------------------------------
|
||||
!> @brief solution for the spectral thermal scheme with internal iterations
|
||||
!--------------------------------------------------------------------------------------------------
|
||||
function grid_thermal_spectral_solution(timeinc) result(solution)
|
||||
function grid_thermal_spectral_solution(Delta_t) result(solution)
|
||||
|
||||
real(pReal), intent(in) :: &
|
||||
timeinc !< increment in time for current solution
|
||||
Delta_t !< increment in time for current solution
|
||||
integer :: i, j, k, ce
|
||||
type(tSolutionState) :: solution
|
||||
PetscInt :: devNull
|
||||
|
@ -171,7 +171,7 @@ function grid_thermal_spectral_solution(timeinc) result(solution)
|
|||
|
||||
!--------------------------------------------------------------------------------------------------
|
||||
! set module-wide available data
|
||||
params%timeinc = timeinc
|
||||
params%Delta_t = Delta_t
|
||||
|
||||
call SNESSolve(thermal_snes,PETSC_NULL_VEC,solution_vec,ierr); CHKERRQ(ierr)
|
||||
call SNESGetConvergedReason(thermal_snes,reason,ierr); CHKERRQ(ierr)
|
||||
|
@ -195,7 +195,7 @@ function grid_thermal_spectral_solution(timeinc) result(solution)
|
|||
ce = 0
|
||||
do k = 1, grid3; do j = 1, grid(2); do i = 1,grid(1)
|
||||
ce = ce + 1
|
||||
call homogenization_thermal_setField(T_current(i,j,k),(T_current(i,j,k)-T_lastInc(i,j,k))/params%timeinc,ce)
|
||||
call homogenization_thermal_setField(T_current(i,j,k),(T_current(i,j,k)-T_lastInc(i,j,k))/params%Delta_t,ce)
|
||||
enddo; enddo; enddo
|
||||
|
||||
call VecMin(solution_vec,devNull,T_min,ierr); CHKERRQ(ierr)
|
||||
|
@ -233,7 +233,7 @@ subroutine grid_thermal_spectral_forward(cutBack)
|
|||
ce = 0
|
||||
do k = 1, grid3; do j = 1, grid(2); do i = 1,grid(1)
|
||||
ce = ce + 1
|
||||
call homogenization_thermal_setField(T_current(i,j,k),(T_current(i,j,k)-T_lastInc(i,j,k))/params%timeinc,ce)
|
||||
call homogenization_thermal_setField(T_current(i,j,k),(T_current(i,j,k)-T_lastInc(i,j,k))/params%Delta_t,ce)
|
||||
enddo; enddo; enddo
|
||||
else
|
||||
T_lastInc = T_current
|
||||
|
@ -279,7 +279,7 @@ subroutine formResidual(in,x_scal,f_scal,dummy,ierr)
|
|||
ce = 0
|
||||
do k = 1, grid3; do j = 1, grid(2); do i = 1,grid(1)
|
||||
ce = ce + 1
|
||||
scalarField_real(i,j,k) = params%timeinc*(scalarField_real(i,j,k) + homogenization_f_T(ce)) &
|
||||
scalarField_real(i,j,k) = params%Delta_t*(scalarField_real(i,j,k) + homogenization_f_T(ce)) &
|
||||
+ homogenization_mu_T(ce) * (T_lastInc(i,j,k) - T_current(i,j,k)) &
|
||||
+ mu_ref*T_current(i,j,k)
|
||||
enddo; enddo; enddo
|
||||
|
@ -287,7 +287,7 @@ subroutine formResidual(in,x_scal,f_scal,dummy,ierr)
|
|||
!--------------------------------------------------------------------------------------------------
|
||||
! convolution of temperature field with green operator
|
||||
call utilities_FFTscalarForward
|
||||
call utilities_fourierGreenConvolution(K_ref, mu_ref, params%timeinc)
|
||||
call utilities_fourierGreenConvolution(K_ref, mu_ref, params%Delta_t)
|
||||
call utilities_FFTscalarBackward
|
||||
|
||||
!--------------------------------------------------------------------------------------------------
|
||||
|
|
|
@ -88,7 +88,7 @@ module spectral_utilities
|
|||
|
||||
type, public :: tBoundaryCondition !< set of parameters defining a boundary condition
|
||||
real(pReal), dimension(3,3) :: values = 0.0_pReal
|
||||
logical, dimension(3,3) :: mask = .false.
|
||||
logical, dimension(3,3) :: mask = .true.
|
||||
character(len=:), allocatable :: myType
|
||||
end type tBoundaryCondition
|
||||
|
||||
|
@ -96,7 +96,7 @@ module spectral_utilities
|
|||
real(pReal), dimension(3,3) :: stress_BC
|
||||
logical, dimension(3,3) :: stress_mask
|
||||
type(rotation) :: rotation_BC
|
||||
real(pReal) :: timeinc
|
||||
real(pReal) :: Delta_t
|
||||
end type tSolutionParams
|
||||
|
||||
type :: tNumerics
|
||||
|
@ -392,8 +392,8 @@ subroutine utilities_updateGamma(C)
|
|||
xiDyad_cmplx(l,m) = conjg(-xi1st(l,i,j,k-grid3Offset))*xi1st(m,i,j,k-grid3Offset)
|
||||
forall(l = 1:3, m = 1:3) &
|
||||
temp33_complex(l,m) = sum(cmplx(C_ref(l,1:3,m,1:3),0.0_pReal)*xiDyad_cmplx)
|
||||
A(1:3,1:3) = real(temp33_complex); A(4:6,4:6) = real(temp33_complex)
|
||||
A(1:3,4:6) = aimag(temp33_complex); A(4:6,1:3) = -aimag(temp33_complex)
|
||||
A(1:3,1:3) = temp33_complex%re; A(4:6,4:6) = temp33_complex%re
|
||||
A(1:3,4:6) = temp33_complex%im; A(4:6,1:3) = -temp33_complex%im
|
||||
if (abs(math_det33(A(1:3,1:3))) > 1e-16) then
|
||||
call math_invert(A_inv, err, A)
|
||||
temp33_complex = cmplx(A_inv(1:3,1:3),A_inv(1:3,4:6),pReal)
|
||||
|
@ -509,8 +509,8 @@ subroutine utilities_fourierGammaConvolution(fieldAim)
|
|||
xiDyad_cmplx(l,m) = conjg(-xi1st(l,i,j,k))*xi1st(m,i,j,k)
|
||||
forall(l = 1:3, m = 1:3) &
|
||||
temp33_complex(l,m) = sum(cmplx(C_ref(l,1:3,m,1:3),0.0_pReal)*xiDyad_cmplx)
|
||||
A(1:3,1:3) = real(temp33_complex); A(4:6,4:6) = real(temp33_complex)
|
||||
A(1:3,4:6) = aimag(temp33_complex); A(4:6,1:3) = -aimag(temp33_complex)
|
||||
A(1:3,1:3) = temp33_complex%re; A(4:6,4:6) = temp33_complex%re
|
||||
A(1:3,4:6) = temp33_complex%im; A(4:6,1:3) = -temp33_complex%im
|
||||
if (abs(math_det33(A(1:3,1:3))) > 1e-16) then
|
||||
call math_invert(A_inv, err, A)
|
||||
temp33_complex = cmplx(A_inv(1:3,1:3),A_inv(1:3,4:6),pReal)
|
||||
|
@ -630,7 +630,7 @@ real(pReal) function utilities_curlRMS()
|
|||
-tensorField_fourier(l,1,i,j,k)*xi1st(2,i,j,k)*rescaledGeom(2))
|
||||
enddo
|
||||
utilities_curlRMS = utilities_curlRMS &
|
||||
+2.0_pReal*sum(real(curl_fourier)**2.0_pReal+aimag(curl_fourier)**2.0_pReal)! Has somewhere a conj. complex counterpart. Therefore count it twice.
|
||||
+2.0_pReal*sum(curl_fourier%re**2.0_pReal+curl_fourier%im**2.0_pReal) ! Has somewhere a conj. complex counterpart. Therefore count it twice.
|
||||
enddo
|
||||
do l = 1, 3
|
||||
curl_fourier = (+tensorField_fourier(l,3,1,j,k)*xi1st(2,1,j,k)*rescaledGeom(2) &
|
||||
|
@ -641,7 +641,7 @@ real(pReal) function utilities_curlRMS()
|
|||
-tensorField_fourier(l,1,1,j,k)*xi1st(2,1,j,k)*rescaledGeom(2))
|
||||
enddo
|
||||
utilities_curlRMS = utilities_curlRMS &
|
||||
+ sum(real(curl_fourier)**2.0_pReal + aimag(curl_fourier)**2.0_pReal) ! this layer (DC) does not have a conjugate complex counterpart (if grid(1) /= 1)
|
||||
+ sum(curl_fourier%re**2.0_pReal + curl_fourier%im**2.0_pReal) ! this layer (DC) does not have a conjugate complex counterpart (if grid(1) /= 1)
|
||||
do l = 1, 3
|
||||
curl_fourier = (+tensorField_fourier(l,3,grid1Red,j,k)*xi1st(2,grid1Red,j,k)*rescaledGeom(2) &
|
||||
-tensorField_fourier(l,2,grid1Red,j,k)*xi1st(3,grid1Red,j,k)*rescaledGeom(3))
|
||||
|
@ -651,7 +651,7 @@ real(pReal) function utilities_curlRMS()
|
|||
-tensorField_fourier(l,1,grid1Red,j,k)*xi1st(2,grid1Red,j,k)*rescaledGeom(2))
|
||||
enddo
|
||||
utilities_curlRMS = utilities_curlRMS &
|
||||
+ sum(real(curl_fourier)**2.0_pReal + aimag(curl_fourier)**2.0_pReal) ! this layer (Nyquist) does not have a conjugate complex counterpart (if grid(1) /= 1)
|
||||
+ sum(curl_fourier%re**2.0_pReal + curl_fourier%im**2.0_pReal) ! this layer (Nyquist) does not have a conjugate complex counterpart (if grid(1) /= 1)
|
||||
enddo; enddo
|
||||
|
||||
call MPI_Allreduce(MPI_IN_PLACE,utilities_curlRMS,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr)
|
||||
|
@ -684,7 +684,7 @@ function utilities_maskedCompliance(rot_BC,mask_stress,C)
|
|||
logical :: errmatinv
|
||||
character(len=pStringLen):: formatString
|
||||
|
||||
mask_stressVector = reshape(transpose(mask_stress), [9])
|
||||
mask_stressVector = .not. reshape(transpose(mask_stress), [9])
|
||||
size_reduced = count(mask_stressVector)
|
||||
if(size_reduced > 0) then
|
||||
temp99_real = math_3333to99(rot_BC%rotate(C))
|
||||
|
@ -791,16 +791,16 @@ end subroutine utilities_fourierTensorDivergence
|
|||
|
||||
|
||||
!--------------------------------------------------------------------------------------------------
|
||||
!> @brief calculate constitutive response from homogenization_F0 to F during timeinc
|
||||
!> @brief calculate constitutive response from homogenization_F0 to F during Delta_t
|
||||
!--------------------------------------------------------------------------------------------------
|
||||
subroutine utilities_constitutiveResponse(P,P_av,C_volAvg,C_minmaxAvg,&
|
||||
F,timeinc,rotation_BC)
|
||||
F,Delta_t,rotation_BC)
|
||||
|
||||
real(pReal), intent(out), dimension(3,3,3,3) :: C_volAvg, C_minmaxAvg !< average stiffness
|
||||
real(pReal), intent(out), dimension(3,3) :: P_av !< average PK stress
|
||||
real(pReal), intent(out), dimension(3,3,grid(1),grid(2),grid3) :: P !< PK stress
|
||||
real(pReal), intent(in), dimension(3,3,grid(1),grid(2),grid3) :: F !< deformation gradient target
|
||||
real(pReal), intent(in) :: timeinc !< loading time
|
||||
real(pReal), intent(in) :: Delta_t !< loading time
|
||||
type(rotation), intent(in), optional :: rotation_BC !< rotation of load frame
|
||||
|
||||
|
||||
|
@ -815,11 +815,11 @@ subroutine utilities_constitutiveResponse(P,P_av,C_volAvg,C_minmaxAvg,&
|
|||
|
||||
homogenization_F = reshape(F,[3,3,product(grid(1:2))*grid3]) ! set materialpoint target F to estimated field
|
||||
|
||||
call homogenization_mechanical_response(timeinc,[1,1],[1,product(grid(1:2))*grid3]) ! calculate P field
|
||||
call homogenization_mechanical_response(Delta_t,[1,1],[1,product(grid(1:2))*grid3]) ! calculate P field
|
||||
if (.not. terminallyIll) &
|
||||
call homogenization_thermal_response(timeinc,[1,1],[1,product(grid(1:2))*grid3])
|
||||
call homogenization_thermal_response(Delta_t,[1,1],[1,product(grid(1:2))*grid3])
|
||||
if (.not. terminallyIll) &
|
||||
call homogenization_mechanical_response2(timeinc,[1,1],[1,product(grid(1:2))*grid3])
|
||||
call homogenization_mechanical_response2(Delta_t,[1,1],[1,product(grid(1:2))*grid3])
|
||||
|
||||
P = reshape(homogenization_P, [3,3,grid(1),grid(2),grid3])
|
||||
P_av = sum(sum(sum(P,dim=5),dim=4),dim=3) * wgt
|
||||
|
@ -870,14 +870,14 @@ end subroutine utilities_constitutiveResponse
|
|||
|
||||
|
||||
!--------------------------------------------------------------------------------------------------
|
||||
!> @brief calculates forward rate, either guessing or just add delta/timeinc
|
||||
!> @brief calculates forward rate, either guessing or just add delta/Delta_t
|
||||
!--------------------------------------------------------------------------------------------------
|
||||
pure function utilities_calculateRate(heterogeneous,field0,field,dt,avRate)
|
||||
|
||||
real(pReal), intent(in), dimension(3,3) :: &
|
||||
avRate !< homogeneous addon
|
||||
real(pReal), intent(in) :: &
|
||||
dt !< timeinc between field0 and field
|
||||
dt !< Delta_t between field0 and field
|
||||
logical, intent(in) :: &
|
||||
heterogeneous !< calculate field of rates
|
||||
real(pReal), intent(in), dimension(3,3,grid(1),grid(2),grid3) :: &
|
||||
|
@ -899,10 +899,10 @@ end function utilities_calculateRate
|
|||
!> @brief forwards a field with a pointwise given rate, if aim is given,
|
||||
!> ensures that the average matches the aim
|
||||
!--------------------------------------------------------------------------------------------------
|
||||
function utilities_forwardField(timeinc,field_lastInc,rate,aim)
|
||||
function utilities_forwardField(Delta_t,field_lastInc,rate,aim)
|
||||
|
||||
real(pReal), intent(in) :: &
|
||||
timeinc !< timeinc of current step
|
||||
Delta_t !< Delta_t of current step
|
||||
real(pReal), intent(in), dimension(3,3,grid(1),grid(2),grid3) :: &
|
||||
field_lastInc, & !< initial field
|
||||
rate !< rate by which to forward
|
||||
|
@ -913,7 +913,7 @@ function utilities_forwardField(timeinc,field_lastInc,rate,aim)
|
|||
real(pReal), dimension(3,3) :: fieldDiff !< <a + adot*t> - aim
|
||||
PetscErrorCode :: ierr
|
||||
|
||||
utilities_forwardField = field_lastInc + rate*timeinc
|
||||
utilities_forwardField = field_lastInc + rate*Delta_t
|
||||
if (present(aim)) then !< correct to match average
|
||||
fieldDiff = sum(sum(sum(utilities_forwardField,dim=5),dim=4),dim=3)*wgt
|
||||
call MPI_Allreduce(MPI_IN_PLACE,fieldDiff,9,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr)
|
||||
|
|
|
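
utilities_forwardField advances the field by rate*Delta_t and, when an aim is given, subtracts the deviation of the average from that aim. A serial standalone sketch with made-up shapes (the real routine works on a (3,3,grid(1),grid(2),grid3) field and reduces the average over MPI):

  ! standalone sketch, not DAMASK code
  program forward_field
    implicit none
    integer, parameter :: pReal = selected_real_kind(15,307)
    integer, parameter :: n = 4
    real(pReal) :: field_lastInc(3,3,n), rate(3,3,n), field(3,3,n)
    real(pReal) :: aim(3,3), fieldDiff(3,3), Delta_t

    call random_number(field_lastInc)
    call random_number(rate)
    aim = 0.0_pReal
    Delta_t = 0.1_pReal

    field     = field_lastInc + rate*Delta_t              ! forward by the rate
    fieldDiff = sum(field,dim=3)/real(n,pReal) - aim      ! <field> - aim
    field     = field - spread(fieldDiff,3,n)             ! shift so that <field> = aim
    print *, maxval(abs(sum(field,dim=3)/real(n,pReal) - aim))
  end program forward_field
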
@ -766,25 +766,21 @@ end function relaxationVector
|
|||
!--------------------------------------------------------------------------------------------------
|
||||
!> @brief identify the normal of an interface
|
||||
!--------------------------------------------------------------------------------------------------
|
||||
pure function interfaceNormal(intFace,ho,en)
|
||||
|
||||
real(pReal), dimension(3) :: interfaceNormal
|
||||
pure function interfaceNormal(intFace,ho,en) result(n)
|
||||
|
||||
real(pReal), dimension(3) :: n
|
||||
integer, dimension(4), intent(in) :: intFace !< interface ID in 4D array (normal and position)
|
||||
integer, intent(in) :: &
|
||||
ho, &
|
||||
en
|
||||
|
||||
integer :: nPos
|
||||
|
||||
associate (dst => dependentState(ho))
|
||||
|
||||
!--------------------------------------------------------------------------------------------------
|
||||
! get the normal of the interface, identified from the value of intFace(1)
|
||||
interfaceNormal = 0.0_pReal
|
||||
nPos = abs(intFace(1)) ! identify the position of the interface in global state array
|
||||
interfaceNormal(nPos) = real(intFace(1)/abs(intFace(1)),pReal) ! get the normal vector w.r.t. cluster axis
|
||||
n = 0.0_pReal
|
||||
n(abs(intFace(1))) = real(intFace(1)/abs(intFace(1)),pReal) ! get the normal vector w.r.t. cluster axis
|
||||
|
||||
interfaceNormal = matmul(dst%orientation(1:3,1:3,en),interfaceNormal) ! map the normal vector into sample coordinate system (basis)
|
||||
n = matmul(dst%orientation(1:3,1:3,en),n) ! map the normal vector into sample coordinate system (basis)
|
||||
|
||||
end associate
|
||||
|
||||
|
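
In interfaceNormal, the first entry of the 4D interface ID encodes both the axis (its magnitude) and the orientation (its sign) of the normal. A standalone sketch (omits the rotation into the sample frame; values are made up):

  ! standalone sketch, not DAMASK code
  program interface_normal_demo
    implicit none
    integer, parameter :: pReal = selected_real_kind(15,307)
    integer     :: intFace(4)
    real(pReal) :: n(3)

    intFace = [-2, 1, 1, 1]                                        ! normal along -e2 of the cluster
    n = 0.0_pReal
    n(abs(intFace(1))) = real(intFace(1)/abs(intFace(1)), pReal)   ! +/-1 in the component picked by |intFace(1)|
    print '(3f6.1)', n                                             !  0.0  -1.0   0.0
  end program interface_normal_demo
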
@ -794,22 +790,18 @@ end function interfaceNormal
|
|||
!--------------------------------------------------------------------------------------------------
|
||||
!> @brief collect six faces of a grain in 4D (normal and position)
|
||||
!--------------------------------------------------------------------------------------------------
|
||||
pure function getInterface(iFace,iGrain3)
|
||||
|
||||
integer, dimension(4) :: getInterface
|
||||
pure function getInterface(iFace,iGrain3) result(i)
|
||||
|
||||
integer, dimension(4) :: i
|
||||
integer, dimension(3), intent(in) :: iGrain3 !< grain ID in 3D array
|
||||
integer, intent(in) :: iFace !< face index (1..6) mapped like (-e1,-e2,-e3,+e1,+e2,+e3) or iDir = (-1,-2,-3,1,2,3)
|
||||
|
||||
integer :: iDir !< direction of interface normal
|
||||
|
||||
iDir = (int(real(iFace-1,pReal)/2.0_pReal)+1)*(-1)**iFace
|
||||
getInterface(1) = iDir
|
||||
|
||||
!--------------------------------------------------------------------------------------------------
|
||||
! identify the interface position by the direction of its normal
|
||||
getInterface(2:4) = iGrain3
|
||||
if (iDir < 0) getInterface(1-iDir) = getInterface(1-iDir)-1 ! to have a correlation with coordinate/position in real space
|
||||
iDir = (int(real(iFace-1,pReal)/2.0_pReal)+1)*(-1)**iFace
|
||||
i = [iDir,iGrain3]
|
||||
if (iDir < 0) i(1-iDir) = i(1-iDir)-1 ! to have a correlation with coordinate/position in real space
|
||||
|
||||
end function getInterface
|
||||
|
||||
|
|
|
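
The face index iFace = 1..6 is turned into a signed direction by the expression iDir = (int(real(iFace-1)/2.0)+1)*(-1)**iFace used in getInterface. A standalone sketch that just evaluates this mapping:

  ! standalone sketch, not DAMASK code
  program face_direction_demo
    implicit none
    integer, parameter :: pReal = selected_real_kind(15,307)
    integer :: iFace, iDir

    do iFace = 1, 6
      iDir = (int(real(iFace-1,pReal)/2.0_pReal)+1)*(-1)**iFace
      print '(a,i2,a,i3)', 'iFace =', iFace, ' -> iDir =', iDir
    enddo
  end program face_direction_demo
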
@ -1930,6 +1930,7 @@ subroutine buildTransformationSystem(Q,S,Ntrans,cOverA,a_fcc,a_bcc)
|
|||
-1,-2,-1, -1, 1,-1, &
|
||||
-1, 1, 2, -1, 1,-1 &
|
||||
],pReal),shape(FCCTOHEX_SYSTEMTRANS))
|
||||
|
||||
real(pReal), dimension(4,fcc_Ntrans), parameter :: &
|
||||
FCCTOBCC_SYSTEMTRANS = reshape([&
|
||||
0.0, 1.0, 0.0, 10.26, & ! Pitsch OR (Ma & Hartmaier 2014, Table 3)
|
||||
|
@ -1978,7 +1979,7 @@ subroutine buildTransformationSystem(Q,S,Ntrans,cOverA,a_fcc,a_bcc)
|
|||
0.0, 0.0, 1.0, 45.0 &
|
||||
],shape(FCCTOBCC_BAINROT))
|
||||
|
||||
if (a_bcc > 0.0_pReal .and. a_fcc > 0.0_pReal .and. dEq0(cOverA)) then ! fcc -> bcc transformation
|
||||
if (a_bcc > 0.0_pReal .and. a_fcc > 0.0_pReal .and. dEq0(cOverA)) then ! fcc -> bcc
|
||||
do i = 1,sum(Ntrans)
|
||||
call R%fromAxisAngle(FCCTOBCC_SYSTEMTRANS(:,i),degrees=.true.,P=1)
|
||||
call B%fromAxisAngle(FCCTOBCC_BAINROT(:,i), degrees=.true.,P=1)
|
||||
|
@ -1992,7 +1993,7 @@ subroutine buildTransformationSystem(Q,S,Ntrans,cOverA,a_fcc,a_bcc)
|
|||
Q(1:3,1:3,i) = matmul(R%asMatrix(),B%asMatrix())
|
||||
S(1:3,1:3,i) = matmul(R%asMatrix(),U) - MATH_I3
|
||||
enddo
|
||||
elseif (cOverA > 0.0_pReal .and. dEq0(a_bcc)) then ! fcc -> hex transformation
|
||||
elseif (cOverA > 0.0_pReal .and. dEq0(a_bcc)) then ! fcc -> hex
|
||||
ss = MATH_I3
|
||||
sd = MATH_I3
|
||||
ss(1,3) = sqrt(2.0_pReal)/4.0_pReal
|
||||
|
@ -2062,7 +2063,7 @@ function getlabels(active,potential,system) result(labels)
|
|||
enddo normal
|
||||
label(i:i) = ')'
|
||||
|
||||
labels(s) = label
|
||||
labels(a) = label
|
||||
|
||||
enddo activeSystems
|
||||
enddo activeFamilies
|
||||
|
|
|
@ -67,8 +67,8 @@ module phase
|
|||
interface
|
||||
|
||||
! == cleaned:begin =================================================================================
|
||||
module subroutine mechanical_init(materials,phases)
|
||||
class(tNode), pointer :: materials,phases
|
||||
module subroutine mechanical_init(phases)
|
||||
class(tNode), pointer :: phases
|
||||
end subroutine mechanical_init
|
||||
|
||||
module subroutine damage_init
|
||||
|
@ -386,7 +386,7 @@ subroutine phase_init
|
|||
phase_O(ph)%data = phase_O_0(ph)%data
|
||||
enddo
|
||||
|
||||
call mechanical_init(materials,phases)
|
||||
call mechanical_init(phases)
|
||||
call damage_init
|
||||
call thermal_init(phases)
|
||||
|
||||
|
@ -482,7 +482,6 @@ end subroutine phase_results
|
|||
subroutine crystallite_init()
|
||||
|
||||
integer :: &
|
||||
ph, &
|
||||
ce, &
|
||||
co, & !< counter in integration point component loop
|
||||
ip, & !< counter in integration point loop
|
||||
|
|
|
@@ -386,7 +386,7 @@ module function phase_K_phi(co,ce) result(K)
   real(pReal), dimension(3,3) :: K
   real(pReal), parameter :: l = 1.0_pReal

-  K = crystallite_push33ToRef(co,ce,param(material_phaseID(co,ce))%D) \
+  K = crystallite_push33ToRef(co,ce,param(material_phaseID(co,ce))%D) &
     * l**2.0_pReal

 end function phase_K_phi
@ -199,10 +199,9 @@ contains
|
|||
!> @brief Initialize mechanical field related constitutive models
|
||||
!> @details Initialize elasticity, plasticity and stiffness degradation models.
|
||||
!--------------------------------------------------------------------------------------------------
|
||||
module subroutine mechanical_init(materials,phases)
|
||||
module subroutine mechanical_init(phases)
|
||||
|
||||
class(tNode), pointer :: &
|
||||
materials, &
|
||||
phases
|
||||
|
||||
integer :: &
|
||||
|
|
|
@@ -146,7 +146,7 @@ module function elastic_nu(ph) result(nu)
   integer, intent(in) :: ph


-  nu = param(ph)%mu
+  nu = param(ph)%nu

 end function elastic_nu

@ -44,20 +44,22 @@ submodule(phase:plastic) dislotungsten
|
|||
output
|
||||
logical :: &
|
||||
dipoleFormation !< flag indicating consideration of dipole formation
|
||||
end type !< container type for internal constitutive parameters
|
||||
character(len=:), allocatable, dimension(:) :: &
|
||||
systems_sl
|
||||
end type tParameters !< container type for internal constitutive parameters
|
||||
|
||||
type :: tDisloTungstenState
|
||||
type :: tDislotungstenState
|
||||
real(pReal), dimension(:,:), pointer :: &
|
||||
rho_mob, &
|
||||
rho_dip, &
|
||||
gamma_sl
|
||||
end type tDisloTungstenState
|
||||
end type tDislotungstenState
|
||||
|
||||
type :: tDisloTungstendependentState
|
||||
type :: tDislotungstenDependentState
|
||||
real(pReal), dimension(:,:), allocatable :: &
|
||||
Lambda_sl, &
|
||||
tau_pass
|
||||
end type tDisloTungstendependentState
|
||||
end type tDislotungstenDependentState
|
||||
|
||||
!--------------------------------------------------------------------------------------------------
|
||||
! containers for parameters and state
|
||||
|
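
The new systems_sl component is a deferred-length allocatable character array, so all slip-system labels share one length that is fixed on assignment. A standalone sketch with made-up label strings:

  ! standalone sketch, not DAMASK code
  program deferred_labels
    implicit none
    character(len=:), allocatable, dimension(:) :: systems_sl

    systems_sl = [character(len=16) :: '(0 1 1)[1 -1 1]', '(0 1 1)[1 1 -1]']
    print '(a)', systems_sl
    print *, 'common length:', len(systems_sl), ' entries:', size(systems_sl)
  end program deferred_labels
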
@ -65,7 +67,7 @@ submodule(phase:plastic) dislotungsten
|
|||
type(tDisloTungstenState), allocatable, dimension(:) :: &
|
||||
dotState, &
|
||||
state
|
||||
type(tDisloTungstendependentState), allocatable, dimension(:) :: dependentState
|
||||
type(tDisloTungstenDependentState), allocatable, dimension(:) :: dependentState
|
||||
|
||||
contains
|
||||
|
||||
|
@ -78,7 +80,7 @@ module function plastic_dislotungsten_init() result(myPlasticity)
|
|||
|
||||
logical, dimension(:), allocatable :: myPlasticity
|
||||
integer :: &
|
||||
ph, i, &
|
||||
ph, &
|
||||
Nmembers, &
|
||||
sizeState, sizeDotState, &
|
||||
startIndex, endIndex
|
||||
|
@ -136,6 +138,7 @@ module function plastic_dislotungsten_init() result(myPlasticity)
|
|||
N_sl = pl%get_as1dInt('N_sl',defaultVal=emptyIntArray)
|
||||
prm%sum_N_sl = sum(abs(N_sl))
|
||||
slipActive: if (prm%sum_N_sl > 0) then
|
||||
prm%systems_sl = lattice_labels_slip(N_sl,phase_lattice(ph))
|
||||
prm%P_sl = lattice_SchmidMatrix_slip(N_sl,phase_lattice(ph),phase_cOverA(ph))
|
||||
|
||||
if (phase_lattice(ph) == 'cI') then
|
||||
|
@ -394,28 +397,34 @@ module subroutine plastic_dislotungsten_results(ph,group)
|
|||
integer, intent(in) :: ph
|
||||
character(len=*), intent(in) :: group
|
||||
|
||||
integer :: o
|
||||
integer :: ou
|
||||
|
||||
|
||||
associate(prm => param(ph), stt => state(ph), dst => dependentState(ph))
|
||||
outputsLoop: do o = 1,size(prm%output)
|
||||
select case(trim(prm%output(o)))
|
||||
|
||||
do ou = 1,size(prm%output)
|
||||
|
||||
select case(trim(prm%output(ou)))
|
||||
|
||||
case('rho_mob')
|
||||
if(prm%sum_N_sl>0) call results_writeDataset(stt%rho_mob,group,trim(prm%output(o)), &
|
||||
'mobile dislocation density','1/m²')
|
||||
call results_writeDataset(stt%rho_mob,group,trim(prm%output(ou)), &
|
||||
'mobile dislocation density','1/m²',prm%systems_sl)
|
||||
case('rho_dip')
|
||||
if(prm%sum_N_sl>0) call results_writeDataset(stt%rho_dip,group,trim(prm%output(o)), &
|
||||
'dislocation dipole density','1/m²')
|
||||
call results_writeDataset(stt%rho_dip,group,trim(prm%output(ou)), &
|
||||
'dislocation dipole density','1/m²',prm%systems_sl)
|
||||
case('gamma_sl')
|
||||
if(prm%sum_N_sl>0) call results_writeDataset(stt%gamma_sl,group,trim(prm%output(o)), &
|
||||
'plastic shear','1')
|
||||
call results_writeDataset(stt%gamma_sl,group,trim(prm%output(ou)), &
|
||||
'plastic shear','1',prm%systems_sl)
|
||||
case('Lambda_sl')
|
||||
if(prm%sum_N_sl>0) call results_writeDataset(dst%Lambda_sl,group,trim(prm%output(o)), &
|
||||
'mean free path for slip','m')
|
||||
call results_writeDataset(dst%Lambda_sl,group,trim(prm%output(ou)), &
|
||||
'mean free path for slip','m',prm%systems_sl)
|
||||
case('tau_pass')
|
||||
if(prm%sum_N_sl>0) call results_writeDataset(dst%tau_pass,group,trim(prm%output(o)), &
|
||||
'threshold stress for slip','Pa')
|
||||
call results_writeDataset(dst%tau_pass,group,trim(prm%output(ou)), &
|
||||
'threshold stress for slip','Pa',prm%systems_sl)
|
||||
end select
|
||||
enddo outputsLoop
|
||||
|
||||
enddo
|
||||
|
||||
end associate
|
||||
|
||||
end subroutine plastic_dislotungsten_results
|
||||
|
|
|
@ -82,6 +82,9 @@ submodule(phase:plastic) dislotwin
|
|||
ExtendedDislocations, & !< consider split into partials for climb calculation
|
||||
fccTwinTransNucleation, & !< twinning and transformation models are for fcc
|
||||
omitDipoles !< flag controlling consideration of dipole formation
|
||||
character(len=:), allocatable, dimension(:) :: &
|
||||
systems_sl, &
|
||||
systems_tw
|
||||
end type !< container type for internal constitutive parameters
|
||||
|
||||
type :: tDislotwinState
|
||||
|
@ -93,7 +96,7 @@ submodule(phase:plastic) dislotwin
|
|||
f_tr
|
||||
end type tDislotwinState
|
||||
|
||||
type :: tDislotwinMicrostructure
|
||||
type :: tDislotwinDependentState
|
||||
real(pReal), dimension(:,:), allocatable :: &
|
||||
Lambda_sl, & !< mean free path between 2 obstacles seen by a moving dislocation
|
||||
Lambda_tw, & !< mean free path between 2 obstacles seen by a growing twin
|
||||
|
@ -105,7 +108,7 @@ submodule(phase:plastic) dislotwin
|
|||
V_tr, & !< volume of a new martensite disc
|
||||
tau_r_tw, & !< stress to bring partials close together (twin)
|
||||
tau_r_tr !< stress to bring partials close together (trans)
|
||||
end type tDislotwinMicrostructure
|
||||
end type tDislotwinDependentState
|
||||
|
||||
!--------------------------------------------------------------------------------------------------
|
||||
! containers for parameters and state
|
||||
|
@ -113,7 +116,7 @@ submodule(phase:plastic) dislotwin
|
|||
type(tDislotwinState), allocatable, dimension(:) :: &
|
||||
dotState, &
|
||||
state
|
||||
type(tDislotwinMicrostructure), allocatable, dimension(:) :: dependentState
|
||||
type(tDislotwinDependentState), allocatable, dimension(:) :: dependentState
|
||||
|
||||
contains
|
||||
|
||||
|
@ -192,6 +195,7 @@ module function plastic_dislotwin_init() result(myPlasticity)
|
|||
N_sl = pl%get_as1dInt('N_sl',defaultVal=emptyIntArray)
|
||||
prm%sum_N_sl = sum(abs(N_sl))
|
||||
slipActive: if (prm%sum_N_sl > 0) then
|
||||
prm%systems_sl = lattice_labels_slip(N_sl,phase_lattice(ph))
|
||||
prm%P_sl = lattice_SchmidMatrix_slip(N_sl,phase_lattice(ph),phase_cOverA(ph))
|
||||
prm%h_sl_sl = lattice_interaction_SlipBySlip(N_sl,pl%get_as1dFloat('h_sl-sl'),phase_lattice(ph))
|
||||
prm%forestProjection = lattice_forestProjection_edge(N_sl,phase_lattice(ph),phase_cOverA(ph))
|
||||
|
@ -259,6 +263,7 @@ module function plastic_dislotwin_init() result(myPlasticity)
|
|||
N_tw = pl%get_as1dInt('N_tw', defaultVal=emptyIntArray)
|
||||
prm%sum_N_tw = sum(abs(N_tw))
|
||||
twinActive: if (prm%sum_N_tw > 0) then
|
||||
prm%systems_tw = lattice_labels_twin(N_tw,phase_lattice(ph))
|
||||
prm%P_tw = lattice_SchmidMatrix_twin(N_tw,phase_lattice(ph),phase_cOverA(ph))
|
||||
prm%h_tw_tw = lattice_interaction_TwinByTwin(N_tw,pl%get_as1dFloat('h_tw-tw'), &
|
||||
phase_lattice(ph))
|
||||
|
@ -787,44 +792,49 @@ module subroutine plastic_dislotwin_results(ph,group)
|
|||
integer, intent(in) :: ph
|
||||
character(len=*), intent(in) :: group
|
||||
|
||||
integer :: o
|
||||
integer :: ou
|
||||
|
||||
|
||||
associate(prm => param(ph), stt => state(ph), dst => dependentState(ph))
|
||||
outputsLoop: do o = 1,size(prm%output)
|
||||
select case(trim(prm%output(o)))
|
||||
|
||||
do ou = 1,size(prm%output)
|
||||
|
||||
select case(trim(prm%output(ou)))
|
||||
|
||||
case('rho_mob')
|
||||
if(prm%sum_N_sl>0) call results_writeDataset(stt%rho_mob,group,trim(prm%output(o)), &
|
||||
'mobile dislocation density','1/m²')
|
||||
call results_writeDataset(stt%rho_mob,group,trim(prm%output(ou)), &
|
||||
'mobile dislocation density','1/m²',prm%systems_sl)
|
||||
case('rho_dip')
|
||||
if(prm%sum_N_sl>0) call results_writeDataset(stt%rho_dip,group,trim(prm%output(o)), &
|
||||
'dislocation dipole density','1/m²')
|
||||
call results_writeDataset(stt%rho_dip,group,trim(prm%output(ou)), &
|
||||
'dislocation dipole density','1/m²',prm%systems_sl)
|
||||
case('gamma_sl')
|
||||
if(prm%sum_N_sl>0) call results_writeDataset(stt%gamma_sl,group,trim(prm%output(o)), &
|
||||
'plastic shear','1')
|
||||
call results_writeDataset(stt%gamma_sl,group,trim(prm%output(ou)), &
|
||||
'plastic shear','1',prm%systems_sl)
|
||||
case('Lambda_sl')
|
||||
if(prm%sum_N_sl>0) call results_writeDataset(dst%Lambda_sl,group,trim(prm%output(o)), &
|
||||
'mean free path for slip','m')
|
||||
call results_writeDataset(dst%Lambda_sl,group,trim(prm%output(ou)), &
|
||||
'mean free path for slip','m',prm%systems_sl)
|
||||
case('tau_pass')
|
||||
if(prm%sum_N_sl>0) call results_writeDataset(dst%tau_pass,group,trim(prm%output(o)), &
|
||||
'passing stress for slip','Pa')
|
||||
call results_writeDataset(dst%tau_pass,group,trim(prm%output(ou)), &
|
||||
'passing stress for slip','Pa',prm%systems_sl)
|
||||
|
||||
case('f_tw')
|
||||
if(prm%sum_N_tw>0) call results_writeDataset(stt%f_tw,group,trim(prm%output(o)), &
|
||||
'twinned volume fraction','m³/m³')
|
||||
call results_writeDataset(stt%f_tw,group,trim(prm%output(ou)), &
|
||||
'twinned volume fraction','m³/m³',prm%systems_tw)
|
||||
case('Lambda_tw')
|
||||
if(prm%sum_N_tw>0) call results_writeDataset(dst%Lambda_tw,group,trim(prm%output(o)), &
|
||||
'mean free path for twinning','m')
|
||||
call results_writeDataset(dst%Lambda_tw,group,trim(prm%output(ou)), &
|
||||
'mean free path for twinning','m',prm%systems_tw)
|
||||
case('tau_hat_tw')
|
||||
if(prm%sum_N_tw>0) call results_writeDataset(dst%tau_hat_tw,group,trim(prm%output(o)), &
|
||||
'threshold stress for twinning','Pa')
|
||||
call results_writeDataset(dst%tau_hat_tw,group,trim(prm%output(ou)), &
|
||||
'threshold stress for twinning','Pa',prm%systems_tw)
|
||||
|
||||
case('f_tr')
|
||||
if(prm%sum_N_tr>0) call results_writeDataset(stt%f_tr,group,trim(prm%output(o)), &
|
||||
if(prm%sum_N_tr>0) call results_writeDataset(stt%f_tr,group,trim(prm%output(ou)), &
|
||||
'martensite volume fraction','m³/m³')
|
||||
|
||||
end select
|
||||
enddo outputsLoop
|
||||
|
||||
enddo
|
||||
|
||||
end associate
|
||||
|
||||
end subroutine plastic_dislotwin_results
|
||||
|
|
|
@ -30,6 +30,8 @@ submodule(phase:plastic) kinehardening
|
|||
nonSchmidActive = .false.
|
||||
character(len=pStringLen), allocatable, dimension(:) :: &
|
||||
output
|
||||
character(len=:), allocatable, dimension(:) :: &
|
||||
systems_sl
|
||||
end type tParameters
|
||||
|
||||
type :: tKinehardeningState
|
||||
|
@ -40,7 +42,6 @@ submodule(phase:plastic) kinehardening
|
|||
gamma, & !< accumulated (absolute) shear
|
||||
gamma_0, & !< accumulated shear at last switch of stress sense
|
||||
sgn_gamma !< sense of acting shear stress (-1 or +1)
|
||||
|
||||
end type tKinehardeningState
|
||||
|
||||
!--------------------------------------------------------------------------------------------------
|
||||
|
@ -113,6 +114,7 @@ module function plastic_kinehardening_init() result(myPlasticity)
|
|||
N_sl = pl%get_as1dInt('N_sl',defaultVal=emptyIntArray)
|
||||
prm%sum_N_sl = sum(abs(N_sl))
|
||||
slipActive: if (prm%sum_N_sl > 0) then
|
||||
prm%systems_sl = lattice_labels_slip(N_sl,phase_lattice(ph))
|
||||
prm%P = lattice_SchmidMatrix_slip(N_sl,phase_lattice(ph),phase_cOverA(ph))
|
||||
|
||||
if (phase_lattice(ph) == 'cI') then
|
||||
|
@ -351,31 +353,37 @@ module subroutine plastic_kinehardening_results(ph,group)
|
|||
integer, intent(in) :: ph
|
||||
character(len=*), intent(in) :: group
|
||||
|
||||
integer :: o
|
||||
integer :: ou
|
||||
|
||||
|
||||
associate(prm => param(ph), stt => state(ph))
|
||||
outputsLoop: do o = 1,size(prm%output)
|
||||
select case(trim(prm%output(o)))
|
||||
|
||||
do ou = 1,size(prm%output)
|
||||
|
||||
select case(trim(prm%output(ou)))
|
||||
|
||||
case ('xi')
|
||||
if(prm%sum_N_sl>0) call results_writeDataset(stt%xi,group,trim(prm%output(o)), &
|
||||
'resistance against plastic slip','Pa')
|
||||
call results_writeDataset(stt%xi,group,trim(prm%output(ou)), &
|
||||
'resistance against plastic slip','Pa',prm%systems_sl)
|
||||
case ('chi')
|
||||
if(prm%sum_N_sl>0) call results_writeDataset(stt%chi,group,trim(prm%output(o)), &
|
||||
'back stress','Pa')
|
||||
call results_writeDataset(stt%chi,group,trim(prm%output(ou)), &
|
||||
'back stress','Pa',prm%systems_sl)
|
||||
case ('sgn(gamma)')
|
||||
if(prm%sum_N_sl>0) call results_writeDataset(int(stt%sgn_gamma),group,trim(prm%output(o)), &
|
||||
'sense of shear','1')
|
||||
call results_writeDataset(int(stt%sgn_gamma),group,trim(prm%output(ou)), &
|
||||
'sense of shear','1',prm%systems_sl)
|
||||
case ('chi_0')
|
||||
if(prm%sum_N_sl>0) call results_writeDataset(stt%chi_0,group,trim(prm%output(o)), &
|
||||
'back stress at last switch of stress sense','Pa')
|
||||
call results_writeDataset(stt%chi_0,group,trim(prm%output(ou)), &
|
||||
'back stress at last switch of stress sense','Pa',prm%systems_sl)
|
||||
case ('gamma_0')
|
||||
if(prm%sum_N_sl>0) call results_writeDataset(stt%gamma_0,group,trim(prm%output(o)), &
|
||||
'plastic shear at last switch of stress sense','1')
|
||||
call results_writeDataset(stt%gamma_0,group,trim(prm%output(ou)), &
|
||||
'plastic shear at last switch of stress sense','1',prm%systems_sl)
|
||||
case ('gamma')
|
||||
if(prm%sum_N_sl>0) call results_writeDataset(stt%gamma,group,trim(prm%output(o)), &
|
||||
'plastic shear','1')
|
||||
call results_writeDataset(stt%gamma,group,trim(prm%output(ou)), &
|
||||
'plastic shear','1',prm%systems_sl)
|
||||
end select
|
||||
enddo outputsLoop
|
||||
|
||||
enddo
|
||||
|
||||
end associate
|
||||
|
||||
end subroutine plastic_kinehardening_results
|
||||
|
|
|
@ -120,13 +120,15 @@ submodule(phase:plastic) nonlocal
|
|||
logical :: &
|
||||
shortRangeStressCorrection, & !< use of short range stress correction by excess density gradient term
|
||||
nonSchmidActive = .false.
|
||||
character(len=:), allocatable, dimension(:) :: &
|
||||
systems_sl
|
||||
end type tParameters
|
||||
|
||||
type :: tNonlocalMicrostructure
|
||||
type :: tNonlocalDependentState
|
||||
real(pReal), allocatable, dimension(:,:) :: &
|
||||
tau_pass, &
|
||||
tau_Back
|
||||
end type tNonlocalMicrostructure
|
||||
end type tNonlocalDependentState
|
||||
|
||||
type :: tNonlocalState
|
||||
real(pReal), pointer, dimension(:,:) :: &
|
||||
|
@ -162,7 +164,7 @@ submodule(phase:plastic) nonlocal
|
|||
|
||||
type(tParameters), dimension(:), allocatable :: param !< containers of constitutive parameters
|
||||
|
||||
type(tNonlocalMicrostructure), dimension(:), allocatable :: microstructure
|
||||
type(tNonlocalDependentState), dimension(:), allocatable :: dependentState
|
||||
|
||||
contains
|
||||
|
||||
|
@ -219,13 +221,13 @@ module function plastic_nonlocal_init() result(myPlasticity)
|
|||
allocate(state0(phases%length))
|
||||
allocate(dotState(phases%length))
|
||||
allocate(deltaState(phases%length))
|
||||
allocate(microstructure(phases%length))
|
||||
allocate(dependentState(phases%length))
|
||||
|
||||
do ph = 1, phases%length
|
||||
if(.not. myPlasticity(ph)) cycle
|
||||
|
||||
associate(prm => param(ph), dot => dotState(ph), stt => state(ph), &
|
||||
st0 => state0(ph), del => deltaState(ph), dst => microstructure(ph))
|
||||
st0 => state0(ph), del => deltaState(ph), dst => dependentState(ph))
|
||||
|
||||
phase => phases%get(ph)
|
||||
mech => phase%get('mechanical')
|
||||
|
@ -246,6 +248,7 @@ module function plastic_nonlocal_init() result(myPlasticity)
|
|||
ini%N_sl = pl%get_as1dInt('N_sl',defaultVal=emptyIntArray)
|
||||
prm%sum_N_sl = sum(abs(ini%N_sl))
|
||||
slipActive: if (prm%sum_N_sl > 0) then
|
||||
prm%systems_sl = lattice_labels_slip(ini%N_sl,phase_lattice(ph))
|
||||
prm%P_sl = lattice_SchmidMatrix_slip(ini%N_sl,phase_lattice(ph), phase_cOverA(ph))
|
||||
|
||||
if (phase_lattice(ph) == 'cI') then
|
||||
|
@ -604,7 +607,7 @@ module subroutine nonlocal_dependentState(ph, en, ip, el)
|
|||
real(pReal), dimension(3,param(ph)%sum_N_sl,2) :: &
|
||||
m ! direction of dislocation motion
|
||||
|
||||
associate(prm => param(ph),dst => microstructure(ph), stt => state(ph))
|
||||
associate(prm => param(ph),dst => dependentState(ph), stt => state(ph))
|
||||
|
||||
rho = getRho(ph,en)
|
||||
|
||||
|
@ -771,7 +774,7 @@ module subroutine nonlocal_LpAndItsTangent(Lp,dLp_dMp, &
|
|||
tau, & !< resolved shear stress including backstress terms
|
||||
dot_gamma !< shear rate
|
||||
|
||||
associate(prm => param(ph),dst=>microstructure(ph),stt=>state(ph))
|
||||
associate(prm => param(ph),dst=>dependentState(ph),stt=>state(ph))
|
||||
|
||||
!*** shortcut to state variables
|
||||
rho = getRho(ph,en)
|
||||
|
@ -867,7 +870,7 @@ module subroutine plastic_nonlocal_deltaState(Mp,ph,en)
|
|||
dUpperOld, & ! old maximum stable dipole distance for edges and screws
|
||||
deltaDUpper ! change in maximum stable dipole distance for edges and screws
|
||||
|
||||
associate(prm => param(ph),dst => microstructure(ph),del => deltaState(ph))
|
||||
associate(prm => param(ph),dst => dependentState(ph),del => deltaState(ph))
|
||||
|
||||
!*** shortcut to state variables
|
||||
forall (s = 1:prm%sum_N_sl, t = 1:4) v(s,t) = plasticState(ph)%state(iV(s,t,ph),en)
|
||||
|
@ -979,7 +982,7 @@ module subroutine nonlocal_dotState(Mp, Temperature,timestep, &
|
|||
return
|
||||
endif
|
||||
|
||||
associate(prm => param(ph), dst => microstructure(ph), dot => dotState(ph), stt => state(ph))
|
||||
associate(prm => param(ph), dst => dependentState(ph), dot => dotState(ph), stt => state(ph))
|
||||
|
||||
tau = 0.0_pReal
|
||||
dot_gamma = 0.0_pReal
|
||||
|
@ -1116,8 +1119,11 @@ end subroutine nonlocal_dotState
|
|||
!---------------------------------------------------------------------------------------------------
|
||||
!> @brief calculates the rate of change of microstructure
|
||||
!---------------------------------------------------------------------------------------------------
|
||||
#if __INTEL_COMPILER >= 2020
|
||||
non_recursive function rhoDotFlux(timestep,ph,en,ip,el)
|
||||
#else
|
||||
function rhoDotFlux(timestep,ph,en,ip,el)
|
||||
|
||||
#endif
|
||||
real(pReal), intent(in) :: &
|
||||
timestep !< substepped crystallite time increment
|
||||
integer, intent(in) :: &
|
||||
|
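
The preprocessor guard above enables the NON_RECURSIVE prefix only where __INTEL_COMPILER reports a version that understands it; elsewhere the macro expands to 0 and the plain declaration is used. A standalone sketch of the same pattern (save with a .F90 suffix so the preprocessor runs; the function name is made up):

  ! standalone sketch, not DAMASK code
  #if __INTEL_COMPILER >= 2020
  non_recursive function square(x) result(y)
  #else
  function square(x) result(y)
  #endif
    implicit none
    real, intent(in) :: x
    real :: y

    y = x*x
  end function square
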
@ -1176,7 +1182,7 @@ function rhoDotFlux(timestep,ph,en,ip,el)
|
|||
|
||||
|
||||
associate(prm => param(ph), &
|
||||
dst => microstructure(ph), &
|
||||
dst => dependentState(ph), &
|
||||
dot => dotState(ph), &
|
||||
stt => state(ph))
|
||||
ns = prm%sum_N_sl
|
||||
|
@ -1458,71 +1464,76 @@ end subroutine plastic_nonlocal_updateCompatibility
|
|||
|
||||
|
||||
!--------------------------------------------------------------------------------------------------
|
||||
!> @brief writes results to HDF5 output file
|
||||
!> @brief Write results to HDF5 output file.
|
||||
!--------------------------------------------------------------------------------------------------
|
||||
module subroutine plastic_nonlocal_results(ph,group)
|
||||
|
||||
integer, intent(in) :: ph
|
||||
character(len=*),intent(in) :: group
|
||||
|
||||
integer :: o
|
||||
integer :: ou
|
||||
|
||||
associate(prm => param(ph),dst => dependentState(ph),stt=>state(ph))
|
||||
|
||||
do ou = 1,size(prm%output)
|
||||
|
||||
select case(trim(prm%output(ou)))
|
||||
|
||||
associate(prm => param(ph),dst => microstructure(ph),stt=>state(ph))
|
||||
outputsLoop: do o = 1,size(prm%output)
|
||||
select case(trim(prm%output(o)))
|
||||
case('rho_u_ed_pos')
|
||||
if(prm%sum_N_sl>0) call results_writeDataset(stt%rho_sgl_mob_edg_pos,group,trim(prm%output(o)), &
|
||||
'positive mobile edge density','1/m²')
|
||||
call results_writeDataset(stt%rho_sgl_mob_edg_pos,group,trim(prm%output(ou)), &
|
||||
'positive mobile edge density','1/m²', prm%systems_sl)
|
||||
case('rho_b_ed_pos')
|
||||
if(prm%sum_N_sl>0) call results_writeDataset(stt%rho_sgl_imm_edg_pos,group,trim(prm%output(o)), &
|
||||
'positive immobile edge density','1/m²')
|
||||
call results_writeDataset(stt%rho_sgl_imm_edg_pos,group,trim(prm%output(ou)), &
|
||||
'positive immobile edge density','1/m²', prm%systems_sl)
|
||||
case('rho_u_ed_neg')
|
||||
if(prm%sum_N_sl>0) call results_writeDataset(stt%rho_sgl_mob_edg_neg,group,trim(prm%output(o)), &
|
||||
'negative mobile edge density','1/m²')
|
||||
call results_writeDataset(stt%rho_sgl_mob_edg_neg,group,trim(prm%output(ou)), &
|
||||
'negative mobile edge density','1/m²', prm%systems_sl)
|
||||
case('rho_b_ed_neg')
|
||||
if(prm%sum_N_sl>0) call results_writeDataset(stt%rho_sgl_imm_edg_neg,group,trim(prm%output(o)), &
|
||||
'negative immobile edge density','1/m²')
|
||||
call results_writeDataset(stt%rho_sgl_imm_edg_neg,group,trim(prm%output(ou)), &
|
||||
'negative immobile edge density','1/m²', prm%systems_sl)
|
||||
case('rho_d_ed')
|
||||
if(prm%sum_N_sl>0) call results_writeDataset(stt%rho_dip_edg,group,trim(prm%output(o)), &
|
||||
'edge dipole density','1/m²')
|
||||
call results_writeDataset(stt%rho_dip_edg,group,trim(prm%output(ou)), &
|
||||
'edge dipole density','1/m²', prm%systems_sl)
|
||||
case('rho_u_sc_pos')
|
||||
if(prm%sum_N_sl>0) call results_writeDataset(stt%rho_sgl_mob_scr_pos,group,trim(prm%output(o)), &
|
||||
'positive mobile screw density','1/m²')
|
||||
call results_writeDataset(stt%rho_sgl_mob_scr_pos,group,trim(prm%output(ou)), &
|
||||
'positive mobile screw density','1/m²', prm%systems_sl)
|
||||
case('rho_b_sc_pos')
|
||||
if(prm%sum_N_sl>0) call results_writeDataset(stt%rho_sgl_imm_scr_pos,group,trim(prm%output(o)), &
|
||||
'positive immobile screw density','1/m²')
|
||||
call results_writeDataset(stt%rho_sgl_imm_scr_pos,group,trim(prm%output(ou)), &
|
||||
'positive immobile screw density','1/m²', prm%systems_sl)
|
||||
case('rho_u_sc_neg')
|
||||
if(prm%sum_N_sl>0) call results_writeDataset(stt%rho_sgl_mob_scr_neg,group,trim(prm%output(o)), &
|
||||
'negative mobile screw density','1/m²')
|
||||
call results_writeDataset(stt%rho_sgl_mob_scr_neg,group,trim(prm%output(ou)), &
|
||||
'negative mobile screw density','1/m²', prm%systems_sl)
|
||||
case('rho_b_sc_neg')
|
||||
if(prm%sum_N_sl>0) call results_writeDataset(stt%rho_sgl_imm_scr_neg,group,trim(prm%output(o)), &
|
||||
'negative immobile screw density','1/m²')
|
||||
call results_writeDataset(stt%rho_sgl_imm_scr_neg,group,trim(prm%output(ou)), &
|
||||
'negative immobile screw density','1/m²', prm%systems_sl)
|
||||
case('rho_d_sc')
|
||||
if(prm%sum_N_sl>0) call results_writeDataset(stt%rho_dip_scr,group,trim(prm%output(o)), &
|
||||
'screw dipole density','1/m²')
|
||||
call results_writeDataset(stt%rho_dip_scr,group,trim(prm%output(ou)), &
|
||||
'screw dipole density','1/m²', prm%systems_sl)
|
||||
case('rho_f')
|
||||
if(prm%sum_N_sl>0) call results_writeDataset(stt%rho_forest,group,trim(prm%output(o)), &
|
||||
'forest density','1/m²')
|
||||
call results_writeDataset(stt%rho_forest,group,trim(prm%output(ou)), &
|
||||
'forest density','1/m²', prm%systems_sl)
|
||||
case('v_ed_pos')
|
||||
if(prm%sum_N_sl>0) call results_writeDataset(stt%v_edg_pos,group,trim(prm%output(o)), &
|
||||
'positive edge velocity','m/s')
|
||||
call results_writeDataset(stt%v_edg_pos,group,trim(prm%output(ou)), &
|
||||
'positive edge velocity','m/s', prm%systems_sl)
|
||||
case('v_ed_neg')
|
||||
if(prm%sum_N_sl>0) call results_writeDataset(stt%v_edg_neg,group,trim(prm%output(o)), &
|
||||
'negative edge velocity','m/s')
|
||||
call results_writeDataset(stt%v_edg_neg,group,trim(prm%output(ou)), &
|
||||
'negative edge velocity','m/s', prm%systems_sl)
|
||||
case('v_sc_pos')
|
||||
if(prm%sum_N_sl>0) call results_writeDataset(stt%v_scr_pos,group,trim(prm%output(o)), &
|
||||
'positive srew velocity','m/s')
|
||||
call results_writeDataset(stt%v_scr_pos,group,trim(prm%output(ou)), &
|
||||
'positive screw velocity','m/s', prm%systems_sl)
|
||||
case('v_sc_neg')
|
||||
if(prm%sum_N_sl>0) call results_writeDataset(stt%v_scr_neg,group,trim(prm%output(o)), &
|
||||
'negative screw velocity','m/s')
|
||||
call results_writeDataset(stt%v_scr_neg,group,trim(prm%output(ou)), &
|
||||
'negative screw velocity','m/s', prm%systems_sl)
|
||||
case('gamma')
|
||||
if(prm%sum_N_sl>0) call results_writeDataset(stt%gamma,group,trim(prm%output(o)), &
|
||||
'plastic shear','1')
|
||||
call results_writeDataset(stt%gamma,group,trim(prm%output(ou)), &
|
||||
'plastic shear','1', prm%systems_sl)
|
||||
case('tau_pass')
|
||||
if(prm%sum_N_sl>0) call results_writeDataset(dst%tau_pass,group,trim(prm%output(o)), &
|
||||
'passing stress for slip','Pa')
|
||||
call results_writeDataset(dst%tau_pass,group,trim(prm%output(ou)), &
|
||||
'passing stress for slip','Pa', prm%systems_sl)
|
||||
end select
|
||||
enddo outputsLoop
|
||||
|
||||
enddo
|
||||
|
||||
end associate
|
||||
|
||||
end subroutine plastic_nonlocal_results
|
||||
|
|
|
@ -42,6 +42,9 @@ submodule(phase:plastic) phenopowerlaw
|
|||
nonSchmidActive = .false.
|
||||
character(len=pStringLen), allocatable, dimension(:) :: &
|
||||
output
|
||||
character(len=:), allocatable, dimension(:) :: &
|
||||
systems_sl, &
|
||||
systems_tw
|
||||
end type tParameters
|
||||
|
||||
type :: tPhenopowerlawState
|
||||
|
@ -115,6 +118,7 @@ module function plastic_phenopowerlaw_init() result(myPlasticity)
|
|||
N_sl = pl%get_as1dInt('N_sl',defaultVal=emptyIntArray)
|
||||
prm%sum_N_sl = sum(abs(N_sl))
|
||||
slipActive: if (prm%sum_N_sl > 0) then
|
||||
prm%systems_sl = lattice_labels_slip(N_sl,phase_lattice(ph))
|
||||
prm%P_sl = lattice_SchmidMatrix_slip(N_sl,phase_lattice(ph),phase_cOverA(ph))
|
||||
|
||||
if (phase_lattice(ph) == 'cI') then
|
||||
|
@ -126,8 +130,7 @@ module function plastic_phenopowerlaw_init() result(myPlasticity)
|
|||
prm%P_nS_pos = prm%P_sl
|
||||
prm%P_nS_neg = prm%P_sl
|
||||
endif
|
||||
prm%h_sl_sl = lattice_interaction_SlipBySlip(N_sl,pl%get_as1dFloat('h_sl-sl'), &
|
||||
phase_lattice(ph))
|
||||
prm%h_sl_sl = lattice_interaction_SlipBySlip(N_sl,pl%get_as1dFloat('h_sl-sl'),phase_lattice(ph))
|
||||
|
||||
xi_0_sl = pl%get_as1dFloat('xi_0_sl', requiredSize=size(N_sl))
|
||||
prm%xi_inf_sl = pl%get_as1dFloat('xi_inf_sl', requiredSize=size(N_sl))
|
||||
|
@ -162,11 +165,10 @@ module function plastic_phenopowerlaw_init() result(myPlasticity)
|
|||
N_tw = pl%get_as1dInt('N_tw', defaultVal=emptyIntArray)
|
||||
prm%sum_N_tw = sum(abs(N_tw))
|
||||
twinActive: if (prm%sum_N_tw > 0) then
|
||||
prm%systems_tw = lattice_labels_twin(N_tw,phase_lattice(ph))
|
||||
prm%P_tw = lattice_SchmidMatrix_twin(N_tw,phase_lattice(ph),phase_cOverA(ph))
|
||||
prm%h_tw_tw = lattice_interaction_TwinByTwin(N_tw,pl%get_as1dFloat('h_tw-tw'), &
|
||||
phase_lattice(ph))
|
||||
prm%gamma_char = lattice_characteristicShear_twin(N_tw,phase_lattice(ph),&
|
||||
phase_cOverA(ph))
|
||||
prm%h_tw_tw = lattice_interaction_TwinByTwin(N_tw,pl%get_as1dFloat('h_tw-tw'),phase_lattice(ph))
|
||||
prm%gamma_char = lattice_characteristicShear_twin(N_tw,phase_lattice(ph),phase_cOverA(ph))
|
||||
|
||||
xi_0_tw = pl%get_as1dFloat('xi_0_tw',requiredSize=size(N_tw))
|
||||
|
||||
|
@ -370,28 +372,33 @@ module subroutine plastic_phenopowerlaw_results(ph,group)
|
|||
integer, intent(in) :: ph
|
||||
character(len=*), intent(in) :: group
|
||||
|
||||
integer :: o
|
||||
integer :: ou
|
||||
|
||||
|
||||
associate(prm => param(ph), stt => state(ph))
|
||||
outputsLoop: do o = 1,size(prm%output)
|
||||
select case(trim(prm%output(o)))
|
||||
|
||||
do ou = 1,size(prm%output)
|
||||
|
||||
select case(trim(prm%output(ou)))
|
||||
|
||||
case('xi_sl')
|
||||
if(prm%sum_N_sl>0) call results_writeDataset(stt%xi_sl,group,trim(prm%output(o)), &
|
||||
'resistance against plastic slip','Pa')
|
||||
call results_writeDataset(stt%xi_sl,group,trim(prm%output(ou)), &
|
||||
'resistance against plastic slip','Pa',prm%systems_sl)
|
||||
case('gamma_sl')
|
||||
if(prm%sum_N_sl>0) call results_writeDataset(stt%gamma_sl,group,trim(prm%output(o)), &
|
||||
'plastic shear','1')
|
||||
call results_writeDataset(stt%gamma_sl,group,trim(prm%output(ou)), &
|
||||
'plastic shear','1',prm%systems_sl)
|
||||
|
||||
case('xi_tw')
|
||||
if(prm%sum_N_tw>0) call results_writeDataset(stt%xi_tw,group,trim(prm%output(o)), &
|
||||
'resistance against twinning','Pa')
|
||||
call results_writeDataset(stt%xi_tw,group,trim(prm%output(ou)), &
|
||||
'resistance against twinning','Pa',prm%systems_tw)
|
||||
case('gamma_tw')
|
||||
if(prm%sum_N_tw>0) call results_writeDataset(stt%gamma_tw,group,trim(prm%output(o)), &
|
||||
'twinning shear','1')
|
||||
call results_writeDataset(stt%gamma_tw,group,trim(prm%output(ou)), &
|
||||
'twinning shear','1',prm%systems_tw)
|
||||
|
||||
end select
|
||||
enddo outputsLoop
|
||||
|
||||
enddo
|
||||
|
||||
end associate
|
||||
|
||||
end subroutine plastic_phenopowerlaw_results
|
||||
|
|
|
@@ -13,7 +13,7 @@ module prec
   implicit none
   public

-  ! https://software.intel.com/en-us/blogs/2017/03/27/doctor-fortran-in-it-takes-all-kinds
+  ! https://stevelionel.com/drfortran/2017/03/27/doctor-fortran-in-it-takes-all-kinds
   integer, parameter :: pReal = IEEE_selected_real_kind(15,307)   !< number with 15 significant digits, up to 1e+-307 (typically 64 bit)
   integer, parameter :: pI32  = selected_int_kind(9)              !< number with at least up to +-1e9 (typically 32 bit)
   integer, parameter :: pI64  = selected_int_kind(18)             !< number with at least up to +-1e18 (typically 64 bit)

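
The kind parameters above request precision and range rather than naming a storage size. A standalone sketch probing what they deliver on a given compiler:

  ! standalone sketch, not DAMASK code
  program kinds_demo
    use, intrinsic :: ieee_arithmetic, only: IEEE_selected_real_kind
    implicit none
    integer, parameter :: pReal = IEEE_selected_real_kind(15,307)
    integer, parameter :: pI32  = selected_int_kind(9)
    integer, parameter :: pI64  = selected_int_kind(18)

    print *, precision(1.0_pReal), range(1.0_pReal)   ! at least 15 digits, exponent range >= 307
    print *, huge(1_pI32)                             ! at least 1e9
    print *, huge(1_pI64)                             ! at least 1e18
  end program kinds_demo
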
@ -34,10 +34,11 @@ module results
|
|||
end interface results_writeDataset
|
||||
|
||||
interface results_addAttribute
|
||||
module procedure results_addAttribute_real
|
||||
module procedure results_addAttribute_int
|
||||
module procedure results_addAttribute_str
|
||||
module procedure results_addAttribute_int
|
||||
module procedure results_addAttribute_real
|
||||
|
||||
module procedure results_addAttribute_str_array
|
||||
module procedure results_addAttribute_int_array
|
||||
module procedure results_addAttribute_real_array
|
||||
end interface results_addAttribute
|
||||
|
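
results_addAttribute is a generic interface: the compiler picks the specific procedure from the type (and rank) of the value argument, which is why the scalar and array variants can share one name. A standalone sketch of the same structure (module, procedure, and attribute names are made up):

  ! standalone sketch, not DAMASK code
  module attributes
    implicit none

    interface addAttribute
      module procedure addAttribute_str
      module procedure addAttribute_int
    end interface addAttribute

  contains

    subroutine addAttribute_str(label,val)
      character(len=*), intent(in) :: label, val
      print *, label//' = '//val
    end subroutine addAttribute_str

    subroutine addAttribute_int(label,val)
      character(len=*), intent(in) :: label
      integer,          intent(in) :: val
      print *, label, val
    end subroutine addAttribute_int

  end module attributes

  program generic_demo
    use attributes
    implicit none

    call addAttribute('created','2021-11-01')   ! resolves to the _str specific
    call addAttribute('cells',16)               ! resolves to the _int specific
  end program generic_demo
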
@ -66,7 +67,6 @@ subroutine results_init(restart)
|
|||
|
||||
character(len=pPathLen) :: commandLine
|
||||
integer :: hdferr
|
||||
integer(HID_T) :: group_id
|
||||
character(len=:), allocatable :: date
|
||||
|
||||
|
||||
|
@ -210,7 +210,7 @@ subroutine results_setLink(path,link)
|
|||
end subroutine results_setLink
|
||||
|
||||
!--------------------------------------------------------------------------------------------------
|
||||
!> @brief adds a string attribute to an object in the results file
|
||||
!> @brief Add a string attribute to an object in the results file.
|
||||
!--------------------------------------------------------------------------------------------------
|
||||
subroutine results_addAttribute_str(attrLabel,attrValue,path)
|
||||
|
||||
|
@ -228,7 +228,7 @@ end subroutine results_addAttribute_str
|
|||
|
||||
|
||||
!--------------------------------------------------------------------------------------------------
|
||||
!> @brief adds an integer attribute an object in the results file
|
||||
!> @brief Add an integer attribute to an object in the results file.
|
||||
!--------------------------------------------------------------------------------------------------
|
||||
subroutine results_addAttribute_int(attrLabel,attrValue,path)
|
||||
|
||||
|
@ -247,7 +247,7 @@ end subroutine results_addAttribute_int
|
|||
|
||||
|
||||
!--------------------------------------------------------------------------------------------------
|
||||
!> @brief adds a real attribute an object in the results file
|
||||
!> @brief Add a real attribute to an object in the results file.
|
||||
!--------------------------------------------------------------------------------------------------
|
||||
subroutine results_addAttribute_real(attrLabel,attrValue,path)
|
||||
|
||||
|
@ -266,7 +266,26 @@ end subroutine results_addAttribute_real
|
|||
|
||||
|
||||
!--------------------------------------------------------------------------------------------------
|
||||
!> @brief adds an integer array attribute an object in the results file
|
||||
!> @brief Add a string array attribute to an object in the results file.
|
||||
!--------------------------------------------------------------------------------------------------
|
||||
subroutine results_addAttribute_str_array(attrLabel,attrValue,path)
|
||||
|
||||
character(len=*), intent(in) :: attrLabel
|
||||
character(len=*), intent(in), dimension(:) :: attrValue
|
||||
character(len=*), intent(in), optional :: path
|
||||
|
||||
|
||||
if (present(path)) then
|
||||
call HDF5_addAttribute(resultsFile,attrLabel, attrValue, path)
|
||||
else
|
||||
call HDF5_addAttribute(resultsFile,attrLabel, attrValue)
|
||||
endif
|
||||
|
||||
end subroutine results_addAttribute_str_array
|
||||
|
||||
|
||||
!--------------------------------------------------------------------------------------------------
|
||||
!> @brief Add an integer array attribute to an object in the results file.
|
||||
!--------------------------------------------------------------------------------------------------
|
||||
subroutine results_addAttribute_int_array(attrLabel,attrValue,path)
|
||||
|
||||
|
@ -285,7 +304,7 @@ end subroutine results_addAttribute_int_array
|
|||
|
||||
|
||||
!--------------------------------------------------------------------------------------------------
|
||||
!> @brief adds a real array attribute an object in the results file
|
||||
!> @brief Add a real array attribute to an object in the results file.
|
||||
!--------------------------------------------------------------------------------------------------
|
||||
subroutine results_addAttribute_real_array(attrLabel,attrValue,path)
|
||||
|
||||
|
@ -336,7 +355,6 @@ subroutine results_writeDataset_str(dataset,group,label,description)
|
|||
|
||||
end subroutine results_writeDataset_str
|
||||
|
||||
|
||||
!--------------------------------------------------------------------------------------------------
|
||||
!> @brief Store real scalar dataset with associated metadata.
|
||||
!--------------------------------------------------------------------------------------------------
|
||||
|
@ -360,18 +378,24 @@ end subroutine results_writeScalarDataset_real
|
|||
!--------------------------------------------------------------------------------------------------
|
||||
!> @brief Store real vector dataset with associated metadata.
|
||||
!--------------------------------------------------------------------------------------------------
|
||||
subroutine results_writeVectorDataset_real(dataset,group,label,description,SIunit)
|
||||
subroutine results_writeVectorDataset_real(dataset,group,label,description,SIunit,systems)
|
||||
|
||||
character(len=*), intent(in) :: label,group,description
|
||||
character(len=*), intent(in), optional :: SIunit
|
||||
character(len=*), intent(in), dimension(:), optional :: systems
|
||||
real(pReal), intent(in), dimension(:,:) :: dataset
|
||||
|
||||
integer(HID_T) :: groupHandle
|
||||
|
||||
|
||||
if (present(systems)) then
|
||||
if (size(systems)*size(dataset,2) == 0 ) return !ToDo: maybe also implement for other results_write (not sure about scalar)
|
||||
endif
|
||||
|
||||
groupHandle = results_openGroup(group)
|
||||
call HDF5_write(dataset,groupHandle,label)
|
||||
call executionStamp(group//'/'//label,description,SIunit)
|
||||
if (present(systems)) call HDF5_addAttribute(resultsFile,'systems',systems,group//'/'//label)
|
||||
call HDF5_closeGroup(groupHandle)
|
||||
|
||||
end subroutine results_writeVectorDataset_real
|
||||
|
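
The new optional systems argument lets per-system labels travel with a vector dataset, and the early return skips writing when either no systems or no records are present. A standalone sketch of that optional-argument guard (names are made up; no HDF5 involved):

  ! standalone sketch, not DAMASK code
  program optional_systems_demo
    implicit none
    real :: dataset(3,0)                        ! zero records
    character(len=5) :: systems(3)

    systems = ['sys-1','sys-2','sys-3']
    call write_vector(dataset)                  ! no labels: written unconditionally
    call write_vector(dataset,systems)          ! labelled but empty: skipped

  contains

    subroutine write_vector(dataset,systems)
      real,             intent(in)           :: dataset(:,:)
      character(len=*), intent(in), optional :: systems(:)

      if (present(systems)) then
        if (size(systems)*size(dataset,2) == 0) return   ! nothing to label or write
      endif
      print *, 'writing', size(dataset,2), 'records'
    end subroutine write_vector

  end program optional_systems_demo
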
@ -420,18 +444,24 @@ end subroutine results_writeTensorDataset_real
|
|||
!--------------------------------------------------------------------------------------------------
|
||||
!> @brief Store integer vector dataset with associated metadata.
|
||||
!--------------------------------------------------------------------------------------------------
|
||||
subroutine results_writeVectorDataset_int(dataset,group,label,description,SIunit)
|
||||
subroutine results_writeVectorDataset_int(dataset,group,label,description,SIunit,systems)
|
||||
|
||||
character(len=*), intent(in) :: label,group,description
|
||||
character(len=*), intent(in), optional :: SIunit
|
||||
character(len=*), intent(in), dimension(:), optional :: systems
|
||||
integer, intent(in), dimension(:,:) :: dataset
|
||||
|
||||
integer(HID_T) :: groupHandle
|
||||
|
||||
|
||||
if (present(systems)) then
|
||||
if (size(systems)*size(dataset,2) == 0 ) return !ToDo: maybe also implement for other results_write (not sure about scalar)
|
||||
endif
|
||||
|
||||
groupHandle = results_openGroup(group)
|
||||
call HDF5_write(dataset,groupHandle,label)
|
||||
call executionStamp(group//'/'//label,description,SIunit)
|
||||
if (present(systems)) call HDF5_addAttribute(resultsFile,'systems',systems,group//'/'//label)
|
||||
call HDF5_closeGroup(groupHandle)
|
||||
|
||||
end subroutine results_writeVectorDataset_int
|
||||
|
|