diff --git a/CMakeLists.txt b/CMakeLists.txt index f6870137f..708d8aa3c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -108,9 +108,9 @@ if (DAMASK_SOLVER STREQUAL "grid") project (damask-grid Fortran C) add_definitions (-DGrid) message ("Building Grid Solver\n") -elseif (DAMASK_SOLVER STREQUAL "fem" OR DAMASK_SOLVER STREQUAL "mesh") +elseif (DAMASK_SOLVER STREQUAL "mesh") project (damask-mesh Fortran C) - add_definitions (-DFEM) + add_definitions (-DMesh) message ("Building Mesh Solver\n") else () message (FATAL_ERROR "Build target (DAMASK_SOLVER) is not defined") diff --git a/DAMASK_prerequisites.sh b/DAMASK_prerequisites.sh index b72f19b7a..25a2e46e0 100755 --- a/DAMASK_prerequisites.sh +++ b/DAMASK_prerequisites.sh @@ -110,7 +110,7 @@ for executable in icc icpc ifort ;do done firstLevel "MPI Wrappers" -for executable in mpicc mpiCC mpiicc mpic++ mpicpc mpicxx mpifort mpif90 mpif77; do +for executable in mpicc mpiCC mpiicc mpic++ mpiicpc mpicxx mpifort mpiifort mpif90 mpif77; do getDetails $executable '-show' done diff --git a/Makefile b/Makefile index 34ce18c52..f7b783c61 100644 --- a/Makefile +++ b/Makefile @@ -2,19 +2,23 @@ SHELL = /bin/sh ######################################################################################## # Makefile for the installation of DAMASK ######################################################################################## -DAMASK_ROOT = $(shell python -c "import os,sys; print(os.path.normpath(os.path.realpath(os.path.expanduser('$(pwd)'))))") +DAMASK_ROOT = $(shell python3 -c "import os,sys; print(os.path.normpath(os.path.realpath(os.path.expanduser('$(pwd)'))))") .PHONY: all all: grid mesh processing .PHONY: grid grid: build/grid @(cd build/grid;make -j${DAMASK_NUM_THREADS} all install;) + @rm -f ${DAMASK_ROOT}/bin/DAMASK_spectral > /dev/null || true + @ln -s ${DAMASK_ROOT}/bin/DAMASK_grid ${DAMASK_ROOT}/bin/DAMASK_spectral || true .PHONY: spectral spectral: grid .PHONY: mesh mesh: build/mesh @(cd build/mesh; make 
-j${DAMASK_NUM_THREADS} all install;) + @rm -f ${DAMASK_ROOT}/bin/DAMASK_FEM > /dev/null || true + @ln -s ${DAMASK_ROOT}/bin/DAMASK_mesh ${DAMASK_ROOT}/bin/DAMASK_FEM || true .PHONY: FEM FEM: mesh diff --git a/PRIVATE b/PRIVATE index c595994cd..a6109acd2 160000 --- a/PRIVATE +++ b/PRIVATE @@ -1 +1 @@ -Subproject commit c595994cd8880acadf50b5dedb79156d04d35b91 +Subproject commit a6109acd264c683fd335b1d1f69934fc4a4078e3 diff --git a/VERSION b/VERSION index 29f305cbe..2ef4c00dd 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -v2.0.3-2464-g90f93d23 +v2.0.3-2614-g4b6b9478 diff --git a/cmake/Compiler-GNU.cmake b/cmake/Compiler-GNU.cmake index 2e8e5841c..ebba8c0bf 100644 --- a/cmake/Compiler-GNU.cmake +++ b/cmake/Compiler-GNU.cmake @@ -1,6 +1,9 @@ ################################################################################################### # GNU Compiler ################################################################################################### +if (CMAKE_Fortran_COMPILER_VERSION VERSION_LESS 8.0) + message (FATAL_ERROR "GCC Compiler version: ${CMAKE_Fortran_COMPILER_VERSION} not supported") +endif () if (OPENMP) set (OPENMP_FLAGS "-fopenmp") @@ -14,7 +17,7 @@ elseif (OPTIMIZATION STREQUAL "AGGRESSIVE") set (OPTIMIZATION_FLAGS "-O3 -ffast-math -funroll-loops -ftree-vectorize") endif () -set (STANDARD_CHECK "-std=f2008ts -pedantic-errors" ) +set (STANDARD_CHECK "-std=f2018 -pedantic-errors" ) set (LINKER_FLAGS "${LINKER_FLAGS} -Wl") # options parsed directly to the linker set (LINKER_FLAGS "${LINKER_FLAGS},-undefined,dynamic_lookup" ) @@ -25,6 +28,9 @@ set (LINKER_FLAGS "${LINKER_FLAGS},-undefined,dynamic_lookup" ) set (COMPILE_FLAGS "${COMPILE_FLAGS} -xf95-cpp-input") # preprocessor +set (COMPILE_FLAGS "${COMPILE_FLAGS} -fPIC -fPIE") +# position independent code + set (COMPILE_FLAGS "${COMPILE_FLAGS} -ffree-line-length-132") # restrict line length to the standard 132 characters (lattice.f90 require more characters) diff --git a/cmake/Compiler-Intel.cmake 
b/cmake/Compiler-Intel.cmake index 1a2c2c455..6d96ff42d 100644 --- a/cmake/Compiler-Intel.cmake +++ b/cmake/Compiler-Intel.cmake @@ -1,6 +1,10 @@ ################################################################################################### # Intel Compiler ################################################################################################### +if (CMAKE_Fortran_COMPILER_VERSION VERSION_LESS 18.0) + message (FATAL_ERROR "Intel Compiler version: ${CMAKE_Fortran_COMPILER_VERSION} not supported") +endif () + if (OPENMP) set (OPENMP_FLAGS "-qopenmp -parallel") endif () diff --git a/env/DAMASK.csh b/env/DAMASK.csh index a669a4ea0..98693d6b2 100644 --- a/env/DAMASK.csh +++ b/env/DAMASK.csh @@ -3,7 +3,7 @@ set CALLED=($_) set ENV_ROOT=`dirname $CALLED[2]` -set DAMASK_ROOT=`python -c "import os,sys; print(os.path.realpath(os.path.expanduser(sys.argv[1])))" $ENV_ROOT"/../"` +set DAMASK_ROOT=`python3 -c "import os,sys; print(os.path.realpath(os.path.expanduser(sys.argv[1])))" $ENV_ROOT"/../"` source $ENV_ROOT/CONFIG diff --git a/env/DAMASK.sh b/env/DAMASK.sh index aed99b3bc..5c3d2ba85 100644 --- a/env/DAMASK.sh +++ b/env/DAMASK.sh @@ -2,7 +2,7 @@ # usage: source DAMASK.sh function canonicalPath { - python -c "import os,sys; print(os.path.normpath(os.path.realpath(os.path.expanduser(sys.argv[1]))))" $1 + python3 -c "import os,sys; print(os.path.normpath(os.path.realpath(os.path.expanduser(sys.argv[1]))))" $1 } function blink { diff --git a/env/DAMASK.zsh b/env/DAMASK.zsh index 8769bac34..831268a7e 100644 --- a/env/DAMASK.zsh +++ b/env/DAMASK.zsh @@ -2,7 +2,7 @@ # usage: source DAMASK.zsh function canonicalPath { - python -c "import os,sys; print(os.path.normpath(os.path.realpath(os.path.expanduser(sys.argv[1]))))" $1 + python3 -c "import os,sys; print(os.path.normpath(os.path.realpath(os.path.expanduser(sys.argv[1]))))" $1 } function blink { diff --git a/examples/ConfigFiles/Homogenization_HydrogenFlux_CahnHilliard.config 
b/examples/ConfigFiles/Homogenization_HydrogenFlux_CahnHilliard.config deleted file mode 100644 index 62e1d2505..000000000 --- a/examples/ConfigFiles/Homogenization_HydrogenFlux_CahnHilliard.config +++ /dev/null @@ -1,3 +0,0 @@ -hydrogenflux cahnhilliard -initialHydrogenConc 0.0 -(output) hydrogenconc diff --git a/processing/post/addAPS34IDEstrainCoords.py b/processing/post/addAPS34IDEstrainCoords.py index c14983799..d71116c6a 100755 --- a/processing/post/addAPS34IDEstrainCoords.py +++ b/processing/post/addAPS34IDEstrainCoords.py @@ -32,15 +32,12 @@ parser.add_option('--depth', dest='depth', metavar='string', if filenames == []: filenames = [None] if options.frame is None: - parser.error('frame not specified') + parser.error('frame not specified') if options.depth is None: - parser.error('depth not specified') + parser.error('depth not specified') -theta=-0.75*np.pi -RotMat2TSL=np.array([[1., 0., 0.], - [0., np.cos(theta), np.sin(theta)], # Orientation to account for -135 deg - [0., -np.sin(theta), np.cos(theta)]]) # rotation for TSL convention +rot_to_TSL = damask.Rotation.from_axis_angle([-1,0,0,.75*np.pi]) for name in filenames: damask.util.report(scriptName,name) @@ -50,8 +47,6 @@ for name in filenames: coord = - table.get(options.frame) coord[:,2] += table.get(options.depth)[:,0] - table.add('coord', - np.einsum('ijk,ik->ij',np.broadcast_to(RotMat2TSL,(coord.shape[0],3,3)),coord), - scriptID+' '+' '.join(sys.argv[1:])) + table.add('coord',rot_to_TSL.broadcast_to(coord.shape[0]) @ coord,scriptID+' '+' '.join(sys.argv[1:])) table.to_ASCII(sys.stdout if name is None else name) diff --git a/processing/post/addCalculation.py b/processing/post/addCalculation.py deleted file mode 100755 index 79fdd9455..000000000 --- a/processing/post/addCalculation.py +++ /dev/null @@ -1,183 +0,0 @@ -#!/usr/bin/env python3 - -import os -import sys -from optparse import OptionParser -import re -from collections.abc import Iterable -import math # noqa - -import scipy # noqa -import 
scipy.linalg # noqa -import numpy as np - -import damask - - -scriptName = os.path.splitext(os.path.basename(__file__))[0] -scriptID = ' '.join([scriptName,damask.version]) - -def listify(x): - return x if isinstance(x, Iterable) else [x] - - -# -------------------------------------------------------------------- -# MAIN -# -------------------------------------------------------------------- - -parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """ -Add or alter column(s) with derived values according to user-defined arithmetic operation between column(s). -Column labels are tagged by '#label#' in formulas. Use ';' for ',' in functions. -Numpy is available as 'np'. - -Special variables: #_row_# -- row index -Examples: -(1) magnitude of vector -- "np.linalg.norm(#vec#)" -(2) rounded root of row number -- "round(math.sqrt(#_row_#);3)" - -""", version = scriptID) - -parser.add_option('-l','--label', - dest = 'labels', - action = 'extend', metavar = '', - help = '(list of) new column labels') -parser.add_option('-f','--formula', - dest = 'formulas', - action = 'extend', metavar = '', - help = '(list of) formulas corresponding to labels') - -parser.add_option('-c','--condition', - dest = 'condition', metavar='string', - help = 'condition to alter existing column data (optional)') - -(options,filenames) = parser.parse_args() - -if options.labels is None or options.formulas is None: - parser.error('no formulas and/or labels specified.') -if len(options.labels) != len(options.formulas): - parser.error('number of labels ({}) and formulas ({}) do not match.'.format(len(options.labels),len(options.formulas))) - -for i in range(len(options.formulas)): - options.formulas[i] = options.formulas[i].replace(';',',') - -# ------------------------------------- loop over input files -------------------------------------- - -if filenames == []: filenames = [None] - -for name in filenames: - try: - table = 
damask.ASCIItable(name = name) - except IOError: - continue - damask.util.report(scriptName,name) - -# ------------------------------------------ read header ------------------------------------------- - - table.head_read() - -# -------------------------------------------------------------------------------------------------- - specials = { \ - '_row_': 0, - } - -# --------------------------------------- evaluate condition --------------------------------------- - if options.condition is not None: - condition = options.condition # copy per file, since might be altered inline - breaker = False - - for position,(all,marker,column) in enumerate(set(re.findall(r'#(([s]#)?(.+?))#',condition))): # find three groups - idx = table.label_index(column) - dim = table.label_dimension(column) - if idx < 0 and column not in specials: - damask.util.croak('column "{}" not found.'.format(column)) - breaker = True - else: - if column in specials: - replacement = 'specials["{}"]'.format(column) - elif dim == 1: # scalar input - replacement = '{}(table.data[{}])'.format({ '':'float', - 's#':'str'}[marker],idx) # take float or string value of data column - elif dim > 1: # multidimensional input (vector, tensor, etc.) 
- replacement = 'np.array(table.data[{}:{}],dtype=float)'.format(idx,idx+dim) # use (flat) array representation - - condition = condition.replace('#'+all+'#',replacement) - - if breaker: continue # found mistake in condition evaluation --> next file - -# ------------------------------------------ build formulas ---------------------------------------- - - evaluator = {} - - for label,formula in zip(options.labels,options.formulas): - for column in re.findall(r'#(.+?)#',formula): # loop over column labels in formula - idx = table.label_index(column) - dim = table.label_dimension(column) - if column in specials: - replacement = 'specials["{}"]'.format(column) - elif dim == 1: # scalar input - replacement = 'float(table.data[{}])'.format(idx) # take float value of data column - elif dim > 1: # multidimensional input (vector, tensor, etc.) - replacement = 'np.array(table.data[{}:{}],dtype=float)'.format(idx,idx+dim) # use (flat) array representation - else: - damask.util.croak('column {} not found, skipping {}...'.format(column,label)) - options.labels.remove(label) - break - - formula = formula.replace('#'+column+'#',replacement) - - evaluator[label] = formula - -# ---------------------------- separate requested labels into old and new -------------------------- - - veterans = list(set(options.labels)&set(table.labels(raw=False)+table.labels(raw=True)) ) # intersection of requested and existing - newbies = list(set(options.labels)-set(table.labels(raw=False)+table.labels(raw=True)) ) # requested but not existing - -# ------------------------------------------ process data ------------------------------------------ - - firstLine = True - outputAlive = True - - while outputAlive and table.data_read(): # read next data line of ASCII table - specials['_row_'] += 1 # count row - - if firstLine: - firstLine = False - -# ---------------------------- line 1: determine dimension of formulas ----------------------------- - - resultDim = {} - for label in list(options.labels): # 
iterate over stable copy - resultDim[label] = np.size(eval(evaluator[label])) # get dimension of formula[label] - if resultDim[label] == 0: options.labels.remove(label) # remove label if invalid result - - for veteran in list(veterans): - if resultDim[veteran] != table.label_dimension(veteran): - damask.util.croak('skipping {} due to inconsistent dimension...'.format(veteran)) - veterans.remove(veteran) # discard culprit - -# ----------------------------------- line 1: assemble header -------------------------------------- - - for newby in newbies: - table.labels_append(['{}_{}'.format(i+1,newby) for i in range(resultDim[newby])] - if resultDim[newby] > 1 else newby) - - table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:])) - table.head_write() - -# -------------------------------------- evaluate formulas ----------------------------------------- - - if options.condition is None or eval(condition): # condition for veteran replacement fulfilled - for veteran in veterans: # evaluate formulas that overwrite - table.data[table.label_index(veteran): - table.label_index(veteran)+table.label_dimension(veteran)] = \ - listify(eval(evaluator[veteran])) - - for newby in newbies: # evaluate formulas that append - table.data_append(listify(eval(evaluator[newby]))) - - outputAlive = table.data_write() # output processed line - -# ------------------------------------- output finalization ---------------------------------------- - - table.close() # close ASCII table - diff --git a/processing/post/addIPFcolor.py b/processing/post/addIPFcolor.py deleted file mode 100755 index 014b0147d..000000000 --- a/processing/post/addIPFcolor.py +++ /dev/null @@ -1,68 +0,0 @@ -#!/usr/bin/env python3 - -import os -import sys -from io import StringIO -from optparse import OptionParser - -import numpy as np - -import damask - - -scriptName = os.path.splitext(os.path.basename(__file__))[0] -scriptID = ' '.join([scriptName,damask.version]) - -# 
-------------------------------------------------------------------- -# MAIN -# -------------------------------------------------------------------- - -parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """ -Add RGB color value corresponding to TSL-OIM scheme for inverse pole figures. - -""", version = scriptID) - -parser.add_option('-p', - '--pole', - dest = 'pole', - type = 'float', nargs = 3, metavar = 'float float float', - help = 'lab frame direction for inverse pole figure [%default]') -parser.add_option('-s', - '--symmetry', - dest = 'symmetry', - type = 'choice', choices = damask.Symmetry.lattices[1:], metavar='string', - help = 'crystal symmetry [%default] {{{}}} '.format(', '.join(damask.Symmetry.lattices[1:]))) -parser.add_option('-o', - '--orientation', - dest = 'quaternion', - metavar = 'string', - help = 'label of crystal orientation given as unit quaternion [%default]') - -parser.set_defaults(pole = (0.0,0.0,1.0), - quaternion = 'orientation', - symmetry = damask.Symmetry.lattices[-1], - ) - -(options, filenames) = parser.parse_args() -if filenames == []: filenames = [None] - -# damask.Orientation requires Bravais lattice, but we are only interested in symmetry -symmetry2lattice={'cubic':'fcc','hexagonal':'hex','tetragonal':'bct'} -lattice = symmetry2lattice[options.symmetry] - -pole = np.array(options.pole) -pole /= np.linalg.norm(pole) - -for name in filenames: - damask.util.report(scriptName,name) - - table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name) - orientation = table.get(options.quaternion) - color = np.empty((orientation.shape[0],3)) - for i,o in enumerate(orientation): - color[i] = damask.Orientation(o,lattice = lattice).IPFcolor(pole) - - table.add('IPF_{:g}{:g}{:g}_{sym}'.format(*options.pole,sym = options.symmetry.lower()), - color, - scriptID+' '+' '.join(sys.argv[1:])) - table.to_ASCII(sys.stdout if name is None else name) diff 
--git a/processing/post/addNorm.py b/processing/post/addNorm.py deleted file mode 100755 index 4ac2bf899..000000000 --- a/processing/post/addNorm.py +++ /dev/null @@ -1,75 +0,0 @@ -#!/usr/bin/env python3 - -import os -import sys -from io import StringIO -from optparse import OptionParser - -import numpy as np - -import damask - - -scriptName = os.path.splitext(os.path.basename(__file__))[0] -scriptID = ' '.join([scriptName,damask.version]) - -# definition of element-wise p-norms for matrices -# ToDo: better use numpy.linalg.norm - -def norm(which,object): - - if which == 'Abs': # p = 1 - return sum(map(abs, object)) - elif which == 'Frobenius': # p = 2 - return np.sqrt(sum([x*x for x in object])) - elif which == 'Max': # p = inf - return max(map(abs, object)) - else: - return -1 - - -# -------------------------------------------------------------------- -# MAIN -# -------------------------------------------------------------------- - -parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """ -Add column(s) containing norm of requested column(s) being either vectors or tensors. 
- -""", version = scriptID) - -normChoices = ['abs','frobenius','max'] -parser.add_option('-n','--norm', - dest = 'norm', - type = 'choice', choices = normChoices, metavar='string', - help = 'type of element-wise p-norm [frobenius] {%s}'%(','.join(map(str,normChoices)))) -parser.add_option('-l','--label', - dest = 'labels', - action = 'extend', metavar = '', - help = 'heading of column(s) to calculate norm of') - -parser.set_defaults(norm = 'frobenius', - ) - -(options,filenames) = parser.parse_args() -if filenames == []: filenames = [None] - -if options.norm.lower() not in normChoices: - parser.error('invalid norm ({}) specified.'.format(options.norm)) -if options.labels is None: - parser.error('no data column specified.') - -for name in filenames: - damask.util.report(scriptName,name) - - table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name) - for label in options.labels: - data = table.get(label) - data_norm = np.empty((data.shape[0],1)) - for i,d in enumerate(data): - data_norm[i] = norm(options.norm.capitalize(),d) - - table.add('norm{}({})'.format(options.norm.capitalize(),label), - data_norm, - scriptID+' '+' '.join(sys.argv[1:])) - - table.to_ASCII(sys.stdout if name is None else name) diff --git a/processing/post/addOrientations.py b/processing/post/addOrientations.py index e1752b0a4..706a959ad 100755 --- a/processing/post/addOrientations.py +++ b/processing/post/addOrientations.py @@ -2,6 +2,7 @@ import os import sys +from io import StringIO from optparse import OptionParser import numpy as np @@ -24,14 +25,8 @@ Additional (globally fixed) rotations of the lab frame and/or crystal frame can """, version = scriptID) -representations = { - 'quaternion': ['qu',4], - 'rodrigues': ['ro',4], - 'Rodrigues': ['Ro',3], - 'eulers': ['eu',3], - 'matrix': ['om',9], - 'angleaxis': ['ax',4], - } +representations = ['quaternion', 'rodrigues', 'eulers', 'matrix', 'axisangle'] + parser.add_option('-o', '--output', @@ -93,10 +88,10 
@@ parser.set_defaults(output = [], ) (options, filenames) = parser.parse_args() +if filenames == []: filenames = [None] -#options.output = list(map(lambda x: x.lower(), options.output)) if options.output == [] or (not set(options.output).issubset(set(representations))): - parser.error('output must be chosen from {}.'.format(', '.join(representations))) + parser.error('output must be chosen from {}.'.format(', '.join(representations))) input = [options.eulers is not None, options.rodrigues is not None, @@ -109,95 +104,47 @@ input = [options.eulers is not None, if np.sum(input) != 1: parser.error('needs exactly one input format.') -(label,dim,inputtype) = [(options.eulers,representations['eulers'][1],'eulers'), - (options.rodrigues,representations['rodrigues'][1],'rodrigues'), - ([options.x,options.y,options.z],[3,3,3],'frame'), - (options.matrix,representations['matrix'][1],'matrix'), - (options.quaternion,representations['quaternion'][1],'quaternion'), - ][np.where(input)[0][0]] # select input label that was requested - -r = damask.Rotation.fromAxisAngle(np.array(options.crystalrotation),options.degrees,normalise=True) -R = damask.Rotation.fromAxisAngle(np.array(options.labrotation),options.degrees,normalise=True) - - -# --- loop over input files ------------------------------------------------------------------------ - -if filenames == []: filenames = [None] +r = damask.Rotation.from_axis_angle(np.array(options.crystalrotation),options.degrees,normalise=True) +R = damask.Rotation.from_axis_angle(np.array(options.labrotation),options.degrees,normalise=True) for name in filenames: - try: - table = damask.ASCIItable(name = name) - except IOError: - continue - damask.util.report(scriptName,name) + damask.util.report(scriptName,name) -# ------------------------------------------ read header ------------------------------------------ + table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name) - table.head_read() + if options.eulers 
is not None: + label = options.eulers + print(np.max(table.get(options.eulers),axis=0)) + o = damask.Rotation.from_Eulers(table.get(options.eulers), options.degrees) + elif options.rodrigues is not None: + label = options.rodrigues + o = damask.Rotation.from_Rodrigues(table.get(options.rodrigues)) + elif options.matrix is not None: + label = options.matrix + o = damask.Rotation.from_matrix(table.get(options.matrix).reshape(-1,3,3)) + elif options.x is not None: + label = '<{},{},{}>'.format(options.x,options.y,options.z) + M = np.block([table.get(options.x),table.get(options.y),table.get(options.z)]).reshape(-1,3,3) + o = damask.Rotation.from_matrix(M/np.linalg.norm(M,axis=0)) + elif options.quaternion is not None: + label = options.quaternion + o = damask.Rotation.from_quaternion(table.get(options.quaternion)) -# ------------------------------------------ sanity checks ----------------------------------------- + o = r.broadcast_to(o.shape) @ o @ R.broadcast_to(o.shape) - errors = [] - remarks = [] - - if not np.all(table.label_dimension(label) == dim): errors.append('input {} does not have dimension {}.'.format(label,dim)) - else: column = table.label_index(label) + #if options.lattice is not None: + # o = damask.Orientation(rotation = o,lattice = options.lattice).reduced().rotation - if remarks != []: damask.util.croak(remarks) - if errors != []: - damask.util.croak(errors) - table.close(dismiss = True) - continue -# ------------------------------------------ assemble header --------------------------------------- + if 'rodrigues' in options.output: + table.add('ro({})'.format(label),o.as_Rodrigues(), scriptID+' '+' '.join(sys.argv[1:])) + if 'eulers' in options.output: + table.add('eu({})'.format(label),o.as_Eulers(options.degrees), scriptID+' '+' '.join(sys.argv[1:])) + if 'quaternion' in options.output: + table.add('qu({})'.format(label),o.as_quaternion(), scriptID+' '+' '.join(sys.argv[1:])) + if 'matrix' in options.output: + 
table.add('om({})'.format(label),o.as_matrix(), scriptID+' '+' '.join(sys.argv[1:])) + if 'axisangle' in options.output: + table.add('om({})'.format(label),o.as_axisangle(options.degrees), scriptID+' '+' '.join(sys.argv[1:])) - table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:])) - for output in options.output: - if output in representations: - table.labels_append(['{}_{}({})'.format(i+1,representations[output][0],label) \ - for i in range(representations[output][1])]) - table.head_write() - -# ------------------------------------------ process data ------------------------------------------ - - outputAlive = True - while outputAlive and table.data_read(): # read next data line of ASCII table - if inputtype == 'eulers': - d = representations['eulers'][1] - o = damask.Rotation.fromEulers(list(map(float,table.data[column:column+d])),options.degrees) - - elif inputtype == 'rodrigues': - d = representations['rodrigues'][1] - o = damask.Rotation.fromRodrigues(list(map(float,table.data[column:column+d]))) - - elif inputtype == 'matrix': - d = representations['matrix'][1] - o = damask.Rotation.fromMatrix(np.array(list(map(float,table.data[column:column+d]))).reshape(3,3)) - - elif inputtype == 'frame': - M = np.array(list(map(float,table.data[column[0]:column[0]+3] + \ - table.data[column[1]:column[1]+3] + \ - table.data[column[2]:column[2]+3]))).reshape(3,3).T - o = damask.Rotation.fromMatrix(M/np.linalg.norm(M,axis=0)) - - elif inputtype == 'quaternion': - d = representations['quaternion'][1] - o = damask.Rotation.fromQuaternion(list(map(float,table.data[column:column+d]))) - - o = r*o*R # apply additional lab and crystal frame rotations - - if options.lattice is not None: - o = damask.Orientation(rotation = o,lattice = options.lattice).reduced().rotation - - for output in options.output: - if output == 'quaternion': table.data_append(o.asQuaternion()) - elif output == 'rodrigues': table.data_append(o.asRodrigues()) - elif output == 'Rodrigues': 
table.data_append(o.asRodrigues(vector=True)) - elif output == 'eulers': table.data_append(o.asEulers(degrees=options.degrees)) - elif output == 'matrix': table.data_append(o.asMatrix()) - elif output == 'angleaxis': table.data_append(o.asAxisAngle(degrees=options.degrees)) - outputAlive = table.data_write() # output processed line - -# ------------------------------------------ output finalization ----------------------------------- - - table.close() # close ASCII tables + table.to_ASCII(sys.stdout if name is None else name) diff --git a/processing/post/addPK2.py b/processing/post/addPK2.py deleted file mode 100755 index 2894a5a90..000000000 --- a/processing/post/addPK2.py +++ /dev/null @@ -1,50 +0,0 @@ -#!/usr/bin/env python3 - -import os -import sys -from io import StringIO -from optparse import OptionParser - -import damask - - -scriptName = os.path.splitext(os.path.basename(__file__))[0] -scriptID = ' '.join([scriptName,damask.version]) - -# -------------------------------------------------------------------- -# MAIN -# -------------------------------------------------------------------- - -parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """ -Add column(s) containing Second Piola--Kirchhoff stress based on given column(s) of deformation -gradient and first Piola--Kirchhoff stress. 
- -""", version = scriptID) - -parser.add_option('-f','--defgrad', - dest = 'defgrad', - type = 'string', metavar = 'string', - help = 'heading of columns containing deformation gradient [%default]') -parser.add_option('-p','--stress', - dest = 'stress', - type = 'string', metavar = 'string', - help = 'heading of columns containing first Piola--Kirchhoff stress [%default]') - -parser.set_defaults(defgrad = 'f', - stress = 'p', - ) - -(options,filenames) = parser.parse_args() -if filenames == []: filenames = [None] - -for name in filenames: - damask.util.report(scriptName,name) - - table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name) - - table.add('S', - damask.mechanics.PK2(table.get(options.stress ).reshape(-1,3,3), - table.get(options.defgrad).reshape(-1,3,3)).reshape(-1,9), - scriptID+' '+' '.join(sys.argv[1:])) - - table.to_ASCII(sys.stdout if name is None else name) diff --git a/processing/post/addPole.py b/processing/post/addPole.py deleted file mode 100755 index 58f9235dc..000000000 --- a/processing/post/addPole.py +++ /dev/null @@ -1,65 +0,0 @@ -#!/usr/bin/env python3 - -import os -import sys -from io import StringIO -from optparse import OptionParser - -import numpy as np - -import damask - - -scriptName = os.path.splitext(os.path.basename(__file__))[0] -scriptID = ' '.join([scriptName,damask.version]) - - -# -------------------------------------------------------------------- -# MAIN -# -------------------------------------------------------------------- - -parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """ -Add coordinates of stereographic projection of given direction (pole) in crystal frame. 
- -""", version = scriptID) - -parser.add_option('-p', - '--pole', - dest = 'pole', - type = 'float', nargs = 3, metavar = 'float float float', - help = 'crystal frame direction for pole figure [%default]') -parser.add_option('--polar', - dest = 'polar', - action = 'store_true', - help = 'output polar coordinates (r,φ) instead of Cartesian coordinates (x,y)') -parser.add_option('-o', - '--orientation', - dest = 'quaternion', - metavar = 'string', - help = 'label of crystal orientation given as unit quaternion [%default]') - -parser.set_defaults(pole = (1.0,0.0,0.0), - quaternion = 'orientation', - ) - -(options, filenames) = parser.parse_args() -if filenames == []: filenames = [None] - -pole = np.array(options.pole) -pole /= np.linalg.norm(pole) - -for name in filenames: - damask.util.report(scriptName,name) - - table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name) - orientation = table.get(options.quaternion) - poles = np.empty((orientation.shape[0],2)) - for i,o in enumerate(orientation): - rotatedPole = damask.Rotation(o)*pole # rotate pole according to crystal orientation - (x,y) = rotatedPole[0:2]/(1.+abs(pole[2])) # stereographic projection - poles[i] = [np.sqrt(x*x+y*y),np.arctan2(y,x)] if options.polar else [x,y] # cartesian coordinates - - table.add('pole_{}{}{}'.format(*options.pole), - poles, - scriptID+' '+' '.join(sys.argv[1:])) - table.to_ASCII(sys.stdout if name is None else name) diff --git a/processing/post/addSchmidfactors.py b/processing/post/addSchmidfactors.py index b3c4c7500..d1899e9a0 100755 --- a/processing/post/addSchmidfactors.py +++ b/processing/post/addSchmidfactors.py @@ -2,6 +2,7 @@ import os import sys +from io import StringIO from optparse import OptionParser import numpy as np @@ -15,91 +16,82 @@ scriptID = ' '.join([scriptName,damask.version]) slipSystems = { 'fcc': np.array([ - # Slip direction Plane normal - [ 0, 1,-1, 1, 1, 1, ], - [-1, 0, 1, 1, 1, 1, ], - [ 1,-1, 0, 1, 1, 1, ], - [ 
0,-1,-1, -1,-1, 1, ], - [ 1, 0, 1, -1,-1, 1, ], - [-1, 1, 0, -1,-1, 1, ], - [ 0,-1, 1, 1,-1,-1, ], - [-1, 0,-1, 1,-1,-1, ], - [ 1, 1, 0, 1,-1,-1, ], - [ 0, 1, 1, -1, 1,-1, ], - [ 1, 0,-1, -1, 1,-1, ], - [-1,-1, 0, -1, 1,-1, ], - ],'f'), + [+0,+1,-1 , +1,+1,+1], + [-1,+0,+1 , +1,+1,+1], + [+1,-1,+0 , +1,+1,+1], + [+0,-1,-1 , -1,-1,+1], + [+1,+0,+1 , -1,-1,+1], + [-1,+1,+0 , -1,-1,+1], + [+0,-1,+1 , +1,-1,-1], + [-1,+0,-1 , +1,-1,-1], + [+1,+1,+0 , +1,-1,-1], + [+0,+1,+1 , -1,+1,-1], + [+1,+0,-1 , -1,+1,-1], + [-1,-1,+0 , -1,+1,-1], + ],'d'), 'bcc': np.array([ - # Slip system <111>{110} - [ 1,-1, 1, 0, 1, 1, ], - [-1,-1, 1, 0, 1, 1, ], - [ 1, 1, 1, 0,-1, 1, ], - [-1, 1, 1, 0,-1, 1, ], - [-1, 1, 1, 1, 0, 1, ], - [-1,-1, 1, 1, 0, 1, ], - [ 1, 1, 1, -1, 0, 1, ], - [ 1,-1, 1, -1, 0, 1, ], - [-1, 1, 1, 1, 1, 0, ], - [-1, 1,-1, 1, 1, 0, ], - [ 1, 1, 1, -1, 1, 0, ], - [ 1, 1,-1, -1, 1, 0, ], - # Slip system <111>{112} - [-1, 1, 1, 2, 1, 1, ], - [ 1, 1, 1, -2, 1, 1, ], - [ 1, 1,-1, 2,-1, 1, ], - [ 1,-1, 1, 2, 1,-1, ], - [ 1,-1, 1, 1, 2, 1, ], - [ 1, 1,-1, -1, 2, 1, ], - [ 1, 1, 1, 1,-2, 1, ], - [-1, 1, 1, 1, 2,-1, ], - [ 1, 1,-1, 1, 1, 2, ], - [ 1,-1, 1, -1, 1, 2, ], - [-1, 1, 1, 1,-1, 2, ], - [ 1, 1, 1, 1, 1,-2, ], - ],'f'), + [+1,-1,+1 , +0,+1,+1], + [-1,-1,+1 , +0,+1,+1], + [+1,+1,+1 , +0,-1,+1], + [-1,+1,+1 , +0,-1,+1], + [-1,+1,+1 , +1,+0,+1], + [-1,-1,+1 , +1,+0,+1], + [+1,+1,+1 , -1,+0,+1], + [+1,-1,+1 , -1,+0,+1], + [-1,+1,+1 , +1,+1,+0], + [-1,+1,-1 , +1,+1,+0], + [+1,+1,+1 , -1,+1,+0], + [+1,+1,-1 , -1,+1,+0], + [-1,+1,+1 , +2,+1,+1], + [+1,+1,+1 , -2,+1,+1], + [+1,+1,-1 , +2,-1,+1], + [+1,-1,+1 , +2,+1,-1], + [+1,-1,+1 , +1,+2,+1], + [+1,+1,-1 , -1,+2,+1], + [+1,+1,+1 , +1,-2,+1], + [-1,+1,+1 , +1,+2,-1], + [+1,+1,-1 , +1,+1,+2], + [+1,-1,+1 , -1,+1,+2], + [-1,+1,+1 , +1,-1,+2], + [+1,+1,+1 , +1,+1,-2], + ],'d'), 'hex': np.array([ - # Basal systems <11.0>{00.1} (independent of c/a-ratio, Bravais notation (4 coordinate base)) - [ 2, -1, -1, 0, 0, 0, 0, 1, ], - [-1, 
2, -1, 0, 0, 0, 0, 1, ], - [-1, -1, 2, 0, 0, 0, 0, 1, ], - # 1st type prismatic systems <11.0>{10.0} (independent of c/a-ratio) - [ 2, -1, -1, 0, 0, 1, -1, 0, ], - [-1, 2, -1, 0, -1, 0, 1, 0, ], - [-1, -1, 2, 0, 1, -1, 0, 0, ], - # 2nd type prismatic systems <10.0>{11.0} -- a slip; plane normals independent of c/a-ratio - [ 0, 1, -1, 0, 2, -1, -1, 0, ], - [-1, 0, 1, 0, -1, 2, -1, 0, ], - [ 1, -1, 0, 0, -1, -1, 2, 0, ], - # 1st type 1st order pyramidal systems <11.0>{-11.1} -- plane normals depend on the c/a-ratio - [ 2, -1, -1, 0, 0, 1, -1, 1, ], - [-1, 2, -1, 0, -1, 0, 1, 1, ], - [-1, -1, 2, 0, 1, -1, 0, 1, ], - [ 1, 1, -2, 0, -1, 1, 0, 1, ], - [-2, 1, 1, 0, 0, -1, 1, 1, ], - [ 1, -2, 1, 0, 1, 0, -1, 1, ], - # pyramidal system: c+a slip <11.3>{-10.1} -- plane normals depend on the c/a-ratio - [ 2, -1, -1, 3, -1, 1, 0, 1, ], - [ 1, -2, 1, 3, -1, 1, 0, 1, ], - [-1, -1, 2, 3, 1, 0, -1, 1, ], - [-2, 1, 1, 3, 1, 0, -1, 1, ], - [-1, 2, -1, 3, 0, -1, 1, 1, ], - [ 1, 1, -2, 3, 0, -1, 1, 1, ], - [-2, 1, 1, 3, 1, -1, 0, 1, ], - [-1, 2, -1, 3, 1, -1, 0, 1, ], - [ 1, 1, -2, 3, -1, 0, 1, 1, ], - [ 2, -1, -1, 3, -1, 0, 1, 1, ], - [ 1, -2, 1, 3, 0, 1, -1, 1, ], - [-1, -1, 2, 3, 0, 1, -1, 1, ], - # pyramidal system: c+a slip <11.3>{-1-1.2} -- as for hexagonal ice (Castelnau et al. 
1996, similar to twin system found below) - [ 2, -1, -1, 3, -2, 1, 1, 2, ], # sorted according to similar twin system - [-1, 2, -1, 3, 1, -2, 1, 2, ], # <11.3>{-1-1.2} shear = 2((c/a)^2-2)/(3 c/a) - [-1, -1, 2, 3, 1, 1, -2, 2, ], - [-2, 1, 1, 3, 2, -1, -1, 2, ], - [ 1, -2, 1, 3, -1, 2, -1, 2, ], - [ 1, 1, -2, 3, -1, -1, 2, 2, ], - ],'f'), + [+2,-1,-1,+0 , +0,+0,+0,+1], + [-1,+2,-1,+0 , +0,+0,+0,+1], + [-1,-1,+2,+0 , +0,+0,+0,+1], + [+2,-1,-1,+0 , +0,+1,-1,+0], + [-1,+2,-1,+0 , -1,+0,+1,+0], + [-1,-1,+2,+0 , +1,-1,+0,+0], + [-1,+1,+0,+0 , +1,+1,-2,+0], + [+0,-1,+1,+0 , -2,+1,+1,+0], + [+1,+0,-1,+0 , +1,-2,+1,+0], + [-1,+2,-1,+0 , +1,+0,-1,+1], + [-2,+1,+1,+0 , +0,+1,-1,+1], + [-1,-1,+2,+0 , -1,+1,+0,+1], + [+1,-2,+1,+0 , -1,+0,+1,+1], + [+2,-1,-1,+0 , +0,-1,+1,+1], + [+1,+1,-2,+0 , +1,-1,+0,+1], + [-2,+1,+1,+3 , +1,+0,-1,+1], + [-1,-1,+2,+3 , +1,+0,-1,+1], + [-1,-1,+2,+3 , +0,+1,-1,+1], + [+1,-2,+1,+3 , +0,+1,-1,+1], + [+1,-2,+1,+3 , -1,+1,+0,+1], + [+2,-1,-1,+3 , -1,+1,+0,+1], + [+2,-1,-1,+3 , -1,+0,+1,+1], + [+1,+1,-2,+3 , -1,+0,+1,+1], + [+1,+1,-2,+3 , +0,-1,+1,+1], + [-1,+2,-1,+3 , +0,-1,+1,+1], + [-1,+2,-1,+3 , +1,-1,+0,+1], + [-2,+1,+1,+3 , +1,-1,+0,+1], + [-1,-1,+2,+3 , +1,+1,-2,+2], + [+1,-2,+1,+3 , -1,+2,-1,+2], + [+2,-1,-1,+3 , -2,+1,+1,+2], + [+1,+1,-2,+3 , -1,-1,+2,+2], + [-1,+2,-1,+3 , +1,-2,+1,+2], + [-2,+1,+1,+3 , +2,-1,-1,+2], + ],'d'), } # -------------------------------------------------------------------- @@ -111,11 +103,11 @@ Add columns listing Schmid factors (and optional trace vector of selected system """, version = scriptID) -latticeChoices = ('fcc','bcc','hex') +lattice_choices = list(slipSystems.keys()) parser.add_option('-l', '--lattice', - dest = 'lattice', type = 'choice', choices = latticeChoices, metavar='string', - help = 'type of lattice structure [%default] {}'.format(latticeChoices)) + dest = 'lattice', type = 'choice', choices = lattice_choices, metavar='string', + help = 'type of lattice structure [%default] 
{}'.format(lattice_choices)) parser.add_option('--covera', dest = 'CoverA', type = 'float', metavar = 'float', help = 'C over A ratio for hexagonal systems [%default]') @@ -138,88 +130,63 @@ parser.add_option('-o', parser.set_defaults(force = (0.0,0.0,1.0), quaternion='orientation', normal = None, - lattice = latticeChoices[0], + lattice = lattice_choices[0], CoverA = np.sqrt(8./3.), ) (options, filenames) = parser.parse_args() - -force = np.array(options.force) -force /= np.linalg.norm(force) - -if options.normal is not None: - normal = np.array(options.normal) - normal /= np.linalg.norm(normal) - if abs(np.dot(force,normal)) > 1e-3: - parser.error('stress plane normal not orthogonal to force direction') -else: - normal = force - -slip_direction = np.zeros((len(slipSystems[options.lattice]),3),'f') -slip_normal = np.zeros_like(slip_direction) - - -if options.lattice in latticeChoices[:2]: - slip_direction = slipSystems[options.lattice][:,:3] - slip_normal = slipSystems[options.lattice][:,3:] -elif options.lattice == latticeChoices[2]: - # convert 4 Miller index notation of hex to orthogonal 3 Miller index notation - for i in range(len(slip_direction)): - slip_direction[i] = np.array([slipSystems['hex'][i,0]*1.5, - (slipSystems['hex'][i,0] + 2.*slipSystems['hex'][i,1])*0.5*np.sqrt(3), - slipSystems['hex'][i,3]*options.CoverA, - ]) - slip_normal[i] = np.array([slipSystems['hex'][i,4], - (slipSystems['hex'][i,4] + 2.*slipSystems['hex'][i,5])/np.sqrt(3), - slipSystems['hex'][i,7]/options.CoverA, - ]) - -slip_direction /= np.tile(np.linalg.norm(slip_direction,axis=1),(3,1)).T -slip_normal /= np.tile(np.linalg.norm(slip_normal ,axis=1),(3,1)).T - -# --- loop over input files ------------------------------------------------------------------------ - if filenames == []: filenames = [None] +force = np.array(options.force)/np.linalg.norm(options.force) + +if options.normal is not None: + normal = np.array(options.normal)/np.linalg.norm(options.normal) + if 
abs(np.dot(force,normal)) > 1e-3: + parser.error('stress plane normal not orthogonal to force direction') +else: + normal = force + + +if options.lattice in ['bcc','fcc']: + slip_direction = slipSystems[options.lattice][:,:3] + slip_normal = slipSystems[options.lattice][:,3:] +elif options.lattice == 'hex': + slip_direction = np.zeros((len(slipSystems['hex']),3),'d') + slip_normal = np.zeros_like(slip_direction) + # convert 4 Miller index notation of hex to orthogonal 3 Miller index notation + for i in range(len(slip_direction)): + slip_direction[i] = np.array([slipSystems['hex'][i,0]*1.5, + (slipSystems['hex'][i,0] + 2.*slipSystems['hex'][i,1])*0.5*np.sqrt(3), + slipSystems['hex'][i,3]*options.CoverA, + ]) + slip_normal[i] = np.array([slipSystems['hex'][i,4], + (slipSystems['hex'][i,4] + 2.*slipSystems['hex'][i,5])/np.sqrt(3), + slipSystems['hex'][i,7]/options.CoverA, + ]) + +slip_direction /= np.linalg.norm(slip_direction,axis=1,keepdims=True) +slip_normal /= np.linalg.norm(slip_normal, axis=1,keepdims=True) + +labels = ['S[{direction[0]:.1g}_{direction[1]:.1g}_{direction[2]:.1g}]' + '({normal[0]:.1g}_{normal[1]:.1g}_{normal[2]:.1g})'\ + .format(normal = theNormal, direction = theDirection, + ) for theNormal,theDirection in zip(slip_normal,slip_direction)] + for name in filenames: - try: - table = damask.ASCIItable(name = name) - except IOError: - continue - damask.util.report(scriptName,name) + damask.util.report(scriptName,name) -# ------------------------------------------ read header ------------------------------------------ + table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name) - table.head_read() + o = damask.Rotation.from_quaternion(table.get(options.quaternion)) -# ------------------------------------------ sanity checks ---------------------------------------- - if not table.label_dimension(options.quaternion) == 4: - damask.util.croak('input {} does not have dimension 4.'.format(options.quaternion)) - 
table.close(dismiss = True) # close ASCIItable and remove empty file - continue + force = np.broadcast_to(force, o.shape+(3,)) + normal = np.broadcast_to(normal,o.shape+(3,)) + slip_direction = np.broadcast_to(slip_direction,o.shape+slip_direction.shape) + slip_normal = np.broadcast_to(slip_normal, o.shape+slip_normal.shape) + S = np.abs(np.einsum('ijk,ik->ij',slip_direction,(o@force))* + np.einsum('ijk,ik->ij',slip_normal, (o@normal))) - column = table.label_index(options.quaternion) + for i,label in enumerate(labels): + table.add(label,S[:,i],scriptID+' '+' '.join(sys.argv[1:])) -# ------------------------------------------ assemble header --------------------------------------- - - table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:])) - table.labels_append(['S[{direction[0]:.1g}_{direction[1]:.1g}_{direction[2]:.1g}]' - '({normal[0]:.1g}_{normal[1]:.1g}_{normal[2]:.1g})'\ - .format(normal = theNormal, - direction = theDirection, - ) for theNormal,theDirection in zip(slip_normal,slip_direction)]) - table.head_write() - -# ------------------------------------------ process data ------------------------------------------ - - outputAlive = True - while outputAlive and table.data_read(): # read next data line of ASCII table - o = damask.Rotation(np.array(list(map(float,table.data[column:column+4])))) - - table.data_append( np.abs( np.sum(slip_direction * (o * force) ,axis=1) \ - * np.sum(slip_normal * (o * normal),axis=1))) - outputAlive = table.data_write() # output processed line - -# ------------------------------------------ output finalization ----------------------------------- - - table.close() # close ASCII tables + table.to_ASCII(sys.stdout if name is None else name) diff --git a/processing/post/addSpectralDecomposition.py b/processing/post/addSpectralDecomposition.py deleted file mode 100755 index 01371cede..000000000 --- a/processing/post/addSpectralDecomposition.py +++ /dev/null @@ -1,58 +0,0 @@ -#!/usr/bin/env python3 - -import os -import sys -from 
io import StringIO -from optparse import OptionParser - -import numpy as np - -import damask - - -scriptName = os.path.splitext(os.path.basename(__file__))[0] -scriptID = ' '.join([scriptName,damask.version]) - - -# -------------------------------------------------------------------- -# MAIN -# -------------------------------------------------------------------- - -parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """ -Add column(s) containing eigenvalues and eigenvectors of requested symmetric tensor column(s). - -""", version = scriptID) - -parser.add_option('-t','--tensor', - dest = 'tensor', - action = 'extend', metavar = '', - help = 'heading of columns containing tensor field values') -parser.add_option('--no-check', - dest = 'rh', - action = 'store_false', - help = 'skip check for right-handed eigenvector basis') - -parser.set_defaults(rh = True, - ) - -(options,filenames) = parser.parse_args() -if filenames == []: filenames = [None] - -for name in filenames: - damask.util.report(scriptName,name) - - table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name) - - for tensor in options.tensor: - - t = table.get(tensor).reshape(-1,3,3) - (u,v) = np.linalg.eigh(damask.mechanics.symmetric(t)) - if options.rh: v[np.linalg.det(v) < 0.0,:,2] *= -1.0 - - for i,o in enumerate(['Min','Mid','Max']): - table.add('eigval{}({})'.format(o,tensor),u[:,i], scriptID+' '+' '.join(sys.argv[1:])) - - for i,o in enumerate(['Min','Mid','Max']): - table.add('eigvec{}({})'.format(o,tensor),v[:,:,i],scriptID+' '+' '.join(sys.argv[1:])) - - table.to_ASCII(sys.stdout if name is None else name) diff --git a/processing/post/addStrainTensors.py b/processing/post/addStrainTensors.py deleted file mode 100755 index 84c460b00..000000000 --- a/processing/post/addStrainTensors.py +++ /dev/null @@ -1,98 +0,0 @@ -#!/usr/bin/env python3 - -import os -import sys -from io import StringIO -from optparse 
import OptionParser - -import damask - - -scriptName = os.path.splitext(os.path.basename(__file__))[0] -scriptID = ' '.join([scriptName,damask.version]) - -def parameters(stretch,strain): - """Albrecht Bertram: Elasticity and Plasticity of Large Deformations An Introduction (3rd Edition, 2012), p. 102.""" - return { - 'V#ln': ('V',0.0), - 'U#ln': ('U',0.0), - 'V#Biot': ('V',-.5), - 'U#Biot': ('U',+.5), - 'V#Green': ('V',-1.), - 'U#Green': ('U',+1.), - }[stretch+'#'+strain] - - -# -------------------------------------------------------------------- -# MAIN -# -------------------------------------------------------------------- - -parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """ -Add column(s) containing given strains based on given stretches of requested deformation gradient column(s). - -""", version = scriptID) - -parser.add_option('-u','--right', - dest = 'right', - action = 'store_true', - help = 'material strains based on right Cauchy--Green deformation, i.e., C and U') -parser.add_option('-v','--left', - dest = 'left', - action = 'store_true', - help = 'spatial strains based on left Cauchy--Green deformation, i.e., B and V') -parser.add_option('-0','--logarithmic', - dest = 'logarithmic', - action = 'store_true', - help = 'calculate logarithmic strain tensor') -parser.add_option('-1','--biot', - dest = 'biot', - action = 'store_true', - help = 'calculate biot strain tensor') -parser.add_option('-2','--green', - dest = 'green', - action = 'store_true', - help = 'calculate green strain tensor') -parser.add_option('-f','--defgrad', - dest = 'defgrad', - action = 'extend', - metavar = '', - help = 'heading(s) of columns containing deformation tensor values [%default]') - -parser.set_defaults( - defgrad = ['f'], - ) - -(options,filenames) = parser.parse_args() -if filenames == []: filenames = [None] - -if len(options.defgrad) > 1: - options.defgrad = options.defgrad[1:] - -stretches = [] -strains = 
[] - -if options.right: stretches.append('U') -if options.left: stretches.append('V') -if options.logarithmic: strains.append('ln') -if options.biot: strains.append('Biot') -if options.green: strains.append('Green') - -if options.defgrad is None: - parser.error('no data column specified.') - -for name in filenames: - damask.util.report(scriptName,name) - - table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name) - - for defgrad in options.defgrad: - F = table.get(defgrad).reshape(-1,3,3) - for theStretch in stretches: - for theStrain in strains: - (t,m) = parameters(theStretch,theStrain) - label = '{}({}){}'.format(theStrain,theStretch,defgrad if defgrad != 'f' else '') - table.add(label, - damask.mechanics.strain_tensor(F,t,m).reshape(-1,9), - scriptID+' '+' '.join(sys.argv[1:])) - - table.to_ASCII(sys.stdout if name is None else name) diff --git a/processing/post/rotateData.py b/processing/post/rotateData.py deleted file mode 100755 index d270eb79e..000000000 --- a/processing/post/rotateData.py +++ /dev/null @@ -1,63 +0,0 @@ -#!/usr/bin/env python3 - -import os -import sys -from io import StringIO -from optparse import OptionParser - -import damask - - -scriptName = os.path.splitext(os.path.basename(__file__))[0] -scriptID = ' '.join([scriptName,damask.version]) - - -# -------------------------------------------------------------------- -# MAIN -# -------------------------------------------------------------------- - -parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """ -Rotate vector and/or tensor column data by given angle around given axis. 
- -""", version = scriptID) - -parser.add_option('-d', '--data', - dest = 'data', - action = 'extend', metavar = '', - help = 'vector/tensor value(s) label(s)') -parser.add_option('-r', '--rotation', - dest = 'rotation', - type = 'float', nargs = 4, metavar = ' '.join(['float']*4), - help = 'axis and angle to rotate data [%default]') -parser.add_option('--degrees', - dest = 'degrees', - action = 'store_true', - help = 'angles are given in degrees') - -parser.set_defaults(rotation = (1.,1.,1.,0), # no rotation about (1,1,1) - degrees = False, - ) - -(options,filenames) = parser.parse_args() -if filenames == []: filenames = [None] - -if options.data is None: - parser.error('no data column specified.') - -r = damask.Rotation.fromAxisAngle(options.rotation,options.degrees,normalise=True) - -for name in filenames: - damask.util.report(scriptName,name) - - table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name) - - for data in options.data: - d = table.get(data) - if table.shapes[data] == (9,): d=d.reshape(-1,3,3) - for i,l in enumerate(d): - d[i] = r*l - if table.shapes[data] == (9,): d=d.reshape(-1,9) - - table.set(data,d,scriptID+' '+' '.join(sys.argv[1:])) - - table.to_ASCII(sys.stdout if name is None else name) diff --git a/processing/post/viewTable.py b/processing/post/viewTable.py deleted file mode 100755 index 1f134754d..000000000 --- a/processing/post/viewTable.py +++ /dev/null @@ -1,89 +0,0 @@ -#!/usr/bin/env python3 - -import os -from optparse import OptionParser - -import damask - - -scriptName = os.path.splitext(os.path.basename(__file__))[0] -scriptID = ' '.join([scriptName,damask.version]) - - -# -------------------------------------------------------------------- -# MAIN -# -------------------------------------------------------------------- - -parser = OptionParser(usage='%prog options [ASCIItable(s)]', description = """ -Show components of given ASCIItable(s). 
- -""", version = scriptID) - - -parser.add_option('-a','--head', - dest = 'head', - action = 'store_true', - help = 'output complete header (info + labels)') -parser.add_option('-i','--info', - dest = 'info', - action = 'store_true', - help = 'output info lines') -parser.add_option('-l','--labels', - dest = 'labels', - action = 'store_true', - help = 'output labels') -parser.add_option('-d','--data', - dest = 'data', - action = 'store_true', - help = 'output data') -parser.add_option('-t','--table', - dest = 'table', - action = 'store_true', - help = 'output heading line for proper ASCIItable format') -parser.add_option('--nolabels', - dest = 'labeled', - action = 'store_false', - help = 'table has no labels') -parser.set_defaults(table = False, - head = False, - info = False, - labels = False, - data = False, - labeled = True, - ) - -(options,filenames) = parser.parse_args() - -# --- loop over input files ------------------------------------------------------------------------- - -if filenames == []: filenames = [None] - -for name in filenames: - try: - table = damask.ASCIItable(name = name, labeled = options.labeled, readonly = True) - except IOError: - continue - details = ', '.join( - (['header'] if options.table else []) + - (['info'] if options.head or options.info else []) + - (['labels'] if options.head or options.labels else []) + - (['data'] if options.data else []) + - [] - ) - damask.util.report(scriptName,(name if name is not None else '') + ('' if details == '' else ' -- '+details)) - -# ------------------------------------------ output head --------------------------------------- - - table.head_read() - if not (options.head or options.info): table.info_clear() - if not (options.head or (options.labels and options.labeled)): table.labels_clear() - - table.head_write(header = options.table) - -# ------------------------------------------ output data --------------------------------------- - - outputAlive = options.data - while outputAlive and 
table.data_read(): # read next data line of ASCII table - outputAlive = table.data_write() # output line - - table.close() diff --git a/processing/pre/geom_addPrimitive.py b/processing/pre/geom_addPrimitive.py index f33ba27b1..c04fcf565 100755 --- a/processing/pre/geom_addPrimitive.py +++ b/processing/pre/geom_addPrimitive.py @@ -84,9 +84,9 @@ if [options.angleaxis,options.quaternion].count(None) == 0: parser.error('more than one rotation specified.') if options.angleaxis is not None: - rotation = damask.Rotation.fromAxisAngle(np.array(options.angleaxis),options.degrees,normalise=True) + rotation = damask.Rotation.from_axis_angle(np.array(options.angleaxis),options.degrees,normalise=True) elif options.quaternion is not None: - rotation = damask.Rotation.fromQuaternion(options.quaternion) + rotation = damask.Rotation.from_quaternion(options.quaternion) else: rotation = damask.Rotation() diff --git a/processing/pre/geom_canvas.py b/processing/pre/geom_canvas.py deleted file mode 100755 index edd5fe622..000000000 --- a/processing/pre/geom_canvas.py +++ /dev/null @@ -1,74 +0,0 @@ -#!/usr/bin/env python3 - -import os -import sys -from io import StringIO -from optparse import OptionParser - -import numpy as np - -import damask - - -scriptName = os.path.splitext(os.path.basename(__file__))[0] -scriptID = ' '.join([scriptName,damask.version]) - - -# -------------------------------------------------------------------- -# MAIN -# -------------------------------------------------------------------- - -parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [geomfile(s)]', description = """ -Increases or decreases the (three-dimensional) canvas. -Grid can be given as absolute or relative values, e.g. 16 16 16 or 2x 0.5x 32. 
- -""", version = scriptID) - -parser.add_option('-g','--grid', - dest = 'grid', - type = 'string', nargs = 3, metavar = ' '.join(['string']*3), - help = 'a,b,c grid of hexahedral box') -parser.add_option('-o','--offset', - dest = 'offset', - type = 'int', nargs = 3, metavar = ' '.join(['int']*3), - help = 'a,b,c offset from old to new origin of grid [%default]') -parser.add_option('-f','--fill', - dest = 'fill', - type = 'float', metavar = 'int', - help = 'background microstructure index, defaults to max microstructure index + 1') - -parser.set_defaults(offset = (0,0,0)) - -(options, filenames) = parser.parse_args() - - -if filenames == []: filenames = [None] - -for name in filenames: - damask.util.report(scriptName,name) - - geom = damask.Geom.from_file(StringIO(''.join(sys.stdin.read())) if name is None else name) - origin = geom.get_origin() - size = geom.get_size() - old = new = geom.get_grid() - offset = np.asarray(options.offset) - - if options.grid is not None: - new = np.maximum(1, - np.array([int(o*float(n.lower().replace('x',''))) if n.lower().endswith('x') \ - else int(n) for o,n in zip(old,options.grid)],dtype=int)) - - canvas = np.full(new,options.fill if options.fill is not None - else np.nanmax(geom.microstructure)+1,geom.microstructure.dtype) - - l = np.clip( offset, 0,np.minimum(old +offset,new)) # noqa - r = np.clip( offset+old,0,np.minimum(old*2+offset,new)) - L = np.clip(-offset, 0,np.minimum(new -offset,old)) - R = np.clip(-offset+new,0,np.minimum(new*2-offset,old)) - canvas[l[0]:r[0],l[1]:r[1],l[2]:r[2]] = geom.microstructure[L[0]:R[0],L[1]:R[1],L[2]:R[2]] - - - damask.util.croak(geom.update(canvas,origin=origin+offset*size/old,rescale=True)) - geom.add_comments(scriptID + ' ' + ' '.join(sys.argv[1:])) - - geom.to_file(sys.stdout if name is None else name,pack=False) diff --git a/processing/pre/geom_check.py b/processing/pre/geom_check.py deleted file mode 100755 index e7e4d1009..000000000 --- a/processing/pre/geom_check.py +++ /dev/null @@ 
-1,37 +0,0 @@ -#!/usr/bin/env python3 - -import os -import sys -from io import StringIO -from optparse import OptionParser - -import damask - - -scriptName = os.path.splitext(os.path.basename(__file__))[0] -scriptID = ' '.join([scriptName,damask.version]) - - -#-------------------------------------------------------------------------------------------------- -# MAIN -#-------------------------------------------------------------------------------------------------- - -parser = OptionParser(option_class=damask.extendableOption, usage='%prog [geomfile(s)]', description = """ -Writes vtk file for visualization. - -""", version = scriptID) - -(options, filenames) = parser.parse_args() -if filenames == []: filenames = [None] - -for name in filenames: - damask.util.report(scriptName,name) - - geom = damask.Geom.from_file(StringIO(''.join(sys.stdin.read())) if name is None else name) - - damask.util.croak(geom) - - if name: - geom.to_vtk(os.path.splitext(name)[0]) - else: - sys.stdout.write(geom.to_vtk()) diff --git a/processing/pre/geom_fromDREAM3D.py b/processing/pre/geom_fromDREAM3D.py index 7d5b1442d..c48698e53 100755 --- a/processing/pre/geom_fromDREAM3D.py +++ b/processing/pre/geom_fromDREAM3D.py @@ -97,7 +97,7 @@ for name in filenames: dataset = os.path.join(group_pointwise,options.quaternion) try: quats = np.reshape(inFile[dataset][...],(np.product(grid),4)) - rot = [damask.Rotation.fromQuaternion(q,True,P=+1) for q in quats] + rot = [damask.Rotation.from_quaternion(q,True,P=+1) for q in quats] except KeyError: errors.append('Pointwise orientation (quaternion) data ({}) not readable'.format(dataset)) @@ -123,7 +123,7 @@ for name in filenames: dataset = os.path.join(group_average,options.quaternion) try: - rot = [damask.Rotation.fromQuaternion(q,True,P=+1) for q in inFile[dataset][...][1:]] # skip first entry (unindexed) + rot = [damask.Rotation.from_quaternion(q,True,P=+1) for q in inFile[dataset][...][1:]] # skip first entry (unindexed) except KeyError: 
errors.append('Average orientation data ({}) not readable'.format(dataset)) @@ -140,7 +140,7 @@ for name in filenames: config_header = [''] for i in range(np.nanmax(microstructure)): config_header += ['[{}{}]'.format(label,i+1), - '(gauss)\tphi1 {:.2f}\tPhi {:.2f}\tphi2 {:.2f}'.format(*rot[i].asEulers(degrees = True)), + '(gauss)\tphi1 {:.2f}\tPhi {:.2f}\tphi2 {:.2f}'.format(*rot[i].as_Eulers(degrees = True)), ] config_header += [''] for i in range(np.nanmax(microstructure)): diff --git a/processing/pre/geom_fromTable.py b/processing/pre/geom_fromTable.py index f6c9ba38e..be5b3a23c 100755 --- a/processing/pre/geom_fromTable.py +++ b/processing/pre/geom_fromTable.py @@ -89,7 +89,7 @@ for name in filenames: for i,data in enumerate(unique): ori = damask.Rotation(data[0:4]) config_header += ['[Grain{}]'.format(i+1), - '(gauss)\tphi1 {:.2f}\tPhi {:.2f}\tphi2 {:.2f}'.format(*ori.asEulers(degrees = True)), + '(gauss)\tphi1 {:.2f}\tPhi {:.2f}\tphi2 {:.2f}'.format(*ori.as_Eulers(degrees = True)), ] if options.axes is not None: config_header += ['axes\t{} {} {}'.format(*options.axes)] diff --git a/processing/pre/geom_renumber.py b/processing/pre/geom_renumber.py deleted file mode 100755 index 6e51062a5..000000000 --- a/processing/pre/geom_renumber.py +++ /dev/null @@ -1,35 +0,0 @@ -#!/usr/bin/env python3 - -import os -import sys -from io import StringIO -from optparse import OptionParser - -import damask - - -scriptName = os.path.splitext(os.path.basename(__file__))[0] -scriptID = ' '.join([scriptName,damask.version]) - - -#-------------------------------------------------------------------------------------------------- -# MAIN -#-------------------------------------------------------------------------------------------------- - -parser = OptionParser(option_class=damask.extendableOption, usage='%prog [geomfile(s)]', description = """ -Renumber sorted microstructure indices to 1,...,N. 
- -""", version=scriptID) - -(options, filenames) = parser.parse_args() - - -if filenames == []: filenames = [None] - -for name in filenames: - damask.util.report(scriptName,name) - - geom = damask.Geom.from_file(StringIO(''.join(sys.stdin.read())) if name is None else name) - damask.util.croak(geom.renumber()) - geom.add_comments(scriptID + ' ' + ' '.join(sys.argv[1:])) - geom.to_file(sys.stdout if name is None else name,pack=False) diff --git a/processing/pre/geom_rotate.py b/processing/pre/geom_rotate.py deleted file mode 100755 index 977e00b65..000000000 --- a/processing/pre/geom_rotate.py +++ /dev/null @@ -1,98 +0,0 @@ -#!/usr/bin/env python3 - -import os -import sys -from io import StringIO -from optparse import OptionParser - -from scipy import ndimage -import numpy as np - -import damask - - -scriptName = os.path.splitext(os.path.basename(__file__))[0] -scriptID = ' '.join([scriptName,damask.version]) - - -#-------------------------------------------------------------------------------------------------- -# MAIN -#-------------------------------------------------------------------------------------------------- - -parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [geomfile(s)]', description = """ -Rotates original microstructure and embeddeds it into buffer material. 
- -""", version=scriptID) - -parser.add_option('-r', '--rotation', - dest='rotation', - type = 'float', nargs = 4, metavar = ' '.join(['float']*4), - help = 'rotation given as axis and angle') -parser.add_option('-e', '--eulers', - dest = 'eulers', - type = 'float', nargs = 3, metavar = ' '.join(['float']*3), - help = 'rotation given as Euler angles') -parser.add_option('-d', '--degrees', - dest = 'degrees', - action = 'store_true', - help = 'Euler angles/axis angle are given in degrees') -parser.add_option('-m', '--matrix', - dest = 'matrix', - type = 'float', nargs = 9, metavar = ' '.join(['float']*9), - help = 'rotation given as matrix') -parser.add_option('-q', '--quaternion', - dest = 'quaternion', - type = 'float', nargs = 4, metavar = ' '.join(['float']*4), - help = 'rotation given as quaternion') -parser.add_option('-f', '--fill', - dest = 'fill', - type = 'float', metavar = 'int', - help = 'background microstructure index, defaults to max microstructure index + 1') - -parser.set_defaults(degrees = False) - -(options, filenames) = parser.parse_args() - -if [options.rotation,options.eulers,options.matrix,options.quaternion].count(None) < 3: - parser.error('more than one rotation specified.') -if [options.rotation,options.eulers,options.matrix,options.quaternion].count(None) > 3: - parser.error('no rotation specified.') - -if options.quaternion is not None: - rot = damask.Rotation.fromQuaternion(np.array(options.quaternion)) # we might need P=+1 here, too... 
-if options.rotation is not None: - rot = damask.Rotation.fromAxisAngle(np.array(options.rotation),degrees=options.degrees,normalise=True,P=+1) -if options.matrix is not None: - rot = damask.Rotation.fromMatrix(np.array(options.Matrix)) -if options.eulers is not None: - rot = damask.Rotation.fromEulers(np.array(options.eulers),degrees=options.degrees) - -eulers = rot.asEulers(degrees=True) - - -if filenames == []: filenames = [None] - -for name in filenames: - damask.util.report(scriptName,name) - - geom = damask.Geom.from_file(StringIO(''.join(sys.stdin.read())) if name is None else name) - size = geom.get_size() - grid = geom.get_grid() - origin = geom.get_origin() - microstructure = geom.get_microstructure() - fill = np.nanmax(microstructure)+1 if options.fill is None else options.fill - dtype = float if np.isnan(fill) or int(fill) != fill or microstructure.dtype==np.float else int - - # These rotations are always applied in the reference coordinate system, i.e. (z,x,z) not (z,x',z'') - # this seems to be ok, see https://www.cs.utexas.edu/~theshark/courses/cs354/lectures/cs354-14.pdf - microstructure = ndimage.rotate(microstructure,eulers[2],(0,1),order=0, - prefilter=False,output=dtype,cval=fill) # rotation around z - microstructure = ndimage.rotate(microstructure,eulers[1],(1,2),order=0, - prefilter=False,output=dtype,cval=fill) # rotation around x - microstructure = ndimage.rotate(microstructure,eulers[0],(0,1),order=0, - prefilter=False,output=dtype,cval=fill) # rotation around z - - damask.util.croak(geom.update(microstructure,origin=origin-(np.asarray(microstructure.shape)-grid)/2*size/grid,rescale=True)) - geom.add_comments(scriptID + ' ' + ' '.join(sys.argv[1:])) - - geom.to_file(sys.stdout if name is None else name,pack=False) diff --git a/processing/pre/geom_translate.py b/processing/pre/geom_translate.py deleted file mode 100755 index 1d08c23dc..000000000 --- a/processing/pre/geom_translate.py +++ /dev/null @@ -1,61 +0,0 @@ -#!/usr/bin/env python3 - 
-import os -import sys -from optparse import OptionParser -from io import StringIO - -import damask - - -scriptName = os.path.splitext(os.path.basename(__file__))[0] -scriptID = ' '.join([scriptName,damask.version]) - - -#-------------------------------------------------------------------------------------------------- -# MAIN -#-------------------------------------------------------------------------------------------------- - -parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [geomfile(s)]', description = """ -Translate microstructure indices (shift or substitute) and/or geometry origin. - -""", version=scriptID) - -parser.add_option('-o', '--origin', - dest = 'origin', - type = 'float', nargs = 3, metavar = ' '.join(['float']*3), - help = 'offset from old to new origin of grid') -parser.add_option('-m', '--microstructure', - dest = 'microstructure', - type = 'int', metavar = 'int', - help = 'offset from old to new microstructure indices (after substitution)') -parser.add_option('-s', '--substitute', - dest = 'substitute', - action = 'extend', metavar = '', - help = 'substitutions of microstructure indices from,to,from,to,...') - -parser.set_defaults(origin = (0.0,0.0,0.0), - microstructure = 0, - substitute = [] - ) - -(options, filenames) = parser.parse_args() - -sub = list(map(int,options.substitute)) - - -if filenames == []: filenames = [None] - -for name in filenames: - damask.util.report(scriptName,name) - - geom = damask.Geom.from_file(StringIO(''.join(sys.stdin.read())) if name is None else name) - - substituted = geom.get_microstructure() - for old,new in zip(sub[0::2],sub[1::2]): substituted[geom.microstructure==old] = new # substitute microstructure indices - substituted += options.microstructure # constant shift - - damask.util.croak(geom.update(substituted,origin=geom.get_origin()+options.origin)) - geom.add_comments(scriptID + ' ' + ' '.join(sys.argv[1:])) - - geom.to_file(sys.stdout if name is None else 
name,pack=False) diff --git a/processing/pre/seeds_check.py b/processing/pre/seeds_check.py deleted file mode 100755 index 6ef808a0a..000000000 --- a/processing/pre/seeds_check.py +++ /dev/null @@ -1,39 +0,0 @@ -#!/usr/bin/env python3 - -import os -import sys -from io import StringIO -from optparse import OptionParser - -import damask - - -scriptName = os.path.splitext(os.path.basename(__file__))[0] -scriptID = ' '.join([scriptName,damask.version]) - - -#-------------------------------------------------------------------------------------------------- -# MAIN -#-------------------------------------------------------------------------------------------------- - -parser = OptionParser(option_class=damask.extendableOption, usage='%prog [seedfile(s)]', description = """ -Writes vtk file for visualization. - -""", version = scriptID) - -(options, filenames) = parser.parse_args() -if filenames == []: filenames = [None] - -for name in filenames: - damask.util.report(scriptName,name) - - seeds = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name) - v = damask.VTK.from_polyData(seeds.get('pos')) - for label in seeds.shapes.keys(): - if label == 'pos': pass - v.add(seeds.get(label),label) - - if name: - v.write(os.path.splitext(name)[0]) - else: - sys.stdout.write(v.__repr__()) diff --git a/python/damask/__init__.py b/python/damask/__init__.py index 9a01e8e62..0f343a581 100644 --- a/python/damask/__init__.py +++ b/python/damask/__init__.py @@ -1,4 +1,4 @@ -"""Main aggregator.""" +"""Tools for pre and post processing of DAMASK simulations.""" import os as _os import re as _re diff --git a/python/damask/_geom.py b/python/damask/_geom.py index 892000b7c..1ec89d8b2 100644 --- a/python/damask/_geom.py +++ b/python/damask/_geom.py @@ -242,7 +242,7 @@ class Geom: def get_grid(self): """Return the grid discretization.""" - return np.array(self.microstructure.shape) + return np.asarray(self.microstructure.shape) def get_homogenization(self): @@ 
-533,8 +533,8 @@ class Geom: Parameters ---------- - grid : iterable of int - new grid dimension + grid : numpy.ndarray of shape (3) + number of grid points in x,y,z direction. """ #self.add_comments('geom.py:scale v{}'.format(version) @@ -581,3 +581,90 @@ class Geom: #self.add_comments('geom.py:renumber v{}'.format(version) return self.update(renumbered) + + + def rotate(self,R,fill=None): + """ + Rotate microstructure (pad if required). + + Parameters + ---------- + R : damask.Rotation + rotation to apply to the microstructure. + fill : int or float, optional + microstructure index to fill the corners. Defaults to microstructure.max() + 1. + + """ + if fill is None: fill = np.nanmax(self.microstructure) + 1 + dtype = float if np.isnan(fill) or int(fill) != fill or self.microstructure.dtype==np.float else int + + Eulers = R.as_Eulers(degrees=True) + microstructure_in = self.get_microstructure() + + # These rotations are always applied in the reference coordinate system, i.e. (z,x,z) not (z,x',z'') + # see https://www.cs.utexas.edu/~theshark/courses/cs354/lectures/cs354-14.pdf + for angle,axes in zip(Eulers[::-1], [(0,1),(1,2),(0,1)]): + microstructure_out = ndimage.rotate(microstructure_in,angle,axes,order=0, + prefilter=False,output=dtype,cval=fill) + if np.prod(microstructure_in.shape) == np.prod(microstructure_out.shape): + # avoid scipy interpolation errors for rotations close to multiples of 90° + microstructure_in = np.rot90(microstructure_in,k=np.rint(angle/90.).astype(int),axes=axes) + else: + microstructure_in = microstructure_out + + origin = self.origin-(np.asarray(microstructure_in.shape)-self.grid)*.5 * self.size/self.grid + + #self.add_comments('geom.py:rotate v{}'.format(version) + return self.update(microstructure_in,origin=origin,rescale=True) + + + def canvas(self,grid=None,offset=None,fill=None): + """ + Crop or enlarge/pad microstructure. + + Parameters + ---------- + grid : numpy.ndarray of shape (3) + number of grid points in x,y,z direction. 
+ offset : numpy.ndarray of shape (3) + offset (measured in grid points) from old to new microstructue[0,0,0]. + fill : int or float, optional + microstructure index to fill the corners. Defaults to microstructure.max() + 1. + + """ + if fill is None: fill = np.nanmax(self.microstructure) + 1 + if offset is None: offset = 0 + dtype = float if int(fill) != fill or self.microstructure.dtype==np.float else int + + canvas = np.full(self.grid if grid is None else grid, + fill if fill is not None else np.nanmax(self.microstructure)+1,dtype) + + l = np.clip( offset, 0,np.minimum(self.grid +offset,grid)) # noqa + r = np.clip( offset+self.grid,0,np.minimum(self.grid*2+offset,grid)) + L = np.clip(-offset, 0,np.minimum(grid -offset,self.grid)) + R = np.clip(-offset+grid, 0,np.minimum(grid*2 -offset,self.grid)) + + canvas[l[0]:r[0],l[1]:r[1],l[2]:r[2]] = self.microstructure[L[0]:R[0],L[1]:R[1],L[2]:R[2]] + + #self.add_comments('geom.py:canvas v{}'.format(version) + return self.update(canvas,origin=self.origin+offset*self.size/self.grid,rescale=True) + + + def substitute(self,from_microstructure,to_microstructure): + """ + Substitude microstructure indices. + + Parameters + ---------- + from_microstructure : iterable of ints + microstructure indices to be substituted. + to_microstructure : iterable of ints + new microstructure indices. + + """ + substituted = self.get_microstructure() + for from_ms,to_ms in zip(from_microstructure,to_microstructure): + substituted[self.microstructure==from_ms] = to_ms + + #self.add_comments('geom.py:substitute v{}'.format(version) + return self.update(substituted) diff --git a/python/damask/_lattice.py b/python/damask/_lattice.py index 42aa0e9bd..0f6cffd04 100644 --- a/python/damask/_lattice.py +++ b/python/damask/_lattice.py @@ -229,19 +229,20 @@ class Symmetry: Return inverse pole figure color if requested. 
Bases are computed from - basis = {'cubic' : np.linalg.inv(np.array([[0.,0.,1.], # direction of red - [1.,0.,1.]/np.sqrt(2.), # direction of green - [1.,1.,1.]/np.sqrt(3.)]).T), # direction of blue - 'hexagonal' : np.linalg.inv(np.array([[0.,0.,1.], # direction of red - [1.,0.,0.], # direction of green - [np.sqrt(3.),1.,0.]/np.sqrt(4.)]).T), # direction of blue - 'tetragonal' : np.linalg.inv(np.array([[0.,0.,1.], # direction of red - [1.,0.,0.], # direction of green - [1.,1.,0.]/np.sqrt(2.)]).T), # direction of blue - 'orthorhombic' : np.linalg.inv(np.array([[0.,0.,1.], # direction of red - [1.,0.,0.], # direction of green - [0.,1.,0.]]).T), # direction of blue - } + >>> basis = {'cubic' : np.linalg.inv(np.array([[0.,0.,1.], # direction of red + ... [1.,0.,1.]/np.sqrt(2.), # direction of green + ... [1.,1.,1.]/np.sqrt(3.)]).T), # direction of blue + ... 'hexagonal' : np.linalg.inv(np.array([[0.,0.,1.], # direction of red + ... [1.,0.,0.], # direction of green + ... [np.sqrt(3.),1.,0.]/np.sqrt(4.)]).T), # direction of blue + ... 'tetragonal' : np.linalg.inv(np.array([[0.,0.,1.], # direction of red + ... [1.,0.,0.], # direction of green + ... [1.,1.,0.]/np.sqrt(2.)]).T), # direction of blue + ... 'orthorhombic': np.linalg.inv(np.array([[0.,0.,1.], # direction of red + ... [1.,0.,0.], # direction of green + ... [0.,1.,0.]]).T), # direction of blue + ... } + """ if self.lattice == 'cubic': basis = {'improper':np.array([ [-1. , 0. , 1. 
], @@ -634,6 +635,6 @@ class Lattice: otherDir = miller[otherDir_id]/ np.linalg.norm(miller[otherDir_id]) otherMatrix = np.array([otherDir,np.cross(otherPlane,otherDir),otherPlane]) - r['rotations'].append(Rotation.fromMatrix(np.dot(otherMatrix.T,myMatrix))) + r['rotations'].append(Rotation.from_matrix(np.dot(otherMatrix.T,myMatrix))) return r diff --git a/python/damask/_orientation.py b/python/damask/_orientation.py index e6c732007..e4dbc7d7c 100644 --- a/python/damask/_orientation.py +++ b/python/damask/_orientation.py @@ -8,6 +8,7 @@ class Orientation: Crystallographic orientation. A crystallographic orientation contains a rotation and a lattice. + """ __slots__ = ['rotation','lattice'] @@ -36,7 +37,7 @@ class Orientation: if isinstance(rotation, Rotation): self.rotation = rotation else: - self.rotation = Rotation.fromQuaternion(rotation) # assume quaternion + self.rotation = Rotation.from_quaternion(rotation) # assume quaternion if self.rotation.quaternion.shape != (4,): raise NotImplementedError('Support for multiple rotations missing') @@ -49,8 +50,10 @@ class Orientation: Disorientation between myself and given other orientation. Rotation axis falls into SST if SST == True. - (Currently requires same symmetry for both orientations. - Look into A. Heinz and P. Neumann 1991 for cases with differing sym.) + + Currently requires same symmetry for both orientations. + Look into A. Heinz and P. Neumann 1991 for cases with differing sym. 
+ """ if self.lattice.symmetry != other.lattice.symmetry: raise NotImplementedError('disorientation between different symmetry classes not supported yet.') @@ -65,8 +68,8 @@ class Orientation: r = b*aInv for k in range(2): r.inverse() - breaker = self.lattice.symmetry.inFZ(r.asRodrigues(vector=True)) \ - and (not SST or other.lattice.symmetry.inDisorientationSST(r.asRodrigues(vector=True))) + breaker = self.lattice.symmetry.inFZ(r.as_Rodrigues(vector=True)) \ + and (not SST or other.lattice.symmetry.inDisorientationSST(r.as_Rodrigues(vector=True))) if breaker: break if breaker: break if breaker: break @@ -75,7 +78,7 @@ class Orientation: # ... own sym, other sym, # self-->other: True, self<--other: False def inFZ(self): - return self.lattice.symmetry.inFZ(self.rotation.asRodrigues(vector=True)) + return self.lattice.symmetry.inFZ(self.rotation.as_Rodrigues(vector=True)) def equivalentOrientations(self,members=[]): @@ -97,7 +100,7 @@ class Orientation: def reduced(self): """Transform orientation to fall into fundamental zone according to symmetry.""" for me in self.equivalentOrientations(): - if self.lattice.symmetry.inFZ(me.rotation.asRodrigues(vector=True)): break + if self.lattice.symmetry.inFZ(me.rotation.as_Rodrigues(vector=True)): break return self.__class__(me.rotation,self.lattice) diff --git a/python/damask/_result.py b/python/damask/_result.py index 9171682ff..73a038c4c 100644 --- a/python/damask/_result.py +++ b/python/damask/_result.py @@ -1,7 +1,11 @@ import multiprocessing import re +import inspect import glob import os +import datetime +import xml.etree.ElementTree as ET +import xml.dom.minidom from functools import partial import h5py @@ -32,7 +36,7 @@ class Result: Parameters ---------- fname : str - name of the DADF5 file to be openend. + name of the DADF5 file to be opened. 
""" with h5py.File(fname,'r') as f: @@ -65,6 +69,10 @@ class Result: self.materialpoints = [m.decode() for m in np.unique(f['mapping/cellResults/materialpoint']['Name'])] self.constituents = [c.decode() for c in np.unique(f['mapping/cellResults/constituent'] ['Name'])] + # faster, but does not work with (deprecated) DADF5_postResults + #self.materialpoints = [m for m in f['inc0/materialpoint']] + #self.constituents = [c for c in f['inc0/constituent']] + self.con_physics = [] for c in self.constituents: self.con_physics += f['/'.join([self.increments[0],'constituent',c])].keys() @@ -80,7 +88,9 @@ class Result: 'con_physics': self.con_physics, 'mat_physics': self.mat_physics } - self.fname = fname + self.fname = os.path.abspath(fname) + + self._allow_overwrite = False def __repr__(self): @@ -117,7 +127,7 @@ class Result: """ # allow True/False and string arguments - if datasets is True: + if datasets is True: datasets = ['*'] elif datasets is False: datasets = [] @@ -136,6 +146,7 @@ class Result: choice = [] for c in iterator: idx = np.searchsorted(self.times,c) + if idx >= len(self.times): continue if np.isclose(c,self.times[idx]): choice.append(self.increments[idx]) elif np.isclose(c,self.times[idx+1]): @@ -156,6 +167,16 @@ class Result: self.selection[what] = diff_sorted + def enable_overwrite(self): + print(util.bcolors().WARNING,util.bcolors().BOLD, + 'Warning: Enabled overwrite of existing datasets!', + util.bcolors().ENDC) + self._allow_overwrite = True + + def disable_overwrite(self): + self._allow_overwrite = False + + def incs_in_range(self,start,end): selected = [] for i,inc in enumerate([int(i[3:]) for i in self.increments]): @@ -316,9 +337,10 @@ class Result: Return groups that contain all requested datasets. Only groups within - - inc?????/constituent/*_*/* - - inc?????/materialpoint/*_*/* - - inc?????/geometry/* + - inc*/constituent/*/* + - inc*/materialpoint/*/* + - inc*/geometry/* + are considered as they contain user-relevant data. 
Single strings will be treated as list with one entry. @@ -482,7 +504,7 @@ class Result: 'meta': { 'Unit': x['meta']['Unit'], 'Description': 'Absolute value of {} ({})'.format(x['label'],x['meta']['Description']), - 'Creator': 'result.py:add_abs v{}'.format(version) + 'Creator': inspect.stack()[0][3][1:] } } def add_absolute(self,x): @@ -510,7 +532,7 @@ class Result: 'meta': { 'Unit': kwargs['unit'], 'Description': '{} (formula: {})'.format(kwargs['description'],kwargs['formula']), - 'Creator': 'result.py:add_calculation v{}'.format(version) + 'Creator': inspect.stack()[0][3][1:] } } def add_calculation(self,label,formula,unit='n/a',description=None,vectorized=True): @@ -546,10 +568,9 @@ class Result: 'label': 'sigma', 'meta': { 'Unit': P['meta']['Unit'], - 'Description': 'Cauchy stress calculated from {} ({}) '.format(P['label'], - P['meta']['Description'])+\ - 'and {} ({})'.format(F['label'],F['meta']['Description']), - 'Creator': 'result.py:add_Cauchy v{}'.format(version) + 'Description': 'Cauchy stress calculated from {} ({}) and {} ({})'\ + .format(P['label'],P['meta']['Description'],F['label'],F['meta']['Description']), + 'Creator': inspect.stack()[0][3][1:] } } def add_Cauchy(self,P='P',F='F'): @@ -575,7 +596,7 @@ class Result: 'meta': { 'Unit': T['meta']['Unit'], 'Description': 'Determinant of tensor {} ({})'.format(T['label'],T['meta']['Description']), - 'Creator': 'result.py:add_determinant v{}'.format(version) + 'Creator': inspect.stack()[0][3][1:] } } def add_determinant(self,T): @@ -593,16 +614,13 @@ class Result: @staticmethod def _add_deviator(T): - if not T['data'].shape[1:] == (3,3): - raise ValueError - return { 'data': mechanics.deviatoric_part(T['data']), 'label': 's_{}'.format(T['label']), 'meta': { 'Unit': T['meta']['Unit'], 'Description': 'Deviator of tensor {} ({})'.format(T['label'],T['meta']['Description']), - 'Creator': 'result.py:add_deviator v{}'.format(version) + 'Creator': inspect.stack()[0][3][1:] } } def add_deviator(self,T): @@ 
-619,17 +637,24 @@ class Result: @staticmethod - def _add_eigenvalue(T_sym): + def _add_eigenvalue(T_sym,eigenvalue): + if eigenvalue == 'max': + label,p = 'Maximum',2 + elif eigenvalue == 'mid': + label,p = 'Intermediate',1 + elif eigenvalue == 'min': + label,p = 'Minimum',0 + return { - 'data': mechanics.eigenvalues(T_sym['data']), - 'label': 'lambda({})'.format(T_sym['label']), + 'data': mechanics.eigenvalues(T_sym['data'])[:,p], + 'label': 'lambda_{}({})'.format(eigenvalue,T_sym['label']), 'meta' : { 'Unit': T_sym['meta']['Unit'], - 'Description': 'Eigenvalues of {} ({})'.format(T_sym['label'],T_sym['meta']['Description']), - 'Creator': 'result.py:add_eigenvalues v{}'.format(version) + 'Description': '{} eigenvalue of {} ({})'.format(label,T_sym['label'],T_sym['meta']['Description']), + 'Creator': inspect.stack()[0][3][1:] } } - def add_eigenvalues(self,T_sym): + def add_eigenvalue(self,T_sym,eigenvalue='max'): """ Add eigenvalues of symmetric tensor. @@ -637,33 +662,46 @@ class Result: ---------- T_sym : str Label of symmetric tensor dataset. + eigenvalue : str, optional + Eigenvalue. Select from ‘max’, ‘mid’, ‘min’. Defaults to ‘max’. 
""" - self._add_generic_pointwise(self._add_eigenvalue,{'T_sym':T_sym}) + self._add_generic_pointwise(self._add_eigenvalue,{'T_sym':T_sym},{'eigenvalue':eigenvalue}) @staticmethod - def _add_eigenvector(T_sym): + def _add_eigenvector(T_sym,eigenvalue): + if eigenvalue == 'max': + label,p = 'maximum',2 + elif eigenvalue == 'mid': + label,p = 'intermediate',1 + elif eigenvalue == 'min': + label,p = 'minimum',0 + print('p',eigenvalue) return { - 'data': mechanics.eigenvectors(T_sym['data']), - 'label': 'v({})'.format(T_sym['label']), + 'data': mechanics.eigenvectors(T_sym['data'])[:,p], + 'label': 'v_{}({})'.format(eigenvalue,T_sym['label']), 'meta' : { 'Unit': '1', - 'Description': 'Eigenvectors of {} ({})'.format(T_sym['label'],T_sym['meta']['Description']), - 'Creator': 'result.py:add_eigenvectors v{}'.format(version) + 'Description': 'Eigenvector corresponding to {} eigenvalue of {} ({})'\ + .format(label,T_sym['label'],T_sym['meta']['Description']), + 'Creator': inspect.stack()[0][3][1:] } } - def add_eigenvectors(self,T_sym): + def add_eigenvector(self,T_sym,eigenvalue='max'): """ - Add eigenvectors of symmetric tensor. + Add eigenvector of symmetric tensor. Parameters ---------- T_sym : str Label of symmetric tensor dataset. + eigenvalue : str, optional + Eigenvalue to which the eigenvector corresponds. Select from + ‘max’, ‘mid’, ‘min’. Defaults to ‘max’. 
""" - self._add_generic_pointwise(self._add_eigenvector,{'T_sym':T_sym}) + self._add_generic_pointwise(self._add_eigenvector,{'T_sym':T_sym},{'eigenvalue':eigenvalue}) @staticmethod @@ -686,7 +724,7 @@ class Result: 'Unit': 'RGB (8bit)', 'Lattice': lattice, 'Description': 'Inverse Pole Figure (IPF) colors along sample direction [{} {} {}]'.format(*m), - 'Creator': 'result.py:add_IPFcolor v{}'.format(version) + 'Creator': inspect.stack()[0][3][1:] } } def add_IPFcolor(self,q,l): @@ -712,7 +750,7 @@ class Result: 'meta': { 'Unit': T_sym['meta']['Unit'], 'Description': 'Maximum shear component of {} ({})'.format(T_sym['label'],T_sym['meta']['Description']), - 'Creator': 'result.py:add_maximum_shear v{}'.format(version) + 'Creator': inspect.stack()[0][3][1:] } } def add_maximum_shear(self,T_sym): @@ -739,7 +777,7 @@ class Result: 'meta': { 'Unit': T_sym['meta']['Unit'], 'Description': 'Mises equivalent {} of {} ({})'.format(t,T_sym['label'],T_sym['meta']['Description']), - 'Creator': 'result.py:add_Mises v{}'.format(version) + 'Creator': inspect.stack()[0][3][1:] } } def add_Mises(self,T_sym): @@ -775,7 +813,7 @@ class Result: 'meta': { 'Unit': x['meta']['Unit'], 'Description': '{}-norm of {} {} ({})'.format(o,t,x['label'],x['meta']['Description']), - 'Creator': 'result.py:add_norm v{}'.format(version) + 'Creator': inspect.stack()[0][3][1:] } } def add_norm(self,x,ord=None): @@ -800,10 +838,9 @@ class Result: 'label': 'S', 'meta': { 'Unit': P['meta']['Unit'], - 'Description': '2. Kirchhoff stress calculated from {} ({}) '.format(P['label'], - P['meta']['Description'])+\ - 'and {} ({})'.format(F['label'],F['meta']['Description']), - 'Creator': 'result.py:add_PK2 v{}'.format(version) + 'Description': '2. 
Piola-Kirchhoff stress calculated from {} ({}) and {} ({})'\ + .format(P['label'],P['meta']['Description'],F['label'],F['meta']['Description']), + 'Creator': inspect.stack()[0][3][1:] } } def add_PK2(self,P='P',F='F'): @@ -826,22 +863,20 @@ class Result: pole = np.array(p) unit_pole = pole/np.linalg.norm(pole) m = util.scale_to_coprime(pole) - coords = np.empty((len(q['data']),2)) - - for i,qu in enumerate(q['data']): - o = Rotation(np.array([qu['w'],qu['x'],qu['y'],qu['z']])) - rotatedPole = o*unit_pole # rotate pole according to crystal orientation - (x,y) = rotatedPole[0:2]/(1.+abs(unit_pole[2])) # stereographic projection - coords[i] = [np.sqrt(x*x+y*y),np.arctan2(y,x)] if polar else [x,y] + rot = Rotation(q['data'].view(np.double).reshape(-1,4)) + rotatedPole = rot @ np.broadcast_to(unit_pole,rot.shape+(3,)) # rotate pole according to crystal orientation + xy = rotatedPole[:,0:2]/(1.+abs(unit_pole[2])) # stereographic projection + coords = xy if not polar else \ + np.block([np.sqrt(xy[:,0:1]*xy[:,0:1]+xy[:,1:2]*xy[:,1:2]),np.arctan2(xy[:,1:2],xy[:,0:1])]) return { 'data': coords, 'label': 'p^{}_[{} {} {})'.format(u'rφ' if polar else 'xy',*m), 'meta' : { - 'Unit': '1', + 'Unit': '1', 'Description': '{} coordinates of stereographic projection of pole (direction/plane) in crystal frame'\ .format('Polar' if polar else 'Cartesian'), - 'Creator' : 'result.py:add_pole v{}'.format(version) + 'Creator': inspect.stack()[0][3][1:] } } def add_pole(self,q,p,polar=False): @@ -863,15 +898,13 @@ class Result: @staticmethod def _add_rotational_part(F): - if not F['data'].shape[1:] == (3,3): - raise ValueError return { 'data': mechanics.rotational_part(F['data']), 'label': 'R({})'.format(F['label']), 'meta': { 'Unit': F['meta']['Unit'], 'Description': 'Rotational part of {} ({})'.format(F['label'],F['meta']['Description']), - 'Creator': 'result.py:add_rotational_part v{}'.format(version) + 'Creator': inspect.stack()[0][3][1:] } } def add_rotational_part(self,F): @@ -889,16 
+922,13 @@ class Result: @staticmethod def _add_spherical(T): - if not T['data'].shape[1:] == (3,3): - raise ValueError - return { 'data': mechanics.spherical_part(T['data']), 'label': 'p_{}'.format(T['label']), 'meta': { 'Unit': T['meta']['Unit'], 'Description': 'Spherical component of tensor {} ({})'.format(T['label'],T['meta']['Description']), - 'Creator': 'result.py:add_spherical v{}'.format(version) + 'Creator': inspect.stack()[0][3][1:] } } def add_spherical(self,T): @@ -916,16 +946,13 @@ class Result: @staticmethod def _add_strain_tensor(F,t,m): - if not F['data'].shape[1:] == (3,3): - raise ValueError - return { 'data': mechanics.strain_tensor(F['data'],t,m), 'label': 'epsilon_{}^{}({})'.format(t,m,F['label']), 'meta': { 'Unit': F['meta']['Unit'], 'Description': 'Strain tensor of {} ({})'.format(F['label'],F['meta']['Description']), - 'Creator': 'result.py:add_strain_tensor v{}'.format(version) + 'Creator': inspect.stack()[0][3][1:] } } def add_strain_tensor(self,F='F',t='V',m=0.0): @@ -950,9 +977,6 @@ class Result: @staticmethod def _add_stretch_tensor(F,t): - if not F['data'].shape[1:] == (3,3): - raise ValueError - return { 'data': mechanics.left_stretch(F['data']) if t == 'V' else mechanics.right_stretch(F['data']), 'label': '{}({})'.format(t,F['label']), @@ -960,7 +984,7 @@ class Result: 'Unit': F['meta']['Unit'], 'Description': '{} stretch tensor of {} ({})'.format('Left' if t == 'V' else 'Right', F['label'],F['meta']['Description']), - 'Creator': 'result.py:add_stretch_tensor v{}'.format(version) + 'Creator': inspect.stack()[0][3][1:] } } def add_stretch_tensor(self,F='F',t='V'): @@ -1023,11 +1047,23 @@ class Result: continue lock.acquire() with h5py.File(self.fname, 'a') as f: - try: # ToDo: Replace if exists? - dataset = f[result[0]].create_dataset(result[1]['label'],data=result[1]['data']) + try: + if self._allow_overwrite and result[0]+'/'+result[1]['label'] in f: + dataset = f[result[0]+'/'+result[1]['label']] + dataset[...] 
= result[1]['data'] + dataset.attrs['Overwritten'] = 'Yes'.encode() + else: + dataset = f[result[0]].create_dataset(result[1]['label'],data=result[1]['data']) + + now = datetime.datetime.now().astimezone() + dataset.attrs['Created'] = now.strftime('%Y-%m-%d %H:%M:%S%z').encode() + for l,v in result[1]['meta'].items(): dataset.attrs[l]=v.encode() - except OSError as err: + creator = 'damask.Result.{} v{}'.format(dataset.attrs['Creator'].decode(),version) + dataset.attrs['Creator'] = creator.encode() + + except (OSError,RuntimeError) as err: print('Could not add dataset: {}.'.format(err)) lock.release() @@ -1035,6 +1071,102 @@ class Result: pool.join() + def write_XDMF(self): + """ + Write XDMF file to directly visualize data in DADF5 file. + + This works only for scalar, 3-vector and 3x3-tensor data. + Selection is not taken into account. + """ + if len(self.constituents) != 1 or not self.structured: + raise NotImplementedError('XDMF only available for grid results with 1 constituent.') + + xdmf=ET.Element('Xdmf') + xdmf.attrib={'Version': '2.0', + 'xmlns:xi': 'http://www.w3.org/2001/XInclude'} + + domain=ET.SubElement(xdmf, 'Domain') + + collection = ET.SubElement(domain, 'Grid') + collection.attrib={'GridType': 'Collection', + 'CollectionType': 'Temporal'} + + time = ET.SubElement(collection, 'Time') + time.attrib={'TimeType': 'List'} + + time_data = ET.SubElement(time, 'DataItem') + time_data.attrib={'Format': 'XML', + 'NumberType': 'Float', + 'Dimensions': '{}'.format(len(self.times))} + time_data.text = ' '.join(map(str,self.times)) + + attributes = [] + data_items = [] + + for inc in self.increments: + + grid=ET.SubElement(collection,'Grid') + grid.attrib = {'GridType': 'Uniform', + 'Name': inc} + + topology=ET.SubElement(grid, 'Topology') + topology.attrib={'TopologyType': '3DCoRectMesh', + 'Dimensions': '{} {} {}'.format(*self.grid+1)} + + geometry=ET.SubElement(grid, 'Geometry') + geometry.attrib={'GeometryType':'Origin_DxDyDz'} + + 
origin=ET.SubElement(geometry, 'DataItem') + origin.attrib={'Format': 'XML', + 'NumberType': 'Float', + 'Dimensions': '3'} + origin.text="{} {} {}".format(*self.origin) + + delta=ET.SubElement(geometry, 'DataItem') + delta.attrib={'Format': 'XML', + 'NumberType': 'Float', + 'Dimensions': '3'} + delta.text="{} {} {}".format(*(self.size/self.grid)) + + + with h5py.File(self.fname,'r') as f: + attributes.append(ET.SubElement(grid, 'Attribute')) + attributes[-1].attrib={'Name': 'u', + 'Center': 'Node', + 'AttributeType': 'Vector'} + data_items.append(ET.SubElement(attributes[-1], 'DataItem')) + data_items[-1].attrib={'Format': 'HDF', + 'Precision': '8', + 'Dimensions': '{} {} {} 3'.format(*(self.grid+1))} + data_items[-1].text='{}:/{}/geometry/u_n'.format(os.path.split(self.fname)[1],inc) + + for o,p in zip(['constituents','materialpoints'],['con_physics','mat_physics']): + for oo in getattr(self,o): + for pp in getattr(self,p): + g = '/'.join([inc,o[:-1],oo,pp]) + for l in f[g]: + name = '/'.join([g,l]) + shape = f[name].shape[1:] + dtype = f[name].dtype + prec = f[name].dtype.itemsize + + if (shape not in [(1,), (3,), (3,3)]) or dtype != np.float64: continue + + attributes.append(ET.SubElement(grid, 'Attribute')) + attributes[-1].attrib={'Name': '{}'.format(name.split('/',2)[2]), + 'Center': 'Cell', + 'AttributeType': 'Tensor'} + data_items.append(ET.SubElement(attributes[-1], 'DataItem')) + data_items[-1].attrib={'Format': 'HDF', + 'NumberType': 'Float', + 'Precision': '{}'.format(prec), + 'Dimensions': '{} {} {} {}'.format(*self.grid,np.prod(shape))} + data_items[-1].text='{}:{}'.format(os.path.split(self.fname)[1],name) + + with open(os.path.splitext(self.fname)[0]+'.xdmf','w') as f: + f.write(xml.dom.minidom.parseString(ET.tostring(xdmf).decode()).toprettyxml()) + + def to_vtk(self,labels=[],mode='cell'): """ Export to vtk cell/point data. 
@@ -1111,28 +1243,3 @@ class Result: inc[3:].zfill(N_digits)) v.write(file_out) - -################################################################################################### -# BEGIN DEPRECATED - - def _time_to_inc(self,start,end): - selected = [] - for i,time in enumerate(self.times): - if start <= time <= end: - selected.append(self.increments[i]) - return selected - - - def set_by_time(self,start,end): - """ - Set active increments based on start and end time. - - Parameters - ---------- - start : float - start time (included) - end : float - end time (included) - - """ - self._manage_selection('set','increments',self._time_to_inc(start,end)) diff --git a/python/damask/_rotation.py b/python/damask/_rotation.py index a13a9af0b..b1b1cd5ad 100644 --- a/python/damask/_rotation.py +++ b/python/damask/_rotation.py @@ -9,38 +9,35 @@ _sc = np.pi**(1./6.)/6.**(1./6.) _beta = np.pi**(5./6.)/6.**(1./6.)/2. _R1 = (3.*np.pi/4.)**(1./3.) -def iszero(a): - return np.isclose(a,0.0,atol=1.0e-12,rtol=0.0) - - class Rotation: u""" Orientation stored with functionality for conversion to different representations. + The following conventions apply: + + - coordinate frames are right-handed. + - a rotation angle ω is taken to be positive for a counterclockwise rotation + when viewing from the end point of the rotation axis towards the origin. + - rotations will be interpreted in the passive sense. + - Euler angle triplets are implemented using the Bunge convention, + with the angular ranges as [0, 2π],[0, π],[0, 2π]. + - the rotation angle ω is limited to the interval [0, π]. + - the real part of a quaternion is positive, Re(q) > 0 + - P = -1 (as default). + + Examples + -------- + Rotate vector "a" (defined in coordinate system "A") to + coordinates "b" expressed in system "B": + + - b = Q @ a + - b = np.dot(Q.asMatrix(),a) + References ---------- D. 
Rowenhorst et al., Modelling and Simulation in Materials Science and Engineering 23:083501, 2015 https://doi.org/10.1088/0965-0393/23/8/083501 - Conventions - ----------- - Convention 1: Coordinate frames are right-handed. - Convention 2: A rotation angle ω is taken to be positive for a counterclockwise rotation - when viewing from the end point of the rotation axis towards the origin. - Convention 3: Rotations will be interpreted in the passive sense. - Convention 4: Euler angle triplets are implemented using the Bunge convention, - with the angular ranges as [0, 2π],[0, π],[0, 2π]. - Convention 5: The rotation angle ω is limited to the interval [0, π]. - Convention 6: the real part of a quaternion is positive, Re(q) > 0 - Convention 7: P = -1 (as default). - - Usage - ----- - Vector "a" (defined in coordinate system "A") is passively rotated - resulting in new coordinates "b" when expressed in system "B". - b = Q * a - b = np.dot(Q.asMatrix(),a) - """ __slots__ = ['quaternion'] @@ -52,11 +49,17 @@ class Rotation: Parameters ---------- quaternion : numpy.ndarray, optional - Unit quaternion that follows the conventions. Use .fromQuaternion to perform a sanity check. + Unit quaternion that follows the conventions. Use .from_quaternion to perform a sanity check. 
""" self.quaternion = quaternion.copy() + + @property + def shape(self): + return self.quaternion.shape[:-1] + + def __copy__(self): """Copy.""" return self.__class__(self.quaternion) @@ -70,14 +73,14 @@ class Rotation: raise NotImplementedError('Support for multiple rotations missing') return '\n'.join([ 'Quaternion: (real={:.3f}, imag=<{:+.3f}, {:+.3f}, {:+.3f}>)'.format(*(self.quaternion)), - 'Matrix:\n{}'.format(self.asMatrix()), - 'Bunge Eulers / deg: ({:3.2f}, {:3.2f}, {:3.2f})'.format(*self.asEulers(degrees=True)), + 'Matrix:\n{}'.format(self.as_matrix()), + 'Bunge Eulers / deg: ({:3.2f}, {:3.2f}, {:3.2f})'.format(*self.as_Eulers(degrees=True)), ]) - def __mul__(self, other): + def __matmul__(self, other): """ - Multiplication. + Rotation of vector, second or fourth order tensor, or rotation object. Parameters ---------- @@ -86,42 +89,40 @@ class Rotation: Todo ---- - Document details active/passive) - considere rotation of (3,3,3,3)-matrix + Check rotation of 4th order tensor """ - if self.quaternion.shape != (4,): - raise NotImplementedError('Support for multiple rotations missing') - if isinstance(other, Rotation): # rotate a rotation - self_q = self.quaternion[0] - self_p = self.quaternion[1:] - other_q = other.quaternion[0] - other_p = other.quaternion[1:] - R = self.__class__(np.append(self_q*other_q - np.dot(self_p,other_p), - self_q*other_p + other_q*self_p + _P * np.cross(self_p,other_p))) - return R.standardize() - elif isinstance(other, (tuple,np.ndarray)): - if isinstance(other,tuple) or other.shape == (3,): # rotate a single (3)-vector or meshgrid - A = self.quaternion[0]**2.0 - np.dot(self.quaternion[1:],self.quaternion[1:]) - B = 2.0 * ( self.quaternion[1]*other[0] - + self.quaternion[2]*other[1] - + self.quaternion[3]*other[2]) - C = 2.0 * _P*self.quaternion[0] + if isinstance(other, Rotation): + q_m = self.quaternion[...,0:1] + p_m = self.quaternion[...,1:] + q_o = other.quaternion[...,0:1] + p_o = other.quaternion[...,1:] + q = (q_m*q_o - 
np.einsum('...i,...i',p_m,p_o).reshape(self.shape+(1,))) + p = q_m*p_o + q_o*p_m + _P * np.cross(p_m,p_o) + return self.__class__(np.block([q,p])).standardize() - return np.array([ - A*other[0] + B*self.quaternion[1] + C*(self.quaternion[2]*other[2] - self.quaternion[3]*other[1]), - A*other[1] + B*self.quaternion[2] + C*(self.quaternion[3]*other[0] - self.quaternion[1]*other[2]), - A*other[2] + B*self.quaternion[3] + C*(self.quaternion[1]*other[1] - self.quaternion[2]*other[0]), - ]) - elif other.shape == (3,3,): # rotate a single (3x3)-matrix - return np.dot(self.asMatrix(),np.dot(other,self.asMatrix().T)) - elif other.shape == (3,3,3,3,): - raise NotImplementedError('Support for rotation of 4th order tensors missing') + elif isinstance(other,np.ndarray): + if self.shape + (3,) == other.shape: + q_m = self.quaternion[...,0] + p_m = self.quaternion[...,1:] + A = q_m**2.0 - np.einsum('...i,...i',p_m,p_m) + B = 2.0 * np.einsum('...i,...i',p_m,other) + C = 2.0 * _P * q_m + return np.block([(A * other[...,i]).reshape(self.shape+(1,)) + + (B * p_m[...,i]).reshape(self.shape+(1,)) + + (C * ( p_m[...,(i+1)%3]*other[...,(i+2)%3]\ + - p_m[...,(i+2)%3]*other[...,(i+1)%3])).reshape(self.shape+(1,)) + for i in [0,1,2]]) + if self.shape + (3,3) == other.shape: + R = self.as_matrix() + return np.einsum('...im,...jn,...mn',R,R,other) + if self.shape + (3,3,3,3) == other.shape: + R = self.as_matrix() + return np.einsum('...im,...jn,...ko,...lp,...mnop',R,R,R,R,other) else: - return NotImplemented + raise ValueError('Can only rotate vectors, 2nd order tensors, and 4th order tensors') else: - return NotImplemented - + raise TypeError('Cannot rotate {}'.format(type(other))) def inverse(self): """In-place inverse rotation/backward rotation.""" @@ -156,6 +157,18 @@ class Rotation: return other*self.inversed() + def broadcast_to(self,shape): + if isinstance(shape,int): shape = (shape,) + if self.shape == (): + q = np.broadcast_to(self.quaternion,shape+(4,)) + else: + q = 
np.block([np.broadcast_to(self.quaternion[...,0:1],shape).reshape(shape+(1,)), + np.broadcast_to(self.quaternion[...,1:2],shape).reshape(shape+(1,)), + np.broadcast_to(self.quaternion[...,2:3],shape).reshape(shape+(1,)), + np.broadcast_to(self.quaternion[...,3:4],shape).reshape(shape+(1,))]) + return self.__class__(q) + + def average(self,other): """ Calculate the average rotation. @@ -197,7 +210,7 @@ class Rotation: return angles in degrees. """ - eu = Rotation.qu2eu(self.quaternion) + eu = Rotation._qu2eu(self.quaternion) if degrees: eu = np.degrees(eu) return eu @@ -215,13 +228,13 @@ class Rotation: return tuple of axis and angle. """ - ax = Rotation.qu2ax(self.quaternion) + ax = Rotation._qu2ax(self.quaternion) if degrees: ax[...,3] = np.degrees(ax[...,3]) return (ax[...,:3],ax[...,3]) if pair else ax def as_matrix(self): """Rotation matrix.""" - return Rotation.qu2om(self.quaternion) + return Rotation._qu2om(self.quaternion) def as_Rodrigues(self, vector = False): @@ -234,16 +247,16 @@ class Rotation: return as actual Rodrigues--Frank vector, i.e. rotation axis scaled by tan(ω/2). """ - ro = Rotation.qu2ro(self.quaternion) + ro = Rotation._qu2ro(self.quaternion) return ro[...,:3]*ro[...,3] if vector else ro def as_homochoric(self): """Homochoric vector: (h_1, h_2, h_3).""" - return Rotation.qu2ho(self.quaternion) + return Rotation._qu2ho(self.quaternion) def as_cubochoric(self): """Cubochoric vector: (c_1, c_2, c_3).""" - return Rotation.qu2cu(self.quaternion) + return Rotation._qu2cu(self.quaternion) def M(self): # ToDo not sure about the name: as_M or M? 
we do not have a from_M """ @@ -257,30 +270,24 @@ class Rotation: """ return np.einsum('...i,...j',self.quaternion,self.quaternion) - # for compatibility (old names do not follow convention) - asM = M - asQuaternion = as_quaternion - asEulers = as_Eulers - asAxisAngle = as_axis_angle - asMatrix = as_matrix - asRodrigues = as_Rodrigues - asHomochoric = as_homochoric - asCubochoric = as_cubochoric ################################################################################################ # Static constructors. The input data needs to follow the conventions, options allow to # relax the conventions. @staticmethod def from_quaternion(quaternion, - acceptHomomorph = False, - P = -1): + accept_homomorph = False, + P = -1, + acceptHomomorph = None): + if acceptHomomorph is not None: + accept_homomorph = acceptHomomorph qu = np.array(quaternion,dtype=float) if qu.shape[:-2:-1] != (4,): raise ValueError('Invalid shape.') if P > 0: qu[...,1:4] *= -1 # convert from P=1 to P=-1 - if acceptHomomorph: + if accept_homomorph: qu[qu[...,0] < 0.0] *= -1 else: if np.any(qu[...,0] < 0.0): @@ -302,7 +309,7 @@ class Rotation: if np.any(eu < 0.0) or np.any(eu > 2.0*np.pi) or np.any(eu[...,1] > np.pi): # ToDo: No separate check for PHI raise ValueError('Euler angles outside of [0..2π],[0..π],[0..2π].') - return Rotation(Rotation.eu2qu(eu)) + return Rotation(Rotation._eu2qu(eu)) @staticmethod def from_axis_angle(axis_angle, @@ -322,7 +329,7 @@ class Rotation: if not np.all(np.isclose(np.linalg.norm(ax[...,0:3],axis=-1), 1.0)): raise ValueError('Axis angle rotation axis is not of unit length.') - return Rotation(Rotation.ax2qu(ax)) + return Rotation(Rotation._ax2qu(ax)) @staticmethod def from_basis(basis, @@ -346,7 +353,7 @@ class Rotation: or not np.all(np.isclose(np.einsum('...i,...i',om[...,2],om[...,0]), 0.0)): raise ValueError('Orientation matrix is not orthogonal.') - return Rotation(Rotation.om2qu(om)) + return Rotation(Rotation._om2qu(om)) @staticmethod def from_matrix(om): @@ 
-369,7 +376,7 @@ class Rotation: if not np.all(np.isclose(np.linalg.norm(ro[...,0:3],axis=-1), 1.0)): raise ValueError('Rodrigues vector rotation axis is not of unit length.') - return Rotation(Rotation.ro2qu(ro)) + return Rotation(Rotation._ro2qu(ro)) @staticmethod def from_homochoric(homochoric, @@ -381,10 +388,10 @@ class Rotation: if P > 0: ho *= -1 # convert from P=1 to P=-1 - if np.any(np.linalg.norm(ho,axis=-1) > (3.*np.pi/4.)**(1./3.)+1e-9): + if np.any(np.linalg.norm(ho,axis=-1) >_R1+1e-9): raise ValueError('Homochoric coordinate outside of the sphere.') - return Rotation(Rotation.ho2qu(ho)) + return Rotation(Rotation._ho2qu(ho)) @staticmethod def from_cubochoric(cubochoric, @@ -397,10 +404,10 @@ class Rotation: if np.abs(np.max(cu))>np.pi**(2./3.) * 0.5+1e-9: raise ValueError('Cubochoric coordinate outside of the cube: {} {} {}.'.format(*cu)) - ho = Rotation.cu2ho(cu) + ho = Rotation._cu2ho(cu) if P > 0: ho *= -1 # convert from P=1 to P=-1 - return Rotation(Rotation.ho2qu(ho)) + return Rotation(Rotation._ho2qu(ho)) @staticmethod @@ -429,11 +436,11 @@ class Rotation: weights = np.ones(N,dtype='i') for i,(r,n) in enumerate(zip(rotations,weights)): - M = r.asM() * n if i == 0 \ - else M + r.asM() * n # noqa add (multiples) of this rotation to average noqa + M = r.M() * n if i == 0 \ + else M + r.M() * n # noqa add (multiples) of this rotation to average noqa eig, vec = np.linalg.eig(M/N) - return Rotation.fromQuaternion(np.real(vec.T[eig.argmax()]),acceptHomomorph = True) + return Rotation.from_quaternion(np.real(vec.T[eig.argmax()]),accept_homomorph = True) @staticmethod def from_random(shape=None): @@ -455,15 +462,11 @@ class Rotation: # for compatibility (old names do not follow convention) + asM = M fromQuaternion = from_quaternion fromEulers = from_Eulers - fromAxisAngle = from_axis_angle - fromBasis = from_basis - fromMatrix = from_matrix - fromRodrigues = from_Rodrigues - fromHomochoric = from_homochoric - fromCubochoric = from_cubochoric - fromRandom 
= from_random + asAxisAngle = as_axis_angle + __mul__ = __matmul__ #################################################################################################### # Code below available according to the following conditions on https://github.com/MarDiehl/3Drotations @@ -497,519 +500,379 @@ class Rotation: #################################################################################################### #---------- Quaternion ---------- @staticmethod - def qu2om(qu): - if len(qu.shape) == 1: - """Quaternion to rotation matrix.""" - qq = qu[0]**2-(qu[1]**2 + qu[2]**2 + qu[3]**2) - om = np.diag(qq + 2.0*np.array([qu[1],qu[2],qu[3]])**2) - - om[0,1] = 2.0*(qu[2]*qu[1]+qu[0]*qu[3]) - om[1,0] = 2.0*(qu[1]*qu[2]-qu[0]*qu[3]) - om[1,2] = 2.0*(qu[3]*qu[2]+qu[0]*qu[1]) - om[2,1] = 2.0*(qu[2]*qu[3]-qu[0]*qu[1]) - om[2,0] = 2.0*(qu[1]*qu[3]+qu[0]*qu[2]) - om[0,2] = 2.0*(qu[3]*qu[1]-qu[0]*qu[2]) - else: - qq = qu[...,0:1]**2-(qu[...,1:2]**2 + qu[...,2:3]**2 + qu[...,3:4]**2) - om = np.block([qq + 2.0*qu[...,1:2]**2, - 2.0*(qu[...,2:3]*qu[...,1:2]+qu[...,0:1]*qu[...,3:4]), - 2.0*(qu[...,3:4]*qu[...,1:2]-qu[...,0:1]*qu[...,2:3]), - 2.0*(qu[...,1:2]*qu[...,2:3]-qu[...,0:1]*qu[...,3:4]), - qq + 2.0*qu[...,2:3]**2, - 2.0*(qu[...,3:4]*qu[...,2:3]+qu[...,0:1]*qu[...,1:2]), - 2.0*(qu[...,1:2]*qu[...,3:4]+qu[...,0:1]*qu[...,2:3]), - 2.0*(qu[...,2:3]*qu[...,3:4]-qu[...,0:1]*qu[...,1:2]), - qq + 2.0*qu[...,3:4]**2, - ]).reshape(qu.shape[:-1]+(3,3)) - return om if _P < 0.0 else np.swapaxes(om,(-1,-2)) + def _qu2om(qu): + qq = qu[...,0:1]**2-(qu[...,1:2]**2 + qu[...,2:3]**2 + qu[...,3:4]**2) + om = np.block([qq + 2.0*qu[...,1:2]**2, + 2.0*(qu[...,2:3]*qu[...,1:2]-_P*qu[...,0:1]*qu[...,3:4]), + 2.0*(qu[...,3:4]*qu[...,1:2]+_P*qu[...,0:1]*qu[...,2:3]), + 2.0*(qu[...,1:2]*qu[...,2:3]+_P*qu[...,0:1]*qu[...,3:4]), + qq + 2.0*qu[...,2:3]**2, + 2.0*(qu[...,3:4]*qu[...,2:3]-_P*qu[...,0:1]*qu[...,1:2]), + 2.0*(qu[...,1:2]*qu[...,3:4]-_P*qu[...,0:1]*qu[...,2:3]), + 
2.0*(qu[...,2:3]*qu[...,3:4]+_P*qu[...,0:1]*qu[...,1:2]), + qq + 2.0*qu[...,3:4]**2, + ]).reshape(qu.shape[:-1]+(3,3)) + return om @staticmethod - def qu2eu(qu): + def _qu2eu(qu): """Quaternion to Bunge-Euler angles.""" - if len(qu.shape) == 1: - q03 = qu[0]**2+qu[3]**2 - q12 = qu[1]**2+qu[2]**2 - chi = np.sqrt(q03*q12) - if np.abs(q12) < 1.e-8: - eu = np.array([np.arctan2(-_P*2.0*qu[0]*qu[3],qu[0]**2-qu[3]**2), 0.0, 0.0]) - elif np.abs(q03) < 1.e-8: - eu = np.array([np.arctan2( 2.0*qu[1]*qu[2],qu[1]**2-qu[2]**2), np.pi, 0.0]) - else: - eu = np.array([np.arctan2((-_P*qu[0]*qu[2]+qu[1]*qu[3])*chi, (-_P*qu[0]*qu[1]-qu[2]*qu[3])*chi ), - np.arctan2( 2.0*chi, q03-q12 ), - np.arctan2(( _P*qu[0]*qu[2]+qu[1]*qu[3])*chi, (-_P*qu[0]*qu[1]+qu[2]*qu[3])*chi )]) - else: - q02 = qu[...,0:1]*qu[...,2:3] - q13 = qu[...,1:2]*qu[...,3:4] - q01 = qu[...,0:1]*qu[...,1:2] - q23 = qu[...,2:3]*qu[...,3:4] - q03_s = qu[...,0:1]**2+qu[...,3:4]**2 - q12_s = qu[...,1:2]**2+qu[...,2:3]**2 - chi = np.sqrt(q03_s*q12_s) + q02 = qu[...,0:1]*qu[...,2:3] + q13 = qu[...,1:2]*qu[...,3:4] + q01 = qu[...,0:1]*qu[...,1:2] + q23 = qu[...,2:3]*qu[...,3:4] + q03_s = qu[...,0:1]**2+qu[...,3:4]**2 + q12_s = qu[...,1:2]**2+qu[...,2:3]**2 + chi = np.sqrt(q03_s*q12_s) - eu = np.where(np.abs(q12_s) < 1.0e-8, - np.block([np.arctan2(-_P*2.0*qu[...,0:1]*qu[...,3:4],qu[...,0:1]**2-qu[...,3:4]**2), - np.zeros(qu.shape[:-1]+(2,))]), - np.where(np.abs(q03_s) < 1.0e-8, - np.block([np.arctan2( 2.0*qu[...,1:2]*qu[...,2:3],qu[...,1:2]**2-qu[...,2:3]**2), - np.broadcast_to(np.pi,qu.shape[:-1]+(1,)), - np.zeros(qu.shape[:-1]+(1,))]), - np.block([np.arctan2((-_P*q02+q13)*chi, (-_P*q01-q23)*chi), - np.arctan2( 2.0*chi, q03_s-q12_s ), - np.arctan2(( _P*q02+q13)*chi, (-_P*q01+q23)*chi)]) - ) - ) + eu = np.where(np.abs(q12_s) < 1.0e-8, + np.block([np.arctan2(-_P*2.0*qu[...,0:1]*qu[...,3:4],qu[...,0:1]**2-qu[...,3:4]**2), + np.zeros(qu.shape[:-1]+(2,))]), + np.where(np.abs(q03_s) < 1.0e-8, + np.block([np.arctan2( 
2.0*qu[...,1:2]*qu[...,2:3],qu[...,1:2]**2-qu[...,2:3]**2), + np.broadcast_to(np.pi,qu.shape[:-1]+(1,)), + np.zeros(qu.shape[:-1]+(1,))]), + np.block([np.arctan2((-_P*q02+q13)*chi, (-_P*q01-q23)*chi), + np.arctan2( 2.0*chi, q03_s-q12_s ), + np.arctan2(( _P*q02+q13)*chi, (-_P*q01+q23)*chi)]) + ) + ) # reduce Euler angles to definition range eu[np.abs(eu)<1.e-6] = 0.0 - eu = np.where(eu<0, (eu+2.0*np.pi)%np.array([2.0*np.pi,np.pi,2.0*np.pi]),eu) + eu = np.where(eu<0, (eu+2.0*np.pi)%np.array([2.0*np.pi,np.pi,2.0*np.pi]),eu) # needed? return eu @staticmethod - def qu2ax(qu): + def _qu2ax(qu): """ Quaternion to axis angle pair. Modified version of the original formulation, should be numerically more stable """ - if len(qu.shape) == 1: - if np.abs(np.sum(qu[1:4]**2)) < 1.e-6: # set axis to [001] if the angle is 0/360 - ax = np.array([ 0.0, 0.0, 1.0, 0.0 ]) - elif qu[0] > 1.e-6: - s = np.sign(qu[0])/np.sqrt(qu[1]**2+qu[2]**2+qu[3]**2) - omega = 2.0 * np.arccos(np.clip(qu[0],-1.0,1.0)) - ax = ax = np.array([ qu[1]*s, qu[2]*s, qu[3]*s, omega ]) - else: - ax = ax = np.array([ qu[1], qu[2], qu[3], np.pi]) - else: - with np.errstate(invalid='ignore',divide='ignore'): - s = np.sign(qu[...,0:1])/np.sqrt(qu[...,1:2]**2+qu[...,2:3]**2+qu[...,3:4]**2) - omega = 2.0 * np.arccos(np.clip(qu[...,0:1],-1.0,1.0)) - ax = np.where(np.broadcast_to(qu[...,0:1] < 1.0e-6,qu.shape), - np.block([qu[...,1:4],np.broadcast_to(np.pi,qu.shape[:-1]+(1,))]), - np.block([qu[...,1:4]*s,omega])) - ax[np.sum(np.abs(qu[...,1:4])**2,axis=-1) < 1.0e-6,] = [0.0, 0.0, 1.0, 0.0] + with np.errstate(invalid='ignore',divide='ignore'): + s = np.sign(qu[...,0:1])/np.sqrt(qu[...,1:2]**2+qu[...,2:3]**2+qu[...,3:4]**2) + omega = 2.0 * np.arccos(np.clip(qu[...,0:1],-1.0,1.0)) + ax = np.where(np.broadcast_to(qu[...,0:1] < 1.0e-8,qu.shape), + np.block([qu[...,1:4],np.broadcast_to(np.pi,qu.shape[:-1]+(1,))]), + np.block([qu[...,1:4]*s,omega])) + ax[np.isclose(qu[...,0],1.,rtol=0.0)] = [0.0, 0.0, 1.0, 0.0] return ax 
@staticmethod - def qu2ro(qu): + def _qu2ro(qu): """Quaternion to Rodrigues-Frank vector.""" - if len(qu.shape) == 1: - if iszero(qu[0]): - ro = np.array([qu[1], qu[2], qu[3], np.inf]) - else: - s = np.linalg.norm(qu[1:4]) - ro = np.array([0.0,0.0,_P,0.0] if iszero(s) else \ - [ qu[1]/s, qu[2]/s, qu[3]/s, np.tan(np.arccos(np.clip(qu[0],-1.0,1.0)))]) - else: - with np.errstate(invalid='ignore',divide='ignore'): - s = np.linalg.norm(qu[...,1:4],axis=-1,keepdims=True) - ro = np.where(np.broadcast_to(np.abs(qu[...,0:1]) < 1.0e-12,qu.shape), - np.block([qu[...,1:2], qu[...,2:3], qu[...,3:4], np.broadcast_to(np.inf,qu.shape[:-1]+(1,))]), - np.block([qu[...,1:2]/s,qu[...,2:3]/s,qu[...,3:4]/s, - np.tan(np.arccos(np.clip(qu[...,0:1],-1.0,1.0))) - ]) - ) - ro[np.abs(s).squeeze(-1) < 1.0e-12] = [0.0,0.0,_P,0.0] + with np.errstate(invalid='ignore',divide='ignore'): + s = np.linalg.norm(qu[...,1:4],axis=-1,keepdims=True) + ro = np.where(np.broadcast_to(np.abs(qu[...,0:1]) < 1.0e-12,qu.shape), + np.block([qu[...,1:2], qu[...,2:3], qu[...,3:4], np.broadcast_to(np.inf,qu.shape[:-1]+(1,))]), + np.block([qu[...,1:2]/s,qu[...,2:3]/s,qu[...,3:4]/s, + np.tan(np.arccos(np.clip(qu[...,0:1],-1.0,1.0))) + ]) + ) + ro[np.abs(s).squeeze(-1) < 1.0e-12] = [0.0,0.0,_P,0.0] return ro @staticmethod - def qu2ho(qu): + def _qu2ho(qu): """Quaternion to homochoric vector.""" - if len(qu.shape) == 1: - omega = 2.0 * np.arccos(np.clip(qu[0],-1.0,1.0)) - if np.abs(omega) < 1.0e-12: - ho = np.zeros(3) - else: - ho = np.array([qu[1], qu[2], qu[3]]) - f = 0.75 * ( omega - np.sin(omega) ) - ho = ho/np.linalg.norm(ho) * f**(1./3.) 
- else: - with np.errstate(invalid='ignore'): - omega = 2.0 * np.arccos(np.clip(qu[...,0:1],-1.0,1.0)) - ho = np.where(np.abs(omega) < 1.0e-12, - np.zeros(3), - qu[...,1:4]/np.linalg.norm(qu[...,1:4],axis=-1,keepdims=True) \ - * (0.75*(omega - np.sin(omega)))**(1./3.)) + with np.errstate(invalid='ignore'): + omega = 2.0 * np.arccos(np.clip(qu[...,0:1],-1.0,1.0)) + ho = np.where(np.abs(omega) < 1.0e-12, + np.zeros(3), + qu[...,1:4]/np.linalg.norm(qu[...,1:4],axis=-1,keepdims=True) \ + * (0.75*(omega - np.sin(omega)))**(1./3.)) return ho @staticmethod - def qu2cu(qu): + def _qu2cu(qu): """Quaternion to cubochoric vector.""" - return Rotation.ho2cu(Rotation.qu2ho(qu)) + return Rotation._ho2cu(Rotation._qu2ho(qu)) #---------- Rotation matrix ---------- @staticmethod - def om2qu(om): + def _om2qu(om): """ Rotation matrix to quaternion. - The original formulation (direct conversion) had (numerical?) issues + This formulation is from www.euclideanspace.com/maths/geometry/rotations/conversions/matrixToQuaternion. + The original formulation had issues. 
""" - return Rotation.eu2qu(Rotation.om2eu(om)) + trace = om[...,0,0:1]+om[...,1,1:2]+om[...,2,2:3] + + with np.errstate(invalid='ignore',divide='ignore'): + s = [ + 0.5 / np.sqrt( 1.0 + trace), + 2.0 * np.sqrt( 1.0 + om[...,0,0:1] - om[...,1,1:2] - om[...,2,2:3]), + 2.0 * np.sqrt( 1.0 + om[...,1,1:2] - om[...,2,2:3] - om[...,0,0:1]), + 2.0 * np.sqrt( 1.0 + om[...,2,2:3] - om[...,0,0:1] - om[...,1,1:2] ) + ] + qu= np.where(trace>0, + np.block([0.25 / s[0], + (om[...,2,1:2] - om[...,1,2:3] ) * s[0], + (om[...,0,2:3] - om[...,2,0:1] ) * s[0], + (om[...,1,0:1] - om[...,0,1:2] ) * s[0]]), + np.where(om[...,0,0:1] > np.maximum(om[...,1,1:2],om[...,2,2:3]), + np.block([(om[...,2,1:2] - om[...,1,2:3]) / s[1], + 0.25 * s[1], + (om[...,0,1:2] + om[...,1,0:1]) / s[1], + (om[...,0,2:3] + om[...,2,0:1]) / s[1]]), + np.where(om[...,1,1:2] > om[...,2,2:3], + np.block([(om[...,0,2:3] - om[...,2,0:1]) / s[2], + (om[...,0,1:2] + om[...,1,0:1]) / s[2], + 0.25 * s[2], + (om[...,1,2:3] + om[...,2,1:2]) / s[2]]), + np.block([(om[...,1,0:1] - om[...,0,1:2]) / s[3], + (om[...,0,2:3] + om[...,2,0:1]) / s[3], + (om[...,1,2:3] + om[...,2,1:2]) / s[3], + 0.25 * s[3]]), + ) + ) + )*np.array([1,_P,_P,_P]) + qu[qu[...,0]<0] *=-1 + return qu @staticmethod - def om2eu(om): + def _om2eu(om): """Rotation matrix to Bunge-Euler angles.""" - if len(om.shape) == 2: - if not np.isclose(np.abs(om[2,2]),1.0,1.e-4): - zeta = 1.0/np.sqrt(1.0-om[2,2]**2) - eu = np.array([np.arctan2(om[2,0]*zeta,-om[2,1]*zeta), - np.arccos(om[2,2]), - np.arctan2(om[0,2]*zeta, om[1,2]*zeta)]) - else: - eu = np.array([np.arctan2( om[0,1],om[0,0]), np.pi*0.5*(1-om[2,2]),0.0]) # following the paper, not the reference implementation - else: - with np.errstate(invalid='ignore',divide='ignore'): - zeta = 1.0/np.sqrt(1.0-om[...,2,2:3]**2) - eu = np.where(np.isclose(np.abs(om[...,2,2:3]),1.0,1e-4), - np.block([np.arctan2(om[...,0,1:2],om[...,0,0:1]), - np.pi*0.5*(1-om[...,2,2:3]), - np.zeros(om.shape[:-2]+(1,)), - ]), - 
np.block([np.arctan2(om[...,2,0:1]*zeta,-om[...,2,1:2]*zeta), - np.arccos(om[...,2,2:3]), - np.arctan2(om[...,0,2:3]*zeta,+om[...,1,2:3]*zeta) - ]) - ) - eu[np.abs(eu)<1.e-6] = 0.0 + with np.errstate(invalid='ignore',divide='ignore'): + zeta = 1.0/np.sqrt(1.0-om[...,2,2:3]**2) + eu = np.where(np.isclose(np.abs(om[...,2,2:3]),1.0,1e-9), + np.block([np.arctan2(om[...,0,1:2],om[...,0,0:1]), + np.pi*0.5*(1-om[...,2,2:3]), + np.zeros(om.shape[:-2]+(1,)), + ]), + np.block([np.arctan2(om[...,2,0:1]*zeta,-om[...,2,1:2]*zeta), + np.arccos( om[...,2,2:3]), + np.arctan2(om[...,0,2:3]*zeta,+om[...,1,2:3]*zeta) + ]) + ) + eu[np.abs(eu)<1.e-8] = 0.0 eu = np.where(eu<0, (eu+2.0*np.pi)%np.array([2.0*np.pi,np.pi,2.0*np.pi]),eu) return eu - @staticmethod - def om2ax(om): + def _om2ax(om): """Rotation matrix to axis angle pair.""" - if len(om.shape) == 2: - ax=np.empty(4) - - # first get the rotation angle - t = 0.5*(om.trace() -1.0) - ax[3] = np.arccos(np.clip(t,-1.0,1.0)) - if np.abs(ax[3])<1.e-6: - ax = np.array([ 0.0, 0.0, 1.0, 0.0]) - else: - w,vr = np.linalg.eig(om) - # next, find the eigenvalue (1,0j) - i = np.where(np.isclose(w,1.0+0.0j))[0][0] - ax[0:3] = np.real(vr[0:3,i]) - diagDelta = -_P*np.array([om[1,2]-om[2,1],om[2,0]-om[0,2],om[0,1]-om[1,0]]) - diagDelta[np.abs(diagDelta)<1.e-6] = 1.0 - ax[0:3] = np.where(np.abs(diagDelta)<0, ax[0:3],np.abs(ax[0:3])*np.sign(diagDelta)) - else: - diag_delta = -_P*np.block([om[...,1,2:3]-om[...,2,1:2], - om[...,2,0:1]-om[...,0,2:3], - om[...,0,1:2]-om[...,1,0:1] - ]) - diag_delta[np.abs(diag_delta)<1.e-6] = 1.0 - t = 0.5*(om.trace(axis2=-2,axis1=-1) -1.0).reshape(om.shape[:-2]+(1,)) - w,vr = np.linalg.eig(om) - # mask duplicated real eigenvalues - w[np.isclose(w[...,0],1.0+0.0j),1:] = 0. - w[np.isclose(w[...,1],1.0+0.0j),2:] = 0. 
- vr = np.swapaxes(vr,-1,-2) - ax = np.where(np.abs(diag_delta)<0, - np.real(vr[np.isclose(w,1.0+0.0j)]).reshape(om.shape[:-2]+(3,)), - np.abs(np.real(vr[np.isclose(w,1.0+0.0j)]).reshape(om.shape[:-2]+(3,))) \ - *np.sign(diag_delta)) - ax = np.block([ax,np.arccos(np.clip(t,-1.0,1.0))]) - ax[np.abs(ax[...,3])<1.e-6] = [ 0.0, 0.0, 1.0, 0.0] + #return Rotation._qu2ax(Rotation._om2qu(om)) # HOTFIX + diag_delta = -_P*np.block([om[...,1,2:3]-om[...,2,1:2], + om[...,2,0:1]-om[...,0,2:3], + om[...,0,1:2]-om[...,1,0:1] + ]) + t = 0.5*(om.trace(axis2=-2,axis1=-1) -1.0).reshape(om.shape[:-2]+(1,)) + w,vr = np.linalg.eig(om) + # mask duplicated real eigenvalues + w[np.isclose(w[...,0],1.0+0.0j),1:] = 0. + w[np.isclose(w[...,1],1.0+0.0j),2:] = 0. + vr = np.swapaxes(vr,-1,-2) + ax = np.where(np.abs(diag_delta)<1e-12, + np.real(vr[np.isclose(w,1.0+0.0j)]).reshape(om.shape[:-2]+(3,)), + np.abs(np.real(vr[np.isclose(w,1.0+0.0j)]).reshape(om.shape[:-2]+(3,))) \ + *np.sign(diag_delta)) + ax = np.block([ax,np.arccos(np.clip(t,-1.0,1.0))]) + ax[np.abs(ax[...,3])<1.e-8] = [ 0.0, 0.0, 1.0, 0.0] return ax - @staticmethod - def om2ro(om): + def _om2ro(om): """Rotation matrix to Rodrigues-Frank vector.""" - return Rotation.eu2ro(Rotation.om2eu(om)) + return Rotation._eu2ro(Rotation._om2eu(om)) @staticmethod - def om2ho(om): + def _om2ho(om): """Rotation matrix to homochoric vector.""" - return Rotation.ax2ho(Rotation.om2ax(om)) + return Rotation._ax2ho(Rotation._om2ax(om)) @staticmethod - def om2cu(om): + def _om2cu(om): """Rotation matrix to cubochoric vector.""" - return Rotation.ho2cu(Rotation.om2ho(om)) + return Rotation._ho2cu(Rotation._om2ho(om)) #---------- Bunge-Euler angles ---------- @staticmethod - def eu2qu(eu): + def _eu2qu(eu): """Bunge-Euler angles to quaternion.""" - if len(eu.shape) == 1: - ee = 0.5*eu - cPhi = np.cos(ee[1]) - sPhi = np.sin(ee[1]) - qu = np.array([ cPhi*np.cos(ee[0]+ee[2]), - -_P*sPhi*np.cos(ee[0]-ee[2]), - -_P*sPhi*np.sin(ee[0]-ee[2]), - 
-_P*cPhi*np.sin(ee[0]+ee[2]) ]) - if qu[0] < 0.0: qu*=-1 - else: - ee = 0.5*eu - cPhi = np.cos(ee[...,1:2]) - sPhi = np.sin(ee[...,1:2]) - qu = np.block([ cPhi*np.cos(ee[...,0:1]+ee[...,2:3]), - -_P*sPhi*np.cos(ee[...,0:1]-ee[...,2:3]), - -_P*sPhi*np.sin(ee[...,0:1]-ee[...,2:3]), - -_P*cPhi*np.sin(ee[...,0:1]+ee[...,2:3])]) - qu[qu[...,0]<0.0]*=-1 + ee = 0.5*eu + cPhi = np.cos(ee[...,1:2]) + sPhi = np.sin(ee[...,1:2]) + qu = np.block([ cPhi*np.cos(ee[...,0:1]+ee[...,2:3]), + -_P*sPhi*np.cos(ee[...,0:1]-ee[...,2:3]), + -_P*sPhi*np.sin(ee[...,0:1]-ee[...,2:3]), + -_P*cPhi*np.sin(ee[...,0:1]+ee[...,2:3])]) + qu[qu[...,0]<0.0]*=-1 return qu - @staticmethod - def eu2om(eu): + def _eu2om(eu): """Bunge-Euler angles to rotation matrix.""" - if len(eu.shape) == 1: - c = np.cos(eu) - s = np.sin(eu) - - om = np.array([[+c[0]*c[2]-s[0]*s[2]*c[1], +s[0]*c[2]+c[0]*s[2]*c[1], +s[2]*s[1]], - [-c[0]*s[2]-s[0]*c[2]*c[1], -s[0]*s[2]+c[0]*c[2]*c[1], +c[2]*s[1]], - [+s[0]*s[1], -c[0]*s[1], +c[1] ]]) - else: - c = np.cos(eu) - s = np.sin(eu) - om = np.block([+c[...,0:1]*c[...,2:3]-s[...,0:1]*s[...,2:3]*c[...,1:2], - +s[...,0:1]*c[...,2:3]+c[...,0:1]*s[...,2:3]*c[...,1:2], - +s[...,2:3]*s[...,1:2], - -c[...,0:1]*s[...,2:3]-s[...,0:1]*c[...,2:3]*c[...,1:2], - -s[...,0:1]*s[...,2:3]+c[...,0:1]*c[...,2:3]*c[...,1:2], - +c[...,2:3]*s[...,1:2], - +s[...,0:1]*s[...,1:2], - -c[...,0:1]*s[...,1:2], - +c[...,1:2] - ]).reshape(eu.shape[:-1]+(3,3)) + c = np.cos(eu) + s = np.sin(eu) + om = np.block([+c[...,0:1]*c[...,2:3]-s[...,0:1]*s[...,2:3]*c[...,1:2], + +s[...,0:1]*c[...,2:3]+c[...,0:1]*s[...,2:3]*c[...,1:2], + +s[...,2:3]*s[...,1:2], + -c[...,0:1]*s[...,2:3]-s[...,0:1]*c[...,2:3]*c[...,1:2], + -s[...,0:1]*s[...,2:3]+c[...,0:1]*c[...,2:3]*c[...,1:2], + +c[...,2:3]*s[...,1:2], + +s[...,0:1]*s[...,1:2], + -c[...,0:1]*s[...,1:2], + +c[...,1:2] + ]).reshape(eu.shape[:-1]+(3,3)) om[np.abs(om)<1.e-12] = 0.0 return om @staticmethod - def eu2ax(eu): + def _eu2ax(eu): """Bunge-Euler angles to axis angle 
pair.""" - if len(eu.shape) == 1: - t = np.tan(eu[1]*0.5) - sigma = 0.5*(eu[0]+eu[2]) - delta = 0.5*(eu[0]-eu[2]) - tau = np.linalg.norm([t,np.sin(sigma)]) - alpha = np.pi if iszero(np.cos(sigma)) else \ - 2.0*np.arctan(tau/np.cos(sigma)) - - if np.abs(alpha)<1.e-6: - ax = np.array([ 0.0, 0.0, 1.0, 0.0 ]) - else: - ax = -_P/tau * np.array([ t*np.cos(delta), t*np.sin(delta), np.sin(sigma) ]) # passive axis angle pair so a minus sign in front - ax = np.append(ax,alpha) - if alpha < 0.0: ax *= -1.0 # ensure alpha is positive - else: - t = np.tan(eu[...,1:2]*0.5) - sigma = 0.5*(eu[...,0:1]+eu[...,2:3]) - delta = 0.5*(eu[...,0:1]-eu[...,2:3]) - tau = np.linalg.norm(np.block([t,np.sin(sigma)]),axis=-1,keepdims=True) - alpha = np.where(np.abs(np.cos(sigma))<1.e-12,np.pi,2.0*np.arctan(tau/np.cos(sigma))) - with np.errstate(invalid='ignore',divide='ignore'): - ax = np.where(np.broadcast_to(np.abs(alpha)<1.0e-12,eu.shape[:-1]+(4,)), - [0.0,0.0,1.0,0.0], - np.block([-_P/tau*t*np.cos(delta), - -_P/tau*t*np.sin(delta), - -_P/tau* np.sin(sigma), - alpha - ])) - ax[(alpha<0.0).squeeze()] *=-1 + t = np.tan(eu[...,1:2]*0.5) + sigma = 0.5*(eu[...,0:1]+eu[...,2:3]) + delta = 0.5*(eu[...,0:1]-eu[...,2:3]) + tau = np.linalg.norm(np.block([t,np.sin(sigma)]),axis=-1,keepdims=True) + alpha = np.where(np.abs(np.cos(sigma))<1.e-12,np.pi,2.0*np.arctan(tau/np.cos(sigma))) + with np.errstate(invalid='ignore',divide='ignore'): + ax = np.where(np.broadcast_to(np.abs(alpha)<1.0e-12,eu.shape[:-1]+(4,)), + [0.0,0.0,1.0,0.0], + np.block([-_P/tau*t*np.cos(delta), + -_P/tau*t*np.sin(delta), + -_P/tau* np.sin(sigma), + alpha + ])) + ax[(alpha<0.0).squeeze()] *=-1 return ax @staticmethod - def eu2ro(eu): + def _eu2ro(eu): """Bunge-Euler angles to Rodrigues-Frank vector.""" - if len(eu.shape) == 1: - ro = Rotation.eu2ax(eu) # convert to axis angle pair representation - if ro[3] >= np.pi: # Differs from original implementation. 
check convention 5 - ro[3] = np.inf - elif iszero(ro[3]): - ro = np.array([ 0.0, 0.0, _P, 0.0 ]) - else: - ro[3] = np.tan(ro[3]*0.5) - else: - ax = Rotation.eu2ax(eu) - ro = np.block([ax[...,:3],np.tan(ax[...,3:4]*.5)]) - ro[ax[...,3]>=np.pi,3] = np.inf - ro[np.abs(ax[...,3])<1.e-16] = [ 0.0, 0.0, _P, 0.0 ] + ax = Rotation._eu2ax(eu) + ro = np.block([ax[...,:3],np.tan(ax[...,3:4]*.5)]) + ro[ax[...,3]>=np.pi,3] = np.inf + ro[np.abs(ax[...,3])<1.e-16] = [ 0.0, 0.0, _P, 0.0 ] return ro @staticmethod - def eu2ho(eu): + def _eu2ho(eu): """Bunge-Euler angles to homochoric vector.""" - return Rotation.ax2ho(Rotation.eu2ax(eu)) + return Rotation._ax2ho(Rotation._eu2ax(eu)) @staticmethod - def eu2cu(eu): + def _eu2cu(eu): """Bunge-Euler angles to cubochoric vector.""" - return Rotation.ho2cu(Rotation.eu2ho(eu)) + return Rotation._ho2cu(Rotation._eu2ho(eu)) #---------- Axis angle pair ---------- @staticmethod - def ax2qu(ax): + def _ax2qu(ax): """Axis angle pair to quaternion.""" - if len(ax.shape) == 1: - if np.abs(ax[3])<1.e-6: - qu = np.array([ 1.0, 0.0, 0.0, 0.0 ]) - else: - c = np.cos(ax[3]*0.5) - s = np.sin(ax[3]*0.5) - qu = np.array([ c, ax[0]*s, ax[1]*s, ax[2]*s ]) - else: - c = np.cos(ax[...,3:4]*.5) - s = np.sin(ax[...,3:4]*.5) - qu = np.where(np.abs(ax[...,3:4])<1.e-6,[1.0, 0.0, 0.0, 0.0],np.block([c, ax[...,:3]*s])) + c = np.cos(ax[...,3:4]*.5) + s = np.sin(ax[...,3:4]*.5) + qu = np.where(np.abs(ax[...,3:4])<1.e-6,[1.0, 0.0, 0.0, 0.0],np.block([c, ax[...,:3]*s])) return qu @staticmethod - def ax2om(ax): + def _ax2om(ax): """Axis angle pair to rotation matrix.""" - if len(ax.shape) == 1: - c = np.cos(ax[3]) - s = np.sin(ax[3]) - omc = 1.0-c - om=np.diag(ax[0:3]**2*omc + c) - - for idx in [[0,1,2],[1,2,0],[2,0,1]]: - q = omc*ax[idx[0]] * ax[idx[1]] - om[idx[0],idx[1]] = q + s*ax[idx[2]] - om[idx[1],idx[0]] = q - s*ax[idx[2]] - else: - c = np.cos(ax[...,3:4]) - s = np.sin(ax[...,3:4]) - omc = 1. 
-c - om = np.block([c+omc*ax[...,0:1]**2, - omc*ax[...,0:1]*ax[...,1:2] + s*ax[...,2:3], - omc*ax[...,0:1]*ax[...,2:3] - s*ax[...,1:2], - omc*ax[...,0:1]*ax[...,1:2] - s*ax[...,2:3], - c+omc*ax[...,1:2]**2, - omc*ax[...,1:2]*ax[...,2:3] + s*ax[...,0:1], - omc*ax[...,0:1]*ax[...,2:3] + s*ax[...,1:2], - omc*ax[...,1:2]*ax[...,2:3] - s*ax[...,0:1], - c+omc*ax[...,2:3]**2]).reshape(ax.shape[:-1]+(3,3)) - return om if _P < 0.0 else np.swapaxes(om,(-1,-2)) + c = np.cos(ax[...,3:4]) + s = np.sin(ax[...,3:4]) + omc = 1. -c + om = np.block([c+omc*ax[...,0:1]**2, + omc*ax[...,0:1]*ax[...,1:2] + s*ax[...,2:3], + omc*ax[...,0:1]*ax[...,2:3] - s*ax[...,1:2], + omc*ax[...,0:1]*ax[...,1:2] - s*ax[...,2:3], + c+omc*ax[...,1:2]**2, + omc*ax[...,1:2]*ax[...,2:3] + s*ax[...,0:1], + omc*ax[...,0:1]*ax[...,2:3] + s*ax[...,1:2], + omc*ax[...,1:2]*ax[...,2:3] - s*ax[...,0:1], + c+omc*ax[...,2:3]**2]).reshape(ax.shape[:-1]+(3,3)) + return om if _P < 0.0 else np.swapaxes(om,-1,-2) @staticmethod - def ax2eu(ax): + def _ax2eu(ax): """Rotation matrix to Bunge Euler angles.""" - return Rotation.om2eu(Rotation.ax2om(ax)) + return Rotation._om2eu(Rotation._ax2om(ax)) @staticmethod - def ax2ro(ax): + def _ax2ro(ax): """Axis angle pair to Rodrigues-Frank vector.""" - if len(ax.shape) == 1: - if np.abs(ax[3])<1.e-6: - ro = [ 0.0, 0.0, _P, 0.0 ] - else: - ro = [ax[0], ax[1], ax[2]] - # 180 degree case - ro += [np.inf] if np.isclose(ax[3],np.pi,atol=1.0e-15,rtol=0.0) else \ - [np.tan(ax[3]*0.5)] - ro = np.array(ro) - else: - ro = np.block([ax[...,:3], - np.where(np.isclose(ax[...,3:4],np.pi,atol=1.e-15,rtol=.0), - np.inf, - np.tan(ax[...,3:4]*0.5)) - ]) - ro[np.abs(ax[...,3])<1.e-6] = [.0,.0,_P,.0] + ro = np.block([ax[...,:3], + np.where(np.isclose(ax[...,3:4],np.pi,atol=1.e-15,rtol=.0), + np.inf, + np.tan(ax[...,3:4]*0.5)) + ]) + ro[np.abs(ax[...,3])<1.e-6] = [.0,.0,_P,.0] return ro @staticmethod - def ax2ho(ax): + def _ax2ho(ax): """Axis angle pair to homochoric vector.""" - if len(ax.shape) == 1: 
- f = (0.75 * ( ax[3] - np.sin(ax[3]) ))**(1.0/3.0) - ho = ax[0:3] * f - else: - f = (0.75 * ( ax[...,3:4] - np.sin(ax[...,3:4]) ))**(1.0/3.0) - ho = ax[...,:3] * f + f = (0.75 * ( ax[...,3:4] - np.sin(ax[...,3:4]) ))**(1.0/3.0) + ho = ax[...,:3] * f return ho @staticmethod - def ax2cu(ax): + def _ax2cu(ax): """Axis angle pair to cubochoric vector.""" - return Rotation.ho2cu(Rotation.ax2ho(ax)) + return Rotation._ho2cu(Rotation._ax2ho(ax)) #---------- Rodrigues-Frank vector ---------- @staticmethod - def ro2qu(ro): + def _ro2qu(ro): """Rodrigues-Frank vector to quaternion.""" - return Rotation.ax2qu(Rotation.ro2ax(ro)) + return Rotation._ax2qu(Rotation._ro2ax(ro)) @staticmethod - def ro2om(ro): + def _ro2om(ro): """Rodgrigues-Frank vector to rotation matrix.""" - return Rotation.ax2om(Rotation.ro2ax(ro)) + return Rotation._ax2om(Rotation._ro2ax(ro)) @staticmethod - def ro2eu(ro): + def _ro2eu(ro): """Rodrigues-Frank vector to Bunge-Euler angles.""" - return Rotation.om2eu(Rotation.ro2om(ro)) + return Rotation._om2eu(Rotation._ro2om(ro)) @staticmethod - def ro2ax(ro): + def _ro2ax(ro): """Rodrigues-Frank vector to axis angle pair.""" - if len(ro.shape) == 1: - if np.abs(ro[3]) < 1.e-6: - ax = np.array([ 0.0, 0.0, 1.0, 0.0 ]) - elif not np.isfinite(ro[3]): - ax = np.array([ ro[0], ro[1], ro[2], np.pi ]) - else: - angle = 2.0*np.arctan(ro[3]) - ta = np.linalg.norm(ro[0:3]) - ax = np.array([ ro[0]*ta, ro[1]*ta, ro[2]*ta, angle ]) - else: - with np.errstate(invalid='ignore',divide='ignore'): - ax = np.where(np.isfinite(ro[...,3:4]), - np.block([ro[...,0:3]*np.linalg.norm(ro[...,0:3],axis=-1,keepdims=True),2.*np.arctan(ro[...,3:4])]), - np.block([ro[...,0:3],np.broadcast_to(np.pi,ro[...,3:4].shape)])) - ax[np.abs(ro[...,3]) < 1.e-6] = np.array([ 0.0, 0.0, 1.0, 0.0 ]) + with np.errstate(invalid='ignore',divide='ignore'): + ax = np.where(np.isfinite(ro[...,3:4]), + np.block([ro[...,0:3]*np.linalg.norm(ro[...,0:3],axis=-1,keepdims=True),2.*np.arctan(ro[...,3:4])]), + 
np.block([ro[...,0:3],np.broadcast_to(np.pi,ro[...,3:4].shape)])) + ax[np.abs(ro[...,3]) < 1.e-8] = np.array([ 0.0, 0.0, 1.0, 0.0 ]) return ax @staticmethod - def ro2ho(ro): + def _ro2ho(ro): """Rodrigues-Frank vector to homochoric vector.""" - if len(ro.shape) == 1: - if np.sum(ro[0:3]**2.0) < 1.e-6: - ho = np.zeros(3) - else: - f = 2.0*np.arctan(ro[3]) -np.sin(2.0*np.arctan(ro[3])) if np.isfinite(ro[3]) else np.pi - ho = ro[0:3] * (0.75*f)**(1.0/3.0) - else: - f = np.where(np.isfinite(ro[...,3:4]),2.0*np.arctan(ro[...,3:4]) -np.sin(2.0*np.arctan(ro[...,3:4])),np.pi) - ho = np.where(np.broadcast_to(np.sum(ro[...,0:3]**2.0,axis=-1,keepdims=True) < 1.e-6,ro[...,0:3].shape), - np.zeros(3), ro[...,0:3]* (0.75*f)**(1.0/3.0)) + f = np.where(np.isfinite(ro[...,3:4]),2.0*np.arctan(ro[...,3:4]) -np.sin(2.0*np.arctan(ro[...,3:4])),np.pi) + ho = np.where(np.broadcast_to(np.sum(ro[...,0:3]**2.0,axis=-1,keepdims=True) < 1.e-8,ro[...,0:3].shape), + np.zeros(3), ro[...,0:3]* (0.75*f)**(1.0/3.0)) return ho @staticmethod - def ro2cu(ro): + def _ro2cu(ro): """Rodrigues-Frank vector to cubochoric vector.""" - return Rotation.ho2cu(Rotation.ro2ho(ro)) + return Rotation._ho2cu(Rotation._ro2ho(ro)) #---------- Homochoric vector---------- @staticmethod - def ho2qu(ho): + def _ho2qu(ho): """Homochoric vector to quaternion.""" - return Rotation.ax2qu(Rotation.ho2ax(ho)) + return Rotation._ax2qu(Rotation._ho2ax(ho)) @staticmethod - def ho2om(ho): + def _ho2om(ho): """Homochoric vector to rotation matrix.""" - return Rotation.ax2om(Rotation.ho2ax(ho)) + return Rotation._ax2om(Rotation._ho2ax(ho)) @staticmethod - def ho2eu(ho): + def _ho2eu(ho): """Homochoric vector to Bunge-Euler angles.""" - return Rotation.ax2eu(Rotation.ho2ax(ho)) + return Rotation._ax2eu(Rotation._ho2ax(ho)) @staticmethod - def ho2ax(ho): + def _ho2ax(ho): """Homochoric vector to axis angle pair.""" tfit = np.array([+1.0000000000018852, -0.5000000002194847, -0.024999992127593126, -0.003928701544781374, @@ -1019,40 
+882,25 @@ class Rotation: +0.0001703481934140054, -0.00012062065004116828, +0.000059719705868660826, -0.00001980756723965647, +0.000003953714684212874, -0.00000036555001439719544]) - if len(ho.shape) == 1: - # normalize h and store the magnitude - hmag_squared = np.sum(ho**2.) - if iszero(hmag_squared): - ax = np.array([ 0.0, 0.0, 1.0, 0.0 ]) - else: - hm = hmag_squared - - # convert the magnitude to the rotation angle - s = tfit[0] + tfit[1] * hmag_squared - for i in range(2,16): - hm *= hmag_squared - s += tfit[i] * hm - ax = np.append(ho/np.sqrt(hmag_squared),2.0*np.arccos(np.clip(s,-1.0,1.0))) - else: - hmag_squared = np.sum(ho**2.,axis=-1,keepdims=True) - hm = hmag_squared.copy() - s = tfit[0] + tfit[1] * hmag_squared - for i in range(2,16): - hm *= hmag_squared - s += tfit[i] * hm - with np.errstate(invalid='ignore'): - ax = np.where(np.broadcast_to(np.abs(hmag_squared)<1.e-6,ho.shape[:-1]+(4,)), - [ 0.0, 0.0, 1.0, 0.0 ], - np.block([ho/np.sqrt(hmag_squared),2.0*np.arccos(np.clip(s,-1.0,1.0))])) + hmag_squared = np.sum(ho**2.,axis=-1,keepdims=True) + hm = hmag_squared.copy() + s = tfit[0] + tfit[1] * hmag_squared + for i in range(2,16): + hm *= hmag_squared + s += tfit[i] * hm + with np.errstate(invalid='ignore'): + ax = np.where(np.broadcast_to(np.abs(hmag_squared)<1.e-8,ho.shape[:-1]+(4,)), + [ 0.0, 0.0, 1.0, 0.0 ], + np.block([ho/np.sqrt(hmag_squared),2.0*np.arccos(np.clip(s,-1.0,1.0))])) return ax @staticmethod - def ho2ro(ho): + def _ho2ro(ho): """Axis angle pair to Rodrigues-Frank vector.""" - return Rotation.ax2ro(Rotation.ho2ax(ho)) + return Rotation._ax2ro(Rotation._ho2ax(ho)) @staticmethod - def ho2cu(ho): + def _ho2cu(ho): """ Homochoric vector to cubochoric vector. 
@@ -1062,91 +910,62 @@ class Rotation: https://doi.org/10.1088/0965-0393/22/7/075013 """ - if len(ho.shape) == 1: - rs = np.linalg.norm(ho) + rs = np.linalg.norm(ho,axis=-1,keepdims=True) - if np.allclose(ho,0.0,rtol=0.0,atol=1.0e-16): - cu = np.zeros(3) - else: - xyz3 = ho[Rotation._get_pyramid_order(ho,'forward')] + xyz3 = np.take_along_axis(ho,Rotation._get_pyramid_order(ho,'forward'),-1) - # inverse M_3 - xyz2 = xyz3[0:2] * np.sqrt( 2.0*rs/(rs+np.abs(xyz3[2])) ) + with np.errstate(invalid='ignore',divide='ignore'): + # inverse M_3 + xyz2 = xyz3[...,0:2] * np.sqrt( 2.0*rs/(rs+np.abs(xyz3[...,2:3])) ) + qxy = np.sum(xyz2**2,axis=-1,keepdims=True) - # inverse M_2 - qxy = np.sum(xyz2**2) + q2 = qxy + np.max(np.abs(xyz2),axis=-1,keepdims=True)**2 + sq2 = np.sqrt(q2) + q = (_beta/np.sqrt(2.0)/_R1) * np.sqrt(q2*qxy/(q2-np.max(np.abs(xyz2),axis=-1,keepdims=True)*sq2)) + tt = np.clip((np.min(np.abs(xyz2),axis=-1,keepdims=True)**2\ + +np.max(np.abs(xyz2),axis=-1,keepdims=True)*sq2)/np.sqrt(2.0)/qxy,-1.0,1.0) + T_inv = np.where(np.abs(xyz2[...,1:2]) <= np.abs(xyz2[...,0:1]), + np.block([np.ones_like(tt),np.arccos(tt)/np.pi*12.0]), + np.block([np.arccos(tt)/np.pi*12.0,np.ones_like(tt)]))*q + T_inv[xyz2<0.0] *= -1.0 + T_inv[np.broadcast_to(np.isclose(qxy,0.0,rtol=0.0,atol=1.0e-12),T_inv.shape)] = 0.0 + cu = np.block([T_inv, np.where(xyz3[...,2:3]<0.0,-np.ones_like(xyz3[...,2:3]),np.ones_like(xyz3[...,2:3])) \ + * rs/np.sqrt(6.0/np.pi), + ])/ _sc - if np.isclose(qxy,0.0,rtol=0.0,atol=1.0e-16): - Tinv = np.zeros(2) - else: - q2 = qxy + np.max(np.abs(xyz2))**2 - sq2 = np.sqrt(q2) - q = (_beta/np.sqrt(2.0)/_R1) * np.sqrt(q2*qxy/(q2-np.max(np.abs(xyz2))*sq2)) - tt = np.clip((np.min(np.abs(xyz2))**2+np.max(np.abs(xyz2))*sq2)/np.sqrt(2.0)/qxy,-1.0,1.0) - Tinv = np.array([1.0,np.arccos(tt)/np.pi*12.0]) if np.abs(xyz2[1]) <= np.abs(xyz2[0]) else \ - np.array([np.arccos(tt)/np.pi*12.0,1.0]) - Tinv = q * np.where(xyz2<0.0,-Tinv,Tinv) - - # inverse M_1 - cu = np.array([ Tinv[0], 
Tinv[1], (-1.0 if xyz3[2] < 0.0 else 1.0) * rs / np.sqrt(6.0/np.pi) ]) /_sc - cu = cu[Rotation._get_pyramid_order(ho,'backward')] - else: - rs = np.linalg.norm(ho,axis=-1,keepdims=True) - - xyz3 = np.take_along_axis(ho,Rotation._get_pyramid_order(ho,'forward'),-1) - - with np.errstate(invalid='ignore',divide='ignore'): - # inverse M_3 - xyz2 = xyz3[...,0:2] * np.sqrt( 2.0*rs/(rs+np.abs(xyz3[...,2:3])) ) - qxy = np.sum(xyz2**2,axis=-1,keepdims=True) - - q2 = qxy + np.max(np.abs(xyz2),axis=-1,keepdims=True)**2 - sq2 = np.sqrt(q2) - q = (_beta/np.sqrt(2.0)/_R1) * np.sqrt(q2*qxy/(q2-np.max(np.abs(xyz2),axis=-1,keepdims=True)*sq2)) - tt = np.clip((np.min(np.abs(xyz2),axis=-1,keepdims=True)**2\ - +np.max(np.abs(xyz2),axis=-1,keepdims=True)*sq2)/np.sqrt(2.0)/qxy,-1.0,1.0) - T_inv = np.where(np.abs(xyz2[...,1:2]) <= np.abs(xyz2[...,0:1]), - np.block([np.ones_like(tt),np.arccos(tt)/np.pi*12.0]), - np.block([np.arccos(tt)/np.pi*12.0,np.ones_like(tt)]))*q - T_inv[xyz2<0.0] *= -1.0 - T_inv[np.broadcast_to(np.isclose(qxy,0.0,rtol=0.0,atol=1.0e-12),T_inv.shape)] = 0.0 - cu = np.block([T_inv, np.where(xyz3[...,2:3]<0.0,-np.ones_like(xyz3[...,2:3]),np.ones_like(xyz3[...,2:3])) \ - * rs/np.sqrt(6.0/np.pi), - ])/ _sc - - cu[np.isclose(np.sum(np.abs(ho),axis=-1),0.0,rtol=0.0,atol=1.0e-16)] = 0.0 - cu = np.take_along_axis(cu,Rotation._get_pyramid_order(ho,'backward'),-1) + cu[np.isclose(np.sum(np.abs(ho),axis=-1),0.0,rtol=0.0,atol=1.0e-16)] = 0.0 + cu = np.take_along_axis(cu,Rotation._get_pyramid_order(ho,'backward'),-1) return cu #---------- Cubochoric ---------- @staticmethod - def cu2qu(cu): + def _cu2qu(cu): """Cubochoric vector to quaternion.""" - return Rotation.ho2qu(Rotation.cu2ho(cu)) + return Rotation._ho2qu(Rotation._cu2ho(cu)) @staticmethod - def cu2om(cu): + def _cu2om(cu): """Cubochoric vector to rotation matrix.""" - return Rotation.ho2om(Rotation.cu2ho(cu)) + return Rotation._ho2om(Rotation._cu2ho(cu)) @staticmethod - def cu2eu(cu): + def _cu2eu(cu): """Cubochoric 
vector to Bunge-Euler angles.""" - return Rotation.ho2eu(Rotation.cu2ho(cu)) + return Rotation._ho2eu(Rotation._cu2ho(cu)) @staticmethod - def cu2ax(cu): + def _cu2ax(cu): """Cubochoric vector to axis angle pair.""" - return Rotation.ho2ax(Rotation.cu2ho(cu)) + return Rotation._ho2ax(Rotation._cu2ho(cu)) @staticmethod - def cu2ro(cu): + def _cu2ro(cu): """Cubochoric vector to Rodrigues-Frank vector.""" - return Rotation.ho2ro(Rotation.cu2ho(cu)) + return Rotation._ho2ro(Rotation._cu2ho(cu)) @staticmethod - def cu2ho(cu): + def _cu2ho(cu): """ Cubochoric vector to homochoric vector. @@ -1156,64 +975,34 @@ class Rotation: https://doi.org/10.1088/0965-0393/22/7/075013 """ - if len(cu.shape) == 1: - # transform to the sphere grid via the curved square, and intercept the zero point - if np.allclose(cu,0.0,rtol=0.0,atol=1.0e-16): - ho = np.zeros(3) - else: - # get pyramide and scale by grid parameter ratio - XYZ = cu[Rotation._get_pyramid_order(cu,'forward')] * _sc + with np.errstate(invalid='ignore',divide='ignore'): + # get pyramide and scale by grid parameter ratio + XYZ = np.take_along_axis(cu,Rotation._get_pyramid_order(cu,'forward'),-1) * _sc + order = np.abs(XYZ[...,1:2]) <= np.abs(XYZ[...,0:1]) + q = np.pi/12.0 * np.where(order,XYZ[...,1:2],XYZ[...,0:1]) \ + / np.where(order,XYZ[...,0:1],XYZ[...,1:2]) + c = np.cos(q) + s = np.sin(q) + q = _R1*2.0**0.25/_beta/ np.sqrt(np.sqrt(2.0)-c) \ + * np.where(order,XYZ[...,0:1],XYZ[...,1:2]) - # intercept all the points along the z-axis - if np.allclose(XYZ[0:2],0.0,rtol=0.0,atol=1.0e-16): - ho = np.array([0.0, 0.0, np.sqrt(6.0/np.pi) * XYZ[2]]) - else: - order = [1,0] if np.abs(XYZ[1]) <= np.abs(XYZ[0]) else [0,1] - q = np.pi/12.0 * XYZ[order[0]]/XYZ[order[1]] - c = np.cos(q) - s = np.sin(q) - q = _R1*2.0**0.25/_beta * XYZ[order[1]] / np.sqrt(np.sqrt(2.0)-c) - T = np.array([ (np.sqrt(2.0)*c - 1.0), np.sqrt(2.0) * s]) * q + T = np.block([ (np.sqrt(2.0)*c - 1.0), np.sqrt(2.0) * s]) * q - # transform to sphere grid (inverse 
Lambert) - # note that there is no need to worry about dividing by zero, since XYZ[2] can not become zero - c = np.sum(T**2) - s = c * np.pi/24.0 /XYZ[2]**2 - c = c * np.sqrt(np.pi/24.0)/XYZ[2] + # transform to sphere grid (inverse Lambert) + c = np.sum(T**2,axis=-1,keepdims=True) + s = c * np.pi/24.0 /XYZ[...,2:3]**2 + c = c * np.sqrt(np.pi/24.0)/XYZ[...,2:3] + q = np.sqrt( 1.0 - s) - q = np.sqrt( 1.0 - s ) - ho = np.array([ T[order[1]] * q, T[order[0]] * q, np.sqrt(6.0/np.pi) * XYZ[2] - c ]) + ho = np.where(np.isclose(np.sum(np.abs(XYZ[...,0:2]),axis=-1,keepdims=True),0.0,rtol=0.0,atol=1.0e-16), + np.block([np.zeros_like(XYZ[...,0:2]),np.sqrt(6.0/np.pi) *XYZ[...,2:3]]), + np.block([np.where(order,T[...,0:1],T[...,1:2])*q, + np.where(order,T[...,1:2],T[...,0:1])*q, + np.sqrt(6.0/np.pi) * XYZ[...,2:3] - c]) + ) - ho = ho[Rotation._get_pyramid_order(cu,'backward')] - else: - with np.errstate(invalid='ignore',divide='ignore'): - # get pyramide and scale by grid parameter ratio - XYZ = np.take_along_axis(cu,Rotation._get_pyramid_order(cu,'forward'),-1) * _sc - order = np.abs(XYZ[...,1:2]) <= np.abs(XYZ[...,0:1]) - q = np.pi/12.0 * np.where(order,XYZ[...,1:2],XYZ[...,0:1]) \ - / np.where(order,XYZ[...,0:1],XYZ[...,1:2]) - c = np.cos(q) - s = np.sin(q) - q = _R1*2.0**0.25/_beta/ np.sqrt(np.sqrt(2.0)-c) \ - * np.where(order,XYZ[...,0:1],XYZ[...,1:2]) - - T = np.block([ (np.sqrt(2.0)*c - 1.0), np.sqrt(2.0) * s]) * q - - # transform to sphere grid (inverse Lambert) - c = np.sum(T**2,axis=-1,keepdims=True) - s = c * np.pi/24.0 /XYZ[...,2:3]**2 - c = c * np.sqrt(np.pi/24.0)/XYZ[...,2:3] - q = np.sqrt( 1.0 - s) - - ho = np.where(np.isclose(np.sum(np.abs(XYZ[...,0:2]),axis=-1,keepdims=True),0.0,rtol=0.0,atol=1.0e-16), - np.block([np.zeros_like(XYZ[...,0:2]),np.sqrt(6.0/np.pi) *XYZ[...,2:3]]), - np.block([np.where(order,T[...,0:1],T[...,1:2])*q, - np.where(order,T[...,1:2],T[...,0:1])*q, - np.sqrt(6.0/np.pi) * XYZ[...,2:3] - c]) - ) - - 
ho[np.isclose(np.sum(np.abs(cu),axis=-1),0.0,rtol=0.0,atol=1.0e-16)] = 0.0 - ho = np.take_along_axis(ho,Rotation._get_pyramid_order(cu,'backward'),-1) + ho[np.isclose(np.sum(np.abs(cu),axis=-1),0.0,rtol=0.0,atol=1.0e-16)] = 0.0 + ho = np.take_along_axis(ho,Rotation._get_pyramid_order(cu,'backward'),-1) return ho @@ -1237,20 +1026,10 @@ class Rotation: https://doi.org/10.1088/0965-0393/22/7/075013 """ - order = {'forward':np.array([[0,1,2],[1,2,0],[2,0,1]]), + order = {'forward': np.array([[0,1,2],[1,2,0],[2,0,1]]), 'backward':np.array([[0,1,2],[2,0,1],[1,2,0]])} - if len(xyz.shape) == 1: - if np.maximum(abs(xyz[0]),abs(xyz[1])) <= xyz[2] or \ - np.maximum(abs(xyz[0]),abs(xyz[1])) <=-xyz[2]: - p = 0 - elif np.maximum(abs(xyz[1]),abs(xyz[2])) <= xyz[0] or \ - np.maximum(abs(xyz[1]),abs(xyz[2])) <=-xyz[0]: - p = 1 - elif np.maximum(abs(xyz[2]),abs(xyz[0])) <= xyz[1] or \ - np.maximum(abs(xyz[2]),abs(xyz[0])) <=-xyz[1]: - p = 2 - else: - p = np.where(np.maximum(np.abs(xyz[...,0]),np.abs(xyz[...,1])) <= np.abs(xyz[...,2]),0, - np.where(np.maximum(np.abs(xyz[...,1]),np.abs(xyz[...,2])) <= np.abs(xyz[...,0]),1,2)) + + p = np.where(np.maximum(np.abs(xyz[...,0]),np.abs(xyz[...,1])) <= np.abs(xyz[...,2]),0, + np.where(np.maximum(np.abs(xyz[...,1]),np.abs(xyz[...,2])) <= np.abs(xyz[...,0]),1,2)) return order[direction][p] diff --git a/python/damask/_vtk.py b/python/damask/_vtk.py index 4505a5ebc..58b43dc1f 100644 --- a/python/damask/_vtk.py +++ b/python/damask/_vtk.py @@ -120,9 +120,9 @@ class VTK: Parameters ---------- fname : str - Filename for reading. Valid extensions are *.vtr, *.vtu, *.vtp, and *.vtk. + Filename for reading. Valid extensions are .vtr, .vtu, .vtp, and .vtk. dataset_type : str, optional - Name of the vtk.vtkDataSet subclass when opening an *.vtk file. Valid types are vtkRectilinearGrid, + Name of the vtk.vtkDataSet subclass when opening an .vtk file. Valid types are vtkRectilinearGrid, vtkUnstructuredGrid, and vtkPolyData. 
""" diff --git a/python/damask/grid_filters.py b/python/damask/grid_filters.py index eaee42924..76ab20872 100644 --- a/python/damask/grid_filters.py +++ b/python/damask/grid_filters.py @@ -238,12 +238,13 @@ def cell_coord0_gridSizeOrigin(coord0,ordered=True): start = origin + delta*.5 end = origin - delta*.5 + size - if not (_np.allclose(coords[0],_np.linspace(start[0],end[0],grid[0])) and \ - _np.allclose(coords[1],_np.linspace(start[1],end[1],grid[1])) and \ - _np.allclose(coords[2],_np.linspace(start[2],end[2],grid[2]))): + atol = _np.max(size)*5e-2 + if not (_np.allclose(coords[0],_np.linspace(start[0],end[0],grid[0]),atol=atol) and \ + _np.allclose(coords[1],_np.linspace(start[1],end[1],grid[1]),atol=atol) and \ + _np.allclose(coords[2],_np.linspace(start[2],end[2],grid[2]),atol=atol)): raise ValueError('Regular grid spacing violated.') - if ordered and not _np.allclose(coord0.reshape(tuple(grid)+(3,),order='F'),cell_coord0(grid,size,origin)): + if ordered and not _np.allclose(coord0.reshape(tuple(grid)+(3,),order='F'),cell_coord0(grid,size,origin),atol=atol): raise ValueError('Input data is not ordered (x fast, z slow).') return (grid,size,origin) @@ -360,7 +361,7 @@ def node_2_cell(node_data): + _np.roll(node_data,1,(0,)) + _np.roll(node_data,1,(1,)) + _np.roll(node_data,1,(2,)) + _np.roll(node_data,1,(0,1)) + _np.roll(node_data,1,(1,2)) + _np.roll(node_data,1,(2,0)))*0.125 - return c[:-1,:-1,:-1] + return c[1:,1:,1:] def node_coord0_gridSizeOrigin(coord0,ordered=True): @@ -385,12 +386,13 @@ def node_coord0_gridSizeOrigin(coord0,ordered=True): if (grid+1).prod() != len(coord0): raise ValueError('Data count {} does not match grid {}.'.format(len(coord0),grid)) - if not (_np.allclose(coords[0],_np.linspace(mincorner[0],maxcorner[0],grid[0]+1)) and \ - _np.allclose(coords[1],_np.linspace(mincorner[1],maxcorner[1],grid[1]+1)) and \ - _np.allclose(coords[2],_np.linspace(mincorner[2],maxcorner[2],grid[2]+1))): + atol = _np.max(size)*5e-2 + if not 
(_np.allclose(coords[0],_np.linspace(mincorner[0],maxcorner[0],grid[0]+1),atol=atol) and \ + _np.allclose(coords[1],_np.linspace(mincorner[1],maxcorner[1],grid[1]+1),atol=atol) and \ + _np.allclose(coords[2],_np.linspace(mincorner[2],maxcorner[2],grid[2]+1),atol=atol)): raise ValueError('Regular grid spacing violated.') - if ordered and not _np.allclose(coord0.reshape(tuple(grid+1)+(3,),order='F'),node_coord0(grid,size,origin)): + if ordered and not _np.allclose(coord0.reshape(tuple(grid+1)+(3,),order='F'),node_coord0(grid,size,origin),atol=atol): raise ValueError('Input data is not ordered (x fast, z slow).') return (grid,size,origin) diff --git a/python/damask/util.py b/python/damask/util.py index d45ea366e..cb1d8757d 100644 --- a/python/damask/util.py +++ b/python/damask/util.py @@ -112,8 +112,8 @@ def execute(cmd, """ initialPath = os.getcwd() - os.chdir(wd) myEnv = os.environ if env is None else env + os.chdir(wd) process = subprocess.Popen(shlex.split(cmd), stdout = subprocess.PIPE, stderr = subprocess.PIPE, @@ -121,9 +121,9 @@ def execute(cmd, env = myEnv) out,error = [i for i in (process.communicate() if streamIn is None else process.communicate(streamIn.read().encode('utf-8')))] + os.chdir(initialPath) out = out.decode('utf-8').replace('\x08','') error = error.decode('utf-8').replace('\x08','') - os.chdir(initialPath) if process.returncode != 0: raise RuntimeError('{} failed with returncode {}'.format(cmd,process.returncode)) return out,error diff --git a/python/tests/reference/Geom/rotate_Eulers=0.0-32.0-240.0.geom b/python/tests/reference/Geom/rotate_Eulers=0.0-32.0-240.0.geom new file mode 100644 index 000000000..a3d1de0b4 --- /dev/null +++ b/python/tests/reference/Geom/rotate_Eulers=0.0-32.0-240.0.geom @@ -0,0 +1,85 @@ +4 header +grid a 8 b 10 c 8 +size x 8e-06 y 1e-05 z 8e-06 +origin x 0.0 y -2.5e-06 z -2e-06 +homogenization 1 +42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 
+42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 +42 42 30 14 18 42 42 42 +42 42 29 13 17 42 42 42 +42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 +42 42 31 15 19 42 42 42 +42 6 10 2 2 42 42 42 +42 1 2 2 2 2 42 42 +42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 +42 42 32 16 20 42 42 42 +42 7 11 2 2 42 42 42 +42 7 11 2 2 42 42 42 +42 2 2 2 2 2 42 42 +42 42 2 2 31 35 42 42 +42 42 22 26 10 14 1 42 +42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 +42 4 2 2 2 2 42 42 +42 42 2 2 32 36 42 42 +42 42 24 28 12 16 1 42 +42 42 42 7 7 1 1 42 +42 42 42 7 7 1 1 42 +42 42 42 1 1 1 42 42 +42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 +42 42 25 29 13 17 1 42 +42 42 42 8 8 1 1 42 +42 42 42 1 1 1 42 42 +42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 +42 42 42 1 1 1 42 42 +42 42 42 1 1 1 42 42 +42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 diff --git a/python/tests/reference/Geom/rotate_Eulers=32.0-68.0-21.0.geom b/python/tests/reference/Geom/rotate_Eulers=32.0-68.0-21.0.geom new file mode 100644 index 
000000000..e29cd4266 --- /dev/null +++ b/python/tests/reference/Geom/rotate_Eulers=32.0-68.0-21.0.geom @@ -0,0 +1,104 @@ +4 header +grid a 11 b 11 c 9 +size x 1.1e-05 y 1.1000000000000001e-05 z 9e-06 +origin x -1.5e-06 y -3.0000000000000005e-06 z -2.4999999999999998e-06 +homogenization 1 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 1 2 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 42 1 42 42 42 42 42 42 +42 42 42 42 1 5 42 42 42 42 42 +42 42 42 1 7 4 42 42 42 42 42 +42 42 42 42 42 27 42 42 42 42 42 +42 42 42 42 42 42 2 42 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 1 1 42 42 42 42 42 42 +42 42 42 1 1 9 29 42 42 42 42 +42 42 1 1 11 8 28 2 42 42 42 +42 42 42 1 10 31 2 42 42 42 42 +42 42 42 42 30 2 2 2 42 42 42 +42 42 42 42 42 42 2 1 42 42 42 +42 42 42 42 42 42 42 1 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 1 42 42 42 42 42 42 42 +42 42 42 1 1 42 42 42 42 42 42 +42 42 1 16 36 12 32 42 42 42 42 +42 42 42 15 35 2 2 2 42 42 42 +42 42 42 42 2 2 2 11 3 42 42 +42 42 42 42 
42 42 10 6 42 42 42 +42 42 42 42 42 42 42 6 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 1 42 42 42 42 42 42 42 +42 42 42 1 17 42 42 42 42 42 42 +42 42 42 16 36 37 2 42 42 42 42 +42 42 42 42 39 2 2 12 42 42 42 +42 42 42 38 2 2 2 11 8 42 42 +42 42 42 42 2 2 14 30 42 42 42 +42 42 42 42 42 42 13 30 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 42 40 42 42 42 42 42 42 +42 42 42 42 42 2 42 42 42 42 42 +42 42 42 42 42 2 2 15 42 42 42 +42 42 42 42 42 2 18 42 42 42 42 +42 42 42 42 42 42 17 42 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 42 42 42 2 20 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 +42 42 42 42 42 42 42 42 42 42 42 diff --git a/python/tests/reference/Result/12grains6x7x8.geom b/python/tests/reference/Result/12grains6x7x8.geom index 7bb4a3e4d..4e6836bb8 100644 --- a/python/tests/reference/Result/12grains6x7x8.geom +++ b/python/tests/reference/Result/12grains6x7x8.geom @@ -1,68 +1,4 @@ -68 header -geom_fromVoronoiTessellation 2.0.3-1073-g6f3cb071 - -[Grain1] -(gauss) phi1 358.98 Phi 65.62 phi2 24.48 
-[Grain2] -(gauss) phi1 121.05 Phi 176.11 phi2 295.73 -[Grain3] -(gauss) phi1 43.79 Phi 113.76 phi2 345.90 -[Grain4] -(gauss) phi1 265.15 Phi 62.52 phi2 299.71 -[Grain5] -(gauss) phi1 221.23 Phi 26.54 phi2 207.05 -[Grain6] -(gauss) phi1 249.81 Phi 61.47 phi2 152.14 -[Grain7] -(gauss) phi1 332.45 Phi 99.16 phi2 345.34 -[Grain8] -(gauss) phi1 312.27 Phi 118.27 phi2 181.59 -[Grain9] -(gauss) phi1 303.10 Phi 48.21 phi2 358.03 -[Grain10] -(gauss) phi1 338.26 Phi 48.11 phi2 176.78 -[Grain11] -(gauss) phi1 115.17 Phi 56.54 phi2 223.84 -[Grain12] -(gauss) phi1 281.04 Phi 97.48 phi2 27.94 - -[Grain1] -crystallite 1 -(constituent) phase 1 texture 1 fraction 1.0 -[Grain2] -crystallite 1 -(constituent) phase 1 texture 2 fraction 1.0 -[Grain3] -crystallite 1 -(constituent) phase 1 texture 3 fraction 1.0 -[Grain4] -crystallite 1 -(constituent) phase 1 texture 4 fraction 1.0 -[Grain5] -crystallite 1 -(constituent) phase 1 texture 5 fraction 1.0 -[Grain6] -crystallite 1 -(constituent) phase 1 texture 6 fraction 1.0 -[Grain7] -crystallite 1 -(constituent) phase 1 texture 7 fraction 1.0 -[Grain8] -crystallite 1 -(constituent) phase 1 texture 8 fraction 1.0 -[Grain9] -crystallite 1 -(constituent) phase 1 texture 9 fraction 1.0 -[Grain10] -crystallite 1 -(constituent) phase 1 texture 10 fraction 1.0 -[Grain11] -crystallite 1 -(constituent) phase 1 texture 11 fraction 1.0 -[Grain12] -crystallite 1 -(constituent) phase 1 texture 12 fraction 1.0 - +4 header grid a 6 b 7 c 8 size x 0.75 y 0.875 z 1.0 origin x 0.0 y 0.0 z 0.0 diff --git a/python/tests/reference/Result/6grains6x7x8_single_phase.geom b/python/tests/reference/Result/6grains6x7x8_single_phase.geom new file mode 100644 index 000000000..14cae0973 --- /dev/null +++ b/python/tests/reference/Result/6grains6x7x8_single_phase.geom @@ -0,0 +1,61 @@ +4 header +grid a 6 b 7 c 8 +size x 0.75 y 0.875 z 1.0 +origin x 0.0 y 0.0 z 0.0 +homogenization 1 +3 3 3 4 3 3 +3 1 1 1 3 3 +3 5 1 1 1 3 +1 5 5 1 1 1 +1 5 5 1 1 1 +6 3 3 4 1 6 +6 3 3 4 4 6 
+6 3 3 1 3 3 +3 1 1 1 3 3 +3 1 1 1 1 1 +1 1 1 1 1 1 +6 6 3 1 1 1 +6 3 3 3 6 6 +6 3 3 3 6 6 +6 3 3 1 1 6 +3 1 1 1 1 3 +6 1 1 1 2 2 +1 6 2 2 2 2 +6 6 2 2 2 6 +6 3 3 3 6 6 +6 3 3 3 6 6 +5 6 6 6 1 6 +6 6 6 6 2 2 +6 6 6 2 2 2 +2 6 2 2 2 2 +6 5 2 2 2 2 +6 5 5 2 2 6 +5 5 5 3 6 6 +5 5 6 6 6 5 +6 6 6 6 6 6 +6 6 6 6 2 2 +4 4 6 2 2 2 +4 4 2 2 2 2 +5 5 5 2 2 2 +5 5 5 5 2 5 +5 5 5 4 4 5 +6 6 6 6 4 4 +4 4 5 5 2 4 +4 4 5 2 2 4 +4 4 2 2 2 2 +5 5 5 2 2 2 +5 5 5 4 4 5 +5 5 4 4 4 3 +4 5 5 5 4 3 +4 4 5 5 5 4 +4 4 5 5 2 4 +4 4 2 2 2 2 +5 5 2 2 2 2 +5 5 4 4 4 4 +3 4 4 4 4 3 +3 5 5 4 3 3 +4 5 5 5 3 3 +4 5 5 5 1 1 +4 4 5 2 1 1 +6 4 4 4 4 1 +3 4 4 4 4 3 diff --git a/python/tests/reference/Result/6grains6x7x8_single_phase_tensionY.hdf5 b/python/tests/reference/Result/6grains6x7x8_single_phase_tensionY.hdf5 new file mode 100644 index 000000000..7812f82e8 Binary files /dev/null and b/python/tests/reference/Result/6grains6x7x8_single_phase_tensionY.hdf5 differ diff --git a/python/tests/reference/Result/material.config b/python/tests/reference/Result/material.config index 0b0808a12..4820b1eed 100644 --- a/python/tests/reference/Result/material.config +++ b/python/tests/reference/Result/material.config @@ -124,6 +124,3 @@ a_slip 2.25 h0_slipslip 75e6 interaction_slipslip 1 1 1.4 1.4 1.4 1.4 atol_resistance 1 - - -[dummy] diff --git a/python/tests/test_Geom.py b/python/tests/test_Geom.py index 0f8c10066..7a3823ab3 100644 --- a/python/tests/test_Geom.py +++ b/python/tests/test_Geom.py @@ -5,12 +5,13 @@ import pytest import numpy as np from damask import Geom +from damask import Rotation def geom_equal(a,b): return np.all(a.get_microstructure() == b.get_microstructure()) and \ - np.all(a.get_size() == b.get_size()) and \ - np.all(a.get_grid() == b.get_grid()) + np.all(a.get_grid() == b.get_grid()) and \ + np.allclose(a.get_size(), b.get_size()) @pytest.fixture def default(): @@ -36,6 +37,7 @@ class TestGeom: default.get_size(), default.get_origin() ) + print(modified) assert 
geom_equal(modified,default) @@ -57,6 +59,22 @@ class TestGeom: new = Geom.from_file(tmpdir.join('default.geom')) assert geom_equal(new,default) + def test_invalid_combination(self,default): + with pytest.raises(ValueError): + default.update(default.microstructure[1:,1:,1:],size=np.ones(3), rescale=True) + + def test_invalid_size(self,default): + with pytest.raises(ValueError): + default.update(default.microstructure[1:,1:,1:],size=np.ones(2)) + + def test_invalid_microstructure(self,default): + with pytest.raises(ValueError): + default.update(default.microstructure[1]) + + def test_invalid_homogenization(self,default): + with pytest.raises(TypeError): + default.set_homogenization(homogenization=0) + @pytest.mark.parametrize('directions,reflect',[ (['x'], False), (['x','y','z'],True), @@ -98,6 +116,52 @@ class TestGeom: if update: modified.to_file(reference) assert geom_equal(modified,Geom.from_file(reference)) + def test_renumber(self,default): + modified = copy.deepcopy(default) + microstructure = modified.get_microstructure() + for m in np.unique(microstructure): + microstructure[microstructure==m] = microstructure.max() + np.random.randint(1,30) + modified.update(microstructure) + assert not geom_equal(modified,default) + modified.renumber() + assert geom_equal(modified,default) + + def test_substitute(self,default): + modified = copy.deepcopy(default) + microstructure = modified.get_microstructure() + offset = np.random.randint(1,500) + microstructure+=offset + modified.update(microstructure) + assert not geom_equal(modified,default) + modified.substitute(np.arange(default.microstructure.max())+1+offset, + np.arange(default.microstructure.max())+1) + assert geom_equal(modified,default) + + @pytest.mark.parametrize('axis_angle',[np.array([1,0,0,86.7]), np.array([0,1,0,90.4]), np.array([0,0,1,90]), + np.array([1,0,0,175]),np.array([0,-1,0,178]),np.array([0,0,1,180])]) + def test_rotate360(self,default,axis_angle): + modified = copy.deepcopy(default) + for i in 
range(np.rint(360/axis_angle[3]).astype(int)): + modified.rotate(Rotation.from_axis_angle(axis_angle,degrees=True)) + assert geom_equal(modified,default) + + @pytest.mark.parametrize('Eulers',[[32.0,68.0,21.0], + [0.0,32.0,240.0]]) + def test_rotate(self,default,update,reference_dir,Eulers): + modified = copy.deepcopy(default) + modified.rotate(Rotation.from_Eulers(Eulers,degrees=True)) + tag = 'Eulers={}-{}-{}'.format(*Eulers) + reference = os.path.join(reference_dir,'rotate_{}.geom'.format(tag)) + if update: modified.to_file(reference) + assert geom_equal(modified,Geom.from_file(reference)) + + def test_canvas(self,default): + grid_add = np.random.randint(0,30,(3)) + modified = copy.deepcopy(default) + modified.canvas(modified.grid + grid_add) + e = default.grid + assert np.all(modified.microstructure[:e[0],:e[1],:e[2]] == default.microstructure) + @pytest.mark.parametrize('periodic',[True,False]) def test_tessellation_approaches(self,periodic): grid = np.random.randint(10,20,3) diff --git a/python/tests/test_Lattice.py b/python/tests/test_Lattice.py new file mode 100644 index 000000000..fb8ce8ea5 --- /dev/null +++ b/python/tests/test_Lattice.py @@ -0,0 +1,41 @@ +import random + +import pytest +import numpy as np + +from damask import Symmetry + +class TestSymmetry: + + @pytest.mark.parametrize('invalid_symmetry',['fcc','bcc','hello']) + def test_invalid_symmetry(self,invalid_symmetry): + with pytest.raises(KeyError): + s = Symmetry(invalid_symmetry) # noqa + + def test_equal(self): + symmetry = random.choice(Symmetry.lattices) + print(symmetry) + assert Symmetry(symmetry) == Symmetry(symmetry) + + def test_not_equal(self): + symmetries = random.sample(Symmetry.lattices,k=2) + assert Symmetry(symmetries[0]) != Symmetry(symmetries[1]) + + @pytest.mark.parametrize('lattice',Symmetry.lattices) + def test_inFZ(self,lattice): + assert Symmetry(lattice).inFZ(np.zeros(3)) + + @pytest.mark.parametrize('lattice',Symmetry.lattices) + def 
test_inDisorientationSST(self,lattice): + assert Symmetry(lattice).inDisorientationSST(np.zeros(3)) + + @pytest.mark.parametrize('lattice',Symmetry.lattices) + @pytest.mark.parametrize('proper',[True,False]) + def test_inSST(self,lattice,proper): + assert Symmetry(lattice).inSST(np.zeros(3),proper) + + @pytest.mark.parametrize('function',['inFZ','inDisorientationSST']) + def test_invalid_argument(self,function): + s = Symmetry() # noqa + with pytest.raises(ValueError): + eval('s.{}(np.ones(4))'.format(function)) diff --git a/python/tests/test_Orientation.py b/python/tests/test_Orientation.py index 08f060887..a8b8afdac 100644 --- a/python/tests/test_Orientation.py +++ b/python/tests/test_Orientation.py @@ -8,13 +8,8 @@ import damask from damask import Rotation from damask import Orientation from damask import Lattice - -n = 1000 -@pytest.fixture -def default(): - """A set of n random rotations.""" - return [Rotation.fromRandom() for r in range(n)] +n = 1000 @pytest.fixture def reference_dir(reference_dir_base): @@ -28,15 +23,15 @@ class TestOrientation: {'label':'green','RGB':[0,1,0],'direction':[0,1,1]}, {'label':'blue', 'RGB':[0,0,1],'direction':[1,1,1]}]) @pytest.mark.parametrize('lattice',['fcc','bcc']) - def test_IPF_cubic(self,default,color,lattice): + def test_IPF_cubic(self,color,lattice): cube = damask.Orientation(damask.Rotation(),lattice) for direction in set(permutations(np.array(color['direction']))): - assert np.allclose(cube.IPFcolor(direction),np.array(color['RGB'])) + assert np.allclose(cube.IPFcolor(np.array(direction)),np.array(color['RGB'])) @pytest.mark.parametrize('lattice',Lattice.lattices) def test_IPF(self,lattice): direction = np.random.random(3)*2.0-1 - for rot in [Rotation.fromRandom() for r in range(n//100)]: + for rot in [Rotation.from_random() for r in range(n//100)]: R = damask.Orientation(rot,lattice) color = R.IPFcolor(direction) for equivalent in R.equivalentOrientations(): @@ -45,7 +40,7 @@ class TestOrientation: 
@pytest.mark.parametrize('model',['Bain','KS','GT','GT_prime','NW','Pitsch']) @pytest.mark.parametrize('lattice',['fcc','bcc']) def test_relationship_forward_backward(self,model,lattice): - ori = Orientation(Rotation.fromRandom(),lattice) + ori = Orientation(Rotation.from_random(),lattice) for i,r in enumerate(ori.relatedOrientations(model)): ori2 = r.relatedOrientations(model)[i] misorientation = ori.rotation.misorientation(ori2.rotation) @@ -56,8 +51,8 @@ class TestOrientation: def test_relationship_reference(self,update,reference_dir,model,lattice): reference = os.path.join(reference_dir,'{}_{}.txt'.format(lattice,model)) ori = Orientation(Rotation(),lattice) - eu = np.array([o.rotation.asEulers(degrees=True) for o in ori.relatedOrientations(model)]) - if update: + eu = np.array([o.rotation.as_Eulers(degrees=True) for o in ori.relatedOrientations(model)]) + if update: coords = np.array([(1,i+1) for i,x in enumerate(eu)]) table = damask.Table(eu,{'Eulers':(3,)}) table.add('pos',coords) diff --git a/python/tests/test_Result.py b/python/tests/test_Result.py index c117f33f7..5ceebc1c4 100644 --- a/python/tests/test_Result.py +++ b/python/tests/test_Result.py @@ -1,9 +1,13 @@ +import time import shutil import os +from datetime import datetime import pytest import numpy as np +import h5py +import damask from damask import Result from damask import mechanics @@ -13,9 +17,16 @@ def default(tmp_path,reference_dir): fname = '12grains6x7x8_tensionY.hdf5' shutil.copy(os.path.join(reference_dir,fname),tmp_path) f = Result(os.path.join(tmp_path,fname)) - f.set_by_time(20.0,20.0) + f.pick('times',20.0) return f +@pytest.fixture +def single_phase(tmp_path,reference_dir): + """Single phase Result file in temp location for modification.""" + fname = '6grains6x7x8_single_phase_tensionY.hdf5' + shutil.copy(os.path.join(reference_dir,fname),tmp_path) + return Result(os.path.join(tmp_path,fname)) + @pytest.fixture def reference_dir(reference_dir_base): """Directory containing 
reference results.""" @@ -28,12 +39,56 @@ class TestResult: print(default) - def test_time_increments(self,default): - shape = default.read_dataset(default.get_dataset_location('F'),0).shape - default.set_by_time(0.0,20.0) - for i in default.iterate('increments'): - assert shape == default.read_dataset(default.get_dataset_location('F'),0).shape + def test_pick_all(self,default): + default.pick('increments',True) + a = default.get_dataset_location('F') + default.pick('increments','*') + b = default.get_dataset_location('F') + default.pick('increments',default.incs_in_range(0,np.iinfo(int).max)) + c = default.get_dataset_location('F') + default.pick('times',True) + d = default.get_dataset_location('F') + default.pick('times','*') + e = default.get_dataset_location('F') + default.pick('times',default.times_in_range(0.0,np.inf)) + f = default.get_dataset_location('F') + assert a == b == c == d == e ==f + + @pytest.mark.parametrize('what',['increments','times','constituents']) # ToDo: discuss materialpoints + def test_pick_none(self,default,what): + default.pick(what,False) + a = default.get_dataset_location('F') + default.pick(what,[]) + b = default.get_dataset_location('F') + + assert a == b == [] + + @pytest.mark.parametrize('what',['increments','times','constituents']) # ToDo: discuss materialpoints + def test_pick_more(self,default,what): + default.pick(what,False) + default.pick_more(what,'*') + a = default.get_dataset_location('F') + + default.pick(what,True) + b = default.get_dataset_location('F') + + assert a == b + + @pytest.mark.parametrize('what',['increments','times','constituents']) # ToDo: discuss materialpoints + def test_pick_less(self,default,what): + default.pick(what,True) + default.pick_less(what,'*') + a = default.get_dataset_location('F') + + default.pick(what,False) + b = default.get_dataset_location('F') + + assert a == b == [] + + def test_pick_invalid(self,default): + with pytest.raises(AttributeError): + default.pick('invalid',True) def 
test_add_absolute(self,default): default.add_absolute('Fe') @@ -77,24 +132,40 @@ class TestResult: in_file = default.read_dataset(loc['s_P'],0) assert np.allclose(in_memory,in_file) - def test_add_eigenvalues(self,default): + @pytest.mark.parametrize('eigenvalue,function',[('max',np.amax),('min',np.amin)]) + def test_add_eigenvalue(self,default,eigenvalue,function): default.add_Cauchy('P','F') - default.add_eigenvalues('sigma') - loc = {'sigma' :default.get_dataset_location('sigma'), - 'lambda(sigma)':default.get_dataset_location('lambda(sigma)')} - in_memory = mechanics.eigenvalues(default.read_dataset(loc['sigma'],0)) - in_file = default.read_dataset(loc['lambda(sigma)'],0) + default.add_eigenvalue('sigma',eigenvalue) + loc = {'sigma' :default.get_dataset_location('sigma'), + 'lambda':default.get_dataset_location('lambda_{}(sigma)'.format(eigenvalue))} + in_memory = function(mechanics.eigenvalues(default.read_dataset(loc['sigma'],0)),axis=1,keepdims=True) + in_file = default.read_dataset(loc['lambda'],0) assert np.allclose(in_memory,in_file) - def test_add_eigenvectors(self,default): + @pytest.mark.parametrize('eigenvalue,idx',[('max',2),('mid',1),('min',0)]) + def test_add_eigenvector(self,default,eigenvalue,idx): default.add_Cauchy('P','F') - default.add_eigenvectors('sigma') + default.add_eigenvector('sigma',eigenvalue) loc = {'sigma' :default.get_dataset_location('sigma'), - 'v(sigma)':default.get_dataset_location('v(sigma)')} - in_memory = mechanics.eigenvectors(default.read_dataset(loc['sigma'],0)) + 'v(sigma)':default.get_dataset_location('v_{}(sigma)'.format(eigenvalue))} + in_memory = mechanics.eigenvectors(default.read_dataset(loc['sigma'],0))[:,idx] in_file = default.read_dataset(loc['v(sigma)'],0) assert np.allclose(in_memory,in_file) + @pytest.mark.parametrize('d',[[1,0,0],[0,1,0],[0,0,1]]) + def test_add_IPFcolor(self,default,d): + default.add_IPFcolor('orientation',d) + loc = {'orientation': default.get_dataset_location('orientation'), + 'color': 
default.get_dataset_location('IPFcolor_[{} {} {}]'.format(*d))} + qu = default.read_dataset(loc['orientation']).view(np.double).reshape(-1,4) + crystal_structure = default.get_crystal_structure() + in_memory = np.empty((qu.shape[0],3),np.uint8) + for i,q in enumerate(qu): + o = damask.Orientation(q,crystal_structure).reduced() + in_memory[i] = np.uint8(o.IPFcolor(np.array(d))*255) + in_file = default.read_dataset(loc['color']) + assert np.allclose(in_memory,in_file) + def test_add_maximum_shear(self,default): default.add_Cauchy('P','F') default.add_maximum_shear('sigma') @@ -143,6 +214,20 @@ class TestResult: in_file = default.read_dataset(loc['S'],0) assert np.allclose(in_memory,in_file) + @pytest.mark.parametrize('polar',[True,False]) + def test_add_pole(self,default,polar): + pole = np.array([1.,0.,0.]) + default.add_pole('orientation',pole,polar) + loc = {'orientation': default.get_dataset_location('orientation'), + 'pole': default.get_dataset_location('p^{}_[1 0 0)'.format(u'rφ' if polar else 'xy'))} + rot = damask.Rotation(default.read_dataset(loc['orientation']).view(np.double)) + rotated_pole = rot * np.broadcast_to(pole,rot.shape+(3,)) + xy = rotated_pole[:,0:2]/(1.+abs(pole[2])) + in_memory = xy if not polar else \ + np.block([np.sqrt(xy[:,0:1]*xy[:,0:1]+xy[:,1:2]*xy[:,1:2]),np.arctan2(xy[:,1:2],xy[:,0:1])]) + in_file = default.read_dataset(loc['pole']) + assert np.allclose(in_memory,in_file) + def test_add_rotational_part(self,default): default.add_rotational_part('F') loc = {'F': default.get_dataset_location('F'), @@ -185,3 +270,42 @@ class TestResult: in_memory = mechanics.left_stretch(default.read_dataset(loc['F'],0)) in_file = default.read_dataset(loc['V(F)'],0) assert np.allclose(in_memory,in_file) + + def test_add_invalid(self,default): + with pytest.raises(TypeError): + default.add_calculation('#invalid#*2') + + @pytest.mark.parametrize('overwrite',['off','on']) + def test_add_overwrite(self,default,overwrite): + 
default.pick('times',default.times_in_range(0,np.inf)[-1]) + + default.add_Cauchy() + loc = default.get_dataset_location('sigma') + print(loc) + with h5py.File(default.fname,'r') as f: + created_first = f[loc[0]].attrs['Created'].decode() + created_first = datetime.strptime(created_first,'%Y-%m-%d %H:%M:%S%z') + + if overwrite == 'on': + default.enable_overwrite() + else: + default.disable_overwrite() + + time.sleep(2.) + default.add_calculation('sigma','#sigma#*0.0+311.','not the Cauchy stress') + with h5py.File(default.fname,'r') as f: + created_second = f[loc[0]].attrs['Created'].decode() + created_second = datetime.strptime(created_second,'%Y-%m-%d %H:%M:%S%z') + if overwrite == 'on': + assert created_first < created_second and np.allclose(default.read_dataset(loc),311.) + else: + assert created_first == created_second and not np.allclose(default.read_dataset(loc),311.) + + @pytest.mark.parametrize('output',['F',[],['F','P']]) + def test_vtk(self,tmp_path,default,output): + os.chdir(tmp_path) + default.to_vtk(output) + + def test_XDMF(self,tmp_path,single_phase): + os.chdir(tmp_path) + single_phase.write_XDMF diff --git a/python/tests/test_Rotation.py b/python/tests/test_Rotation.py index b7442035f..5a1cd145d 100644 --- a/python/tests/test_Rotation.py +++ b/python/tests/test_Rotation.py @@ -4,6 +4,9 @@ import pytest import numpy as np from damask import Rotation +from damask import _rotation + + n = 1100 atol=1.e-4 @@ -12,68 +15,69 @@ scatter=1.e-2 @pytest.fixture def default(): """A set of n random rotations.""" - specials = np.array( - [np.array([ 1.0, 0.0, 0.0, 0.0]), - #----------------------------------------------- - np.array([0.0, 1.0, 0.0, 0.0]), - np.array([0.0, 0.0, 1.0, 0.0]), - np.array([0.0, 0.0, 0.0, 1.0]), - np.array([0.0,-1.0, 0.0, 0.0]), - np.array([0.0, 0.0,-1.0, 0.0]), - np.array([0.0, 0.0, 0.0,-1.0]), - #----------------------------------------------- - np.array([1.0, 1.0, 0.0, 0.0])/np.sqrt(2.), - np.array([1.0, 0.0, 1.0, 0.0])/np.sqrt(2.), 
- np.array([1.0, 0.0, 0.0, 1.0])/np.sqrt(2.), - np.array([0.0, 1.0, 1.0, 0.0])/np.sqrt(2.), - np.array([0.0, 1.0, 0.0, 1.0])/np.sqrt(2.), - np.array([0.0, 0.0, 1.0, 1.0])/np.sqrt(2.), - #----------------------------------------------- - np.array([1.0,-1.0, 0.0, 0.0])/np.sqrt(2.), - np.array([1.0, 0.0,-1.0, 0.0])/np.sqrt(2.), - np.array([1.0, 0.0, 0.0,-1.0])/np.sqrt(2.), - np.array([0.0, 1.0,-1.0, 0.0])/np.sqrt(2.), - np.array([0.0, 1.0, 0.0,-1.0])/np.sqrt(2.), - np.array([0.0, 0.0, 1.0,-1.0])/np.sqrt(2.), - #----------------------------------------------- - np.array([0.0, 1.0,-1.0, 0.0])/np.sqrt(2.), - np.array([0.0, 1.0, 0.0,-1.0])/np.sqrt(2.), - np.array([0.0, 0.0, 1.0,-1.0])/np.sqrt(2.), - #----------------------------------------------- - np.array([0.0,-1.0,-1.0, 0.0])/np.sqrt(2.), - np.array([0.0,-1.0, 0.0,-1.0])/np.sqrt(2.), - np.array([0.0, 0.0,-1.0,-1.0])/np.sqrt(2.), - #----------------------------------------------- - np.array([1.0, 1.0, 1.0, 0.0])/np.sqrt(3.), - np.array([1.0, 1.0, 0.0, 1.0])/np.sqrt(3.), - np.array([1.0, 0.0, 1.0, 1.0])/np.sqrt(3.), - np.array([1.0,-1.0, 1.0, 0.0])/np.sqrt(3.), - np.array([1.0,-1.0, 0.0, 1.0])/np.sqrt(3.), - np.array([1.0, 0.0,-1.0, 1.0])/np.sqrt(3.), - np.array([1.0, 1.0,-1.0, 0.0])/np.sqrt(3.), - np.array([1.0, 1.0, 0.0,-1.0])/np.sqrt(3.), - np.array([1.0, 0.0, 1.0,-1.0])/np.sqrt(3.), - np.array([1.0,-1.0,-1.0, 0.0])/np.sqrt(3.), - np.array([1.0,-1.0, 0.0,-1.0])/np.sqrt(3.), - np.array([1.0, 0.0,-1.0,-1.0])/np.sqrt(3.), - #----------------------------------------------- - np.array([0.0, 1.0, 1.0, 1.0])/np.sqrt(3.), - np.array([0.0, 1.0,-1.0, 1.0])/np.sqrt(3.), - np.array([0.0, 1.0, 1.0,-1.0])/np.sqrt(3.), - np.array([0.0,-1.0, 1.0, 1.0])/np.sqrt(3.), - np.array([0.0,-1.0,-1.0, 1.0])/np.sqrt(3.), - np.array([0.0,-1.0, 1.0,-1.0])/np.sqrt(3.), - np.array([0.0,-1.0,-1.0,-1.0])/np.sqrt(3.), - #----------------------------------------------- - np.array([1.0, 1.0, 1.0, 1.0])/2., - np.array([1.0,-1.0, 1.0, 1.0])/2., - 
np.array([1.0, 1.0,-1.0, 1.0])/2., - np.array([1.0, 1.0, 1.0,-1.0])/2., - np.array([1.0,-1.0,-1.0, 1.0])/2., - np.array([1.0,-1.0, 1.0,-1.0])/2., - np.array([1.0, 1.0,-1.0,-1.0])/2., - np.array([1.0,-1.0,-1.0,-1.0])/2., - ]) + specials = np.array([ + [1.0, 0.0, 0.0, 0.0], + #---------------------- + [0.0, 1.0, 0.0, 0.0], + [0.0, 0.0, 1.0, 0.0], + [0.0, 0.0, 0.0, 1.0], + [0.0,-1.0, 0.0, 0.0], + [0.0, 0.0,-1.0, 0.0], + [0.0, 0.0, 0.0,-1.0], + #---------------------- + [1.0, 1.0, 0.0, 0.0], + [1.0, 0.0, 1.0, 0.0], + [1.0, 0.0, 0.0, 1.0], + [0.0, 1.0, 1.0, 0.0], + [0.0, 1.0, 0.0, 1.0], + [0.0, 0.0, 1.0, 1.0], + #---------------------- + [1.0,-1.0, 0.0, 0.0], + [1.0, 0.0,-1.0, 0.0], + [1.0, 0.0, 0.0,-1.0], + [0.0, 1.0,-1.0, 0.0], + [0.0, 1.0, 0.0,-1.0], + [0.0, 0.0, 1.0,-1.0], + #---------------------- + [0.0, 1.0,-1.0, 0.0], + [0.0, 1.0, 0.0,-1.0], + [0.0, 0.0, 1.0,-1.0], + #---------------------- + [0.0,-1.0,-1.0, 0.0], + [0.0,-1.0, 0.0,-1.0], + [0.0, 0.0,-1.0,-1.0], + #---------------------- + [1.0, 1.0, 1.0, 0.0], + [1.0, 1.0, 0.0, 1.0], + [1.0, 0.0, 1.0, 1.0], + [1.0,-1.0, 1.0, 0.0], + [1.0,-1.0, 0.0, 1.0], + [1.0, 0.0,-1.0, 1.0], + [1.0, 1.0,-1.0, 0.0], + [1.0, 1.0, 0.0,-1.0], + [1.0, 0.0, 1.0,-1.0], + [1.0,-1.0,-1.0, 0.0], + [1.0,-1.0, 0.0,-1.0], + [1.0, 0.0,-1.0,-1.0], + #---------------------- + [0.0, 1.0, 1.0, 1.0], + [0.0, 1.0,-1.0, 1.0], + [0.0, 1.0, 1.0,-1.0], + [0.0,-1.0, 1.0, 1.0], + [0.0,-1.0,-1.0, 1.0], + [0.0,-1.0, 1.0,-1.0], + [0.0,-1.0,-1.0,-1.0], + #---------------------- + [1.0, 1.0, 1.0, 1.0], + [1.0,-1.0, 1.0, 1.0], + [1.0, 1.0,-1.0, 1.0], + [1.0, 1.0, 1.0,-1.0], + [1.0,-1.0,-1.0, 1.0], + [1.0,-1.0, 1.0,-1.0], + [1.0, 1.0,-1.0,-1.0], + [1.0,-1.0,-1.0,-1.0], + ]) + specials /= np.linalg.norm(specials,axis=1).reshape(-1,1) specials_scatter = specials + np.broadcast_to(np.random.rand(4)*scatter,specials.shape) specials_scatter /= np.linalg.norm(specials_scatter,axis=1).reshape(-1,1) specials_scatter[specials_scatter[:,0]<0]*=-1 @@ -88,22 +92,518 @@ 
def reference_dir(reference_dir_base): return os.path.join(reference_dir_base,'Rotation') +#################################################################################################### +# Code below available according to the following conditions on https://github.com/MarDiehl/3Drotations +#################################################################################################### +# Copyright (c) 2017-2019, Martin Diehl/Max-Planck-Institut für Eisenforschung GmbH +# Copyright (c) 2013-2014, Marc De Graef/Carnegie Mellon University +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without modification, are +# permitted provided that the following conditions are met: +# +# - Redistributions of source code must retain the above copyright notice, this list +# of conditions and the following disclaimer. +# - Redistributions in binary form must reproduce the above copyright notice, this +# list of conditions and the following disclaimer in the documentation and/or +# other materials provided with the distribution. +# - Neither the names of Marc De Graef, Carnegie Mellon University nor the names +# of its contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +#################################################################################################### +_P = _rotation._P + +# parameters for conversion from/to cubochoric +_sc = _rotation._sc +_beta = _rotation._beta +_R1 = _rotation._R1 + +def iszero(a): + return np.isclose(a,0.0,atol=1.0e-12,rtol=0.0) + +#---------- Quaternion ---------- +def qu2om(qu): + """Quaternion to rotation matrix.""" + qq = qu[0]**2-(qu[1]**2 + qu[2]**2 + qu[3]**2) + om = np.diag(qq + 2.0*np.array([qu[1],qu[2],qu[3]])**2) + + om[0,1] = 2.0*(qu[2]*qu[1]+qu[0]*qu[3]) + om[1,0] = 2.0*(qu[1]*qu[2]-qu[0]*qu[3]) + om[1,2] = 2.0*(qu[3]*qu[2]+qu[0]*qu[1]) + om[2,1] = 2.0*(qu[2]*qu[3]-qu[0]*qu[1]) + om[2,0] = 2.0*(qu[1]*qu[3]+qu[0]*qu[2]) + om[0,2] = 2.0*(qu[3]*qu[1]-qu[0]*qu[2]) + return om if _P < 0.0 else np.swapaxes(om,-1,-2) + +def qu2eu(qu): + """Quaternion to Bunge-Euler angles.""" + q03 = qu[0]**2+qu[3]**2 + q12 = qu[1]**2+qu[2]**2 + chi = np.sqrt(q03*q12) + if np.abs(q12) < 1.e-8: + eu = np.array([np.arctan2(-_P*2.0*qu[0]*qu[3],qu[0]**2-qu[3]**2), 0.0, 0.0]) + elif np.abs(q03) < 1.e-8: + eu = np.array([np.arctan2( 2.0*qu[1]*qu[2],qu[1]**2-qu[2]**2), np.pi, 0.0]) + else: + eu = np.array([np.arctan2((-_P*qu[0]*qu[2]+qu[1]*qu[3])*chi, (-_P*qu[0]*qu[1]-qu[2]*qu[3])*chi ), + np.arctan2( 2.0*chi, q03-q12 ), + np.arctan2(( _P*qu[0]*qu[2]+qu[1]*qu[3])*chi, (-_P*qu[0]*qu[1]+qu[2]*qu[3])*chi )]) + # reduce Euler angles to definition range + eu[np.abs(eu)<1.e-6] = 0.0 + eu = 
np.where(eu<0, (eu+2.0*np.pi)%np.array([2.0*np.pi,np.pi,2.0*np.pi]),eu) + return eu + +def qu2ax(qu): + """ + Quaternion to axis angle pair. + + Modified version of the original formulation, should be numerically more stable + """ + if np.isclose(qu[0],1.,rtol=0.0): # set axis to [001] if the angle is 0/360 + ax = np.array([ 0.0, 0.0, 1.0, 0.0 ]) + elif qu[0] > 1.e-8: + s = np.sign(qu[0])/np.sqrt(qu[1]**2+qu[2]**2+qu[3]**2) + omega = 2.0 * np.arccos(np.clip(qu[0],-1.0,1.0)) + ax = ax = np.array([ qu[1]*s, qu[2]*s, qu[3]*s, omega ]) + else: + ax = ax = np.array([ qu[1], qu[2], qu[3], np.pi]) + return ax + +def qu2ro(qu): + """Quaternion to Rodrigues-Frank vector.""" + if iszero(qu[0]): + ro = np.array([qu[1], qu[2], qu[3], np.inf]) + else: + s = np.linalg.norm(qu[1:4]) + ro = np.array([0.0,0.0,_P,0.0] if iszero(s) else \ + [ qu[1]/s, qu[2]/s, qu[3]/s, np.tan(np.arccos(np.clip(qu[0],-1.0,1.0)))]) + return ro + +def qu2ho(qu): + """Quaternion to homochoric vector.""" + omega = 2.0 * np.arccos(np.clip(qu[0],-1.0,1.0)) + if np.abs(omega) < 1.0e-12: + ho = np.zeros(3) + else: + ho = np.array([qu[1], qu[2], qu[3]]) + f = 0.75 * ( omega - np.sin(omega) ) + ho = ho/np.linalg.norm(ho) * f**(1./3.) 
+ return ho + + +#---------- Rotation matrix ---------- +def om2qu(a): + trace = a[0,0] + a[1,1] + a[2,2] + if trace > 0: + s = 0.5 / np.sqrt(trace+ 1.0) + qu = np.array([0.25 / s,( a[2,1] - a[1,2] ) * s,( a[0,2] - a[2,0] ) * s,( a[1,0] - a[0,1] ) * s]) + else: + if ( a[0,0] > a[1,1] and a[0,0] > a[2,2] ): + s = 2.0 * np.sqrt( 1.0 + a[0,0] - a[1,1] - a[2,2]) + qu = np.array([ (a[2,1] - a[1,2]) / s,0.25 * s,(a[0,1] + a[1,0]) / s,(a[0,2] + a[2,0]) / s]) + elif (a[1,1] > a[2,2]): + s = 2.0 * np.sqrt( 1.0 + a[1,1] - a[0,0] - a[2,2]) + qu = np.array([ (a[0,2] - a[2,0]) / s,(a[0,1] + a[1,0]) / s,0.25 * s,(a[1,2] + a[2,1]) / s]) + else: + s = 2.0 * np.sqrt( 1.0 + a[2,2] - a[0,0] - a[1,1] ) + qu = np.array([ (a[1,0] - a[0,1]) / s,(a[0,2] + a[2,0]) / s,(a[1,2] + a[2,1]) / s,0.25 * s]) + if qu[0]<0: qu*=-1 + return qu*np.array([1.,_P,_P,_P]) + +def om2eu(om): + """Rotation matrix to Bunge-Euler angles.""" + if not np.isclose(np.abs(om[2,2]),1.0,1.e-9): + zeta = 1.0/np.sqrt(1.0-om[2,2]**2) + eu = np.array([np.arctan2(om[2,0]*zeta,-om[2,1]*zeta), + np.arccos(om[2,2]), + np.arctan2(om[0,2]*zeta, om[1,2]*zeta)]) + else: + eu = np.array([np.arctan2( om[0,1],om[0,0]), np.pi*0.5*(1-om[2,2]),0.0]) # following the paper, not the reference implementation + eu[np.abs(eu)<1.e-8] = 0.0 + eu = np.where(eu<0, (eu+2.0*np.pi)%np.array([2.0*np.pi,np.pi,2.0*np.pi]),eu) + return eu + +def om2ax(om): + """Rotation matrix to axis angle pair.""" + #return qu2ax(om2qu(om)) # HOTFIX + ax=np.empty(4) + + # first get the rotation angle + t = 0.5*(om.trace() -1.0) + ax[3] = np.arccos(np.clip(t,-1.0,1.0)) + if np.abs(ax[3])<1.e-8: + ax = np.array([ 0.0, 0.0, 1.0, 0.0]) + else: + w,vr = np.linalg.eig(om) + # next, find the eigenvalue (1,0j) + i = np.where(np.isclose(w,1.0+0.0j))[0][0] + ax[0:3] = np.real(vr[0:3,i]) + diagDelta = -_P*np.array([om[1,2]-om[2,1],om[2,0]-om[0,2],om[0,1]-om[1,0]]) + ax[0:3] = np.where(np.abs(diagDelta)<1e-12, ax[0:3],np.abs(ax[0:3])*np.sign(diagDelta)) + return ax + 
+#---------- Bunge-Euler angles ---------- +def eu2qu(eu): + """Bunge-Euler angles to quaternion.""" + ee = 0.5*eu + cPhi = np.cos(ee[1]) + sPhi = np.sin(ee[1]) + qu = np.array([ cPhi*np.cos(ee[0]+ee[2]), + -_P*sPhi*np.cos(ee[0]-ee[2]), + -_P*sPhi*np.sin(ee[0]-ee[2]), + -_P*cPhi*np.sin(ee[0]+ee[2]) ]) + if qu[0] < 0.0: qu*=-1 + return qu + +def eu2om(eu): + """Bunge-Euler angles to rotation matrix.""" + c = np.cos(eu) + s = np.sin(eu) + + om = np.array([[+c[0]*c[2]-s[0]*s[2]*c[1], +s[0]*c[2]+c[0]*s[2]*c[1], +s[2]*s[1]], + [-c[0]*s[2]-s[0]*c[2]*c[1], -s[0]*s[2]+c[0]*c[2]*c[1], +c[2]*s[1]], + [+s[0]*s[1], -c[0]*s[1], +c[1] ]]) + om[np.abs(om)<1.e-12] = 0.0 + return om + +def eu2ax(eu): + """Bunge-Euler angles to axis angle pair.""" + t = np.tan(eu[1]*0.5) + sigma = 0.5*(eu[0]+eu[2]) + delta = 0.5*(eu[0]-eu[2]) + tau = np.linalg.norm([t,np.sin(sigma)]) + alpha = np.pi if iszero(np.cos(sigma)) else \ + 2.0*np.arctan(tau/np.cos(sigma)) + + if np.abs(alpha)<1.e-6: + ax = np.array([ 0.0, 0.0, 1.0, 0.0 ]) + else: + ax = -_P/tau * np.array([ t*np.cos(delta), t*np.sin(delta), np.sin(sigma) ]) # passive axis angle pair so a minus sign in front + ax = np.append(ax,alpha) + if alpha < 0.0: ax *= -1.0 # ensure alpha is positive + return ax + +def eu2ro(eu): + """Bunge-Euler angles to Rodrigues-Frank vector.""" + ro = eu2ax(eu) # convert to axis angle pair representation + if ro[3] >= np.pi: # Differs from original implementation. 
check convention 5 + ro[3] = np.inf + elif iszero(ro[3]): + ro = np.array([ 0.0, 0.0, _P, 0.0 ]) + else: + ro[3] = np.tan(ro[3]*0.5) + return ro + +#---------- Axis angle pair ---------- +def ax2qu(ax): + """Axis angle pair to quaternion.""" + if np.abs(ax[3])<1.e-6: + qu = np.array([ 1.0, 0.0, 0.0, 0.0 ]) + else: + c = np.cos(ax[3]*0.5) + s = np.sin(ax[3]*0.5) + qu = np.array([ c, ax[0]*s, ax[1]*s, ax[2]*s ]) + return qu + +def ax2om(ax): + """Axis angle pair to rotation matrix.""" + c = np.cos(ax[3]) + s = np.sin(ax[3]) + omc = 1.0-c + om=np.diag(ax[0:3]**2*omc + c) + + for idx in [[0,1,2],[1,2,0],[2,0,1]]: + q = omc*ax[idx[0]] * ax[idx[1]] + om[idx[0],idx[1]] = q + s*ax[idx[2]] + om[idx[1],idx[0]] = q - s*ax[idx[2]] + return om if _P < 0.0 else om.T + +def ax2ro(ax): + """Axis angle pair to Rodrigues-Frank vector.""" + if np.abs(ax[3])<1.e-6: + ro = [ 0.0, 0.0, _P, 0.0 ] + else: + ro = [ax[0], ax[1], ax[2]] + # 180 degree case + ro += [np.inf] if np.isclose(ax[3],np.pi,atol=1.0e-15,rtol=0.0) else \ + [np.tan(ax[3]*0.5)] + ro = np.array(ro) + return ro + +def ax2ho(ax): + """Axis angle pair to homochoric vector.""" + f = (0.75 * ( ax[3] - np.sin(ax[3]) ))**(1.0/3.0) + ho = ax[0:3] * f + return ho + + +#---------- Rodrigues-Frank vector ---------- +def ro2ax(ro): + """Rodrigues-Frank vector to axis angle pair.""" + if np.abs(ro[3]) < 1.e-8: + ax = np.array([ 0.0, 0.0, 1.0, 0.0 ]) + elif not np.isfinite(ro[3]): + ax = np.array([ ro[0], ro[1], ro[2], np.pi ]) + else: + angle = 2.0*np.arctan(ro[3]) + ta = np.linalg.norm(ro[0:3]) + ax = np.array([ ro[0]*ta, ro[1]*ta, ro[2]*ta, angle ]) + return ax + +def ro2ho(ro): + """Rodrigues-Frank vector to homochoric vector.""" + if np.sum(ro[0:3]**2.0) < 1.e-8: + ho = np.zeros(3) + else: + f = 2.0*np.arctan(ro[3]) -np.sin(2.0*np.arctan(ro[3])) if np.isfinite(ro[3]) else np.pi + ho = ro[0:3] * (0.75*f)**(1.0/3.0) + return ho + +#---------- Homochoric vector---------- +def ho2ax(ho): + """Homochoric vector to axis angle pair.""" 
+ tfit = np.array([+1.0000000000018852, -0.5000000002194847, + -0.024999992127593126, -0.003928701544781374, + -0.0008152701535450438, -0.0002009500426119712, + -0.00002397986776071756, -0.00008202868926605841, + +0.00012448715042090092, -0.0001749114214822577, + +0.0001703481934140054, -0.00012062065004116828, + +0.000059719705868660826, -0.00001980756723965647, + +0.000003953714684212874, -0.00000036555001439719544]) + # normalize h and store the magnitude + hmag_squared = np.sum(ho**2.) + if iszero(hmag_squared): + ax = np.array([ 0.0, 0.0, 1.0, 0.0 ]) + else: + hm = hmag_squared + + # convert the magnitude to the rotation angle + s = tfit[0] + tfit[1] * hmag_squared + for i in range(2,16): + hm *= hmag_squared + s += tfit[i] * hm + ax = np.append(ho/np.sqrt(hmag_squared),2.0*np.arccos(np.clip(s,-1.0,1.0))) + return ax + +def ho2cu(ho): + """ + Homochoric vector to cubochoric vector. + + References + ---------- + D. Roşca et al., Modelling and Simulation in Materials Science and Engineering 22:075013, 2014 + https://doi.org/10.1088/0965-0393/22/7/075013 + + """ + rs = np.linalg.norm(ho) + + if np.allclose(ho,0.0,rtol=0.0,atol=1.0e-16): + cu = np.zeros(3) + else: + xyz3 = ho[_get_pyramid_order(ho,'forward')] + + # inverse M_3 + xyz2 = xyz3[0:2] * np.sqrt( 2.0*rs/(rs+np.abs(xyz3[2])) ) + + # inverse M_2 + qxy = np.sum(xyz2**2) + + if np.isclose(qxy,0.0,rtol=0.0,atol=1.0e-16): + Tinv = np.zeros(2) + else: + q2 = qxy + np.max(np.abs(xyz2))**2 + sq2 = np.sqrt(q2) + q = (_beta/np.sqrt(2.0)/_R1) * np.sqrt(q2*qxy/(q2-np.max(np.abs(xyz2))*sq2)) + tt = np.clip((np.min(np.abs(xyz2))**2+np.max(np.abs(xyz2))*sq2)/np.sqrt(2.0)/qxy,-1.0,1.0) + Tinv = np.array([1.0,np.arccos(tt)/np.pi*12.0]) if np.abs(xyz2[1]) <= np.abs(xyz2[0]) else \ + np.array([np.arccos(tt)/np.pi*12.0,1.0]) + Tinv = q * np.where(xyz2<0.0,-Tinv,Tinv) + + # inverse M_1 + cu = np.array([ Tinv[0], Tinv[1], (-1.0 if xyz3[2] < 0.0 else 1.0) * rs / np.sqrt(6.0/np.pi) ]) /_sc + cu = 
cu[_get_pyramid_order(ho,'backward')] + return cu + +#---------- Cubochoric ---------- +def cu2ho(cu): + """ + Cubochoric vector to homochoric vector. + + References + ---------- + D. Roşca et al., Modelling and Simulation in Materials Science and Engineering 22:075013, 2014 + https://doi.org/10.1088/0965-0393/22/7/075013 + + """ + # transform to the sphere grid via the curved square, and intercept the zero point + if np.allclose(cu,0.0,rtol=0.0,atol=1.0e-16): + ho = np.zeros(3) + else: + # get pyramide and scale by grid parameter ratio + XYZ = cu[_get_pyramid_order(cu,'forward')] * _sc + + # intercept all the points along the z-axis + if np.allclose(XYZ[0:2],0.0,rtol=0.0,atol=1.0e-16): + ho = np.array([0.0, 0.0, np.sqrt(6.0/np.pi) * XYZ[2]]) + else: + order = [1,0] if np.abs(XYZ[1]) <= np.abs(XYZ[0]) else [0,1] + q = np.pi/12.0 * XYZ[order[0]]/XYZ[order[1]] + c = np.cos(q) + s = np.sin(q) + q = _R1*2.0**0.25/_beta * XYZ[order[1]] / np.sqrt(np.sqrt(2.0)-c) + T = np.array([ (np.sqrt(2.0)*c - 1.0), np.sqrt(2.0) * s]) * q + + # transform to sphere grid (inverse Lambert) + # note that there is no need to worry about dividing by zero, since XYZ[2] can not become zero + c = np.sum(T**2) + s = c * np.pi/24.0 /XYZ[2]**2 + c = c * np.sqrt(np.pi/24.0)/XYZ[2] + + q = np.sqrt( 1.0 - s ) + ho = np.array([ T[order[1]] * q, T[order[0]] * q, np.sqrt(6.0/np.pi) * XYZ[2] - c ]) + + ho = ho[_get_pyramid_order(cu,'backward')] + return ho + +def _get_pyramid_order(xyz,direction=None): + """ + Get order of the coordinates. + + Depending on the pyramid in which the point is located, the order need to be adjusted. + + Parameters + ---------- + xyz : numpy.ndarray + coordinates of a point on a uniform refinable grid on a ball or + in a uniform refinable cubical grid. + + References + ---------- + D. 
Roşca et al., Modelling and Simulation in Materials Science and Engineering 22:075013, 2014 + https://doi.org/10.1088/0965-0393/22/7/075013 + + """ + order = {'forward':np.array([[0,1,2],[1,2,0],[2,0,1]]), + 'backward':np.array([[0,1,2],[2,0,1],[1,2,0]])} + if np.maximum(abs(xyz[0]),abs(xyz[1])) <= xyz[2] or \ + np.maximum(abs(xyz[0]),abs(xyz[1])) <=-xyz[2]: + p = 0 + elif np.maximum(abs(xyz[1]),abs(xyz[2])) <= xyz[0] or \ + np.maximum(abs(xyz[1]),abs(xyz[2])) <=-xyz[0]: + p = 1 + elif np.maximum(abs(xyz[2]),abs(xyz[0])) <= xyz[1] or \ + np.maximum(abs(xyz[2]),abs(xyz[0])) <=-xyz[1]: + p = 2 + return order[direction][p] + +#################################################################################################### +#################################################################################################### + +def mul(me, other): + """ + Multiplication. + + Parameters + ---------- + other : numpy.ndarray or Rotation + Vector, second or fourth order tensor, or rotation object that is rotated. 
+ + Todo + ---- + Document details active/passive) + consider rotation of (3,3,3,3)-matrix + + """ + if me.quaternion.shape != (4,): + raise NotImplementedError('Support for multiple rotations missing') + if isinstance(other, Rotation): + me_q = me.quaternion[0] + me_p = me.quaternion[1:] + other_q = other.quaternion[0] + other_p = other.quaternion[1:] + R = me.__class__(np.append(me_q*other_q - np.dot(me_p,other_p), + me_q*other_p + other_q*me_p + _P * np.cross(me_p,other_p))) + return R.standardize() + elif isinstance(other, np.ndarray): + if other.shape == (3,): + A = me.quaternion[0]**2.0 - np.dot(me.quaternion[1:],me.quaternion[1:]) + B = 2.0 * np.dot(me.quaternion[1:],other) + C = 2.0 * _P*me.quaternion[0] + + return A*other + B*me.quaternion[1:] + C * np.cross(me.quaternion[1:],other) + + elif other.shape == (3,3,): + R = me.as_matrix() + return np.dot(R,np.dot(other,R.T)) + elif other.shape == (3,3,3,3,): + R = me.as_matrix() + return np.einsum('ia,jb,kc,ld,abcd->ijkl',R,R,R,R,other) + RR = np.outer(R, R) + RRRR = np.outer(RR, RR).reshape(4 * (3,3)) + axes = ((0, 2, 4, 6), (0, 1, 2, 3)) + return np.tensordot(RRRR, other, axes) + else: + raise ValueError('Can only rotate vectors, 2nd order ternsors, and 4th order tensors') + else: + raise TypeError('Cannot rotate {}'.format(type(other))) + + class TestRotation: - def test_Eulers(self,default): + @pytest.mark.parametrize('forward,backward',[(Rotation._qu2om,Rotation._om2qu), + (Rotation._qu2eu,Rotation._eu2qu), + (Rotation._qu2ax,Rotation._ax2qu), + (Rotation._qu2ro,Rotation._ro2qu), + (Rotation._qu2ho,Rotation._ho2qu), + (Rotation._qu2cu,Rotation._cu2qu)]) + def test_quaternion_internal(self,default,forward,backward): + """Ensure invariance of conversion from quaternion and back.""" for rot in default: m = rot.as_quaternion() - o = Rotation.from_Eulers(rot.as_Eulers()).as_quaternion() + o = backward(forward(m)) ok = np.allclose(m,o,atol=atol) if np.isclose(rot.as_quaternion()[0],0.0,atol=atol): ok = ok or 
np.allclose(m*-1.,o,atol=atol) print(m,o,rot.as_quaternion()) assert ok and np.isclose(np.linalg.norm(o),1.0) - def test_AxisAngle(self,default): + @pytest.mark.parametrize('forward,backward',[(Rotation._om2qu,Rotation._qu2om), + (Rotation._om2eu,Rotation._eu2om), + (Rotation._om2ax,Rotation._ax2om), + (Rotation._om2ro,Rotation._ro2om), + (Rotation._om2ho,Rotation._ho2om), + (Rotation._om2cu,Rotation._cu2om)]) + def test_matrix_internal(self,default,forward,backward): + """Ensure invariance of conversion from rotation matrix and back.""" + for rot in default: + m = rot.as_matrix() + o = backward(forward(m)) + ok = np.allclose(m,o,atol=atol) + print(m,o,rot.as_quaternion()) + assert ok and np.isclose(np.linalg.det(o),1.0) + + @pytest.mark.parametrize('forward,backward',[(Rotation._eu2qu,Rotation._qu2eu), + (Rotation._eu2om,Rotation._om2eu), + (Rotation._eu2ax,Rotation._ax2eu), + (Rotation._eu2ro,Rotation._ro2eu), + (Rotation._eu2ho,Rotation._ho2eu), + (Rotation._eu2cu,Rotation._cu2eu)]) + def test_Eulers_internal(self,default,forward,backward): + """Ensure invariance of conversion from Euler angles and back.""" for rot in default: m = rot.as_Eulers() - o = Rotation.from_axis_angle(rot.as_axis_angle()).as_Eulers() + o = backward(forward(m)) u = np.array([np.pi*2,np.pi,np.pi*2]) ok = np.allclose(m,o,atol=atol) ok = ok or np.allclose(np.where(np.isclose(m,u),m-u,m),np.where(np.isclose(o,u),o-u,o),atol=atol) @@ -113,7 +613,187 @@ class TestRotation: print(m,o,rot.as_quaternion()) assert ok and (np.zeros(3)-1.e-9 <= o).all() and (o <= np.array([np.pi*2.,np.pi,np.pi*2.])+1.e-9).all() - def test_Matrix(self,default): + @pytest.mark.parametrize('forward,backward',[(Rotation._ax2qu,Rotation._qu2ax), + (Rotation._ax2om,Rotation._om2ax), + (Rotation._ax2eu,Rotation._eu2ax), + (Rotation._ax2ro,Rotation._ro2ax), + (Rotation._ax2ho,Rotation._ho2ax), + (Rotation._ax2cu,Rotation._cu2ax)]) + def test_axis_angle_internal(self,default,forward,backward): + """Ensure invariance of 
conversion from axis angle angles pair and back.""" + for rot in default: + m = rot.as_axis_angle() + o = backward(forward(m)) + ok = np.allclose(m,o,atol=atol) + if np.isclose(m[3],np.pi,atol=atol): + ok = ok or np.allclose(m*np.array([-1.,-1.,-1.,1.]),o,atol=atol) + print(m,o,rot.as_quaternion()) + assert ok and np.isclose(np.linalg.norm(o[:3]),1.0) and o[3]<=np.pi+1.e-9 + + @pytest.mark.parametrize('forward,backward',[(Rotation._ro2qu,Rotation._qu2ro), + #(Rotation._ro2om,Rotation._om2ro), + #(Rotation._ro2eu,Rotation._eu2ro), + (Rotation._ro2ax,Rotation._ax2ro), + (Rotation._ro2ho,Rotation._ho2ro), + (Rotation._ro2cu,Rotation._cu2ro)]) + def test_Rodrigues_internal(self,default,forward,backward): + """Ensure invariance of conversion from Rodrigues-Frank vector and back.""" + cutoff = np.tan(np.pi*.5*(1.-1e-4)) + for rot in default: + m = rot.as_Rodrigues() + o = backward(forward(m)) + ok = np.allclose(np.clip(m,None,cutoff),np.clip(o,None,cutoff),atol=atol) + ok = ok or np.isclose(m[3],0.0,atol=atol) + print(m,o,rot.as_quaternion()) + assert ok and np.isclose(np.linalg.norm(o[:3]),1.0) + + @pytest.mark.parametrize('forward,backward',[(Rotation._ho2qu,Rotation._qu2ho), + (Rotation._ho2om,Rotation._om2ho), + #(Rotation._ho2eu,Rotation._eu2ho), + (Rotation._ho2ax,Rotation._ax2ho), + (Rotation._ho2ro,Rotation._ro2ho), + (Rotation._ho2cu,Rotation._cu2ho)]) + def test_homochoric_internal(self,default,forward,backward): + """Ensure invariance of conversion from homochoric vector and back.""" + for rot in default: + m = rot.as_homochoric() + o = backward(forward(m)) + ok = np.allclose(m,o,atol=atol) + print(m,o,rot.as_quaternion()) + assert ok and np.linalg.norm(o) < _R1 + 1.e-9 + + @pytest.mark.parametrize('forward,backward',[(Rotation._cu2qu,Rotation._qu2cu), + (Rotation._cu2om,Rotation._om2cu), + (Rotation._cu2eu,Rotation._eu2cu), + (Rotation._cu2ax,Rotation._ax2cu), + (Rotation._cu2ro,Rotation._ro2cu), + (Rotation._cu2ho,Rotation._ho2cu)]) + def 
test_cubochoric_internal(self,default,forward,backward): + """Ensure invariance of conversion from cubochoric vector and back.""" + for rot in default: + m = rot.as_cubochoric() + o = backward(forward(m)) + ok = np.allclose(m,o,atol=atol) + if np.count_nonzero(np.isclose(np.abs(o),np.pi**(2./3.)*.5)): + ok = ok or np.allclose(m*-1.,o,atol=atol) + print(m,o,rot.as_quaternion()) + assert ok and np.max(np.abs(o)) < np.pi**(2./3.) * 0.5 + 1.e-9 + + @pytest.mark.parametrize('vectorized, single',[(Rotation._qu2om,qu2om), + (Rotation._qu2eu,qu2eu), + (Rotation._qu2ax,qu2ax), + (Rotation._qu2ro,qu2ro), + (Rotation._qu2ho,qu2ho)]) + def test_quaternion_vectorization(self,default,vectorized,single): + """Check vectorized implementation for quaternion against single point calculation.""" + qu = np.array([rot.as_quaternion() for rot in default]) + vectorized(qu.reshape(qu.shape[0]//2,-1,4)) + co = vectorized(qu) + for q,c in zip(qu,co): + print(q,c) + assert np.allclose(single(q),c) and np.allclose(single(q),vectorized(q)) + + + @pytest.mark.parametrize('vectorized, single',[(Rotation._om2qu,om2qu), + (Rotation._om2eu,om2eu), + (Rotation._om2ax,om2ax)]) + def test_matrix_vectorization(self,default,vectorized,single): + """Check vectorized implementation for rotation matrix against single point calculation.""" + om = np.array([rot.as_matrix() for rot in default]) + vectorized(om.reshape(om.shape[0]//2,-1,3,3)) + co = vectorized(om) + for o,c in zip(om,co): + print(o,c) + assert np.allclose(single(o),c) and np.allclose(single(o),vectorized(o)) + + @pytest.mark.parametrize('vectorized, single',[(Rotation._eu2qu,eu2qu), + (Rotation._eu2om,eu2om), + (Rotation._eu2ax,eu2ax), + (Rotation._eu2ro,eu2ro)]) + def test_Eulers_vectorization(self,default,vectorized,single): + """Check vectorized implementation for Euler angles against single point calculation.""" + eu = np.array([rot.as_Eulers() for rot in default]) + vectorized(eu.reshape(eu.shape[0]//2,-1,3)) + co = vectorized(eu) + for 
e,c in zip(eu,co): + print(e,c) + assert np.allclose(single(e),c) and np.allclose(single(e),vectorized(e)) + + @pytest.mark.parametrize('vectorized, single',[(Rotation._ax2qu,ax2qu), + (Rotation._ax2om,ax2om), + (Rotation._ax2ro,ax2ro), + (Rotation._ax2ho,ax2ho)]) + def test_axis_angle_vectorization(self,default,vectorized,single): + """Check vectorized implementation for axis angle pair against single point calculation.""" + ax = np.array([rot.as_axis_angle() for rot in default]) + vectorized(ax.reshape(ax.shape[0]//2,-1,4)) + co = vectorized(ax) + for a,c in zip(ax,co): + print(a,c) + assert np.allclose(single(a),c) and np.allclose(single(a),vectorized(a)) + + + @pytest.mark.parametrize('vectorized, single',[(Rotation._ro2ax,ro2ax), + (Rotation._ro2ho,ro2ho)]) + def test_Rodrigues_vectorization(self,default,vectorized,single): + """Check vectorized implementation for Rodrigues-Frank vector against single point calculation.""" + ro = np.array([rot.as_Rodrigues() for rot in default]) + vectorized(ro.reshape(ro.shape[0]//2,-1,4)) + co = vectorized(ro) + for r,c in zip(ro,co): + print(r,c) + assert np.allclose(single(r),c) and np.allclose(single(r),vectorized(r)) + + @pytest.mark.parametrize('vectorized, single',[(Rotation._ho2ax,ho2ax), + (Rotation._ho2cu,ho2cu)]) + def test_homochoric_vectorization(self,default,vectorized,single): + """Check vectorized implementation for homochoric vector against single point calculation.""" + ho = np.array([rot.as_homochoric() for rot in default]) + vectorized(ho.reshape(ho.shape[0]//2,-1,3)) + co = vectorized(ho) + for h,c in zip(ho,co): + print(h,c) + assert np.allclose(single(h),c) and np.allclose(single(h),vectorized(h)) + + @pytest.mark.parametrize('vectorized, single',[(Rotation._cu2ho,cu2ho)]) + def test_cubochoric_vectorization(self,default,vectorized,single): + """Check vectorized implementation for cubochoric vector against single point calculation.""" + cu = np.array([rot.as_cubochoric() for rot in default]) + 
vectorized(cu.reshape(cu.shape[0]//2,-1,3)) + co = vectorized(cu) + for u,c in zip(cu,co): + print(u,c) + assert np.allclose(single(u),c) and np.allclose(single(u),vectorized(u)) + + @pytest.mark.parametrize('degrees',[True,False]) + def test_Eulers(self,default,degrees): + for rot in default: + m = rot.as_quaternion() + o = Rotation.from_Eulers(rot.as_Eulers(degrees),degrees).as_quaternion() + ok = np.allclose(m,o,atol=atol) + if np.isclose(rot.as_quaternion()[0],0.0,atol=atol): + ok = ok or np.allclose(m*-1.,o,atol=atol) + print(m,o,rot.as_quaternion()) + assert ok and np.isclose(np.linalg.norm(o),1.0) + + @pytest.mark.parametrize('P',[1,-1]) + @pytest.mark.parametrize('normalise',[True,False]) + @pytest.mark.parametrize('degrees',[True,False]) + def test_axis_angle(self,default,degrees,normalise,P): + c = np.array([P*-1,P*-1,P*-1,1.]) + for rot in default: + m = rot.as_Eulers() + o = Rotation.from_axis_angle(rot.as_axis_angle(degrees)*c,degrees,normalise,P).as_Eulers() + u = np.array([np.pi*2,np.pi,np.pi*2]) + ok = np.allclose(m,o,atol=atol) + ok = ok or np.allclose(np.where(np.isclose(m,u),m-u,m),np.where(np.isclose(o,u),o-u,o),atol=atol) + if np.isclose(m[1],0.0,atol=atol) or np.isclose(m[1],np.pi,atol=atol): + sum_phi = np.unwrap([m[0]+m[2],o[0]+o[2]]) + ok = ok or np.isclose(sum_phi[0],sum_phi[1],atol=atol) + print(m,o,rot.as_quaternion()) + assert ok and (np.zeros(3)-1.e-9 <= o).all() and (o <= np.array([np.pi*2.,np.pi,np.pi*2.])+1.e-9).all() + + def test_matrix(self,default): for rot in default: m = rot.as_axis_angle() o = Rotation.from_axis_angle(rot.as_axis_angle()).as_axis_angle() @@ -121,172 +801,96 @@ class TestRotation: if np.isclose(m[3],np.pi,atol=atol): ok = ok or np.allclose(m*np.array([-1.,-1.,-1.,1.]),o,atol=atol) print(m,o,rot.as_quaternion()) - assert ok and np.isclose(np.linalg.norm(o[:3]),1.0) and o[3]<=np.pi++1.e-9 + assert ok and np.isclose(np.linalg.norm(o[:3]),1.0) and o[3]<=np.pi+1.e-9 - def test_Rodrigues(self,default): + 
@pytest.mark.parametrize('P',[1,-1]) + @pytest.mark.parametrize('normalise',[True,False]) + def test_Rodrigues(self,default,normalise,P): + c = np.array([P*-1,P*-1,P*-1,1.]) for rot in default: m = rot.as_matrix() - o = Rotation.from_Rodrigues(rot.as_Rodrigues()).as_matrix() + o = Rotation.from_Rodrigues(rot.as_Rodrigues()*c,normalise,P).as_matrix() ok = np.allclose(m,o,atol=atol) print(m,o) assert ok and np.isclose(np.linalg.det(o),1.0) - def test_Homochoric(self,default): + @pytest.mark.parametrize('P',[1,-1]) + def test_homochoric(self,default,P): cutoff = np.tan(np.pi*.5*(1.-1e-4)) for rot in default: m = rot.as_Rodrigues() - o = Rotation.from_homochoric(rot.as_homochoric()).as_Rodrigues() + o = Rotation.from_homochoric(rot.as_homochoric()*P*-1,P).as_Rodrigues() ok = np.allclose(np.clip(m,None,cutoff),np.clip(o,None,cutoff),atol=atol) ok = ok or np.isclose(m[3],0.0,atol=atol) print(m,o,rot.as_quaternion()) assert ok and np.isclose(np.linalg.norm(o[:3]),1.0) - def test_Cubochoric(self,default): + @pytest.mark.parametrize('P',[1,-1]) + def test_cubochoric(self,default,P): for rot in default: m = rot.as_homochoric() - o = Rotation.from_cubochoric(rot.as_cubochoric()).as_homochoric() + o = Rotation.from_cubochoric(rot.as_cubochoric()*P*-1,P).as_homochoric() ok = np.allclose(m,o,atol=atol) print(m,o,rot.as_quaternion()) assert ok and np.linalg.norm(o) < (3.*np.pi/4.)**(1./3.) 
+ 1.e-9 - def test_Quaternion(self,default): + @pytest.mark.parametrize('P',[1,-1]) + @pytest.mark.parametrize('accept_homomorph',[True,False]) + def test_quaternion(self,default,P,accept_homomorph): + c = np.array([1,P*-1,P*-1,P*-1]) * (-1 if accept_homomorph else 1) for rot in default: m = rot.as_cubochoric() - o = Rotation.from_quaternion(rot.as_quaternion()).as_cubochoric() + o = Rotation.from_quaternion(rot.as_quaternion()*c,accept_homomorph,P).as_cubochoric() ok = np.allclose(m,o,atol=atol) + if np.count_nonzero(np.isclose(np.abs(o),np.pi**(2./3.)*.5)): + ok = ok or np.allclose(m*-1.,o,atol=atol) print(m,o,rot.as_quaternion()) assert ok and o.max() < np.pi**(2./3.)*0.5+1.e-9 + @pytest.mark.parametrize('reciprocal',[True,False]) + def test_basis(self,default,reciprocal): + for rot in default: + om = rot.as_matrix() + 0.1*np.eye(3) + rot = Rotation.from_basis(om,False,reciprocal=reciprocal) + assert np.isclose(np.linalg.det(rot.as_matrix()),1.0) + @pytest.mark.parametrize('function',[Rotation.from_quaternion, Rotation.from_Eulers, Rotation.from_axis_angle, Rotation.from_matrix, Rotation.from_Rodrigues, - Rotation.from_homochoric]) + Rotation.from_homochoric, + Rotation.from_cubochoric]) def test_invalid_shape(self,function): invalid_shape = np.random.random(np.random.randint(8,32,(3))) with pytest.raises(ValueError): function(invalid_shape) + @pytest.mark.parametrize('shape',[None,(3,),(4,2)]) + def test_broadcast(self,shape): + rot = Rotation.from_random(shape) + new_shape = tuple(np.random.randint(8,32,(3))) if shape is None else \ + rot.shape + (np.random.randint(8,32),) + rot_broadcast = rot.broadcast_to(tuple(new_shape)) + for i in range(rot_broadcast.shape[-1]): + assert np.allclose(rot_broadcast.quaternion[...,i,:], rot.quaternion) + + @pytest.mark.parametrize('function,invalid',[(Rotation.from_quaternion, np.array([-1,0,0,0])), (Rotation.from_quaternion, np.array([1,1,1,0])), (Rotation.from_Eulers, np.array([1,4,0])), (Rotation.from_axis_angle, 
np.array([1,0,0,4])), (Rotation.from_axis_angle, np.array([1,1,0,1])), (Rotation.from_matrix, np.random.rand(3,3)), + (Rotation.from_matrix, np.array([[1,1,0],[1,2,0],[0,0,1]])), (Rotation.from_Rodrigues, np.array([1,0,0,-1])), (Rotation.from_Rodrigues, np.array([1,1,0,1])), - (Rotation.from_homochoric, np.array([2,2,2])) ]) - def test_invalid(self,function,invalid): + (Rotation.from_homochoric, np.array([2,2,2])), + (Rotation.from_cubochoric, np.array([1.1,0,0])) ]) + def test_invalid_value(self,function,invalid): with pytest.raises(ValueError): function(invalid) - @pytest.mark.parametrize('conversion',[Rotation.qu2om, - Rotation.qu2eu, - Rotation.qu2ax, - Rotation.qu2ro, - Rotation.qu2ho, - Rotation.qu2cu - ]) - def test_quaternion_vectorization(self,default,conversion): - qu = np.array([rot.as_quaternion() for rot in default]) - conversion(qu.reshape(qu.shape[0]//2,-1,4)) - co = conversion(qu) - for q,c in zip(qu,co): - print(q,c) - assert np.allclose(conversion(q),c) - - @pytest.mark.parametrize('conversion',[Rotation.om2qu, - Rotation.om2eu, - Rotation.om2ax, - Rotation.om2ro, - Rotation.om2ho, - Rotation.om2cu - ]) - def test_matrix_vectorization(self,default,conversion): - om = np.array([rot.as_matrix() for rot in default]) - conversion(om.reshape(om.shape[0]//2,-1,3,3)) - co = conversion(om) - for o,c in zip(om,co): - print(o,c) - assert np.allclose(conversion(o),c) - - @pytest.mark.parametrize('conversion',[Rotation.eu2qu, - Rotation.eu2om, - Rotation.eu2ax, - Rotation.eu2ro, - Rotation.eu2ho, - Rotation.eu2cu - ]) - def test_Euler_vectorization(self,default,conversion): - eu = np.array([rot.as_Eulers() for rot in default]) - conversion(eu.reshape(eu.shape[0]//2,-1,3)) - co = conversion(eu) - for e,c in zip(eu,co): - print(e,c) - assert np.allclose(conversion(e),c) - - @pytest.mark.parametrize('conversion',[Rotation.ax2qu, - Rotation.ax2om, - Rotation.ax2eu, - Rotation.ax2ro, - Rotation.ax2ho, - Rotation.ax2cu - ]) - def 
test_axisAngle_vectorization(self,default,conversion): - ax = np.array([rot.as_axis_angle() for rot in default]) - conversion(ax.reshape(ax.shape[0]//2,-1,4)) - co = conversion(ax) - for a,c in zip(ax,co): - print(a,c) - assert np.allclose(conversion(a),c) - - - @pytest.mark.parametrize('conversion',[Rotation.ro2qu, - Rotation.ro2om, - Rotation.ro2eu, - Rotation.ro2ax, - Rotation.ro2ho, - Rotation.ro2cu - ]) - def test_Rodrigues_vectorization(self,default,conversion): - ro = np.array([rot.as_Rodrigues() for rot in default]) - conversion(ro.reshape(ro.shape[0]//2,-1,4)) - co = conversion(ro) - for r,c in zip(ro,co): - print(r,c) - assert np.allclose(conversion(r),c) - - @pytest.mark.parametrize('conversion',[Rotation.ho2qu, - Rotation.ho2om, - Rotation.ho2eu, - Rotation.ho2ax, - Rotation.ho2ro, - Rotation.ho2cu - ]) - def test_homochoric_vectorization(self,default,conversion): - ho = np.array([rot.as_homochoric() for rot in default]) - conversion(ho.reshape(ho.shape[0]//2,-1,3)) - co = conversion(ho) - for h,c in zip(ho,co): - print(h,c) - assert np.allclose(conversion(h),c) - - @pytest.mark.parametrize('conversion',[Rotation.cu2qu, - Rotation.cu2om, - Rotation.cu2eu, - Rotation.cu2ax, - Rotation.cu2ro, - Rotation.cu2ho - ]) - def test_cubochoric_vectorization(self,default,conversion): - cu = np.array([rot.as_cubochoric() for rot in default]) - conversion(cu.reshape(cu.shape[0]//2,-1,3)) - co = conversion(cu) - for u,c in zip(cu,co): - print(u,c) - assert np.allclose(conversion(u),c) - @pytest.mark.parametrize('direction',['forward', 'backward']) def test_pyramid_vectorization(self,direction): @@ -300,3 +904,55 @@ class TestRotation: f = Rotation._get_pyramid_order(a,'forward') b = Rotation._get_pyramid_order(a,'backward') assert np.all(np.take_along_axis(np.take_along_axis(a,f,-1),b,-1) == a) + + + @pytest.mark.parametrize('data',[np.random.rand(5,3), + np.random.rand(5,3,3), + np.random.rand(5,3,3,3,3)]) + def test_rotate_vectorization(self,default,data): + for 
rot in default: + v = rot.broadcast_to((5,)) @ data + for i in range(data.shape[0]): + print(i-data[i]) + assert np.allclose(mul(rot,data[i]),v[i]) + + + @pytest.mark.parametrize('data',[np.random.rand(3), + np.random.rand(3,3), + np.random.rand(3,3,3,3)]) + def test_rotate_identity(self,data): + R = Rotation() + assert np.allclose(data,R*data) + + @pytest.mark.parametrize('data',[np.random.rand(3), + np.random.rand(3,3), + np.random.rand(3,3,3,3)]) + def test_rotate_360deg(self,data): + phi_1 = np.random.random() * np.pi + phi_2 = 2*np.pi - phi_1 + R_1 = Rotation.from_Eulers(np.array([phi_1,0.,0.])) + R_2 = Rotation.from_Eulers(np.array([0.,0.,phi_2])) + assert np.allclose(data,R_2*(R_1*data)) + + @pytest.mark.parametrize('data',[np.random.rand(3), + np.random.rand(3,3), + np.random.rand(3,3,3,3)]) + def test_rotate_inverse(self,data): + R = Rotation.from_random() + assert np.allclose(data,R.inversed()*(R*data)) + + @pytest.mark.parametrize('data',[np.random.rand(4), + np.random.rand(3,2), + np.random.rand(3,2,3,3)]) + def test_rotate_invalid_shape(self,data): + R = Rotation.from_random() + with pytest.raises(ValueError): + R*data + + @pytest.mark.parametrize('data',['does_not_work', + (1,2), + 5]) + def test_rotate_invalid_type(self,data): + R = Rotation.from_random() + with pytest.raises(TypeError): + R*data diff --git a/python/tests/test_Table.py b/python/tests/test_Table.py index cfcdd8813..ac7444808 100644 --- a/python/tests/test_Table.py +++ b/python/tests/test_Table.py @@ -18,7 +18,7 @@ def reference_dir(reference_dir_base): return os.path.join(reference_dir_base,'Table') class TestTable: - + def test_get_scalar(self,default): d = default.get('s') assert np.allclose(d,1.0) and d.shape[1:] == (1,) @@ -29,12 +29,12 @@ class TestTable: def test_get_tensor(self,default): d = default.get('F') - assert np.allclose(d,1.0) and d.shape[1:] == (3,3) + assert np.allclose(d,1.0) and d.shape[1:] == (3,3) def test_get_component(self,default): d = default.get('5_F') 
assert np.allclose(d,1.0) and d.shape[1:] == (1,) - + def test_write_read_str(self,default,tmpdir): default.to_ASCII(str(tmpdir.join('default.txt'))) new = Table.from_ASCII(str(tmpdir.join('default.txt'))) @@ -69,15 +69,20 @@ class TestTable: def test_read_strange(self,reference_dir,fname): with open(os.path.join(reference_dir,fname)) as f: Table.from_ASCII(f) - + def test_set(self,default): default.set('F',np.zeros((5,3,3)),'set to zero') d=default.get('F') assert np.allclose(d,0.0) and d.shape[1:] == (3,3) + def test_set_component(self,default): + default.set('1_F',np.zeros((5)),'set to zero') + d=default.get('F') + assert np.allclose(d[...,0,0],0.0) and d.shape[1:] == (3,3) + def test_labels(self,default): assert default.labels == ['F','v','s'] - + def test_add(self,default): d = np.random.random((5,9)) default.add('nine',d,'random data') diff --git a/python/tests/test_grid_filters.py b/python/tests/test_grid_filters.py index ab60c0446..8a343e26b 100644 --- a/python/tests/test_grid_filters.py +++ b/python/tests/test_grid_filters.py @@ -42,12 +42,25 @@ class TestGridFilters: assert np.allclose(grid_filters.node_displacement_fluct(size,F), grid_filters.cell_2_node(grid_filters.cell_displacement_fluct(size,F))) - def test_interpolation_nonperiodic(self): + def test_interpolation_to_node(self): size = np.random.random(3) grid = np.random.randint(8,32,(3)) F = np.random.random(tuple(grid)+(3,3)) - assert np.allclose(grid_filters.node_coord(size,F) [1:-1,1:-1,1:-1],grid_filters.cell_2_node( - grid_filters.cell_coord(size,F))[1:-1,1:-1,1:-1]) + assert np.allclose(grid_filters.node_coord(size,F) [1:-1,1:-1,1:-1], + grid_filters.cell_2_node(grid_filters.cell_coord(size,F))[1:-1,1:-1,1:-1]) + + def test_interpolation_to_cell(self): + grid = np.random.randint(1,30,(3)) + + node_coord_x = np.linspace(0,np.pi*2,num=grid[0]+1) + node_field_x = np.cos(node_coord_x) + node_field = np.broadcast_to(node_field_x.reshape(-1,1,1),grid+1) + + cell_coord_x = 
node_coord_x[:-1]+node_coord_x[1]*.5 + cell_field_x = np.interp(cell_coord_x,node_coord_x,node_field_x,period=np.pi*2.) + cell_field = np.broadcast_to(cell_field_x.reshape(-1,1,1),grid) + + assert np.allclose(cell_field,grid_filters.node_2_cell(node_field)) @pytest.mark.parametrize('mode',['cell','node']) def test_coord0_origin(self,mode): diff --git a/python/tests/test_mechanics.py b/python/tests/test_mechanics.py index f7128401f..5fe79de28 100644 --- a/python/tests/test_mechanics.py +++ b/python/tests/test_mechanics.py @@ -63,25 +63,25 @@ class TestMechanics: assert np.allclose(np.matmul(R,U), np.matmul(V,R)) - def test_strain_tensor_no_rotation(self): + @pytest.mark.parametrize('m',[0.0,np.random.random()*10.,np.random.random()*-10.]) + def test_strain_tensor_no_rotation(self,m): """Ensure that left and right stretch give same results for no rotation.""" F = np.broadcast_to(np.eye(3),[self.n,3,3])*np.random.rand(self.n,3,3) - m = np.random.random()*20.0-10.0 assert np.allclose(mechanics.strain_tensor(F,'U',m), mechanics.strain_tensor(F,'V',m)) - def test_strain_tensor_rotation_equivalence(self): + @pytest.mark.parametrize('m',[0.0,np.random.random()*2.5,np.random.random()*-2.5]) + def test_strain_tensor_rotation_equivalence(self,m): """Ensure that left and right strain differ only by a rotation.""" F = np.broadcast_to(np.eye(3),[self.n,3,3]) + (np.random.rand(self.n,3,3)*0.5 - 0.25) - m = np.random.random()*5.0-2.5 assert np.allclose(np.linalg.det(mechanics.strain_tensor(F,'U',m)), np.linalg.det(mechanics.strain_tensor(F,'V',m))) - def test_strain_tensor_rotation(self): + @pytest.mark.parametrize('m',[0.0,np.random.random(),np.random.random()*-1.]) + @pytest.mark.parametrize('t',['V','U']) + def test_strain_tensor_rotation(self,m,t): """Ensure that pure rotation results in no strain.""" F = mechanics.rotational_part(np.random.rand(self.n,3,3)) - t = ['V','U'][np.random.randint(0,2)] - m = np.random.random()*2.0 - 1.0 assert 
np.allclose(mechanics.strain_tensor(F,t,m), 0.0) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 53c7ffc70..0cb697013 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -17,10 +17,10 @@ if (PROJECT_NAME STREQUAL "damask-grid") file(GLOB grid-sources grid/*.f90) if(NOT CMAKE_BUILD_TYPE STREQUAL "SYNTAXONLY") - add_executable(DAMASK_spectral ${damask-sources} ${grid-sources}) - install (TARGETS DAMASK_spectral RUNTIME DESTINATION bin) + add_executable(DAMASK_grid ${damask-sources} ${grid-sources}) + install (TARGETS DAMASK_grid RUNTIME DESTINATION bin) else() - add_library(DAMASK_spectral OBJECT ${damask-sources} ${grid-sources}) + add_library(DAMASK_grid OBJECT ${damask-sources} ${grid-sources}) exec_program (mktemp OUTPUT_VARIABLE nothing) exec_program (mktemp ARGS -d OUTPUT_VARIABLE black_hole) install (PROGRAMS ${nothing} DESTINATION ${black_hole}) @@ -30,7 +30,7 @@ elseif (PROJECT_NAME STREQUAL "damask-mesh") file(GLOB mesh-sources mesh/*.f90) - add_executable(DAMASK_FEM ${damask-sources} ${mesh-sources}) - install (TARGETS DAMASK_FEM RUNTIME DESTINATION bin) + add_executable(DAMASK_mesh ${damask-sources} ${mesh-sources}) + install (TARGETS DAMASK_mesh RUNTIME DESTINATION bin) endif() diff --git a/src/CPFEM.f90 b/src/CPFEM.f90 index 2987d55f9..7791d2b12 100644 --- a/src/CPFEM.f90 +++ b/src/CPFEM.f90 @@ -11,6 +11,7 @@ module CPFEM use math use rotations use YAML_types + use YAML_parse use discretization_marc use material use config @@ -70,7 +71,7 @@ contains !> @brief call (thread safe) all module initializations !-------------------------------------------------------------------------------------------------- subroutine CPFEM_initAll(el,ip) - + integer(pInt), intent(in) :: el, & !< FE el number ip !< FE integration point number @@ -84,11 +85,12 @@ subroutine CPFEM_initAll(el,ip) call math_init call rotations_init call YAML_types_init + call YAML_init call HDF5_utilities_init - call results_init + call results_init(.false.) 
call discretization_marc_init(ip, el) call lattice_init - call material_init + call material_init(.false.) call constitutive_init call crystallite_init call homogenization_init @@ -134,7 +136,7 @@ subroutine CPFEM_general(mode, parallelExecution, ffn, ffn1, temperature_inp, dt logical, intent(in) :: parallelExecution !< flag indicating parallel computation of requested IPs real(pReal), dimension(6), intent(out) :: cauchyStress !< stress as 6 vector real(pReal), dimension(6,6), intent(out) :: jacobian !< jacobian as 66 tensor (Consistent tangent dcs/dE) - + real(pReal) J_inverse, & ! inverse of Jacobian rnd real(pReal), dimension (3,3) :: Kirchhoff, & ! Piola-Kirchhoff stress @@ -142,16 +144,16 @@ subroutine CPFEM_general(mode, parallelExecution, ffn, ffn1, temperature_inp, dt real(pReal), dimension (3,3,3,3) :: H_sym, & H, & jacobian3333 ! jacobian in Matrix notation - + integer(pInt) elCP, & ! crystal plasticity element number i, j, k, l, m, n, ph, homog, mySource logical updateJaco ! flag indicating if Jacobian has to be updated - + real(pReal), parameter :: ODD_STRESS = 1e15_pReal, & !< return value for stress in case of ping pong dummy cycle ODD_JACOBIAN = 1e50_pReal !< return value for jacobian in case of ping pong dummy cycle - + elCP = mesh_FEM2DAMASK_elem(elFE) - + if (iand(debug_level(debug_CPFEM), debug_levelBasic) /= 0_pInt & .and. elCP == debug_e .and. 
ip == debug_i) then write(6,'(/,a)') '#############################################' @@ -166,16 +168,16 @@ subroutine CPFEM_general(mode, parallelExecution, ffn, ffn1, temperature_inp, dt write(6,'(a,/)') '# --- terminallyIll --- #' write(6,'(a,/)') '#############################################'; flush (6) endif - + if (iand(mode, CPFEM_BACKUPJACOBIAN) /= 0_pInt) & CPFEM_dcsde_knownGood = CPFEM_dcsde if (iand(mode, CPFEM_RESTOREJACOBIAN) /= 0_pInt) & CPFEM_dcsde = CPFEM_dcsde_knownGood - + !*** age results if (iand(mode, CPFEM_AGERESULTS) /= 0_pInt) call CPFEM_forward - - + + !*** collection of FEM input with returning of randomize odd stress and jacobian !* If no parallel execution is required, there is no need to collect FEM input if (.not. parallelExecution) then @@ -186,7 +188,7 @@ subroutine CPFEM_general(mode, parallelExecution, ffn, ffn1, temperature_inp, dt end select chosenThermal1 materialpoint_F0(1:3,1:3,ip,elCP) = ffn materialpoint_F(1:3,1:3,ip,elCP) = ffn1 - + elseif (iand(mode, CPFEM_COLLECT) /= 0_pInt) then call random_number(rnd) if (rnd < 0.5_pReal) rnd = rnd - 1.0_pReal @@ -201,11 +203,11 @@ subroutine CPFEM_general(mode, parallelExecution, ffn, ffn1, temperature_inp, dt materialpoint_F(1:3,1:3,ip,elCP) = ffn1 CPFEM_calc_done = .false. endif - - + + !*** calculation of stress and jacobian if (iand(mode, CPFEM_CALCRESULTS) /= 0_pInt) then - + !*** deformation gradient outdated or any actual deformation gradient differs more than relevantStrain from the stored one validCalculation: if (terminallyIll & .or. 
outdatedFFN1 & @@ -223,7 +225,7 @@ subroutine CPFEM_general(mode, parallelExecution, ffn, ffn1, temperature_inp, dt if (rnd < 0.5_pReal) rnd = rnd - 1.0_pReal CPFEM_cs(1:6,ip,elCP) = ODD_STRESS * rnd CPFEM_dcsde(1:6,1:6,ip,elCP) = ODD_JACOBIAN * math_identity2nd(6) - + !*** deformation gradient is not outdated else validCalculation updateJaco = mod(cycleCounter,iJacoStiffness) == 0 @@ -234,7 +236,7 @@ subroutine CPFEM_general(mode, parallelExecution, ffn, ffn1, temperature_inp, dt if (iand(debug_level(debug_CPFEM), debug_levelExtensive) /= 0_pInt) & write(6,'(a,i8,1x,i2)') '<< CPFEM >> calculation for elFE ip ',elFE,ip call materialpoint_stressAndItsTangent(updateJaco, dt) - + !* parallel computation and calulation not yet done elseif (.not. CPFEM_calc_done) then if (iand(debug_level(debug_CPFEM), debug_levelExtensive) /= 0_pInt) & @@ -243,22 +245,22 @@ subroutine CPFEM_general(mode, parallelExecution, ffn, ffn1, temperature_inp, dt call materialpoint_stressAndItsTangent(updateJaco, dt) CPFEM_calc_done = .true. endif - + !* map stress and stiffness (or return odd values if terminally ill) terminalIllness: if (terminallyIll) then - + call random_number(rnd) if (rnd < 0.5_pReal) rnd = rnd - 1.0_pReal CPFEM_cs(1:6,ip,elCP) = ODD_STRESS * rnd CPFEM_dcsde(1:6,1:6,ip,elCP) = ODD_JACOBIAN * math_identity2nd(6) - + else terminalIllness - + ! translate from P to CS Kirchhoff = matmul(materialpoint_P(1:3,1:3,ip,elCP), transpose(materialpoint_F(1:3,1:3,ip,elCP))) J_inverse = 1.0_pReal / math_det33(materialpoint_F(1:3,1:3,ip,elCP)) CPFEM_cs(1:6,ip,elCP) = math_sym33to6(J_inverse * Kirchhoff,weighted=.false.) - + ! 
translate from dP/dF to dCS/dE H = 0.0_pReal do i=1,3; do j=1,3; do k=1,3; do l=1,3; do m=1,3; do n=1,3 @@ -269,15 +271,15 @@ subroutine CPFEM_general(mode, parallelExecution, ffn, ffn1, temperature_inp, dt + 0.5_pReal * ( Kirchhoff(j,l)*math_delta(i,k) + Kirchhoff(i,k)*math_delta(j,l) & + Kirchhoff(j,k)*math_delta(i,l) + Kirchhoff(i,l)*math_delta(j,k)) enddo; enddo; enddo; enddo; enddo; enddo - + forall(i=1:3, j=1:3,k=1:3,l=1:3) & H_sym(i,j,k,l) = 0.25_pReal * (H(i,j,k,l) + H(j,i,k,l) + H(i,j,l,k) + H(j,i,l,k)) - + CPFEM_dcsde(1:6,1:6,ip,elCP) = math_sym3333to66(J_inverse * H_sym,weighted=.false.) - + endif terminalIllness endif validCalculation - + !* report stress and stiffness if ((iand(debug_level(debug_CPFEM), debug_levelExtensive) /= 0_pInt) & .and. ((debug_e == elCP .and. debug_i == ip) & @@ -288,17 +290,17 @@ subroutine CPFEM_general(mode, parallelExecution, ffn, ffn1, temperature_inp, dt '<< CPFEM >> Jacobian/GPa at elFE ip ', elFE, ip, transpose(CPFEM_dcsdE(1:6,1:6,ip,elCP))*1.0e-9_pReal flush(6) endif - + endif - + !*** warn if stiffness close to zero if (all(abs(CPFEM_dcsdE(1:6,1:6,ip,elCP)) < 1e-10_pReal)) call IO_warning(601,elCP,ip) - + !*** copy to output if using commercial FEM solver cauchyStress = CPFEM_cs (1:6, ip,elCP) jacobian = CPFEM_dcsdE(1:6,1:6,ip,elCP) - - + + !*** remember extreme values of stress ... cauchyStress33 = math_6toSym33(CPFEM_cs(1:6,ip,elCP),weighted=.false.) if (maxval(cauchyStress33) > debug_stressMax) then diff --git a/src/CPFEM2.f90 b/src/CPFEM2.f90 index 9f61e4ebd..027c6dfad 100644 --- a/src/CPFEM2.f90 +++ b/src/CPFEM2.f90 @@ -12,6 +12,7 @@ module CPFEM2 use math use rotations use YAML_types + use YAML_parse use material use lattice use IO @@ -22,7 +23,7 @@ module CPFEM2 use homogenization use constitutive use crystallite -#if defined(FEM) +#if defined(Mesh) use FEM_quadrature use discretization_mesh #elif defined(Grid) @@ -43,7 +44,7 @@ subroutine CPFEM_initAll call DAMASK_interface_init ! 
Spectral and FEM interface to commandline call prec_init call IO_init -#ifdef FEM +#ifdef Mesh call FEM_quadrature_init #endif call numerics_init @@ -52,15 +53,16 @@ subroutine CPFEM_initAll call math_init call rotations_init call YAML_types_init + call YAML_init call lattice_init call HDF5_utilities_init - call results_init -#if defined(FEM) - call discretization_mesh_init + call results_init(restart=interface_restartInc>0) +#if defined(Mesh) + call discretization_mesh_init(restart=interface_restartInc>0) #elif defined(Grid) - call discretization_grid_init + call discretization_grid_init(restart=interface_restartInc>0) #endif - call material_init + call material_init(restart=interface_restartInc>0) call constitutive_init call crystallite_init call homogenization_init diff --git a/src/DAMASK_interface.f90 b/src/DAMASK_interface.f90 index 00177fb75..67c7d99da 100644 --- a/src/DAMASK_interface.f90 +++ b/src/DAMASK_interface.f90 @@ -9,8 +9,6 @@ !> by DAMASK. Interpretating the command line arguments to get load case, geometry file, !> and working directory. !-------------------------------------------------------------------------------------------------- -#define GCC_MIN 6 -#define INTEL_MIN 1700 #define PETSC_MAJOR 3 #define PETSC_MINOR_MIN 10 #define PETSC_MINOR_MAX 13 @@ -50,29 +48,6 @@ contains !-------------------------------------------------------------------------------------------------- subroutine DAMASK_interface_init #include -#if defined(__GFORTRAN__) && __GNUC__PETSC_MINOR_MAX =================================================================================================== @@ -106,7 +81,7 @@ subroutine DAMASK_interface_init typeSize integer, dimension(8) :: & dateAndTime - integer :: mpi_err + integer :: err PetscErrorCode :: petsc_err external :: & quit @@ -118,8 +93,8 @@ subroutine DAMASK_interface_init #ifdef _OPENMP ! If openMP is enabled, check if the MPI libary supports it and initialize accordingly. ! 
Otherwise, the first call to PETSc will do the initialization. - call MPI_Init_Thread(MPI_THREAD_FUNNELED,threadLevel,mpi_err) - if (mpi_err /= 0) call quit(1) + call MPI_Init_Thread(MPI_THREAD_FUNNELED,threadLevel,err) + if (err /= 0) call quit(1) if (threadLevel @brief open libary and do sanity checks +!> @brief initialize HDF5 libary and do sanity checks !-------------------------------------------------------------------------------------------------- subroutine HDF5_utilities_init diff --git a/src/IO.f90 b/src/IO.f90 index 1bd2df833..2f06dc614 100644 --- a/src/IO.f90 +++ b/src/IO.f90 @@ -49,7 +49,7 @@ subroutine IO_init write(6,'(/,a)') ' <<<+- IO init -+>>>'; flush(6) - call unitTest + call selfTest end subroutine IO_init @@ -536,6 +536,19 @@ subroutine IO_error(error_ID,el,ip,g,instance,ext_msg) case (602) msg = 'invalid selection for debug' +!------------------------------------------------------------------------------------------------ +! errors related to YAML input files + case (701) + msg = 'Incorrect indent/Null value not allowed' + case (702) + msg = 'Invalid use of flow yaml' + case (703) + msg = 'Space expected after a list indicator - ' + case (704) + msg = 'Space expected after a colon for : pair' + case (705) + msg = 'Unsupported feature' + !------------------------------------------------------------------------------------------------- ! errors related to the grid solver case (809) @@ -696,7 +709,7 @@ end subroutine IO_warning !-------------------------------------------------------------------------------------------------- !> @brief check correctness of some IO functions !-------------------------------------------------------------------------------------------------- -subroutine unitTest +subroutine selfTest integer, dimension(:), allocatable :: chunkPos character(len=:), allocatable :: str @@ -745,6 +758,6 @@ subroutine unitTest str = IO_rmComment(' ab #') if (str /= ' ab'.or. 
len(str) /= 3) call IO_error(0,ext_msg='IO_rmComment/7') -end subroutine unitTest +end subroutine selfTest end module IO diff --git a/src/YAML_parse.f90 b/src/YAML_parse.f90 new file mode 100644 index 000000000..5bbba8a4f --- /dev/null +++ b/src/YAML_parse.f90 @@ -0,0 +1,644 @@ +!---------------------------------------------------------------------------------------------------- +!> @author Martin Diehl, Max-Planck-Institut für Eisenforschung GmbH +!> @author Sharan Roongta, Max-Planck-Institut für Eisenforschung GmbH +!> @brief Parser for YAML files +!> @details module converts a YAML input file to an equivalent YAML flow style which is then parsed. +!---------------------------------------------------------------------------------------------------- + +module YAML_parse + + use prec + use IO + use YAML_types + + implicit none + + private + + public :: YAML_init + public :: parse_flow,to_flow + +contains + +!-------------------------------------------------------------------------------------------------- +!> @brief do sanity checks +!-------------------------------------------------------------------------------------------------- +subroutine YAML_init + + call selfTest + +end subroutine YAML_init + + +!-------------------------------------------------------------------------------------------------- +!> @brief reads the flow style string and stores it in the form of dictionaries, lists and scalars. +!> @details A node type pointer can either point to a dictionary, list or scalar type entities. 
+!-------------------------------------------------------------------------------------------------- +recursive function parse_flow(flow_string) result(node) + + character(len=*), intent(inout) :: flow_string + class (tNode), pointer :: node + + class (tNode), pointer :: myVal + character(len=pStringLen) :: key + + integer :: e, & !> end position of dictionary or list + s, & !> start position of dictionary or list + d !> position of key: value separator (':') + + flow_string = trim(adjustl(flow_string(:))) + if (flow_string(1:1) == '{') then ! start of a dictionary + e = 1 + allocate(tDict::node) + do while (e < len_trim(flow_string)) + s = e + d = s + scan(flow_string(s+1:),':') + e = d + find_end(flow_string(d+1:),'}') + + key = trim(adjustl(flow_string(s+1:d-1))) + myVal => parse_flow(flow_string(d+1:e-1)) ! parse items (recursively) + + select type (node) + class is (tDict) + call node%set(key,myVal) + end select + end do + + elseif (flow_string(1:1) == '[') then ! start of a list + e = 1 + allocate(tList::node) + do while (e < len_trim(flow_string)) + s = e + e = s + find_end(flow_string(s+1:),']') + myVal => parse_flow(flow_string(s+1:e-1)) ! parse items (recursively) + + select type (node) + class is (tList) + call node%append(myVal) + end select + end do + + else ! 
scalar value + allocate(tScalar::node) + select type (node) + class is (tScalar) + node = trim(adjustl(flow_string)) + end select + endif + +end function parse_flow + + +!-------------------------------------------------------------------------------------------------- +!> @brief finds location of chunk end: ',' or '}' or ']' +!> @details leaves nested lists ( '[...]' and dicts '{...}') intact +!-------------------------------------------------------------------------------------------------- +integer function find_end(str,e_char) + + character(len=*), intent(in) :: str + character, intent(in) :: e_char !< end of list/dict ( '}' or ']') + + integer :: N_sq, & !< number of open square brackets + N_cu, & !< number of open curly brackets + i + + N_sq = 0 + N_cu = 0 + do i = 1, len_trim(str) + if (N_sq==0 .and. N_cu==0 .and. scan(str(i:i),e_char//',') == 1) exit + N_sq = N_sq + merge(1,0,str(i:i) == '[') + N_cu = N_cu + merge(1,0,str(i:i) == '{') + N_sq = N_sq - merge(1,0,str(i:i) == ']') + N_cu = N_cu - merge(1,0,str(i:i) == '}') + enddo + find_end = i + +end function find_end + + +!-------------------------------------------------------------------------------------------------- +! @brief Returns Indentation. +! @details It determines the indentation level for a given block/line. +! In cases for nested lists, an offset is added to determine the indent of the item block (skip +! leading dashes) +!-------------------------------------------------------------------------------------------------- +integer function indentDepth(line,offset) + + character(len=*), intent(in) :: line + integer, optional,intent(in) :: offset + + indentDepth = verify(line,IO_WHITESPACE) -1 + if(present(offset)) indentDepth = indentDepth + offset + +end function indentDepth + + +!-------------------------------------------------------------------------------------------------- +! @brief check whether a string is in flow style, i.e. 
starts with '{' or '[' +!-------------------------------------------------------------------------------------------------- +logical function isFlow(line) + + character(len=*), intent(in) :: line + + isFlow = index(adjustl(line),'[') == 1 .or. index(adjustl(line),'{') == 1 + +end function isFlow + + +!-------------------------------------------------------------------------------------------------- +! @brief check whether a string is a scalar item, i.e. starts without any special symbols +!-------------------------------------------------------------------------------------------------- +logical function isScalar(line) + + character(len=*), intent(in) :: line + + isScalar = (.not.isKeyValue(line) .and. .not.isKey(line) .and. .not.isListItem(line) & + .and. .not.isFlow(line)) + +end function isScalar + + +!-------------------------------------------------------------------------------------------------- +! @brief check whether a string is a list item, i.e. starts with '-' +!-------------------------------------------------------------------------------------------------- +logical function isListItem(line) + + character(len=*), intent(in) :: line + + isListItem = index(adjustl(line),'-') == 1 + +end function isListItem + + +!-------------------------------------------------------------------------------------------------- +! @brief check whether a string contains a key value pair of the for ': ' +!-------------------------------------------------------------------------------------------------- +logical function isKeyValue(line) + + character(len=*), intent(in) :: line + isKeyValue = .false. + + if( .not. isKey(line) .and. index(IO_rmComment(line),':') > 0 .and. .not. isFlow(line)) then + if(index(IO_rmComment(line),': ') > 0) then + isKeyValue = .true. + else + call IO_error(704,ext_msg=line) + endif + endif + +end function isKeyValue + + +!-------------------------------------------------------------------------------------------------- +! 
@brief check whether a string contains a key without a value, i.e. it ends with ':' +! ToDo: check whether this is safe for trailing spaces followed by a new line character +!-------------------------------------------------------------------------------------------------- +logical function isKey(line) + + character(len=*), intent(in) :: line + + if(len(IO_rmComment(line)) == 0) then + isKey = .false. + else + isKey = IO_rmComment(line(len(IO_rmComment(line)):len(IO_rmComment(line)))) == ':' & + .and. .not. isFlow(line) + endif + +end function isKey + + +!-------------------------------------------------------------------------------------------------- +! @brief reads a line of YAML block which is already in flow style +! @details Dicts should be enlcosed within '{}' for it to be consistent with DAMASK YAML parser +!-------------------------------------------------------------------------------------------------- +recursive subroutine line_isFlow(flow,s_flow,line) + + character(len=*), intent(inout) :: flow !< YAML in flow style only + integer, intent(inout) :: s_flow !< start position in flow + character(len=*), intent(in) :: line + + integer :: & + s, & + list_chunk, & + dict_chunk + + if(index(adjustl(line),'[') == 1) then + s = index(line,'[') + flow(s_flow:s_flow) = '[' + s_flow = s_flow +1 + do while(s < len_trim(line)) + list_chunk = s + find_end(line(s+1:),']') + if(iskeyValue(line(s+1:list_chunk-1))) then + flow(s_flow:s_flow) = '{' + s_flow = s_flow +1 + call keyValue_toFlow(flow,s_flow,line(s+1:list_chunk-1)) + flow(s_flow:s_flow) = '}' + s_flow = s_flow +1 + elseif(isFlow(line(s+1:list_chunk-1))) then + call line_isFlow(flow,s_flow,line(s+1:list_chunk-1)) + else + call line_toFlow(flow,s_flow,line(s+1:list_chunk-1)) + endif + flow(s_flow:s_flow+1) = ', ' + s_flow = s_flow +2 + s = s + find_end(line(s+1:),']') + enddo + s_flow = s_flow - 1 + if (flow(s_flow-1:s_flow-1) == ',') s_flow = s_flow - 1 + flow(s_flow:s_flow) = ']' + s_flow = s_flow+1 + + 
elseif(index(adjustl(line),'{') == 1) then + s = index(line,'{') + flow(s_flow:s_flow) = '{' + s_flow = s_flow +1 + do while(s < len_trim(line)) + dict_chunk = s + find_end(line(s+1:),'}') + if( .not. iskeyValue(line(s+1:dict_chunk-1))) call IO_error(705,ext_msg=line) + call keyValue_toFlow(flow,s_flow,line(s+1:dict_chunk-1)) + flow(s_flow:s_flow+1) = ', ' + s_flow = s_flow +2 + s = s + find_end(line(s+1:),'}') + enddo + s_flow = s_flow -1 + if(flow(s_flow-1:s_flow-1) == ',') s_flow = s_flow -1 + flow(s_flow:s_flow) = '}' + s_flow = s_flow +1 + else + call line_toFlow(flow,s_flow,line) + endif + +end subroutine line_isFlow + + +!------------------------------------------------------------------------------------------------- +! @brief reads a line of YAML block of type : and places it in the YAML flow style structure +! @details Makes sure that the is consistent with the input required in DAMASK YAML parser +!------------------------------------------------------------------------------------------------- +recursive subroutine keyValue_toFlow(flow,s_flow,line) + + character(len=*), intent(inout) :: flow !< YAML in flow style only + integer, intent(inout) :: s_flow !< start position in flow + character(len=*), intent(in) :: line + + character(len=:), allocatable :: line_asStandard ! standard form of : + integer :: & + d_flow, & + col_pos, & + offset_value + + col_pos = index(line,':') + if(isFlow(line(col_pos+1:))) then + d_flow = len_trim(adjustl(line(:col_pos))) + flow(s_flow:s_flow+d_flow+1) = trim(adjustl(line(:col_pos)))//' ' + s_flow = s_flow + d_flow+1 + call line_isFlow(flow,s_flow,line(col_pos+1:)) + else + offset_value = indentDepth(line(col_pos+2:)) + line_asStandard = line(:col_pos+1)//line(col_pos+2+offset_value:) + call line_toFlow(flow,s_flow,line_asStandard) + endif + +end subroutine keyValue_toFlow + + +!------------------------------------------------------------------------------------------------- +! 
@brief reads a line of YAML block and places it in the YAML flow style structure +!------------------------------------------------------------------------------------------------- +subroutine line_toFlow(flow,s_flow,line) + + character(len=*), intent(inout) :: flow !< YAML in flow style only + integer, intent(inout) :: s_flow !< start position in flow + character(len=*), intent(in) :: line + + integer :: & + d_flow + + d_flow = len_trim(adjustl(line)) + flow(s_flow:s_flow+d_flow) = trim(adjustl(line)) + s_flow = s_flow + d_flow + +end subroutine line_toFlow + + +!------------------------------------------------------------------------------------------------- +! @brief convert a yaml list in block style to a yaml list in flow style +! @details enters the function when encountered with the list indicator '- ' +! reads each scalar list item and separates each other with a ',' +! If list item is non scalar, it stores the offset for that list item block +! Increase in the indentation level or when list item is not scalar -> 'decide' function is called. +! decrease in indentation level indicates the end of an indentation block +!------------------------------------------------------------------------------------------------- +recursive subroutine lst(blck,flow,s_blck,s_flow,offset) + + character(len=*), intent(in) :: blck !< YAML in mixed style + character(len=*), intent(inout) :: flow !< YAML in flow style only + integer, intent(inout) :: s_blck, & !< start position in blck + s_flow, & !< start position in flow + offset !< stores leading '- ' in nested lists + character(len=pStringLen) :: line + integer :: e_blck,indent + + indent = indentDepth(blck(s_blck:),offset) + do while (s_blck <= len_trim(blck)) + e_blck = s_blck + index(blck(s_blck:),IO_EOL) - 2 + line = IO_rmComment(blck(s_blck:e_blck)) + if (len_trim(line) == 0) then + s_blck = e_blck + 2 ! 
forward to next line + cycle + elseif(indentDepth(line,offset) > indent) then + call decide(blck,flow,s_blck,s_flow,offset) + offset = 0 + flow(s_flow:s_flow+1) = ', ' + s_flow = s_flow + 2 + elseif(indentDepth(line,offset) < indent .or. .not. isListItem(line)) then + offset = 0 + exit ! job done (lower level) + else + if(trim(adjustl(line)) == '-') then ! list item in next line + s_blck = e_blck + 2 + e_blck = e_blck + index(blck(e_blck+2:),IO_EOL) + line = IO_rmComment(blck(s_blck:e_blck)) + if(indentDepth(line) < indent .or. indentDepth(line) == indent) & + call IO_error(701,ext_msg=line) + + if(isScalar(line)) then + call line_toFlow(flow,s_flow,line) + s_blck = e_blck +2 + offset = 0 + elseif(isFlow(line)) then + call line_isFlow(flow,s_flow,line) + s_blck = e_blck +2 + offset = 0 + endif + else ! list item in the same line + if(line(indentDepth(line)+2:indentDepth(line)+2) /= ' ') & + call IO_error(703,ext_msg=line) + line = line(indentDepth(line)+3:) + if(isScalar(line)) then + call line_toFlow(flow,s_flow,line) + s_blck = e_blck +2 + offset = 0 + elseif(isFlow(line)) then + call line_isFlow(flow,s_flow,line) + s_blck = e_blck +2 + offset = 0 + else ! non scalar list item + offset = offset + indentDepth(blck(s_blck:))+1 ! offset in spaces to be ignored + s_blck = s_blck + index(blck(s_blck:e_blck),'-') ! s_blck after '-' symbol + endif + end if + end if + + if(isScalar(line) .or. isFlow(line)) then + flow(s_flow:s_flow+1) = ', ' + s_flow = s_flow +2 + endif + + end do + + s_flow = s_flow - 1 + if (flow(s_flow-1:s_flow-1) == ',') s_flow = s_flow - 1 + +end subroutine lst + + +!-------------------------------------------------------------------------------------------------- +! @brief convert a yaml dict in block style to a yaml dict in flow style +! @details enters the function when encountered with the dictionary indicator ':' +! parses each line in the block and compares indentation of a line with the preceding line +! 
upon increase in indentation level -> 'decide' function decides if the line is a list or dict +! decrease in indentation indicates the end of an indentation block +!-------------------------------------------------------------------------------------------------- +recursive subroutine dct(blck,flow,s_blck,s_flow,offset) + + character(len=*), intent(in) :: blck !< YAML in mixed style + character(len=*), intent(inout) :: flow !< YAML in flow style only + integer, intent(inout) :: s_blck, & !< start position in blck + s_flow, & !< start position in flow + offset + + character(len=pStringLen) :: line + integer :: e_blck,indent + logical :: previous_isKey + + previous_isKey = .false. + + indent = indentDepth(blck(s_blck:),offset) + + do while (s_blck <= len_trim(blck)) + e_blck = s_blck + index(blck(s_blck:),IO_EOL) - 2 + line = IO_rmComment(blck(s_blck:e_blck)) + if (len_trim(line) == 0) then + s_blck = e_blck + 2 ! forward to next line + cycle + elseif(indentDepth(line,offset) < indent) then + if(isScalar(line) .or. isFlow(line) .and. previous_isKey) & + call IO_error(701,ext_msg=line) + offset = 0 + exit ! job done (lower level) + elseif(indentDepth(line,offset) > indent .or. isListItem(line)) then + offset = 0 + call decide(blck,flow,s_blck,s_flow,offset) + else + if(isScalar(line)) call IO_error(701,ext_msg=line) + if(isFlow(line)) call IO_error(702,ext_msg=line) + + line = line(indentDepth(line)+1:) + if(previous_isKey) then + flow(s_flow-1:s_flow) = ', ' + s_flow = s_flow + 1 + endif + + if(isKeyValue(line)) then + call keyValue_toFlow(flow,s_flow,line) + else + call line_toFlow(flow,s_flow,line) + endif + + s_blck = e_blck +2 + end if + + if(isScalar(line) .or. isKeyValue(line)) then + flow(s_flow:s_flow) = ',' + s_flow = s_flow + 1 + previous_isKey = .false. + else + previous_isKey = .true. 
+ endif + + flow(s_flow:s_flow) = ' ' + s_flow = s_flow + 1 + offset = 0 + end do + + s_flow = s_flow - 1 + if (flow(s_flow-1:s_flow-1) == ',') s_flow = s_flow - 1 + +end subroutine dct + + +!-------------------------------------------------------------------------------------------------- +! @brief decide whether next block is list or dict +!-------------------------------------------------------------------------------------------------- +recursive subroutine decide(blck,flow,s_blck,s_flow,offset) + + character(len=*), intent(in) :: blck !< YAML in mixed style + character(len=*), intent(inout) :: flow !< YAML in flow style only + integer, intent(inout) :: s_blck, & !< start position in blck + s_flow, & !< start position in flow + offset + integer :: e_blck + character(len=pStringLen) :: line + + if(s_blck <= len(blck)) then + e_blck = s_blck + index(blck(s_blck:),IO_EOL) - 2 + line = IO_rmComment(blck(s_blck:e_blck)) + + ! exit here if '---' is found + if (isListItem(line)) then + flow(s_flow:s_flow) = '[' + s_flow = s_flow + 1 + call lst(blck,flow,s_blck,s_flow,offset) + flow(s_flow:s_flow) = ']' + s_flow = s_flow + 1 + elseif(isKey(line) .or. isKeyValue(line)) then + flow(s_flow:s_flow) = '{' + s_flow = s_flow + 1 + call dct(blck,flow,s_blck,s_flow,offset) + flow(s_flow:s_flow) = '}' + s_flow = s_flow + 1 + elseif(isFlow(line)) then + call line_isFlow(flow,s_flow,line) + s_blck = e_blck +2 + else + line = line(indentDepth(line)+1:) + call line_toFlow(flow,s_flow,line) + s_blck = e_blck +2 + endif + endif + +end subroutine + + +!-------------------------------------------------------------------------------------------------- +! 
@brief convert all block style YAML parts to flow style +!-------------------------------------------------------------------------------------------------- +function to_flow(blck) + + character(len=:), allocatable :: to_flow + character(len=*), intent(in) :: blck !< YAML mixed style + integer :: s_blck, & !< start position in blck + s_flow, & !< start position in flow + offset, & !< counts leading '- ' in nested lists + end_line + if(isFlow(blck)) then + to_flow = trim(adjustl(blck)) + else + allocate(character(len=len(blck)*2)::to_flow) + ! move forward here (skip empty lines) and remove '----' if found + s_flow = 1 + s_blck = 1 + offset = 0 + call decide(blck,to_flow,s_blck,s_flow,offset) + to_flow = trim(to_flow(:s_flow-1)) + endif + end_line = index(to_flow,new_line('')) + if(end_line > 0) to_flow = to_flow(:end_line-1) + +end function to_flow + + +!-------------------------------------------------------------------------------------------------- +subroutine selfTest() + + if (indentDepth(' a') /= 1) call IO_error(0,ext_msg='indentDepth') + if (indentDepth('a') /= 0) call IO_error(0,ext_msg='indentDepth') + if (indentDepth('x ') /= 0) call IO_error(0,ext_msg='indentDepth') + + if ( isFlow(' a')) call IO_error(0,ext_msg='isFLow') + if (.not. isFlow('{')) call IO_error(0,ext_msg='isFlow') + if (.not. isFlow(' [')) call IO_error(0,ext_msg='isFlow') + + if ( isListItem(' a')) call IO_error(0,ext_msg='isListItem') + if (.not. isListItem('- a ')) call IO_error(0,ext_msg='isListItem') + if (.not. isListItem(' -b')) call IO_error(0,ext_msg='isListItem') + + if ( isKeyValue(' a')) call IO_error(0,ext_msg='isKeyValue') + if ( isKeyValue(' a: ')) call IO_error(0,ext_msg='isKeyValue') + if (.not. isKeyValue(' a: b')) call IO_error(0,ext_msg='isKeyValue') + + if ( isKey(' a')) call IO_error(0,ext_msg='isKey') + if ( isKey('{a:b}')) call IO_error(0,ext_msg='isKey') + if ( isKey(' a:b')) call IO_error(0,ext_msg='isKey') + if (.not. 
isKey(' a: ')) call IO_error(0,ext_msg='isKey') + if (.not. isKey(' a:')) call IO_error(0,ext_msg='isKey') + if (.not. isKey(' a: #')) call IO_error(0,ext_msg='isKey') + + if( isScalar('a: ')) call IO_error(0,ext_msg='isScalar') + if( isScalar('a: b')) call IO_error(0,ext_msg='isScalar') + if( isScalar('{a:b}')) call IO_error(0,ext_msg='isScalar') + if( isScalar('- a:')) call IO_error(0,ext_msg='isScalar') + if(.not. isScalar(' a')) call IO_error(0,ext_msg='isScalar') + + basic_list: block + character(len=*), parameter :: block_list = & + " - Casablanca"//IO_EOL//& + " - North by Northwest"//IO_EOL + character(len=*), parameter :: block_list_newline = & + " -"//IO_EOL//& + " Casablanca"//IO_EOL//& + " -"//IO_EOL//& + " North by Northwest"//IO_EOL + character(len=*), parameter :: flow_list = & + "[Casablanca, North by Northwest]" + + if (.not. to_flow(block_list) == flow_list) call IO_error(0,ext_msg='to_flow') + if (.not. to_flow(block_list_newline) == flow_list) call IO_error(0,ext_msg='to_flow') + end block basic_list + + basic_dict: block + character(len=*), parameter :: block_dict = & + " aa: Casablanca"//IO_EOL//& + " bb: North by Northwest"//IO_EOL + character(len=*), parameter :: block_dict_newline = & + " aa:"//IO_EOL//& + " Casablanca"//IO_EOL//& + " bb:"//IO_EOL//& + " North by Northwest"//IO_EOL + character(len=*), parameter :: flow_dict = & + "{aa: Casablanca, bb: North by Northwest}" + + if (.not. to_flow(block_dict) == flow_dict) call IO_error(0,ext_msg='to_flow') + if (.not. to_flow(block_dict_newline) == flow_dict) call IO_error(0,ext_msg='to_flow') + end block basic_dict + + basic_flow: block + character(len=*), parameter :: flow_braces = & + " source: [{param: 1}, {param: 2}, {param: 3}, {param: 4}]"//IO_EOL + character(len=*), parameter :: flow_mixed_braces = & + " source: [param: 1, {param: 2}, param: 3, {param: 4}]"//IO_EOL + character(len=*), parameter :: flow = & + "{source: [{param: 1}, {param: 2}, {param: 3}, {param: 4}]}" + + if (.not. 
to_flow(flow_braces) == flow) call IO_error(0,ext_msg='to_flow') + if (.not. to_flow(flow_mixed_braces) == flow) call IO_error(0,ext_msg='to_flow') + end block basic_flow + + basic_mixed: block + character(len=*), parameter :: block_flow = & + " aa:"//IO_EOL//& + " - "//IO_EOL//& + " param_1: [a: b, c, {d: {e: [f: g, h]}}]"//IO_EOL//& + " - c: d"//IO_EOL//& + " bb:"//IO_EOL//& + " - {param_1: [{a: b}, c, {d: {e: [{f: g}, h]}}]}"//IO_EOL + character(len=*), parameter :: mixed_flow = & + "{aa: [{param_1: [{a: b}, c, {d: {e: [{f: g}, h]}}]}, {c: d}], bb: [{param_1: [{a: b}, c, {d: {e: [{f: g}, h]}}]}]}" + + if(.not. to_flow(block_flow) == mixed_flow) call IO_error(0,ext_msg='to_flow') + end block basic_mixed + +end subroutine selfTest + +end module YAML_parse diff --git a/src/YAML_types.f90 b/src/YAML_types.f90 index 07541bc1a..ddbc6f531 100644 --- a/src/YAML_types.f90 +++ b/src/YAML_types.f90 @@ -16,14 +16,7 @@ module YAML_types private - public :: & - tNode, & - tScalar, & - tDict, & - tList, & - YAML_types_init - - type, abstract :: tNode + type, abstract, public :: tNode integer :: length = 0 contains procedure(asFormattedString), deferred :: asFormattedString @@ -102,7 +95,7 @@ module YAML_types end type tNode - type, extends(tNode) :: tScalar + type, extends(tNode), public :: tScalar character(len=:), allocatable, private :: value @@ -118,7 +111,7 @@ module YAML_types asString => tScalar_asString end type tScalar - type, extends(tNode) :: tList + type, extends(tNode), public :: tList class(tItem), pointer :: first => null() @@ -136,7 +129,7 @@ module YAML_types final :: tList_finalize end type tList - type, extends(tList) :: tDict + type, extends(tList), public :: tDict contains procedure :: asFormattedString => tDict_asFormattedString procedure :: set => tDict_set @@ -171,6 +164,10 @@ module YAML_types module procedure tScalar_assign__ end interface assignment (=) + public :: & + YAML_types_init, & + assignment(=) + contains 
!-------------------------------------------------------------------------------------------------- @@ -180,7 +177,7 @@ subroutine YAML_types_init write(6,'(/,a)') ' <<<+- YAML_types init -+>>>' - call unitTest + call selfTest end subroutine YAML_types_init @@ -188,7 +185,7 @@ end subroutine YAML_types_init !-------------------------------------------------------------------------------------------------- !> @brief check correctness of some type bound procedures !-------------------------------------------------------------------------------------------------- -subroutine unitTest +subroutine selfTest class(tNode), pointer :: s1,s2 allocate(tScalar::s1) @@ -260,7 +257,7 @@ subroutine unitTest if(n%get_asString(1) /= 'True') call IO_error(0,ext_msg='byIndex_asString') end block -end subroutine unitTest +end subroutine selfTest !--------------------------------------------------------------------------------------------------- diff --git a/src/commercialFEM_fileList.f90 b/src/commercialFEM_fileList.f90 index f8a5ad96a..346970b49 100644 --- a/src/commercialFEM_fileList.f90 +++ b/src/commercialFEM_fileList.f90 @@ -8,6 +8,7 @@ #include "debug.f90" #include "list.f90" #include "YAML_types.f90" +#include "YAML_parse.f90" #include "future.f90" #include "config.f90" #include "LAPACK_interface.f90" diff --git a/src/constitutive_plastic_phenopowerlaw.f90 b/src/constitutive_plastic_phenopowerlaw.f90 index 12a30478a..fa273cbd3 100644 --- a/src/constitutive_plastic_phenopowerlaw.f90 +++ b/src/constitutive_plastic_phenopowerlaw.f90 @@ -160,7 +160,7 @@ module subroutine plastic_phenopowerlaw_init config%getFloats('interaction_twintwin'), & config%getString('lattice_structure')) prm%gamma_twin_char = lattice_characteristicShear_twin(N_tw,config%getString('lattice_structure'),& - config%getFloat('c/a')) + config%getFloat('c/a',defaultVal=0.0_pReal)) xi_twin_0 = config%getFloats('tau0_twin',requiredSize=size(N_tw)) diff --git a/src/grid/DAMASK_grid.f90 b/src/grid/DAMASK_grid.f90 
index 84dc7cd51..7b3265740 100644 --- a/src/grid/DAMASK_grid.f90 +++ b/src/grid/DAMASK_grid.f90 @@ -6,7 +6,7 @@ !> @details doing cutbacking, forwarding in case of restart, reporting statistics, writing !> results !-------------------------------------------------------------------------------------------------- -program DAMASK_spectral +program DAMASK_grid #include use PETScsys use prec @@ -495,4 +495,4 @@ program DAMASK_spectral call quit(0) ! no complains ;) -end program DAMASK_spectral +end program DAMASK_grid diff --git a/src/grid/discretization_grid.f90 b/src/grid/discretization_grid.f90 index 8413fc5e8..5cea99550 100644 --- a/src/grid/discretization_grid.f90 +++ b/src/grid/discretization_grid.f90 @@ -42,7 +42,9 @@ contains !-------------------------------------------------------------------------------------------------- !> @brief reads the geometry file to obtain information on discretization !-------------------------------------------------------------------------------------------------- -subroutine discretization_grid_init +subroutine discretization_grid_init(restart) + + logical, intent(in) :: restart include 'fftw3-mpi.f03' real(pReal), dimension(3) :: & @@ -100,13 +102,14 @@ subroutine discretization_grid_init !-------------------------------------------------------------------------------------------------- ! store geometry information for post processing - call results_openJobFile - call results_closeGroup(results_addGroup('geometry')) - call results_addAttribute('grid', grid, 'geometry') - call results_addAttribute('size', geomSize,'geometry') - call results_addAttribute('origin',origin, 'geometry') - call results_closeJobFile - + if(.not. 
restart) then + call results_openJobFile + call results_closeGroup(results_addGroup('geometry')) + call results_addAttribute('grid', grid, 'geometry') + call results_addAttribute('size', geomSize,'geometry') + call results_addAttribute('origin',origin, 'geometry') + call results_closeJobFile + endif !-------------------------------------------------------------------------------------------------- ! geometry information required by the nonlocal CP model call geometry_plastic_nonlocal_setIPvolume(reshape([(product(mySize/real(myGrid,pReal)),j=1,product(myGrid))], & diff --git a/src/lattice.f90 b/src/lattice.f90 index 120c58a15..7a732d2fd 100644 --- a/src/lattice.f90 +++ b/src/lattice.f90 @@ -529,7 +529,7 @@ subroutine lattice_init lattice_DamageMobility(p) = config_phase(p)%getFloat('damage_mobility',defaultVal=0.0_pReal) ! SHOULD NOT BE PART OF LATTICE END - call unitTest + call selfTest enddo @@ -1436,6 +1436,7 @@ function lattice_SchmidMatrix_slip(Nslip,structure,cOverA) result(SchmidMatrix) NslipMax = BCT_NSLIPSYSTEM slipSystems = BCT_SYSTEMSLIP case default + allocate(NslipMax(0)) call IO_error(137,ext_msg='lattice_SchmidMatrix_slip: '//trim(structure)) end select @@ -1485,6 +1486,7 @@ function lattice_SchmidMatrix_twin(Ntwin,structure,cOverA) result(SchmidMatrix) NtwinMax = HEX_NTWINSYSTEM twinSystems = HEX_SYSTEMTWIN case default + allocate(NtwinMax(0)) call IO_error(137,ext_msg='lattice_SchmidMatrix_twin: '//trim(structure)) end select @@ -1564,6 +1566,7 @@ function lattice_SchmidMatrix_cleavage(Ncleavage,structure,cOverA) result(Schmid NcleavageMax = BCC_NCLEAVAGESYSTEM cleavageSystems = BCC_SYSTEMCLEAVAGE case default + allocate(NcleavageMax(0)) call IO_error(137,ext_msg='lattice_SchmidMatrix_cleavage: '//trim(structure)) end select @@ -1919,6 +1922,7 @@ function coordinateSystem_slip(Nslip,structure,cOverA) result(coordinateSystem) NslipMax = BCT_NSLIPSYSTEM slipSystems = BCT_SYSTEMSLIP case default + allocate(NslipMax(0)) call 
IO_error(137,ext_msg='coordinateSystem_slip: '//trim(structure)) end select @@ -2291,7 +2295,7 @@ end function equivalent_mu !-------------------------------------------------------------------------------------------------- !> @brief check correctness of some lattice functions !-------------------------------------------------------------------------------------------------- -subroutine unitTest +subroutine selfTest real(pReal), dimension(:,:,:), allocatable :: CoSy real(pReal), dimension(:,:), allocatable :: system @@ -2304,7 +2308,6 @@ subroutine unitTest system = reshape([1.0_pReal+r(1),0.0_pReal,0.0_pReal, 0.0_pReal,1.0_pReal+r(2),0.0_pReal],[6,1]) CoSy = buildCoordinateSystem([1],[1],system,'fcc',0.0_pReal) - if(any(dNeq(CoSy(1:3,1:3,1),math_I3))) & call IO_error(0) @@ -2321,6 +2324,6 @@ subroutine unitTest if(dNeq(lambda*0.5_pReal/(lambda+equivalent_mu(C,'reuss')),equivalent_nu(C,'reuss'),1.0e-12_pReal)) & call IO_error(0,ext_msg='equivalent_nu/reuss') -end subroutine unitTest +end subroutine selfTest end module lattice diff --git a/src/material.f90 b/src/material.f90 index 749c9a3d8..90f2d9b16 100644 --- a/src/material.f90 +++ b/src/material.f90 @@ -207,7 +207,9 @@ contains !-------------------------------------------------------------------------------------------------- !> @brief parses material configuration file !-------------------------------------------------------------------------------------------------- -subroutine material_init +subroutine material_init(restart) + + logical, intent(in) :: restart integer :: i,e,m,c,h, myDebug, myPhase, myHomog, myMicro integer, dimension(:), allocatable :: & @@ -339,11 +341,12 @@ subroutine material_init call config_deallocate('material.config/microstructure') call config_deallocate('material.config/texture') - call results_openJobFile - call results_mapping_constituent(material_phaseAt,material_phaseMemberAt,config_name_phase) - call 
results_mapping_materialpoint(material_homogenizationAt,material_homogenizationMemberAt,config_name_homogenization) - call results_closeJobFile - + if (.not. restart) then + call results_openJobFile + call results_mapping_constituent(material_phaseAt,material_phaseMemberAt,config_name_phase) + call results_mapping_materialpoint(material_homogenizationAt,material_homogenizationMemberAt,config_name_homogenization) + call results_closeJobFile + endif !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! ! BEGIN DEPRECATED diff --git a/src/math.f90 b/src/math.f90 index 070751de8..33c7c0310 100644 --- a/src/math.f90 +++ b/src/math.f90 @@ -79,7 +79,7 @@ module math !--------------------------------------------------------------------------------------------------- private :: & - unitTest + selfTest contains @@ -113,7 +113,7 @@ subroutine math_init call random_seed(put = randInit) - call unitTest + call selfTest end subroutine math_init @@ -1192,7 +1192,7 @@ end function math_clip !-------------------------------------------------------------------------------------------------- !> @brief check correctness of some math functions !-------------------------------------------------------------------------------------------------- -subroutine unitTest +subroutine selfTest integer, dimension(2,4) :: & sort_in_ = reshape([+1,+5, +5,+6, -1,-1, +3,-2],[2,4]) @@ -1330,6 +1330,6 @@ subroutine unitTest if(dNeq0(math_LeviCivita(ijk(1),ijk(2),ijk(3))))& call IO_error(0,ext_msg='math_LeviCivita') -end subroutine unitTest +end subroutine selfTest end module math diff --git a/src/mesh/DAMASK_mesh.f90 b/src/mesh/DAMASK_mesh.f90 index 3c9613d73..d36b27c17 100644 --- a/src/mesh/DAMASK_mesh.f90 +++ b/src/mesh/DAMASK_mesh.f90 @@ -6,7 +6,7 @@ !> @details doing cutbacking, reporting statistics, writing !> results !-------------------------------------------------------------------------------------------------- -program DAMASK_FEM +program DAMASK_mesh #include use PetscDM use prec 
@@ -367,4 +367,4 @@ program DAMASK_FEM call quit(0) ! no complains ;) -end program DAMASK_FEM +end program DAMASK_mesh diff --git a/src/mesh/discretization_mesh.f90 b/src/mesh/discretization_mesh.f90 index 072f3b4ec..0880de115 100644 --- a/src/mesh/discretization_mesh.f90 +++ b/src/mesh/discretization_mesh.f90 @@ -63,7 +63,9 @@ contains !> @brief initializes the mesh by calling all necessary private routines the mesh module !! Order and routines strongly depend on type of solver !-------------------------------------------------------------------------------------------------- -subroutine discretization_mesh_init +subroutine discretization_mesh_init(restart) + + logical, intent(in) :: restart integer, dimension(1), parameter:: FE_geomtype = [1] !< geometry type of particular element type integer, dimension(1) :: FE_Nips !< number of IPs in a specific type of element diff --git a/src/mesh/mesh_mech_FEM.f90 b/src/mesh/mesh_mech_FEM.f90 index 404238453..71292b156 100644 --- a/src/mesh/mesh_mech_FEM.f90 +++ b/src/mesh/mesh_mech_FEM.f90 @@ -17,11 +17,9 @@ module mesh_mech_FEM use prec use FEM_utilities use discretization_mesh - use IO use DAMASK_interface use numerics use FEM_quadrature - use FEsolving use homogenization use math diff --git a/src/numerics.f90 b/src/numerics.f90 index 8d242c71d..b0163aee3 100644 --- a/src/numerics.f90 +++ b/src/numerics.f90 @@ -63,8 +63,8 @@ module numerics #endif !-------------------------------------------------------------------------------------------------- -! FEM parameters: -#ifdef FEM +! Mesh parameters: +#ifdef Mesh integer, protected, public :: & integrationOrder = 2, & !< order of quadrature rule required structOrder = 2 !< order of displacement shape functions @@ -200,8 +200,8 @@ subroutine numerics_init #endif !-------------------------------------------------------------------------------------------------- -! FEM parameters -#ifdef FEM +! 
Mesh parameters +#ifdef Mesh case ('integrationorder') integrationorder = IO_intValue(line,chunkPos,2) case ('structorder') @@ -267,7 +267,7 @@ subroutine numerics_init !-------------------------------------------------------------------------------------------------- ! spectral parameters -#ifdef FEM +#ifdef Mesh write(6,'(a24,1x,i8)') ' integrationOrder: ',integrationOrder write(6,'(a24,1x,i8)') ' structOrder: ',structOrder write(6,'(a24,1x,L8)') ' B-Bar stabilisation: ',BBarStabilisation diff --git a/src/prec.f90 b/src/prec.f90 index 646f7dd69..73649f76b 100644 --- a/src/prec.f90 +++ b/src/prec.f90 @@ -75,7 +75,7 @@ module prec emptyStringArray = [character(len=pStringLen)::] private :: & - unitTest + selfTest contains @@ -94,7 +94,7 @@ subroutine prec_init write(6,'(a,e10.3)') ' Minimum value: ',tiny(0.0_pReal) write(6,'(a,i3)') ' Decimal precision: ',precision(0.0_pReal) - call unitTest + call selfTest end subroutine prec_init @@ -233,7 +233,7 @@ end function cNeq !-------------------------------------------------------------------------------------------------- !> @brief check correctness of some prec functions !-------------------------------------------------------------------------------------------------- -subroutine unitTest +subroutine selfTest integer, allocatable, dimension(:) :: realloc_lhs_test real(pReal), dimension(2) :: r @@ -249,6 +249,6 @@ subroutine unitTest realloc_lhs_test = [1,2] if (any(realloc_lhs_test/=[1,2])) call quit(9000) -end subroutine unitTest +end subroutine selfTest end module prec diff --git a/src/quaternions.f90 b/src/quaternions.f90 index 991f970ab..f5f5276e8 100644 --- a/src/quaternions.f90 +++ b/src/quaternions.f90 @@ -112,7 +112,7 @@ contains subroutine quaternions_init write(6,'(/,a)') ' <<<+- quaternions init -+>>>'; flush(6) - call unitTest + call selfTest end subroutine quaternions_init @@ -457,7 +457,7 @@ end function inverse 
!-------------------------------------------------------------------------------------------------- !> @brief check correctness of some quaternions functions !-------------------------------------------------------------------------------------------------- -subroutine unitTest +subroutine selfTest real(pReal), dimension(4) :: qu type(quaternion) :: q, q_2 @@ -524,7 +524,7 @@ subroutine unitTest endif #endif -end subroutine unitTest +end subroutine selfTest end module quaternions diff --git a/src/results.f90 b/src/results.f90 index cfd495a71..21173c512 100644 --- a/src/results.f90 +++ b/src/results.f90 @@ -15,31 +15,31 @@ module results implicit none private - + integer(HID_T) :: resultsFile interface results_writeDataset - + module procedure results_writeTensorDataset_real module procedure results_writeVectorDataset_real module procedure results_writeScalarDataset_real - + module procedure results_writeTensorDataset_int module procedure results_writeVectorDataset_int - + module procedure results_writeScalarDataset_rotation - + end interface results_writeDataset - + interface results_addAttribute - + module procedure results_addAttribute_real module procedure results_addAttribute_int module procedure results_addAttribute_str - + module procedure results_addAttribute_int_array module procedure results_addAttribute_real_array - + end interface results_addAttribute public :: & @@ -59,24 +59,28 @@ module results results_mapping_materialpoint contains -subroutine results_init +subroutine results_init(restart) + + logical, intent(in) :: restart character(len=pStringLen) :: commandLine - write(6,'(/,a)') ' <<<+- results init -+>>>' + write(6,'(/,a)') ' <<<+- results init -+>>>'; flush(6) write(6,'(/,a)') ' Diehl et al., Integrating Materials and Manufacturing Innovation 6(1):83–91, 2017' write(6,'(a)') ' https://doi.org/10.1007/s40192-017-0084-5' - resultsFile = HDF5_openFile(trim(getSolverJobName())//'.hdf5','w',.true.) 
- call results_addAttribute('DADF5_version_major',0) - call results_addAttribute('DADF5_version_minor',6) - call results_addAttribute('DAMASK_version',DAMASKVERSION) - call get_command(commandLine) - call results_addAttribute('call',trim(commandLine)) - call results_closeGroup(results_addGroup('mapping')) - call results_closeGroup(results_addGroup('mapping/cellResults')) - call results_closeJobFile + if(.not. restart) then + resultsFile = HDF5_openFile(trim(getSolverJobName())//'.hdf5','w',.true.) + call results_addAttribute('DADF5_version_major',0) + call results_addAttribute('DADF5_version_minor',6) + call results_addAttribute('DAMASK_version',DAMASKVERSION) + call get_command(commandLine) + call results_addAttribute('call',trim(commandLine)) + call results_closeGroup(results_addGroup('mapping')) + call results_closeGroup(results_addGroup('mapping/cellResults')) + call results_closeJobFile + endif end subroutine results_init @@ -87,7 +91,7 @@ end subroutine results_init subroutine results_openJobFile resultsFile = HDF5_openFile(trim(getSolverJobName())//'.hdf5','a',.true.) 
- + end subroutine results_openJobFile @@ -105,7 +109,7 @@ end subroutine results_closeJobFile !> @brief creates the group of increment and adds time as attribute to the file !-------------------------------------------------------------------------------------------------- subroutine results_addIncrement(inc,time) - + integer, intent(in) :: inc real(pReal), intent(in) :: time character(len=pStringLen) :: incChar @@ -137,7 +141,7 @@ end subroutine results_finalizeIncrement integer(HID_T) function results_openGroup(groupName) character(len=*), intent(in) :: groupName - + results_openGroup = HDF5_openGroup(resultsFile,groupName) end function results_openGroup @@ -149,7 +153,7 @@ end function results_openGroup integer(HID_T) function results_addGroup(groupName) character(len=*), intent(in) :: groupName - + results_addGroup = HDF5_addGroup(resultsFile,groupName) end function results_addGroup @@ -161,7 +165,7 @@ end function results_addGroup subroutine results_closeGroup(group_id) integer(HID_T), intent(in) :: group_id - + call HDF5_closeGroup(group_id) end subroutine results_closeGroup @@ -290,23 +294,25 @@ subroutine results_writeScalarDataset_real(group,dataset,label,description,SIuni character(len=*), intent(in) :: label,group,description character(len=*), intent(in), optional :: SIunit real(pReal), intent(inout), dimension(:) :: dataset - + integer(HID_T) :: groupHandle - + groupHandle = results_openGroup(group) - + #ifdef PETSc call HDF5_write(groupHandle,dataset,label,.true.) #else call HDF5_write(groupHandle,dataset,label,.false.) #endif - + if (HDF5_objectExists(groupHandle,label)) & call HDF5_addAttribute(groupHandle,'Description',description,label) if (HDF5_objectExists(groupHandle,label) .and. 
present(SIunit)) & call HDF5_addAttribute(groupHandle,'Unit',SIunit,label) if (HDF5_objectExists(groupHandle,label)) & call HDF5_addAttribute(groupHandle,'Creator','DAMASK '//DAMASKVERSION,label) + if (HDF5_objectExists(groupHandle,label)) & + call HDF5_addAttribute(groupHandle,'Created',now(),label) call HDF5_closeGroup(groupHandle) end subroutine results_writeScalarDataset_real @@ -319,23 +325,25 @@ subroutine results_writeVectorDataset_real(group,dataset,label,description,SIuni character(len=*), intent(in) :: label,group,description character(len=*), intent(in), optional :: SIunit real(pReal), intent(inout), dimension(:,:) :: dataset - + integer(HID_T) :: groupHandle - + groupHandle = results_openGroup(group) - + #ifdef PETSc call HDF5_write(groupHandle,dataset,label,.true.) #else call HDF5_write(groupHandle,dataset,label,.false.) #endif - + if (HDF5_objectExists(groupHandle,label)) & call HDF5_addAttribute(groupHandle,'Description',description,label) if (HDF5_objectExists(groupHandle,label) .and. 
present(SIunit)) & call HDF5_addAttribute(groupHandle,'Unit',SIunit,label) if (HDF5_objectExists(groupHandle,label)) & call HDF5_addAttribute(groupHandle,'Creator','DAMASK '//DAMASKVERSION,label) + if (HDF5_objectExists(groupHandle,label)) & + call HDF5_addAttribute(groupHandle,'Created',now(),label) call HDF5_closeGroup(groupHandle) end subroutine results_writeVectorDataset_real @@ -350,13 +358,13 @@ subroutine results_writeTensorDataset_real(group,dataset,label,description,SIuni character(len=*), intent(in), optional :: SIunit logical, intent(in), optional :: transposed real(pReal), intent(in), dimension(:,:,:) :: dataset - - integer :: i + + integer :: i logical :: transposed_ integer(HID_T) :: groupHandle real(pReal), dimension(:,:,:), allocatable :: dataset_transposed - + if(present(transposed)) then transposed_ = transposed else @@ -374,19 +382,21 @@ subroutine results_writeTensorDataset_real(group,dataset,label,description,SIuni endif groupHandle = results_openGroup(group) - + #ifdef PETSc call HDF5_write(groupHandle,dataset_transposed,label,.true.) #else call HDF5_write(groupHandle,dataset_transposed,label,.false.) #endif - + if (HDF5_objectExists(groupHandle,label)) & call HDF5_addAttribute(groupHandle,'Description',description,label) if (HDF5_objectExists(groupHandle,label) .and. 
present(SIunit)) & call HDF5_addAttribute(groupHandle,'Unit',SIunit,label) if (HDF5_objectExists(groupHandle,label)) & call HDF5_addAttribute(groupHandle,'Creator','DAMASK '//DAMASKVERSION,label) + if (HDF5_objectExists(groupHandle,label)) & + call HDF5_addAttribute(groupHandle,'Created',now(),label) call HDF5_closeGroup(groupHandle) end subroutine results_writeTensorDataset_real @@ -400,23 +410,25 @@ subroutine results_writeVectorDataset_int(group,dataset,label,description,SIunit character(len=*), intent(in) :: label,group,description character(len=*), intent(in), optional :: SIunit integer, intent(inout), dimension(:,:) :: dataset - + integer(HID_T) :: groupHandle - + groupHandle = results_openGroup(group) - + #ifdef PETSc call HDF5_write(groupHandle,dataset,label,.true.) #else call HDF5_write(groupHandle,dataset,label,.false.) #endif - + if (HDF5_objectExists(groupHandle,label)) & call HDF5_addAttribute(groupHandle,'Description',description,label) if (HDF5_objectExists(groupHandle,label) .and. present(SIunit)) & call HDF5_addAttribute(groupHandle,'Unit',SIunit,label) if (HDF5_objectExists(groupHandle,label)) & call HDF5_addAttribute(groupHandle,'Creator','DAMASK '//DAMASKVERSION,label) + if (HDF5_objectExists(groupHandle,label)) & + call HDF5_addAttribute(groupHandle,'Created',now(),label) call HDF5_closeGroup(groupHandle) end subroutine results_writeVectorDataset_int @@ -430,23 +442,25 @@ subroutine results_writeTensorDataset_int(group,dataset,label,description,SIunit character(len=*), intent(in) :: label,group,description character(len=*), intent(in), optional :: SIunit integer, intent(inout), dimension(:,:,:) :: dataset - + integer(HID_T) :: groupHandle - + groupHandle = results_openGroup(group) - + #ifdef PETSc call HDF5_write(groupHandle,dataset,label,.true.) #else call HDF5_write(groupHandle,dataset,label,.false.) 
#endif - + if (HDF5_objectExists(groupHandle,label)) & call HDF5_addAttribute(groupHandle,'Description',description,label) if (HDF5_objectExists(groupHandle,label) .and. present(SIunit)) & call HDF5_addAttribute(groupHandle,'Unit',SIunit,label) if (HDF5_objectExists(groupHandle,label)) & call HDF5_addAttribute(groupHandle,'Creator','DAMASK '//DAMASKVERSION,label) + if (HDF5_objectExists(groupHandle,label)) & + call HDF5_addAttribute(groupHandle,'Created',now(),label) call HDF5_closeGroup(groupHandle) end subroutine results_writeTensorDataset_int @@ -460,23 +474,25 @@ subroutine results_writeScalarDataset_rotation(group,dataset,label,description,l character(len=*), intent(in) :: label,group,description character(len=*), intent(in), optional :: lattice_structure type(rotation), intent(inout), dimension(:) :: dataset - + integer(HID_T) :: groupHandle - + groupHandle = results_openGroup(group) - + #ifdef PETSc call HDF5_write(groupHandle,dataset,label,.true.) #else call HDF5_write(groupHandle,dataset,label,.false.) #endif - + if (HDF5_objectExists(groupHandle,label)) & call HDF5_addAttribute(groupHandle,'Description',description,label) if (HDF5_objectExists(groupHandle,label) .and. 
present(lattice_structure)) & call HDF5_addAttribute(groupHandle,'Lattice',lattice_structure,label) if (HDF5_objectExists(groupHandle,label)) & call HDF5_addAttribute(groupHandle,'Creator','DAMASK '//DAMASKVERSION,label) + if (HDF5_objectExists(groupHandle,label)) & + call HDF5_addAttribute(groupHandle,'Created',now(),label) call HDF5_closeGroup(groupHandle) end subroutine results_writeScalarDataset_rotation @@ -486,11 +502,11 @@ end subroutine results_writeScalarDataset_rotation !> @brief adds the unique mapping from spatial position and constituent ID to results !-------------------------------------------------------------------------------------------------- subroutine results_mapping_constituent(phaseAt,memberAtLocal,label) - + integer, dimension(:,:), intent(in) :: phaseAt !< phase section at (constituent,element) integer, dimension(:,:,:), intent(in) :: memberAtLocal !< phase member at (constituent,IP,element) character(len=pStringLen), dimension(:), intent(in) :: label !< label of each phase section - + integer, dimension(size(memberAtLocal,1),size(memberAtLocal,2),size(memberAtLocal,3)) :: & phaseAtMaterialpoint, & memberAtGlobal @@ -500,7 +516,7 @@ subroutine results_mapping_constituent(phaseAt,memberAtLocal,label) myShape, & !< shape of the dataset (this process) myOffset, & totalShape !< shape of the dataset (all processes) - + integer(HID_T) :: & loc_id, & !< identifier of group in file dtype_id, & !< identifier of compound data type @@ -512,30 +528,30 @@ subroutine results_mapping_constituent(phaseAt,memberAtLocal,label) plist_id, & dt_id - + integer(SIZE_T) :: type_size_string, type_size_int integer :: ierr, i - + !--------------------------------------------------------------------------------------------------- ! 
compound type: name of phase section + position/index within results array call h5tcopy_f(H5T_NATIVE_CHARACTER, dt_id, ierr) call h5tset_size_f(dt_id, int(len(label(1)),SIZE_T), ierr) call h5tget_size_f(dt_id, type_size_string, ierr) - + call h5tget_size_f(H5T_NATIVE_INTEGER, type_size_int, ierr) - + call h5tcreate_f(H5T_COMPOUND_F, type_size_string + type_size_int, dtype_id, ierr) call h5tinsert_f(dtype_id, "Name", 0_SIZE_T, dt_id,ierr) call h5tinsert_f(dtype_id, "Position", type_size_string, H5T_NATIVE_INTEGER, ierr) - + !-------------------------------------------------------------------------------------------------- ! create memory types for each component of the compound type call h5tcreate_f(H5T_COMPOUND_F, type_size_string, name_id, ierr) call h5tinsert_f(name_id, "Name", 0_SIZE_T, dt_id, ierr) - + call h5tcreate_f(H5T_COMPOUND_F, type_size_int, position_id, ierr) call h5tinsert_f(position_id, "Position", 0_SIZE_T, H5T_NATIVE_INTEGER, ierr) - + call h5tclose_f(dt_id, ierr) !-------------------------------------------------------------------------------------------------- @@ -553,10 +569,10 @@ subroutine results_mapping_constituent(phaseAt,memberAtLocal,label) #ifdef PETSc call h5pset_dxpl_mpio_f(plist_id, H5FD_MPIO_COLLECTIVE_F, ierr) if (ierr < 0) call IO_error(1,ext_msg='results_mapping_constituent: h5pset_dxpl_mpio_f') - + call MPI_allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INT,MPI_SUM,PETSC_COMM_WORLD,ierr) ! get output at each process if (ierr /= 0) call IO_error(894,ext_msg='results_mapping_constituent: MPI_allreduce/writeSize') - + call MPI_allreduce(MPI_IN_PLACE,memberOffset,size(memberOffset),MPI_INT,MPI_SUM,PETSC_COMM_WORLD,ierr)! 
get offset at each process if (ierr /= 0) call IO_error(894,ext_msg='results_mapping_constituent: MPI_allreduce/memberOffset') #endif @@ -564,15 +580,15 @@ subroutine results_mapping_constituent(phaseAt,memberAtLocal,label) myShape = int([size(phaseAt,1),writeSize(worldrank)], HSIZE_T) myOffset = int([0,sum(writeSize(0:worldrank-1))], HSIZE_T) totalShape = int([size(phaseAt,1),sum(writeSize)], HSIZE_T) - + !-------------------------------------------------------------------------------------------------- ! create dataspace in memory (local shape = hyperslab) and in file (global shape) call h5screate_simple_f(2,myShape,memspace_id,ierr,myShape) if (ierr < 0) call IO_error(1,ext_msg='results_mapping_constituent: h5screate_simple_f/memspace_id') - + call h5screate_simple_f(2,totalShape,filespace_id,ierr,totalShape) if (ierr < 0) call IO_error(1,ext_msg='results_mapping_constituent: h5screate_simple_f/filespace_id') - + call h5sselect_hyperslab_f(filespace_id, H5S_SELECT_SET_F, myOffset, myShape, ierr) if (ierr < 0) call IO_error(1,ext_msg='results_mapping_constituent: h5sselect_hyperslab_f') @@ -581,7 +597,7 @@ subroutine results_mapping_constituent(phaseAt,memberAtLocal,label) do i = 1, size(phaseAtMaterialpoint,2) phaseAtMaterialpoint(:,i,:) = phaseAt enddo - + !--------------------------------------------------------------------------------------------------- ! renumber member from my process to all processes do i = 1, size(label) @@ -591,11 +607,11 @@ subroutine results_mapping_constituent(phaseAt,memberAtLocal,label) !-------------------------------------------------------------------------------------------------- ! 
write the components of the compound type individually call h5pset_preserve_f(plist_id, .TRUE., ierr) - + loc_id = results_openGroup('/mapping/cellResults') call h5dcreate_f(loc_id, 'constituent', dtype_id, filespace_id, dset_id, ierr) if (ierr < 0) call IO_error(1,ext_msg='results_mapping_constituent: h5dcreate_f') - + call h5dwrite_f(dset_id, name_id, reshape(label(pack(phaseAtMaterialpoint,.true.)),myShape), & myShape, ierr, file_space_id = filespace_id, mem_space_id = memspace_id, xfer_prp = plist_id) if (ierr < 0) call IO_error(1,ext_msg='results_mapping_constituent: h5dwrite_f/name_id') @@ -621,11 +637,11 @@ end subroutine results_mapping_constituent !> @brief adds the unique mapping from spatial position and constituent ID to results !-------------------------------------------------------------------------------------------------- subroutine results_mapping_materialpoint(homogenizationAt,memberAtLocal,label) - + integer, dimension(:), intent(in) :: homogenizationAt !< homogenization section at (element) integer, dimension(:,:), intent(in) :: memberAtLocal !< homogenization member at (IP,element) character(len=pStringLen), dimension(:), intent(in) :: label !< label of each homogenization section - + integer, dimension(size(memberAtLocal,1),size(memberAtLocal,2)) :: & homogenizationAtMaterialpoint, & memberAtGlobal @@ -635,7 +651,7 @@ subroutine results_mapping_materialpoint(homogenizationAt,memberAtLocal,label) myShape, & !< shape of the dataset (this process) myOffset, & totalShape !< shape of the dataset (all processes) - + integer(HID_T) :: & loc_id, & !< identifier of group in file dtype_id, & !< identifier of compound data type @@ -647,30 +663,30 @@ subroutine results_mapping_materialpoint(homogenizationAt,memberAtLocal,label) plist_id, & dt_id - + integer(SIZE_T) :: type_size_string, type_size_int integer :: ierr, i - + !--------------------------------------------------------------------------------------------------- ! 
compound type: name of phase section + position/index within results array call h5tcopy_f(H5T_NATIVE_CHARACTER, dt_id, ierr) call h5tset_size_f(dt_id, int(len(label(1)),SIZE_T), ierr) call h5tget_size_f(dt_id, type_size_string, ierr) - + call h5tget_size_f(H5T_NATIVE_INTEGER, type_size_int, ierr) - + call h5tcreate_f(H5T_COMPOUND_F, type_size_string + type_size_int, dtype_id, ierr) call h5tinsert_f(dtype_id, "Name", 0_SIZE_T, dt_id,ierr) call h5tinsert_f(dtype_id, "Position", type_size_string, H5T_NATIVE_INTEGER, ierr) - + !-------------------------------------------------------------------------------------------------- ! create memory types for each component of the compound type call h5tcreate_f(H5T_COMPOUND_F, type_size_string, name_id, ierr) call h5tinsert_f(name_id, "Name", 0_SIZE_T, dt_id, ierr) - + call h5tcreate_f(H5T_COMPOUND_F, type_size_int, position_id, ierr) call h5tinsert_f(position_id, "Position", 0_SIZE_T, H5T_NATIVE_INTEGER, ierr) - + call h5tclose_f(dt_id, ierr) !-------------------------------------------------------------------------------------------------- @@ -688,10 +704,10 @@ subroutine results_mapping_materialpoint(homogenizationAt,memberAtLocal,label) #ifdef PETSc call h5pset_dxpl_mpio_f(plist_id, H5FD_MPIO_COLLECTIVE_F, ierr) if (ierr < 0) call IO_error(1,ext_msg='results_mapping_materialpoint: h5pset_dxpl_mpio_f') - + call MPI_allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INT,MPI_SUM,PETSC_COMM_WORLD,ierr) ! get output at each process if (ierr /= 0) call IO_error(894,ext_msg='results_mapping_materialpoint: MPI_allreduce/writeSize') - + call MPI_allreduce(MPI_IN_PLACE,memberOffset,size(memberOffset),MPI_INT,MPI_SUM,PETSC_COMM_WORLD,ierr)! 
get offset at each process if (ierr /= 0) call IO_error(894,ext_msg='results_mapping_materialpoint: MPI_allreduce/memberOffset') #endif @@ -699,15 +715,15 @@ subroutine results_mapping_materialpoint(homogenizationAt,memberAtLocal,label) myShape = int([writeSize(worldrank)], HSIZE_T) myOffset = int([sum(writeSize(0:worldrank-1))], HSIZE_T) totalShape = int([sum(writeSize)], HSIZE_T) - + !-------------------------------------------------------------------------------------------------- ! create dataspace in memory (local shape = hyperslab) and in file (global shape) call h5screate_simple_f(1,myShape,memspace_id,ierr,myShape) if (ierr < 0) call IO_error(1,ext_msg='results_mapping_materialpoint: h5screate_simple_f/memspace_id') - + call h5screate_simple_f(1,totalShape,filespace_id,ierr,totalShape) if (ierr < 0) call IO_error(1,ext_msg='results_mapping_materialpoint: h5screate_simple_f/filespace_id') - + call h5sselect_hyperslab_f(filespace_id, H5S_SELECT_SET_F, myOffset, myShape, ierr) if (ierr < 0) call IO_error(1,ext_msg='results_mapping_materialpoint: h5sselect_hyperslab_f') @@ -716,7 +732,7 @@ subroutine results_mapping_materialpoint(homogenizationAt,memberAtLocal,label) do i = 1, size(homogenizationAtMaterialpoint,1) homogenizationAtMaterialpoint(i,:) = homogenizationAt enddo - + !--------------------------------------------------------------------------------------------------- ! renumber member from my process to all processes do i = 1, size(label) @@ -726,11 +742,11 @@ subroutine results_mapping_materialpoint(homogenizationAt,memberAtLocal,label) !-------------------------------------------------------------------------------------------------- ! 
write the components of the compound type individually call h5pset_preserve_f(plist_id, .TRUE., ierr) - + loc_id = results_openGroup('/mapping/cellResults') call h5dcreate_f(loc_id, 'materialpoint', dtype_id, filespace_id, dset_id, ierr) if (ierr < 0) call IO_error(1,ext_msg='results_mapping_materialpoint: h5dcreate_f') - + call h5dwrite_f(dset_id, name_id, reshape(label(pack(homogenizationAtMaterialpoint,.true.)),myShape), & myShape, ierr, file_space_id = filespace_id, mem_space_id = memspace_id, xfer_prp = plist_id) if (ierr < 0) call IO_error(1,ext_msg='results_mapping_materialpoint: h5dwrite_f/name_id') @@ -752,6 +768,21 @@ subroutine results_mapping_materialpoint(homogenizationAt,memberAtLocal,label) end subroutine results_mapping_materialpoint +!-------------------------------------------------------------------------------------------------- +!> @brief current date and time (including time zone information) +!-------------------------------------------------------------------------------------------------- +character(len=24) function now() + + character(len=5) :: zone + integer, dimension(8) :: values + + call date_and_time(values=values,zone=zone) + write(now,'(i4.4,5(a,i2.2),a)') & + values(1),'-',values(2),'-',values(3),' ',values(5),':',values(6),':',values(7),zone + +end function now + + !!-------------------------------------------------------------------------------------------------- !!> @brief adds the backward mapping from spatial position and constituent ID to results !!-------------------------------------------------------------------------------------------------- diff --git a/src/rotations.f90 b/src/rotations.f90 index fea642170..f95c54e98 100644 --- a/src/rotations.f90 +++ b/src/rotations.f90 @@ -105,7 +105,11 @@ subroutine rotations_init call quaternions_init write(6,'(/,a)') ' <<<+- rotations init -+>>>'; flush(6) - call unitTest + + write(6,'(/,a)') ' Rowenhorst et al., Modelling and Simulation in Materials Science and Engineering 
23:083501, 2015' + write(6,'(a)') ' https://doi.org/10.1088/0965-0393/23/8/083501' + + call selfTest end subroutine rotations_init @@ -1340,7 +1344,7 @@ end function GetPyramidOrder !-------------------------------------------------------------------------------------------------- !> @brief check correctness of some rotations functions !-------------------------------------------------------------------------------------------------- -subroutine unitTest +subroutine selfTest type(rotation) :: R real(pReal), dimension(4) :: qu, ax, ro @@ -1443,7 +1447,7 @@ subroutine unitTest enddo -end subroutine unitTest +end subroutine selfTest end module rotations